@@ -112,7 +112,7 @@ def __init__(self, samples, names, idl=None, **kwargs):
                     else:
                         self.idl[name] = list(idx)
                 else:
-                    raise TypeError('incompatible type for idl[%s].' % (name))
+                    raise TypeError('incompatible type for idl[%s].' % name)
         else:
             for name, sample in sorted(zip(names, samples)):
                 self.idl[name] = range(1, len(sample) + 1)
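A hedged sketch of the idl handling touched above (ensemble name and numbers are made up; it assumes the module in this diff ships as pyerrors, so Obs is importable from there):

import numpy as np
import pyerrors as pe  # assumption: the module shown in this diff is pyerrors.obs

rng = np.random.default_rng(1)
samples = rng.normal(1.0, 0.1, 10)

obs_a = pe.Obs([samples], ['ensA'], idl=[range(1, 20, 2)])  # evenly spaced configs given as a range
obs_b = pe.Obs([samples], ['ensA'], idl=[[1, 2, 3, 5, 8, 13, 21, 34, 55, 89]])  # irregular spacing, stored as a list
# pe.Obs([samples], ['ensA'], idl=[{1, 2, 3}])  # a set is neither range, list nor ndarray -> TypeError above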
@@ -388,7 +388,7 @@ def details(self, ens_content=True):
         if self.tag is not None:
             print("Description:", self.tag)
         if not hasattr(self, 'e_dvalue'):
-            print('Result\t %3.8e' % (self.value))
+            print('Result\t %3.8e' % self.value)
         else:
             if self.value == 0.0:
                 percentage = np.nan
@@ -446,7 +446,7 @@ def details(self, ens_content=True):
                     my_string_list.append(my_string)
                 print('\n'.join(my_string_list))

-    def reweight(self, weight):
+    def reweight(self, weight, all_configs=False):
         """Reweight the obs with given rewighting factors.

         Parameters
@@ -459,7 +459,7 @@ def reweight(self, weight):
             the reweighting factor on all configurations in weight.idl and not
             on the configurations in obs[i].idl. Default False.
         """
-        return reweight(weight, [self])[0]
+        return reweight(weight, [self], all_configs=all_configs)[0]

     def is_zero_within_error(self, sigma=1):
         """Checks whether the observable is zero within 'sigma' standard errors.
@@ -1078,7 +1078,7 @@ def _expand_deltas(deltas, idx, shape, gapsize):
         are found in idx, the data is expanded.
     """
     if isinstance(idx, range):
-        if (idx.step == gapsize):
+        if idx.step == gapsize:
             return deltas
     ret = np.zeros((idx[-1] - idx[0] + gapsize) // gapsize)
     for i in range(shape):
@@ -1188,6 +1188,10 @@ def derived_observable(func, data, array_mode=False, **kwargs):
         of func. Use cautiously, supplying the wrong derivative will
         not be intercepted.

+    array_mode : bool
+        If True, the function is applied to the full array of data.
+        Default: False
+
     Notes
     -----
     For simple mathematical operations it can be practical to use anonymous
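Since the Notes recommend anonymous functions for simple operations, here is a hedged sketch of such a call (made-up data; the pyerrors import path is an assumption):

import numpy as np
import pyerrors as pe  # assumption: this diff is against pyerrors.obs

rng = np.random.default_rng(3)
o1 = pe.Obs([rng.normal(1.0, 0.1, 500)], ['ensA'])
o2 = pe.Obs([rng.normal(2.0, 0.1, 500)], ['ensA'])

# func receives the values of the input observables; extra keyword arguments are forwarded to it.
ratio = pe.derived_observable(lambda x, **kwargs: x[0] / x[1], [o1, o2])
ratio.gamma_method()
print(ratio)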
@@ -1210,7 +1214,7 @@ def derived_observable(func, data, array_mode=False, **kwargs):
         for name in o.cov_names:
             if name in allcov:
                 if not np.allclose(allcov[name], o.covobs[name].cov):
-                    raise Exception('Inconsistent covariance matrices for %s!' % (name))
+                    raise Exception('Inconsistent covariance matrices for %s!' % name)
             else:
                 allcov[name] = o.covobs[name].cov

@@ -1260,7 +1264,7 @@ def _compute_scalefactor_missing_rep(obs):
     for mc_name in obs.mc_names:
         mc_idl_d = [name for name in obs.idl if name.startswith(mc_name + '|')]
         new_mc_idl_d = [name for name in new_idl_d if name.startswith(mc_name + '|')]
-        if len(mc_idl_d) > 0 and len(mc_idl_d) < len(new_mc_idl_d):
+        if 0 < len(mc_idl_d) < len(new_mc_idl_d):
             scalef_d[mc_name] = sum([len(new_idl_d[name]) for name in new_mc_idl_d]) / sum([len(new_idl_d[name]) for name in mc_idl_d])
     return scalef_d

@@ -1386,7 +1390,7 @@ def _reduce_deltas(deltas, idx_old, idx_new):
     return np.array(deltas)[indices]


-def reweight(weight, obs, **kwargs):
+def reweight(weight, obs, all_configs=False):
     """Reweight a list of observables.

     Parameters
@@ -1417,7 +1421,7 @@ def reweight(weight, obs, **kwargs):
             new_samples.append((w_deltas[name] + weight.r_values[name]) * (obs[i].deltas[name] + obs[i].r_values[name]))
         tmp_obs = Obs(new_samples, sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])

-        if kwargs.get('all_configs'):
+        if all_configs:
             new_weight = weight
         else:
             new_weight = Obs([w_deltas[name] + weight.r_values[name] for name in sorted(obs[i].names)], sorted(obs[i].names), idl=[obs[i].idl[name] for name in sorted(obs[i].names)])
@@ -1471,8 +1475,8 @@ def correlate(obs_a, obs_b):
     return o


-def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
-    r'''Calculates the error covariance matrix of a set of observables.
+def covariance(obs, visualize=False, correlation=False, smooth=None):
+    r"""Calculates the error covariance matrix of a set of observables.

     WARNING: This function should be used with care, especially for observables with support on multiple
     ensembles with differing autocorrelations. See the notes below for details.
@@ -1503,7 +1507,7 @@ def covariance(obs, visualize=False, correlation=False, smooth=None, **kwargs):
     For observables defined on a single ensemble our approximation is equivalent to assuming that the integrated autocorrelation time of an off-diagonal element is equal to the geometric mean of the integrated autocorrelation times of the corresponding diagonal elements.
     $$\tau_{\mathrm{int}, ij}=\sqrt{\tau_{\mathrm{int}, i}\times \tau_{\mathrm{int}, j}}$$
     This construction ensures that the estimated covariance matrix is positive semi-definite (up to numerical rounding errors).
-    '''
+    """

     length = len(obs)

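A hedged usage sketch of the simplified signature (made-up correlated data; the pyerrors import path is an assumption):

import numpy as np
import pyerrors as pe  # assumption: this diff is against pyerrors.obs

rng = np.random.default_rng(4)
common = rng.normal(0.0, 1.0, 1000)
o1 = pe.Obs([1.0 + 0.1 * common + rng.normal(0.0, 0.05, 1000)], ['ensA'])
o2 = pe.Obs([2.0 + 0.1 * common + rng.normal(0.0, 0.05, 1000)], ['ensA'])
for o in (o1, o2):
    o.gamma_method()  # estimate errors first; covariance builds on the per-observable error analysis

cov = pe.covariance([o1, o2])                     # 2x2 error covariance matrix
corr = pe.covariance([o1, o2], correlation=True)  # same estimate, normalized to unit diagonal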
@@ -1557,7 +1561,7 @@ def invert_corr_cov_cholesky(corr, inverrdiag):
     if condn > 0.1 / np.finfo(float).eps:
         raise ValueError(f"Cannot invert correlation matrix as its condition number exceeds machine precision ({condn:1.2e})")
     if condn > 1e13:
-        warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % (condn), RuntimeWarning)
+        warnings.warn("Correlation matrix may be ill-conditioned, condition number: {%1.2e}" % condn, RuntimeWarning)
     chol = np.linalg.cholesky(corr)
     chol_inv = scipy.linalg.solve_triangular(chol, inverrdiag, lower=True)

@@ -1714,6 +1718,8 @@ def import_jackknife(jacks, name, idl=None):
         the N jackknife samples as first to Nth entry.
     name : str
         name of the ensemble the samples are defined on.
+    idl : list, optional
+        list of ranges or lists on which the samples are defined
     """
     length = len(jacks) - 1
     prj = (np.ones((length, length)) - (length - 1) * np.identity(length))
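A hedged round-trip sketch for the newly documented idl argument (data and ensemble name are made up; Obs.export_jackknife is assumed to be the matching export on the observable side, and the pyerrors import path is an assumption):

import numpy as np
import pyerrors as pe  # assumption: this diff is against pyerrors.obs

rng = np.random.default_rng(5)
idl = [range(1, 200, 2)]                         # configs 1, 3, ..., 199
obs = pe.Obs([rng.normal(1.0, 0.1, 100)], ['ensA'], idl=idl)

jacks = obs.export_jackknife()                   # assumed inverse operation: mean plus N jackknife samples
rebuilt = pe.import_jackknife(jacks, 'ensA', idl=idl)
print(np.isclose(obs.value, rebuilt.value))      # the round trip reproduces the mean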
@@ -1789,7 +1795,7 @@ def cov_Obs(means, cov, name, grad=None):
17891795
17901796 Parameters
17911797 ----------
1792- mean : list of floats or float
1798+ means : list of floats or float
17931799 N mean value(s) of the new Obs
17941800 cov : list or array
17951801 2d (NxN) Covariance matrix, 1d diagonal entries or 0d covariance
@@ -1821,7 +1827,7 @@ def covobs_to_obs(co):
     for i in range(len(means)):
         ol.append(covobs_to_obs(Covobs(means[i], cov, name, pos=i, grad=grad)))
     if ol[0].covobs[name].N != len(means):
-        raise ValueError('You have to provide %d mean values!' % (ol[0].N))
+        raise ValueError('You have to provide %d mean values!' % ol[0].N)
     if len(ol) == 1:
         return ol[0]
     return ol
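A hedged sketch of cov_Obs with the renamed means parameter (numbers and labels are made up; the pyerrors import path is an assumption):

import numpy as np
import pyerrors as pe  # assumption: this diff is against pyerrors.obs

# Two correlated external inputs with a known 2x2 covariance matrix.
means = [1.50, 0.70]
cov = np.array([[0.0010, 0.0002],
                [0.0002, 0.0020]])
z_a, z_b = pe.cov_Obs(means, cov, 'Z_input')

# A single value with a plain variance (the 0d case mentioned in the docstring).
m_obs = pe.cov_Obs(0.25, 0.02 ** 2, 'm_input')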
@@ -1843,14 +1849,14 @@ def _determine_gap(o, e_content, e_name):


 def _check_lists_equal(idl):
-    '''
+    """
     Use groupby to efficiently check whether all elements of idl are identical.
     Returns True if all elements are equal, otherwise False.

     Parameters
     ----------
     idl : list of lists, ranges or np.ndarrays
-    '''
+    """
     g = groupby([np.nditer(el) if isinstance(el, np.ndarray) else el for el in idl])
     if next(g, True) and not next(g, False):
         return True
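The groupby trick used here rests on the fact that grouping consecutive equal elements produces exactly one group; a minimal standalone illustration in plain Python (the ndarray handling above is omitted):

from itertools import groupby

def all_equal(iterable):
    # Zero groups (empty input) or exactly one group means all elements compare equal.
    g = groupby(iterable)
    return bool(next(g, True)) and not next(g, False)

print(all_equal([range(1, 5), range(1, 5)]))  # True
print(all_equal([[1, 2, 3], [1, 2, 4]]))      # False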