This article collects typical usage examples of Python's numpy.nanvar function. If you have been wondering what exactly nanvar does and how to use it, the hand-picked examples below should help.
20 code examples of nanvar are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
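Before the examples, here is a minimal self-contained sketch of what numpy.nanvar does: it computes the variance while ignoring NaN entries, whereas plain np.var returns NaN as soon as one is present. The array is made up purely for illustration.

import numpy as np

a = np.array([[1.0, np.nan, 3.0],
              [4.0, 5.0, np.nan]])

print(np.var(a))             # nan -- plain variance propagates NaNs
print(np.nanvar(a))          # 2.1875, the variance of the four finite values
print(np.nanvar(a, axis=0))  # column-wise, NaNs skipped per column
print(np.nanvar(a, ddof=1))  # sample (ddof=1) variance, as tested in Example 4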
Example 1: plot_profile_TKE_wind

def plot_profile_TKE_wind(synth):
    fig, ax = plt.subplots()
    colors = get_colors(synth)
    c = 0
    for key, value in synth.iteritems():
        for v in value:
            scase = str(key).zfill(2)
            sleg = str(v).zfill(2)
            synthfile = base_dir + 'c' + scase + '/leg' + sleg + '.cdf'
            U = read_synth(synthfile, 'F2U')
            V = read_synth(synthfile, 'F2V')
            Z = read_synth(synthfile, 'z')
            x = []
            y = []
            for n, z in enumerate(Z[1:15]):
                u = U[:, :, n + 1]
                v = V[:, :, n + 1]
                u_var = np.nanvar(u)
                v_var = np.nanvar(v)
                TKE = (u_var + v_var) / 2.
                x.append(TKE)
                y.append(z)
            label = 'Case: ' + scase + ' Leg: ' + sleg
            ax.plot(x, y, '-', label=label, color=colors[c])
            ax.set_ylim([0, 4])
            ax.set_xlabel('TKE [m2 s^-2]')
            ax.set_ylabel('Altitude MSL [km]')
            c += 1
    plt.suptitle('Spatial TKE at P3 synth levels ')
    plt.draw()
    plt.legend()

Developer: rvalenzuelar | Project: pythonx | Lines: 32 | Source: wind_field_2.py
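The TKE computed in Example 1 is just half the sum of the horizontal velocity variances at each level. A minimal standalone sketch of that one step, on made-up u/v wind fields with NaN gaps standing in for missing radar gates (shapes and values are illustrative, not from the original dataset):

import numpy as np

rng = np.random.default_rng(0)
u = rng.normal(5.0, 2.0, size=(40, 40))  # hypothetical u-wind field [m/s]
v = rng.normal(1.0, 1.5, size=(40, 40))  # hypothetical v-wind field [m/s]
u[::7, ::5] = np.nan  # simulate gaps in coverage
v[::6, ::4] = np.nan

tke = (np.nanvar(u) + np.nanvar(v)) / 2.0  # spatial TKE at this level
print(round(tke, 2), 'm2 s^-2')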
Example 2: calc_stresses

def calc_stresses(self, beamvel, beamAng):
    """
    Calculate the stresses from the difference in the beam variances.

    Reference: Stacey, Monosmith and Burau; (1999) JGR [104]
    "Measurements of Reynolds stress profiles in unstratified
    tidal flow"
    """
    fac = 4 * np.sin(self['config']['beam_angle'] * deg2rad) * \
        np.cos(self['config']['beam_angle'] * deg2rad)
    # Note: Stacey defines the beams incorrectly for Workhorse ADCPs.
    # According to the workhorse coordinate transformation
    # documentation, the instrument's:
    #   x-axis points from beam 1 to 2, and
    #   y-axis points from beam 4 to 3.
    # Therefore:
    stress = ((np.nanvar(self.reshape(beamvel[0]), axis=-1) -
               np.nanvar(self.reshape(beamvel[1]), axis=-1)) + 1j *
              (np.nanvar(self.reshape(beamvel[2]), axis=-1) -
               np.nanvar(self.reshape(beamvel[3]), axis=-1))
              ) / fac
    if self.config.orientation == 'up':
        # This comes about because, when the ADCP is 'up', the u
        # and w velocities need to be multiplied by -1 (equivalent
        # to adding pi to the roll). See the coordinate
        # transformation documentation for more info.
        #
        # The uw (real) component has two minus signs, but the vw (imag)
        # component only has one, therefore:
        stress.imag *= -1
    stress *= rotate.inst2earth_heading(self)
    if self.props['coord_sys'] == 'principal':
        stress *= np.exp(-1j * self.props['principal_angle'])
    return stress.real, stress.imag

Developer: lkilcher | Project: dolfyn | Lines: 34 | Source: base_legacy.py
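The variance-difference technique from Stacey et al. (1999) used in Example 2 can be demonstrated outside the dolfyn class. The sketch below assumes four synthetic along-beam velocity records from a single depth bin and a 20-degree beam angle (a common Workhorse configuration); both the data and the angle are assumptions for illustration.

import numpy as np

deg2rad = np.pi / 180.0
beam_angle = 20.0  # degrees; assumed, check your instrument config

rng = np.random.default_rng(1)
# Four hypothetical along-beam velocity time series [m/s].
b1, b2, b3, b4 = rng.normal(0.0, 0.1, size=(4, 4096))

fac = 4 * np.sin(beam_angle * deg2rad) * np.cos(beam_angle * deg2rad)
# uw from the beam 1/2 variance difference, vw from beams 3/4,
# with NaN samples ignored just as in calc_stresses.
uw = (np.nanvar(b1) - np.nanvar(b2)) / fac
vw = (np.nanvar(b3) - np.nanvar(b4)) / fac
print(uw, vw)  # ~0 for uncorrelated noise; nonzero under real shear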
Example 3: plot_profile_variance

def plot_profile_variance(dbz, vvel, ht, ax, case, ncases):
    dbz_variance = []
    vvel_variance = []
    count_gates = []
    global ti
    global n
    global colors
    if n == 0:
        # colors = sns.color_palette('hls', ncases)
        colors = sns.color_palette('Paired', ncases)
    for i in range(len(ht)):
        dbz_variance.append(np.nanvar(dbz[i, :]))
        vvel_variance.append(np.nanvar(vvel[i, :]))
        count_gates.append(vvel[i, :].size - np.sum(np.isnan(vvel[i, :])))
    inid = datetime(*(reqdates[case]['ini'] + [0, 0]))
    endd = datetime(*(reqdates[case]['end'] + [0, 0]))
    ti.append('\nCase ' + case + ': ' + inid.strftime('%Y-%b %dT%H:%M') +
              endd.strftime(' - %dT%H:%M UTC'))
    if n < 7:
        marker = 'None'
        # marker = 'o'
    else:
        marker = 'o'
    dbzv = [0, 180]
    vvelv = [0, 6]
    if np.any(ax):
        ax[0].plot(dbz_variance, ht, marker=marker, color=colors[n])
        ax[1].plot(vvel_variance, ht, marker=marker, color=colors[n])
        ax[2].plot(count_gates, ht, marker=marker, color=colors[n],
                   label='case ' + case)
        n += 1
    else:
        fig, ax = plt.subplots(1, 3, sharey=True, figsize=(12, 8))
        ax[0].plot(dbz_variance, ht, color=colors[n])
        ax[1].plot(vvel_variance, ht, color=colors[n])
        ax[2].plot(count_gates, ht, color=colors[n], label='case ' + case)
        ax[0].set_ylabel('Height MSL [km]')
        ax[0].set_xlabel('Reflectivity [dBZ^2]')
        ax[1].set_xlabel('Vertical velocity [m2 s^-2]')
        ax[2].set_xlabel('Count good gates')
        ax[0].set_xlim(dbzv)
        ax[1].set_xlim(vvelv)
        n += 1
        return ax
    if n == ncases and ncases == 4:
        plt.suptitle('SPROF time variance' + ''.join(ti))
        plt.subplots_adjust(top=0.85, left=0.05, right=0.95, wspace=0.05)
        ax[2].legend(loc='lower left')
    elif n == ncases and ncases > 4:
        plt.suptitle('SPROF time variance')
        plt.subplots_adjust(top=0.9, left=0.05, right=0.95, wspace=0.06)
        ax[2].legend()
    plt.draw()

Developer: rvalenzuelar | Project: sprof_vis | Lines: 59 | Source: statistical_sprof.py
Example 4: test_nanvar

def test_nanvar(self):
    tgt = np.var(self.mat)
    for mat in self.integer_arrays():
        assert_equal(np.nanvar(mat), tgt)

    tgt = np.var(mat, ddof=1)
    for mat in self.integer_arrays():
        assert_equal(np.nanvar(mat, ddof=1), tgt)

Developer: ContinuumIO | Project: numpy | Lines: 8 | Source: test_nanfunctions.py
Example 5: test_nanvar

def test_nanvar(eng):
    original = arange(24).reshape((2, 3, 4)).astype(float64)
    data = fromlist(list(original), engine=eng)
    assert allclose(data.nanvar().shape, (1, 3, 4))
    assert allclose(data.nanvar().toarray(), nanvar(original, axis=0))

    original[0, 2, 3] = nan
    original[1, 0, 2] = nan
    original[1, 2, 2] = nan
    data = fromlist(list(original), engine=eng)
    assert allclose(data.nanvar().shape, (1, 3, 4))
    assert allclose(data.nanvar().toarray(), nanvar(original, axis=0))

Developer: boazmohar | Project: thunder | Lines: 12 | Source: test_images.py
Example 6: bayes_precision

def bayes_precision(x, y, distribution='normal', posterior_width=0.08, num_iters=25000, inference='sampling'):
    """ Bayes precision computation.

    :param x: sample of a treatment group
    :type x: pd.Series or list (array-like)
    :param y: sample of a control group
    :type y: pd.Series or list (array-like)
    :param distribution: name of the KPI distribution model, which assumes a Stan model file with the same name exists
    :type distribution: str
    :param posterior_width: the stopping criterion, threshold of the posterior width
    :type posterior_width: float
    :param num_iters: number of iterations of bayes sampling
    :type num_iters: int
    :param inference: sampling or variational inference method for approximating the posterior
    :type inference: str

    :return: results of type EarlyStoppingTestStatistics (without p-value and stat. power)
    :rtype: EarlyStoppingTestStatistics
    """
    logger.info("Started running bayes precision with {} procedure, treatment group of size {}, "
                "control group of size {}, {} distribution.".format(len(x), len(y), distribution, inference))

    traces, n_x, n_y, mu_x, mu_y = _bayes_sampling(x, y, distribution=distribution,
                                                   num_iters=num_iters, inference=inference)
    trace_normalized_effect_size = get_trace_normalized_effect_size(distribution, traces)
    trace_absolute_effect_size = traces['delta']

    credible_mass = 0.95
    left_out = 1.0 - credible_mass
    p1 = round(left_out/2.0, 5)
    p2 = round(1.0 - left_out/2.0, 5)
    credible_interval_delta = HDI_from_MCMC(trace_absolute_effect_size, credible_mass)
    credible_interval_delta_normalized = HDI_from_MCMC(trace_normalized_effect_size, credible_mass)

    stop = credible_interval_delta_normalized[1] - credible_interval_delta_normalized[0] < posterior_width

    treatment_statistics = SampleStatistics(int(n_x), float(mu_x), float(np.nanvar(x)))
    control_statistics = SampleStatistics(int(n_y), float(mu_y), float(np.nanvar(y)))
    variant_statistics = BaseTestStatistics(control_statistics, treatment_statistics)

    logger.info("Finished running bayes precision with {} procedure, treatment group of size {}, "
                "control group of size {}, {} distribution.".format(len(x), len(y), distribution, inference))

    return EarlyStoppingTestStatistics(variant_statistics.control_statistics,
                                       variant_statistics.treatment_statistics,
                                       float(mu_x - mu_y),
                                       dict([(p * 100, v) for p, v in zip([p1, p2], credible_interval_delta)]),
                                       None, None, stop)

Developer: zalando | Project: expan | Lines: 50 | Source: early_stopping.py
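Example 6 leans on a helper, HDI_from_MCMC, whose body is not shown. A common way to get the highest-density interval from posterior samples is to slide a window over the sorted trace and keep the narrowest interval holding the requested mass; the sketch below implements that idea under the assumption that expan's helper behaves similarly (the function name here is hypothetical).

import numpy as np

def hdi_from_samples(samples, credible_mass=0.95):
    """Narrowest interval containing `credible_mass` of the samples."""
    s = np.sort(np.asarray(samples))
    n_in = int(np.floor(credible_mass * len(s)))
    widths = s[n_in:] - s[:len(s) - n_in]  # width of every candidate interval
    i = int(np.argmin(widths))
    return s[i], s[i + n_in]

trace = np.random.default_rng(2).normal(0.03, 0.01, size=25000)
low, high = hdi_from_samples(trace, credible_mass=0.95)
print(low, high)  # roughly 0.03 +/- 1.96 * 0.01 for a normal posterior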
Example 7: test_nanvar

def test_nanvar(eng):
    arr = array([arange(8), arange(8)]).astype(float64)
    data = fromarray(arr, engine=eng)
    val = data.nanvar().toarray()
    expected = nanvar(data.toarray(), axis=0)
    assert allclose(val, expected)
    assert str(val.dtype) == 'float64'

    arr[0, 4] = nan
    arr[1, 3] = nan
    arr[1, 4] = nan
    data = fromarray(arr, engine=eng)
    val = data.nanvar().toarray()
    expected = nanvar(data.toarray(), axis=0)
    assert allclose(val, expected, equal_nan=True)
    assert str(val.dtype) == 'float64'

Developer: boazmohar | Project: thunder | Lines: 15 | Source: test_series.py
Example 8: test_GWAS

def test_GWAS(self):
    Y = np.genfromtxt(self._liverPhenos)

    # Loading npdump and first 1000 snps for speed
    K = np.load(self._liverKinshipMatrix)
    snps = np.load(self._liver1000SNPFile).T

    # Variances across the rows ignoring NaN, used to check which SNPs
    # were not polymorphic across the given individuals.
    vars = np.nanvar(snps, axis=0)

    TS, PS = lmm.GWAS(Y, snps, K, REML=True, refit=True)

    # SNPs that are not polymorphic (in the given individuals being tested)
    # will have variance 0; this check ensures that only these SNPs have a
    # return value of NaN.
    for i in range(len(PS)):
        self.assertTrue(not math.isnan(PS[i]) or vars[i] == 0,
                        "NaN found in results corresponding to polymorphic SNP")

    results = np.array([TS, PS])
    ansKey = np.load(self._liverTestFile)

    # These results include np.nan values, so allclose cannot be used. The
    # results are similar with each run, but do vary, so we can only check
    # for similarity to a precision of about 1e-06.
    for i in range(results.shape[0]):
        for j in range(results.shape[1]):
            a = results[i, j]
            b = ansKey[i, j]
            self.assertTrue((np.isnan(a) and np.isnan(b)) or abs(a - b) < 1e-06,
                            "Mismatch on values: " + str(a) + " and " + str(b))

Developer: ngcrawford | Project: pylmm_zarlab | Lines: 27 | Source: test_lmm.py
Example 9: c

def c(self, P, h, bw):
    """Calculate the sill"""
    c = np.nanvar(P[:, 2])
    if h == 0:
        return c
    else:
        return c - self.semivarh(P, h, bw)

Developer: m4sth0 | Project: sauventory | Lines: 7 | Source: variogram.py
Example 10: compute

def compute(self, today, assets, out, close):
    # get returns dataset
    returns = ((close - np.roll(close, 1, axis=0)) / np.roll(close, 1, axis=0))[1:]
    # get index of benchmark
    benchmark_index = np.where((assets == 8554) == True)[0][0]
    # get returns of benchmark
    benchmark_returns = returns[:, benchmark_index]
    # prepare X matrix (x_is - x_bar)
    X = benchmark_returns
    X_bar = np.nanmean(X)
    X_vector = X - X_bar
    X_matrix = np.tile(X_vector, (len(returns.T), 1)).T
    # prepare Y matrix (y_is - y_bar)
    Y_bar = np.nanmean(close, axis=0)
    Y_bars = np.tile(Y_bar, (len(returns), 1))
    Y_matrix = returns - Y_bars
    # prepare variance of X
    X_var = np.nanvar(X)
    # multiply X matrix and Y matrix and sum (dot product),
    # then divide by variance of X;
    # this gives the MLE of Beta
    out[:] = (np.sum((X_matrix * Y_matrix), axis=0) / X_var) / (len(returns))

Developer: quantopian | Project: algorithm-component-library | Lines: 29 | Source: quanta_lib.py
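Stripped of the Pipeline machinery, the estimator in Example 10 is the textbook OLS slope: beta = cov(asset returns, benchmark returns) / var(benchmark returns), built from nan-aware reductions. A minimal sketch on made-up daily returns (the series, sizes, and true beta are invented):

import numpy as np

rng = np.random.default_rng(3)
bench = rng.normal(0.0005, 0.01, size=250)            # benchmark returns
asset = 1.3 * bench + rng.normal(0, 0.005, size=250)  # true beta ~ 1.3
asset[[10, 57]] = np.nan                              # missing observations

dx = bench - np.nanmean(bench)
dy = asset - np.nanmean(asset)
beta = np.nanmean(dx * dy) / np.nanvar(bench)  # cov / var, NaNs skipped
print(round(beta, 2))  # close to 1.3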
Example 11: cal_stats

def cal_stats(in_fc, col_names):
    """Calculate stats for an array of double types, with nodata (nan, None)
    : in the column.
    :
    :Requires:
    :---------
    : in_fc - input featureclass or table
    : col_names - the columns... numeric (floating point, double)
    :
    :Notes:
    :------  see the args tuple for examples of nan functions
    :  np.nansum(b, axis=0)  # by column
    :  np.nansum(b, axis=1)  # by row
    :  c_nan = np.count_nonzero(~np.isnan(b), axis=0)  # count nan if needed
    """
    a = arcpy.da.FeatureClassToNumPyArray(in_fc, col_names)  # "*"
    b = a.view(np.float).reshape(len(a), -1)
    if len(a.shape) == 1:
        ax = 0
    else:
        ax = [1, 0][True]  # ax = [1, 0][colwise]  colwise = True
    mask = np.isnan(b)
    cnt = np.sum(~mask, axis=ax, dtype=np.intp, keepdims=False)
    n_sum = np.nansum(b, axis=0)
    n_mean = np.nanmean(b, axis=0)
    n_var = np.nanvar(b, axis=0)
    n_std = np.nanstd(b, axis=0)
    sk, kurt = skew_kurt(b, avg=n_mean, var_x=n_var, std_x=n_std,
                         col=True, mom='both')
    args = (col_names, cnt, n_sum, np.nanmin(b, axis=0), np.nanmax(b, axis=0),
            np.nanmedian(b, axis=0), n_mean, n_std, n_var, sk, kurt)
    return col_names, args

Developer: Dan-Patterson | Project: GIS | Lines: 31 | Source: field_statistics.py
Example 12: computeFisherScore

def computeFisherScore(data, class_ass, nb_classes):
    '''
    The Fisher Score assigns a rank to each of the features, with the goal of
    finding the subset of features of the data such that, in the data space
    spanned by the selected features, the distances between data points in
    different classes are as large as possible and the distances between data
    points in the same class are as small as possible.

    Input
      - data: matrix of inputs, size N x M, where N is the number of trials
        and M is the number of features
      - class_ass: array of class assignments, size 1 x N, where N is the
        number of trials
      - nb_classes: number of classes

    Output
      - Fscores: array of scores, size 1 x M, for each of the features
    '''
    num_trials, num_features = data.shape
    within_class_mean = np.zeros([nb_classes, num_features])  # mean for each feature within each class
    within_class_var = np.zeros([nb_classes, num_features])   # variance for each feature within each class
    num_points_within_class = np.zeros([1, nb_classes])       # number of points within each class

    for i in range(nb_classes):
        in_class = np.ravel(np.nonzero(class_ass == i))
        num_points_within_class[0, i] = len(in_class)
        class_data = data[in_class, :]  # extract trials classified as belonging to this class
        within_class_mean[i, :] = np.nanmean(class_data, axis=0)  # length of mean vector should equal M, the number of features
        within_class_var[i, :] = np.nanvar(class_data, axis=0)

    between_class_mean = np.asmatrix(np.mean(within_class_mean, axis=0))
    between_class_mean = np.dot(np.ones([nb_classes, 1]), between_class_mean)

    Fscores = np.dot(num_points_within_class, np.square(within_class_mean - between_class_mean)) / \
        np.dot(num_points_within_class, within_class_var)

    return Fscores

Developer: srsummerson | Project: analysis | Lines: 31 | Source: basicAnalysis.py
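Per feature j, the score in Example 12 reduces to F_j = sum_k n_k (mu_kj - mu_bar_j)^2 / sum_k n_k var_kj, where mu_bar_j is the unweighted mean of the class means. A tiny sketch on synthetic two-class data (all shapes and values invented) reproduces the same arithmetic with plain arrays:

import numpy as np

rng = np.random.default_rng(4)
# Two classes, three features; only feature 0 separates the classes.
data = np.vstack([rng.normal([0.0, 1.0, 5.0], 1.0, size=(40, 3)),
                  rng.normal([4.0, 1.2, 5.0], 1.0, size=(60, 3))])
labels = np.array([0] * 40 + [1] * 60)

mus = np.array([np.nanmean(data[labels == k], axis=0) for k in (0, 1)])
var_within = np.array([np.nanvar(data[labels == k], axis=0) for k in (0, 1)])
n_k = np.array([[40.0, 60.0]])
grand = mus.mean(axis=0)  # unweighted mean of class means, as in the code above

fscores = (n_k @ (mus - grand) ** 2) / (n_k @ var_within)
print(fscores.round(2))  # feature 0 should score far higher than 1 and 2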
Example 13: _fit_model

def _fit_model(self, fcol, dis):
    """Determine the best fit for one feature column given a distribution name.

    Parameters
    ----------
    fcol: feature column, array
    dis: distribution name, String

    Returns
    -------
    function: fit model with feature as argument
    """
    if dis == 'ratio':
        itfreq = itemfreq(fcol)
        uniqueVars = itfreq[:, 0]
        freq = itfreq[:, 1]
        rat = freq/sum(freq)
        rat = dict(zip(uniqueVars, rat.T))
        func = lambda x: self.funcs[dis](x, rat)
    if dis == 'poisson':
        lamb = np.nanmean(fcol, axis=0)
        func = lambda x: self.funcs[dis](x, lamb)
    if dis == 'norm':
        sigma = np.nanvar(fcol, axis=0)
        theta = np.nanmean(fcol, axis=0)
        func = lambda x: self.funcs[dis](x, sigma, theta)
    return np.vectorize(func)

Developer: rexshihaoren | Project: MSPrediction-Python | Lines: 30 | Source: naive_bayes.py
Example 14: fit_cols

def fit_cols(self, attributes, x, n_vals):
    """
    Return `EuclideanColumnsModel` with stored means and variances
    for normalization and imputation.
    """
    def nowarn(msg, cat, *args, **kwargs):
        if cat is RuntimeWarning and (
                msg == "Mean of empty slice"
                or msg == "Degrees of freedom <= 0 for slice"):
            if self.normalize:
                raise ValueError("some columns have no defined values")
        else:
            orig_warn(msg, cat, *args, **kwargs)

    self.check_no_discrete(n_vals)
    # catch_warnings resets the registry for "once", while avoiding this
    # warning would be annoying and slow, hence patching
    orig_warn = warnings.warn
    with patch("warnings.warn", new=nowarn):
        means = np.nanmean(x, axis=0)
        vars = np.nanvar(x, axis=0)
    if self.normalize and not vars.all():
        raise ValueError("some columns are constant")
    return EuclideanColumnsModel(
        attributes, self.impute, self.normalize, means, vars)

Developer: acopar | Project: orange3 | Lines: 25 | Source: distance.py
Example 15: autocorrelation_hourly

def autocorrelation_hourly(data):
    from matplotlib.pyplot import plot, xlabel, ylabel, show
    from numpy import nanmean, nanvar, mean, multiply, arange

    # We choose 7 days and plus-minus 6 hours as the possible periodicity
    # in traffic.
    START_PERIOD = 7*24 - 6
    END_PERIOD = 7*24 + 6
    V = replace_placeholder(data, value=nanmean(data))
    # We don't take the variance of entries that we replaced with nanmean.
    sigma2 = nanvar(data)
    autocorr_dict = {period: 0 for period in range(START_PERIOD, END_PERIOD+1)}
    Deviations = V - nanmean(V, axis=0)
    for period in range(START_PERIOD, END_PERIOD+1):
        autocorr = nanmean([multiply(Deviations[t], Deviations[t+period])
                            for t in range(len(V)-period)])/sigma2
        autocorr_dict[period] = autocorr
        print(period)

    # Peaks in plot correspond to high autocorrelation i.e. high
    # periodicity trend.
    plot(arange(START_PERIOD, END_PERIOD+1),
         [autocorr_dict[period] for period in range(START_PERIOD, END_PERIOD+1)],
         'o-')
    ylabel('Average autocorellation over full links')
    xlabel('Assumed period of data (in hours)')
    show()
    # legend(bbox_to_anchor=(1.35, 0.95))
    return None

Developer: vaibhavskarve | Project: traffic-study | Lines: 29 | Source: read_data.py
Example 16: test_var

def test_var():
    out = df.i32.reshape((2, 2, 5)).var(axis=2).T
    eq(c.points(df, 'x', 'y', ds.var('i32')), out)
    eq(c.points(df, 'x', 'y', ds.var('i64')), out)
    out = np.nanvar(df.f64.reshape((2, 2, 5)), axis=2).T
    eq(c.points(df, 'x', 'y', ds.var('f32')), out)
    eq(c.points(df, 'x', 'y', ds.var('f64')), out)

Developer: WilfR | Project: datashader | Lines: 7 | Source: test_pandas.py
Example 17: get_FR_stats

def get_FR_stats(hdf, save=False, return_=False, plot=True):
    eps = 10**-12
    sc = hdf.root.task[:]['spike_counts']
    mn = np.nanmean(sc[:, :, 0], axis=0)
    vr = np.nanvar(sc[:, :, 0], axis=0)
    ff = vr/(mn+eps)

    if plot:
        f, ax = plt.subplots()
        ax.hist(np.mean(sc[:, :, 0], axis=0))
        ax.set_title('Hist. of Mean FR.')
        ax.set_xlabel('FR')
        ax.set_ylabel('Counts')

        f2, ax2 = plt.subplots()
        try:
            ax2.hist(ff)
        except:
            print 'error FF: ', ff
        ax2.set_title('Hist. of Fano Factor')
        ax2.set_xlabel('Fano Factor')
        ax2.set_ylabel('Counts')

    if save:
        f.savefig(hdf.filename[:-4]+'_mnFR.png', format='png')
        f2.savefig(hdf.filename[:-4]+'_FF.png', format='png')

    if return_:
        return np.mean(sc[:, :, 0], axis=0), ff

Developer: pkhanna104 | Project: fa_analysis | Lines: 28 | Source: get_sim_tuning.py
Example 18: ExponentialTransformErrVarShapingFactor

def ExponentialTransformErrVarShapingFactor(data, comparedata, G=10):
    """
    This function uses the variance of the error terms between observed and
    simulated data as a base to calculate the likelihood.

    .. math::

            p = -G \\cdot Var(E(x))

    The factor `G` comes from the DREAMPar model, so this factor can be
    changed according to the used model.

    For more details see also: http://onlinelibrary.wiley.com/doi/10.1029/95WR03723/epdf.

    `Usage:` Maximizing the likelihood value guides to the best model.

    :param data: observed measurements as a numerical list
    :type data: list
    :param comparedata: simulated data from a model which should fit the original data somehow
    :type comparedata: list
    :param G: DREAMPar model parameter `G`
    :type G: float
    :return: the p value as a likelihood
    :rtype: float
    """
    __standartChecksBeforeStart(data, comparedata)
    errArr = np.array(__calcSimpleDeviation(data, comparedata))
    return -G*np.nanvar(errArr)

Developer: kbstn | Project: spotpy | Lines: 29 | Source: likelihoods.py
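As a usage sketch: assuming __calcSimpleDeviation returns the element-wise residuals between observed and simulated series (its body is not shown here, so that is an inference), the likelihood is just -G times the nan-aware variance of those residuals; a near-constant error scores close to 0 and scattered errors score increasingly negative.

import numpy as np

obs = np.array([1.0, 2.0, 3.0, 4.0, np.nan, 6.0])
sim_good = obs + 0.01                                       # constant offset
sim_bad = obs + np.array([0.5, -1.0, 0.8, -0.2, 0.0, 1.1])  # scattered error

G = 10
for sim in (sim_good, sim_bad):
    err = obs - sim  # stands in for __calcSimpleDeviation (assumed behavior)
    print(-G * np.nanvar(err))  # -0.0 for the good fit, negative for the bad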
Example 19: _compute_zr2011_dataframe

def _compute_zr2011_dataframe(self):
    """
    Get the dataframe needed for the mid-range temperatures,
    and add a mass bin for 6000 K using the gyrochronology relation.
    """
    # Read in the dataframe from disk.
    df = pd.read_csv('data/velocity_pdfs.csv', header=1)

    # Compute equatorial velocities for a 6000 K star at this age.
    teff = np.ones_like(self.age) * 6000.0
    v_eq = self._gyro_velocities(teff, self.age).to(u.km/u.s).value
    v_eq[v_eq > 500] = np.nan  # Remove unphysical vsini values.

    # Calculate approximate maxwellian parameters from the velocities.
    alpha = np.sqrt(np.nanvar(v_eq) * np.pi / (3*np.pi - 8))
    l = np.nanmedian(v_eq) - 2*alpha*np.sqrt(2/np.pi)

    # Add a row to the dataframe with this information.
    df.loc[df.index.max()+1] = [1.0, 1.24, 0, 25, 100, alpha*np.sqrt(2), l]

    # Calculate a few more columns for the dataframe.
    df['mid_mass'] = (df.mass_high + df.mass_low) / 2.0
    df['slow_alpha'] = df.slow_mu / np.sqrt(2)
    df['fast_alpha'] = df.fast_mu / np.sqrt(2)
    df['slow_frac'] /= 100.0
    df['fast_frac'] /= 100.0

    # Sort so that interpolation works.
    df = df.sort_values(by='mid_mass').reset_index()

    return df

Developer: kgullikson88 | Project: BinaryInference | Lines: 31 | Source: Completeness.py
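The alpha recovered in Example 19 inverts the Maxwell-Boltzmann identities Var(v) = alpha^2 (3*pi - 8) / pi and mean(v) = 2*alpha*sqrt(2/pi). The round-trip below checks the inversion against scipy.stats.maxwell; the scale value and sample size are arbitrary.

import numpy as np
from scipy import stats

alpha_true = 8.0
v = stats.maxwell.rvs(scale=alpha_true, size=100_000, random_state=5)
v[::1000] = np.nan  # a few removed values, as with the unphysical vsini above

# Invert Var(v) = alpha^2 * (3*pi - 8) / pi to recover the scale.
alpha_hat = np.sqrt(np.nanvar(v) * np.pi / (3 * np.pi - 8))
print(round(alpha_hat, 2))  # ~8.0
print(round(2 * alpha_hat * np.sqrt(2 / np.pi), 2),  # implied mean...
      round(np.nanmean(v), 2))                       # ...vs the sample mean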
Example 20: cal_stats

def cal_stats(a):
    """Calculate stats for an array of double types, with nodata (nan, None)
    in the column.

    Notes
    -----
    see the args tuple for examples of nan functions::

        >>> np.nansum(b, axis=0)  # by column
        >>> np.nansum(b, axis=1)  # by row
        >>> c_nan = np.count_nonzero(~np.isnan(b), axis=0)  # count nan if needed
    """
    if len(a.shape) == 1:
        ax = 0
    else:
        ax = [1, 0][True]  # ax = [1, 0][colwise]  colwise = True
    mask = np.isnan(a)
    n = len(a)
    cnt = np.sum(~mask, axis=ax, dtype=np.intp, keepdims=False)
    n_sum = np.nansum(a, axis=0)
    n_min = np.nanmin(a, axis=0)
    n_max = np.nanmax(a, axis=0)
    n_mean = np.nanmean(a, axis=0)
    n_med = np.nanmedian(a, axis=0)
    n_std = np.nanstd(a, axis=0)
    n_var = np.nanvar(a, axis=0)
    col_names = ['N', 'n', 'sum', 'min', 'max', 'mean', 'median',
                 'std', 'var', 'skew', 'kurt']
    sk, kurt = skew_kurt(a, avg=n_mean, var_x=n_var, std_x=n_std,
                         col=True, mom='both')
    args = [n, cnt, n_sum, n_min, n_max, n_mean, n_med, n_std, n_var, sk, kurt]
    z = list(zip(col_names, args))
    s = "".join(["\n{:<6} {}".format(*i) for i in z])
    return s

Developer: Dan-Patterson | Project: GIS | Lines: 34 | Source: field_stats.py
Note: The numpy.nanvar examples above were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.