This article collects typical usage examples of the numpy.nan_to_num function in Python. If you are wondering what nan_to_num does, how to call it, or what it looks like in real code, the hand-picked examples below may help.
Twenty code examples of the nan_to_num function are shown below, sorted by popularity by default.
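Before the project examples, here is a minimal standalone sketch of what the function does (the keyword arguments assume NumPy 1.17 or newer): by default, nan_to_num replaces NaN with 0.0 and positive/negative infinity with the largest-magnitude finite floats.

import numpy as np

x = np.array([1.0, np.nan, np.inf, -np.inf])

# Defaults: NaN -> 0.0, +inf -> ~1.798e308, -inf -> ~-1.798e308
print(np.nan_to_num(x))

# Since NumPy 1.17 the replacement values are configurable:
print(np.nan_to_num(x, nan=0.0, posinf=1e6, neginf=-1e6))
# [ 1.e+00  0.e+00  1.e+06 -1.e+06]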
Example 1: plotheatmaps
def plotheatmaps(data, title=''):
    local = get_local_full()
    glob = get_global_full()
    gden = [('%4.0f' % float(i)).lstrip('0') for i in glob['density']]
    gcnt = [int(i) for i in glob['count']]
    max_gden = max([float(i) for i in glob['density']])
    for tbin in data.keys():
        c = np.array(data[tbin])
        # gcnt = np.sum(c, axis=1)
        # lcnt = np.sum(c, axis=0)
        lcnt = [int(i) for i in local[tbin]['count']]
        lden = [float(i) for i in local[tbin]['density']]
        lden_norm = [i / sum(lden) for i in lden]
        lden_scaled = [i * max_gden for i in lden_norm]
        denlab = [('%3.0f' % i) for i in lden_scaled]
        print(local[tbin]['volume'])
        glabels = ['%4d/%4s' % i for i in zip(gcnt, gden)]
        llabels = ['%4d/%4s' % i for i in zip(lcnt, denlab)]
        norm_c = np.nan_to_num(c / np.linalg.norm(c, axis=-1)[:, np.newaxis]).T
        P.heatmap(norm_c, glabels, llabels, title + tbin + '_col')
        d = c.T
        norm_r = np.nan_to_num(d / np.linalg.norm(d, axis=-1)[:, np.newaxis])
        P.heatmap(norm_r, glabels, llabels, title + tbin + '_row')
        combined = (norm_c + norm_r) / 2
        P.heatmap(combined, glabels, llabels, title + tbin + '_combined')
        print(combined)
Author: DaMSL, Project: ddc, Lines: 27, Source: scrapper.py
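The pattern in example 1, dividing by a row norm and then calling nan_to_num, is a common guard against all-zero rows: a zero row divided by its zero norm yields NaN, which nan_to_num maps back to 0. A minimal standalone sketch of that guard (not from the ddc project):

import numpy as np

c = np.array([[3.0, 4.0],
              [0.0, 0.0]])          # second row has zero norm

norms = np.linalg.norm(c, axis=-1)[:, np.newaxis]
with np.errstate(invalid='ignore', divide='ignore'):
    normed = c / norms              # second row becomes [nan, nan]

print(np.nan_to_num(normed))
# [[0.6 0.8]
#  [0.  0. ]]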
Example 2: _to_raw
def _to_raw(self, data1, data2):
    from matplotlib import pyplot as plt
    from matplotlib.colors import Normalize
    cmapdir = options.config.get("webgl", "colormaps")
    cmap = plt.imread(os.path.join(cmapdir, "%s.png" % self.cmap))
    norm1 = Normalize(self.vmin, self.vmax)
    norm2 = Normalize(self.vmin2, self.vmax2)
    d1 = np.clip(norm1(data1), 0, 1)
    d2 = np.clip(1 - norm2(data2), 0, 1)
    dim1 = np.round(d1 * (cmap.shape[1] - 1))
    # NaNs in the data seemed to cause a weird interaction with the
    # conversion to uint32
    dim1 = np.nan_to_num(dim1).astype(np.uint32)
    dim2 = np.round(d2 * (cmap.shape[0] - 1))
    dim2 = np.nan_to_num(dim2).astype(np.uint32)
    colored = cmap[dim2.ravel(), dim1.ravel()]
    r, g, b, a = colored.T
    r.shape = dim1.shape
    g.shape = dim1.shape
    b.shape = dim1.shape
    a.shape = dim1.shape
    # Preserve nan values as alpha = 0
    aidx = np.logical_or(np.isnan(data1), np.isnan(data2))
    a[aidx] = 0
    # Code from master, to handle alpha input; probably better here but not tested.
    # # Possibly move this above setting nans to alpha = 0;
    # # possibly multiply specified alpha by alpha in colormap??
    # if 'alpha' in self.attrs:
    #     # Over-write alpha from colormap / nans with alpha arg if provided.
    #     # Question: might it be important to keep alpha as an attr?
    #     a = self.attrs.pop('alpha')
    return r, g, b, a
Author: gallantlab, Project: pycortex, Lines: 34, Source: view2D.py
Example 3: time_std
def time_std(self):
    if hasattr(self, '_time_std'):
        return self._time_std
    if self.savedir is not None:
        try:
            with open(join(self.savedir, 'time_std.pkl'),
                      'rb') as f:
                time_std = pickle.load(f)
        except IOError:
            pass
        else:
            # Same protocol as the averages. Make sure the
            # std is a single 4D (zyxc) array and if not just
            # re-calculate the time std.
            if isinstance(time_std, np.ndarray):
                self._time_std = time_std
                return self._time_std
    sums = np.zeros(self.frame_shape)
    sums_squares = np.zeros(self.frame_shape)
    counts = np.zeros(self.frame_shape)
    for frame in it.chain.from_iterable(self):
        sums += np.nan_to_num(frame)
        sums_squares += np.square(np.nan_to_num(frame))
        counts[np.isfinite(frame)] += 1
    means = old_div(sums, counts)
    mean_of_squares = old_div(sums_squares, counts)
    std = np.sqrt(mean_of_squares - np.square(means))
    if self.savedir is not None and not self._read_only:
        with open(join(self.savedir, 'time_std.pkl'), 'wb') as f:
            pickle.dump(std, f, pickle.HIGHEST_PROTOCOL)
    self._time_std = std
    return self._time_std
Author: deep-introspection, Project: sima, Lines: 33, Source: imaging.py
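Example 3 streams the frames once: nan_to_num lets NaN pixels contribute 0 to the running sums while counts increments only where a frame is finite, so each pixel ends up with std = sqrt(E[x^2] - E[x]^2) over its valid samples only. A standalone sanity check against np.nanmean/np.nanstd (not part of sima):

import numpy as np

frames = np.array([[1.0, np.nan],
                   [3.0, 4.0],
                   [5.0, np.nan]])   # 3 "frames" of a two-pixel image

sums = np.nan_to_num(frames).sum(axis=0)
sums_sq = np.square(np.nan_to_num(frames)).sum(axis=0)
counts = np.isfinite(frames).sum(axis=0)

means = sums / counts
std = np.sqrt(sums_sq / counts - np.square(means))

assert np.allclose(means, np.nanmean(frames, axis=0))
assert np.allclose(std, np.nanstd(frames, axis=0))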
Example 4: compare_derivatives
def compare_derivatives(self, var_in, var_out, rel_error=False):
    model = self.model
    # Numeric
    Jn = model.calc_gradient(var_in, var_out, mode="fd",
                             return_format='array')
    #print 'finite diff', Jn
    # Analytic forward
    Jf = model.calc_gradient(var_in, var_out, mode='fwd',
                             return_format='array')
    #print 'forward', Jf
    if rel_error:
        diff = np.nan_to_num(abs(Jf - Jn) / Jn)
    else:
        diff = abs(Jf - Jn)
    assert_rel_error(self, diff.max(), 0.0, 1e-3)
    # Analytic adjoint
    Ja = model.calc_gradient(var_in, var_out, mode='rev',
                             return_format='array')
    #print Ja
    if rel_error:
        diff = np.nan_to_num(abs(Ja - Jn) / Jn)
    else:
        diff = abs(Ja - Jn)
    assert_rel_error(self, diff.max(), 0.0, 1e-3)
Author: thearn, Project: CADRE, Lines: 34, Source: test_derivatives.py
Example 5: __lazy_cost_function__
def __lazy_cost_function__(H, Y):
    result = 0.0
    for i in range(0, Y.shape[0]):
        a = np.nan_to_num(np.log2(H[i]) * Y[i])
        b = np.nan_to_num((1. - Y[i]) * np.log2((1. - H[i])))
        result += a + b
    return result
Author: henryzord, Project: machine_learning_2015, Lines: 7, Source: logistic_regression.py
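The nan_to_num calls here handle the 0 * log(0) corner of cross-entropy: when H[i] is 0 or 1, np.log2 returns -inf, and multiplying -inf by 0 yields NaN; nan_to_num turns that NaN into 0, matching the convention 0 * log(0) = 0. A standalone illustration:

import numpy as np

h, y = 1.0, 1.0
with np.errstate(divide='ignore', invalid='ignore'):
    b = (1. - y) * np.log2(1. - h)   # 0 * log2(0) -> 0 * -inf -> nan

print(b)                  # nan
print(np.nan_to_num(b))   # 0.0, i.e. the 0 * log(0) := 0 convention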
Example 6: test_mflist
def test_mflist():
    ml = flopy.modflow.Modflow(model_ws=out_dir)
    dis = flopy.modflow.ModflowDis(ml, 10, 10, 10, 10)
    sp_data = {0: [[1, 1, 1, 1.0], [1, 1, 2, 2.0], [1, 1, 3, 3.0]],
               1: [1, 2, 4, 4.0]}
    wel = flopy.modflow.ModflowWel(ml, stress_period_data=sp_data)
    m4ds = ml.wel.stress_period_data.masked_4D_arrays
    sp_data = flopy.utils.MfList.masked4D_arrays_to_stress_period_data \
        (flopy.modflow.ModflowWel.get_default_dtype(), m4ds)
    assert np.array_equal(sp_data[0], ml.wel.stress_period_data[0])
    assert np.array_equal(sp_data[1], ml.wel.stress_period_data[1])
    # the last entry in sp_data (kper == 9) should equal the last entry
    # with actual data in the well file (kper == 1)
    assert np.array_equal(sp_data[9], ml.wel.stress_period_data[1])
    pth = os.path.join('..', 'examples', 'data', 'mf2005_test')
    ml = flopy.modflow.Modflow.load(os.path.join(pth, "swi2ex4sww.nam"),
                                    verbose=True)
    m4ds = ml.wel.stress_period_data.masked_4D_arrays
    sp_data = flopy.utils.MfList.masked4D_arrays_to_stress_period_data \
        (flopy.modflow.ModflowWel.get_default_dtype(), m4ds)
    # make a new wel file
    wel = flopy.modflow.ModflowWel(ml, stress_period_data=sp_data)
    flx1 = m4ds["flux"]
    flx2 = wel.stress_period_data.masked_4D_arrays["flux"]
    # NaN-masked cells would otherwise poison the sum comparison below
    flx1 = np.nan_to_num(flx1)
    flx2 = np.nan_to_num(flx2)
    assert flx1.sum() == flx2.sum()
Author: modflowpy, Project: flopy, Lines: 33, Source: t004_test_utilarray.py
Example 7: costf
def costf(self, train_data, train_targets):
    '''train_data should contain the training inputs and train_targets
    the target vectors. Evaluates the cross-entropy cost with the
    current set of data and parameters.'''
    Y = self.Y(train_data)
    # Inner nan_to_num guards log(0); outer one clears the resulting NaNs.
    J = -sum([dot(t, ly) for t, ly in zip(train_targets, np.nan_to_num(np.log(np.nan_to_num(Y))))])
    return J
Author: RationalAsh, Project: pattern_recognition_assignments, Lines: 7, Source: python_sol2.py
Example 8: write_data_array
def write_data_array(self, output_file_name, times):
    # find distance to next nearest time
    f = h5py.File(output_file_name, 'w')
    names = self.keys()
    lon = [self[n].meta['longitude'] for n in names]
    lat = [self[n].meta['latitude'] for n in names]
    positions = np.array([lon, lat]).transpose()
    f['position'] = positions
    f['name'] = names
    f['time'] = times
    f.create_dataset('mean', shape=(len(times), len(names), 3), dtype=float)
    f.create_dataset('mask', shape=(len(times), len(names)), dtype=bool)
    f.create_dataset('covariance', shape=(len(times), len(names), 3, 3), dtype=float)
    f.create_dataset('variance', shape=(len(times), len(names), 3), dtype=float)
    f.create_dataset('sigma', shape=(len(times), len(names), 3), dtype=float)
    for i, n in enumerate(names):
        logger.info('writing displacement data for station %s' % n)
        mean, sigma = self[n](times)
        f['mean'][:, i, :] = mean
        f['mask'][:, i] = np.any(np.isinf(sigma), axis=1)
        f['covariance'][:, i, :, :] = np.array([np.diag(v) for v in np.nan_to_num(sigma**2)])
        f['variance'][:, i, :] = np.nan_to_num(sigma**2)
        f['sigma'][:, i, :] = np.nan_to_num(sigma)
    f.close()
Author: treverhines, Project: GPS, Lines: 25, Source: station.py
Example 9: test_unity_3_withnan
def test_unity_3_withnan(self, boundary, nan_treatment,
                         normalize_kernel, preserve_nan):
    '''
    Test that a unit kernel with three elements returns the same array
    (except when boundary is None). This version includes a NaN value in
    the original array.
    '''
    x = np.array([1., np.nan, 3.], dtype='>f8')
    y = np.array([0., 1., 0.], dtype='>f8')
    z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                 normalize_kernel=normalize_kernel,
                 preserve_nan=preserve_nan)
    if preserve_nan:
        assert np.isnan(z[1])
    x = np.nan_to_num(z)
    z = np.nan_to_num(z)
    if boundary is None:
        assert np.all(z == np.array([0., 0., 0.], dtype='>f8'))
    else:
        assert np.all(z == x)
Author: SaraOgaz, Project: astropy, Lines: 26, Source: test_convolve.py
Example 10: decayCoefObjectiveFn
def decayCoefObjectiveFn(x, Y, EX2):
    """
    Computes the objective function for terms involving lambda in the M-step.
    Checked.
    Input:
        x: value of lambda
        Y: the matrix of observed values
        EX2: the matrix of values of EX2 estimated in the E-step.
    Returns:
        obj: value of objective function
        grad: gradient
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        y_squared = Y ** 2
        Y_is_zero = np.abs(Y) < 1e-6
        exp_Y_squared = np.exp(-x * y_squared)
        log_exp_Y = np.nan_to_num(np.log(1 - exp_Y_squared))
        exp_ratio = np.nan_to_num(exp_Y_squared / (1 - exp_Y_squared))
        obj = sum(sum(Y_is_zero * (-EX2 * x) + (1 - Y_is_zero) * log_exp_Y))
        grad = sum(sum(Y_is_zero * (-EX2) + (1 - Y_is_zero) * y_squared * exp_ratio))
        if type(obj) is np.float64:
            obj = -np.array([obj])
        if type(grad) is np.float64:
            grad = -np.array([grad])
        return obj, grad
Author: Yue-Jiang, Project: ZIFA, Lines: 26, Source: ZIFA.py
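Reading example 10 back into math (a reconstruction from the code, not quoted from the ZIFA paper): with the dropout indicator 1{Y_ij = 0} and decay coefficient lambda = x, the accumulated quantities (before the final negation) are

\[ \mathrm{obj}(\lambda) = \sum_{ij}\Big[\mathbf{1}\{Y_{ij}=0\}\,\big(-\mathbb{E}[X_{ij}^2]\,\lambda\big) + \mathbf{1}\{Y_{ij}\neq 0\}\,\log\big(1-e^{-\lambda Y_{ij}^2}\big)\Big] \]

\[ \mathrm{grad}(\lambda) = \sum_{ij}\Big[\mathbf{1}\{Y_{ij}=0\}\,\big(-\mathbb{E}[X_{ij}^2]\big) + \mathbf{1}\{Y_{ij}\neq 0\}\,\frac{Y_{ij}^2\,e^{-\lambda Y_{ij}^2}}{1-e^{-\lambda Y_{ij}^2}}\Big] \]

so grad is the derivative of obj with respect to lambda. The nan_to_num calls absorb the -inf from log(0) and the inf from the division at entries where Y_ij = 0; those entries are then masked out by the indicator anyway.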
Example 11: compare_csv
def compare_csv(csv1, csv2, column_headers=True, eps=1e-3):
    """
    """
    column_types = DTYPE_MAP[os.path.basename(csv1)]
    da1 = read_csv(csv1, column_headers, column_types)
    da2 = read_csv(csv2, column_headers, column_types)
    # compare column names
    ret = da1.dtype.names == da2.dtype.names
    # compare all string columns
    fields = [field for field in da1.dtype.fields
              if da1.dtype[field].kind == 'S']
    if fields:
        ret = ret and np.array_equal(da1[fields], da2[fields])
    # compare all integer fields
    fields = [field for field in da1.dtype.fields
              if da1.dtype[field].kind == 'i']
    if fields:
        ret = ret and np.array_equal(da1[fields], da2[fields])
    # compare all float type columns (with epsilon)
    fields = [field for field in da1.dtype.fields
              if da1.dtype[field].kind == 'f']
    # make copy of float view, so that we can safely replace nan's
    if fields:
        fa1 = np.nan_to_num(da1[fields].view((float, len(fields))))
        fa2 = np.nan_to_num(da2[fields].view((float, len(fields))))
        ret = ret and np.allclose(fa1, fa2, rtol=0, atol=eps)
    return ret
Author: BCCVL, Project: org.bccvl.compute, Lines: 27, Source: test_R.py
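The nan_to_num calls in example 11 matter because NaN compares unequal to everything, including itself, so np.allclose would report a mismatch wherever both files contain NaN in the same cell. Replacing NaN with 0 on both sides makes identical NaN patterns compare equal (at the cost of also equating a genuine 0 with a NaN). A standalone illustration:

import numpy as np

a = np.array([1.0, np.nan])
b = np.array([1.0, np.nan])

print(np.allclose(a, b))                                  # False: nan != nan
print(np.allclose(np.nan_to_num(a), np.nan_to_num(b)))    # True
# np.allclose(a, b, equal_nan=True) is an alternative that treats
# matching NaNs as equal without masking real zeros.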
Example 12: setup_measureCrosstalk
def setup_measureCrosstalk(self, isTrimmed=False, nSources=8):
    """Generate a simulated set of exposures and test the measured
    crosstalk matrix.

    Parameters
    ----------
    isTrimmed : `bool`, optional
        Should the simulation use trimmed or untrimmed raw
        exposures?
    nSources : `int`, optional
        Number of random simulated sources to generate in the
        simulated exposures.

    Returns
    -------
    coeffErr : `np.ndarray`
        Array of booleans indicating if the measured and expected
        crosstalk ratios are smaller than the measured uncertainty
        in the crosstalk ratio.
    """
    config = isrMock.IsrMockConfig()
    config.rngSeed = 12345
    config.doAddCrosstalk = True
    config.doAddSky = True
    config.doAddSource = True
    config.skyLevel = 0.0
    config.readNoise = 0.0
    mcConfig = MeasureCrosstalkConfig()
    mcConfig.threshold = 4000
    mct = MeasureCrosstalkTask(config=mcConfig)
    fullResult = []
    config.isTrimmed = isTrimmed
    # Generate simulated set of exposures.
    for idx in range(0, 10):
        config.rngSeed = 12345 + idx * 1000
        # Allow each simulated exposure to have nSources random
        # bright sources.
        config.sourceAmp = (np.random.randint(8, size=nSources)).tolist()
        config.sourceFlux = ((np.random.random(size=nSources) * 25000.0 + 20000.0).tolist())
        config.sourceX = ((np.random.random(size=nSources) * 100.0).tolist())
        config.sourceY = ((np.random.random(size=nSources) * 50.0).tolist())
        exposure = isrMock.CalibratedRawMock(config=config).run()
        result = mct.run(exposure, dataId=None)
        fullResult.append(result)
    # Generate the final measured CT ratios, uncertainties, pixel counts.
    coeff, coeffSig, coeffNum = mct.reduce(fullResult)
    # Needed because measureCrosstalk cannot find coefficients equal to 0.0
    coeff = np.nan_to_num(coeff)
    coeffSig = np.nan_to_num(coeffSig)
    # Compare result against expectation used to create the simulation.
    expectation = isrMock.CrosstalkCoeffMock().run()
    coeffErr = abs(coeff - expectation) <= coeffSig
    return coeffErr
Author: lsst, Project: ip_isr, Lines: 60, Source: test_measureCrosstalk.py
Example 13: compute_homogeneous_statistics
def compute_homogeneous_statistics(unit_statistic, unit_statistic_permutation, p_value_threshold, homogeneous_statistic='normalized MMD2u', verbose=True):
    """Compute p-values from permutations and create homogeneous statistics.
    """
    # Compute p-values for each unit
    print("Homogeneous statistic: %s" % homogeneous_statistic)
    print("Computing MMD2u thresholds for each unit with p-value=%f" % p_value_threshold)
    mmd2us_threshold = compute_statistic_threshold(unit_statistic_permutation, p_value_threshold)
    print("Computing actual p-values at each unit on the original (unpermuted) data")
    p_value = compute_pvalues_from_permutations(unit_statistic, unit_statistic_permutation)
    print("Computing the p-value of each permutation of each unit.")
    p_value_permutation = compute_pvalues_of_permutations(unit_statistic_permutation)
    # Massage the unit statistic so that it becomes homogeneous across
    # different units, for the cluster statistic computed later on.
    if homogeneous_statistic == '1-p_value':
        # (1 - p_value) is perfectly homogeneous across units because
        # the p-value is uniformly distributed, by definition.
        unit_statistic_permutation_homogeneous = 1.0 - p_value_permutation
        unit_statistic_homogeneous = 1.0 - p_value
    elif homogeneous_statistic == 'normalized MMD2u':
        # A z-score of MMD2u, appropriate if its distribution is normal
        # or approximately normal.
        mmd2us_mean = unit_statistic_permutation.mean(1)
        mmd2us_std = unit_statistic_permutation.std(1)
        unit_statistic_permutation_homogeneous = np.nan_to_num((unit_statistic_permutation - mmd2us_mean[:, None]) / mmd2us_std[:, None])
        unit_statistic_homogeneous = np.nan_to_num((unit_statistic - mmd2us_mean) / mmd2us_std)
    elif homogeneous_statistic == 'unit_statistic':
        # Use the unit statistic directly, assuming it is homogeneous
        # across units (which is not really true).
        unit_statistic_permutation_homogeneous = unit_statistic_permutation
        unit_statistic_homogeneous = unit_statistic
    elif homogeneous_statistic == 'p_value':
        # The p-value itself is also perfectly homogeneous across units,
        # again because it is uniformly distributed by definition.
        unit_statistic_permutation_homogeneous = p_value_permutation
        unit_statistic_homogeneous = p_value
    else:
        raise Exception
    return p_value, p_value_permutation, unit_statistic_homogeneous, unit_statistic_permutation_homogeneous
Author: smkia, Project: cbpktst, Lines: 31, Source: cbpktst.py
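In the 'normalized MMD2u' branch, a unit whose permutation statistics are all identical has zero standard deviation, so its z-score is 0/0 = NaN; nan_to_num maps such degenerate units to 0 instead of letting NaN propagate into the later cluster statistic. A standalone sketch of that branch:

import numpy as np

perm = np.array([[0.1, 0.3, 0.2],    # ordinary unit
                 [0.5, 0.5, 0.5]])   # degenerate unit: zero variance

mean = perm.mean(1)
std = perm.std(1)
with np.errstate(invalid='ignore'):
    z = (perm - mean[:, None]) / std[:, None]

print(np.nan_to_num(z)[1])   # [0. 0. 0.] instead of [nan nan nan]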
Example 14: forward_procedure
def forward_procedure(A, B, PI, O, wx, pubmsg):
    T = len(O)
    N = len(B)
    alpha = numpy.zeros((N, T))
    C = numpy.zeros(T)
    alpha[:, 0] = PI * [B[i](O[0]) for i in range(N)]
    C[0] = 1.0 / numpy.sum(alpha[:, 0])
    alpha[:, 0] = C[0] * alpha[:, 0]
    ITERATIONS = T * 4
    count = 2 * T
    for t in xrange(1, T):
        #B[i](O[:,t]) => numpy.prod(B[i](O[:,t]))
        #b_o = numpy.array([numpy.prod(B[i](O[:,t])) for i in range(N)])
        b_o = [B[i](O[t]) for i in range(N)]
        alpha[:, t] = numpy.dot(alpha[:, t-1], A) * b_o
        C[t] = numpy.nan_to_num(1.0 / numpy.sum(alpha[:, t]))
        alpha[:, t] = numpy.nan_to_num(alpha[:, t] * C[t])
        if numpy.sum(alpha[:, t]) == 0:
            alpha[:, t] = 0.0000000000001
        if wx: wx.CallAfter(pubmsg, "hmm", msg="Running HMM Method... %2.0f%%" % (100.0*(count-1)/(ITERATIONS)))
        count += 1
        #print t, O[:,t], alpha[:,t]
    log_Prob_Obs = - (numpy.sum(numpy.log(C)))
    return ((log_Prob_Obs, alpha, C))
Author: mad-tamu, Project: transit, Lines: 33, Source: hmm_tools.py
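The scaling in example 14 is the standard rescaled forward algorithm: each column of alpha is normalized by a factor C_t so that it sums to one, which prevents underflow on long sequences, and since the product of the scale factors equals 1/P(O | model), the log-likelihood is recovered from the scales alone:

\[ C_t = \frac{1}{\sum_i \alpha_t(i)}, \qquad \log P(O \mid \text{model}) = -\sum_t \log C_t \]

Here nan_to_num guards the degenerate case of a column summing to 0, where C_t would otherwise be inf (the explicit floor of 1e-13 on alpha handles the same case).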
Example 15: AvgQE
def AvgQE(x, y, ye, bin, bintype=1, hardlimit=0, binmax=None):
    '''Average values of scatter plot'''
    def HelpMe(kk, BR, ii):
        if len(kk) > 0:
            return np.average(kk)
        elif len(kk) == 0:
            return (BR[ii-1] + BR[ii]) / 2.
    x = array(x)
    y = array(y)
    w = 1 / array(ye)**2.
    BinNo, BinsReturned = ReturnBins(x, bin, bintype=bintype, hardlimit=hardlimit, binmax=binmax)
    #print BinNo, BinsReturned
    #BinSize = np.max(BinNo)+1
    BinSize = len(BinsReturned)
    xavg = [HelpMe(x[BinNo == i], BinsReturned, i) for i in range(1, BinSize)]
    xstd = [np.std(x[BinNo == i]) for i in range(1, BinSize)]
    yavg = [np.average(y[BinNo == i], weights=w[BinNo == i]) for i in range(1, BinSize)]
    ystd = [np.sqrt(1 / np.sum(w[BinNo == i])) for i in range(1, BinSize)]
    N = [y[BinNo == i].shape[0] for i in range(1, BinSize)]
    xavg = np.array(xavg)
    xstd = np.array(xstd)
    yavg = np.array(yavg)
    ystd = np.array(ystd)
    N = np.array(N)
    yavg = np.nan_to_num(yavg)
    ystd = np.nan_to_num(ystd)
    xstd = np.nan_to_num(xstd)
    N = np.nan_to_num(N)
    return xavg, xstd, yavg, ystd, N
Author: vvinuv, Project: kappabias, Lines: 31, Source: MyFunc.py
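Example 15 uses inverse-variance weights w_i = 1/sigma_i^2 per bin: the weighted mean is sum(w_i y_i) / sum(w_i) and its formal uncertainty is sqrt(1 / sum(w_i)); the trailing nan_to_num calls only clean up empty bins, where these expressions reduce to 0/0. A standalone check of a single bin:

import numpy as np

y = np.array([10.0, 12.0])
ye = np.array([1.0, 2.0])            # per-point uncertainties
w = 1 / ye**2

yavg = np.average(y, weights=w)      # 10.4: pulled toward the tighter point
ystd = np.sqrt(1 / np.sum(w))        # ~0.894, sigma of the weighted mean
print(yavg, ystd)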
Example 16: test_unity_3x3_withnan
def test_unity_3x3_withnan(self, boundary):
    '''
    Test that a 3x3 unit kernel returns the same array (except when
    boundary is None). This version includes a NaN value in the original
    array.
    '''
    x = np.array([[1., 2., 3.],
                  [4., np.nan, 6.],
                  [7., 8., 9.]], dtype='>f8')
    y = np.array([[0., 0., 0.],
                  [0., 1., 0.],
                  [0., 0., 0.]], dtype='>f8')
    z = convolve(x, y, boundary=boundary, nan_treatment='fill',
                 preserve_nan=True)
    assert np.isnan(z[1, 1])
    x = np.nan_to_num(z)
    z = np.nan_to_num(z)
    if boundary is None:
        assert np.all(z == np.array([[0., 0., 0.],
                                     [0., 0., 0.],
                                     [0., 0., 0.]], dtype='>f8'))
    else:
        assert np.all(z == x)
Author: SaraOgaz, Project: astropy, Lines: 28, Source: test_convolve.py
Example 17: data_prepare
def data_prepare():
    dataset1 = np.loadtxt('1.txt', dtype=float)
    dataset1 = np.nan_to_num(normalize_cols(dataset1))
    label1 = np.ones((len(dataset1), 1))        # generate label1
    dataset1 = np.concatenate((dataset1, label1), axis=1)
    dataset2 = np.loadtxt('2.txt', dtype=float)
    dataset2 = np.nan_to_num(normalize_cols(dataset2))
    label2 = 2 * np.ones((len(dataset2), 1))    # generate label2
    dataset2 = np.concatenate((dataset2, label2), axis=1)
    dataset3 = np.loadtxt('3.txt', dtype=float)
    dataset3 = np.nan_to_num(normalize_cols(dataset3))
    label3 = 3 * np.ones((len(dataset3), 1))    # generate label3
    dataset3 = np.concatenate((dataset3, label3), axis=1)
    dataset4 = np.loadtxt('4.txt', dtype=float)
    dataset4 = np.nan_to_num(normalize_cols(dataset4))
    label4 = 4 * np.ones((len(dataset4), 1))    # generate label4
    dataset4 = np.concatenate((dataset4, label4), axis=1)
    dataset5 = np.loadtxt('5.txt', dtype=float)
    dataset5 = np.nan_to_num(normalize_cols(dataset5))
    label5 = 5 * np.ones((len(dataset5), 1))    # generate label5
    dataset5 = np.concatenate((dataset5, label5), axis=1)
    # note: dataset5 appears three times, oversampling class 5
    dataset = np.concatenate((dataset1, dataset2, dataset3, dataset4,
                              dataset5, dataset5, dataset5), axis=0)
    random.shuffle(dataset)  # randomly shuffle the dataset
    return dataset
Author: sundyCoder, Project: CSK, Lines: 29, Source: code-cnn.py
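normalize_cols is not shown in example 17; a typical min-max implementation divides by the column range, which is 0 for a constant column and therefore fills that column with NaN, which is presumably why every load is wrapped in nan_to_num. A hypothetical sketch of such a helper (the name and behavior are assumptions, not taken from the CSK project):

import numpy as np

def normalize_cols(a):
    # Hypothetical min-max scaler: constant columns give 0/0 -> NaN.
    lo, hi = a.min(axis=0), a.max(axis=0)
    with np.errstate(invalid='ignore'):
        return (a - lo) / (hi - lo)

data = np.array([[1.0, 7.0],
                 [3.0, 7.0]])        # second column is constant

print(np.nan_to_num(normalize_cols(data)))
# [[0. 0.]
#  [1. 0.]]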
Example 18: test_unity_3x3x3_withnan
def test_unity_3x3x3_withnan(self, boundary, nan_treatment):
    '''
    Test that a 3x3x3 unit kernel returns the same array (except when
    boundary is None). This version includes a NaN value in the original
    array.
    '''
    x = np.array([[[1., 2., 1.], [2., 3., 1.], [3., 2., 5.]],
                  [[4., 3., 1.], [5., np.nan, 2.], [6., 1., 1.]],
                  [[7., 0., 2.], [8., 2., 3.], [9., 2., 2.]]], dtype='>f8')
    y = np.zeros((3, 3, 3), dtype='>f8')
    y[1, 1, 1] = 1.
    z = convolve(x, y, boundary=boundary, nan_treatment=nan_treatment,
                 preserve_nan=True)
    assert np.isnan(z[1, 1, 1])
    x = np.nan_to_num(z)
    z = np.nan_to_num(z)
    if boundary is None:
        assert np.all(z == np.array([[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                     [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]],
                                     [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]], dtype='>f8'))
    else:
        assert np.all(z == x)
Author: SaraOgaz, Project: astropy, Lines: 27, Source: test_convolve.py
Example 19: attractive
def attractive(self):
    # Potential energy and force for r < self.r_c
    r_low = np.arange(0, self.r_c, 0.02)
    with np.errstate(all='ignore'):
        v_att_low = np.zeros_like(r_low) - 1
        v_rep_low = np.zeros_like(r_low) + 4.0*((self.sigma/r_low) ** 12 - (self.sigma/r_low) ** 6 + (1.0 / 4.0))
        force_low = np.zeros_like(r_low) + 4.0*(12*(self.sigma**12)/(r_low**13) - (6*(self.sigma**6)/(r_low**7)))
    # Potential energy and force for r_c <= r <= r_c + w_c
    r_mid = np.arange(r_low[len(r_low)-1] + 0.02, self.r_c + self.w_c, 0.02)
    v_att_mid = np.zeros_like(r_mid) - (np.cos(np.pi*(r_mid - self.r_c)/(2.0*self.w_c)))**2
    v_rep_mid = np.zeros_like(r_mid)
    force_mid = np.zeros_like(r_mid) - np.cos(np.pi*(r_mid - self.r_c)/(2.0*self.w_c))*np.sin(np.pi*(r_mid - self.r_c)/(2.0*self.w_c))*(np.pi/self.w_c)
    # For r > r_c + w_c
    r_hi = np.arange(r_mid[len(r_mid)-1] + 0.02, 4.02, 0.02)
    v_att_hi = np.zeros_like(r_hi)
    v_rep_hi = np.zeros_like(r_hi)
    force_hi = np.zeros_like(r_hi)
    # Concatenate for the full attractive curve
    r = np.append(np.append(r_low, r_mid), r_hi)
    v_attractive = np.append(np.append((v_att_low + v_rep_low), (v_att_mid + v_rep_mid)), v_att_hi + v_rep_hi)
    force_attractive = np.append(np.append(force_low, force_mid), force_hi)
    # r = 0 divides by zero above; nan_to_num cleans up the inf/NaN entries
    force_attractive = np.nan_to_num(force_attractive)
    v_attractive = np.nan_to_num(v_attractive)
    r[0] = 1.0e-6
    return r, force_attractive, v_attractive
Author: rganti, Project: membranes, Lines: 30, Source: tabulated_energies.py
Example 20: linear_regression
def linear_regression(folder):
    statsfldr = folder + statsfldrext
    line_fit_log = [f for f in os.listdir(statsfldr) if re.search("linefit.txt", f)]
    if not len(line_fit_log):
        print "Calculating the linear regression\n"
        av, cols, rows = open_as_array(statsfldr + "/average" + ext)
        avg_array = av.ravel()  # ravel converts the 2d array to a 1d array
        avg_array = array(avg_array)
        # Find the linear model for SNR as a function of AVERAGE, y = snr, x = avg
        snr, cols, rows = open_as_array(statsfldr + "/SNR" + ext)
        snr_array = snr.ravel()
        yy = numpy.nan_to_num(snr_array)
        print len(yy)
        xx = numpy.nan_to_num(avg_array)
        # y = numpy.array([a[:5] for a in yy])
        s = 1  # keep only the middle s/1000 of the data
        length = len(yy)
        mini = (length - length * s / 1000) / 2
        maxi = length - mini
        print mini
        print maxi
        y = yy[mini:maxi]
        print len(y)
        x = xx[mini:maxi]
        y[y > 4095] = 4095  # clamp any absurdly high values to 4095
        x[x > 4095] = 4095  # clamp any absurdly high values to 4095
        print 'this part takes time'
        popt, pcov = curve_fit(fit_func_line, x, y)  # curve_fit needs a function to call to return the fit
        write_to_log('\t' + str(datetime.datetime.now()) + ' Calculated the linear regression\n')
        with open(statsfldr + "/linefit.txt", "w") as text_file:  # "a" is to append, "w" is to overwrite
            text_file.write(str(popt))
        plotting(x, y, popt)
    return
Author: joe-warga, Project: python, Lines: 33, Source: snr_tif_recur_without-regress.py
Note: The numpy.nan_to_num examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.