This article collects typical usage examples of the scipy.isnan function in Python. If you have been wondering what scipy.isnan does, how to call it, or what working examples look like, the curated samples below should help.
Twenty code examples of isnan are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python samples.
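Before diving into the examples, here is a minimal sketch of the idiom almost all of them share: isnan returns a boolean mask marking the NaN positions, and indexing with the inverted mask (~) filters the NaNs out. Note that scipy.isnan was simply a re-export of numpy.isnan; these top-level NumPy aliases are deprecated in modern SciPy (and removed in recent releases), so new code should call numpy.isnan directly.

import numpy as np

data = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
mask = np.isnan(data)    # [False  True False  True False]
clean = data[~mask]      # [1. 3. 5.] -- NaN entries dropped
print(mask, clean)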
Example 1: _prepareICContents
def _prepareICContents(self):
    allfilestr = ""
    topstr = "function ics_ = " + self.name + "_ics()\n"
    commentstr = "% Initial conditions for model " + self.name + "\n% Generated by PyDSTool for ADMC++ target\n\n"
    bodystr = "ics_ = [ ...\n"
    if self.initialconditions:
        icnames = sorted(self.initialconditions.keys())
        for i in range(len(icnames) - 1):
            # NaN initial conditions default to 0.0
            if isnan(self.initialconditions[icnames[i]]):
                val = str(0.0)
            else:
                val = str(self.initialconditions[icnames[i]])
            bodystr += val + ", ... % " + icnames[i] + "\n"
        if isnan(self.initialconditions[icnames[-1]]):
            val = str(0.0)
        else:
            val = str(self.initialconditions[icnames[-1]])
        bodystr += val + " % " + icnames[-1] + " ...\n"
    bodystr += "];\n"
    allfilestr = topstr + commentstr + bodystr
    return allfilestr
Author: BenjaminBerhault, Project: Python_Classes4MAD, Lines: 30, Source: ADMC_ODEsystem.py
Example 2: ProcessData
def ProcessData(data):
    data = data[::-1]
    n = 100
    growthOfThisData = 0
    fitPrice = FitPrice(data)
    if fitPrice == 0:
        return
    print("FitResult : " + str(fitPrice))
    for i in range(0, len(data) - n):
        # skip rows with NaN in any of the columns used below
        if not (sp.isnan(data[i][1]) or sp.isnan(data[i][4]) or sp.isnan(data[i][5])):
            if data[i][5] > 0:
                maxPrice = MaxPriceInNextNDays(data, i, n, fitPrice)
                minPrice = MinPriceInNextNDays(data, i, n, fitPrice)
                currentPrice = data[i][4] / fitPrice
                # bucket the normalized price into 0.05-wide bins
                key = (currentPrice // 0.05) * 0.05
                if key in maxPriceResult:
                    maxPriceResult[key] += maxPrice
                    numOfDataMax[key] += 1
                else:
                    maxPriceResult[key] = maxPrice
                    numOfDataMax[key] = 1
                if key in minPriceResult:
                    minPriceResult[key] += minPrice
                    numOfDataMin[key] += 1
                else:
                    minPriceResult[key] = minPrice
                    numOfDataMin[key] = 1
Author: StupidCodeGenerator, Project: PythonScripts, Lines: 27, Source: CurrentPrice_FitPrice.py
Example 3: setup
def setup(self, conductance, quantity, super_pore_conductance):
    r"""
    This setup provides the initial data for the solver from the provided
    properties.
    It also creates the matrices A and b.
    """
    # Assigning super_pore conductance for Neumann_group BC
    if super_pore_conductance is None:
        self.super_pore_conductance = []
    else:
        self.super_pore_conductance = super_pore_conductance
    # Providing conductance values for the algorithm from the Physics name
    if sp.size(self._phase) == 1:
        self._conductance = 'throat.' + conductance.split('.')[-1]
        self._quantity = 'pore.' + quantity.split('.')[-1]
        # Check health of conductance vector
        if self._phase.check_data_health(props=self._conductance).health:
            self['throat.conductance'] = self._phase[self._conductance]
        else:
            raise Exception('The provided throat conductance has problems')
    else:
        raise Exception('The linear solver accepts just one phase.')
    # Checking for the linear terms to be added to the coeff diagonal/RHS
    diag_added_data = sp.zeros(self.Np)
    RHS_added_data = sp.zeros(self.Np)
    for label in self.labels():
        if 'pore.source_' in label:
            source_name = 'pore.' + \
                (label.split('.')[-1]).replace('source_', '')
            matching_physics = [phys for phys in self._phase._physics
                                if source_name in phys.models.keys()]
            for phys in matching_physics:
                x = phys.models[source_name]['x']
                if x != '' and type(x) == str:
                    if x.split('.')[-1] != quantity.split('.')[-1]:
                        raise Exception('The quantity(pore.' +
                                        x.split('.')[-1] +
                                        '), provided by source term(' +
                                        source_name + '), is different ' +
                                        'from the main quantity(pore.' +
                                        quantity.split('.')[-1] + ') in ' +
                                        self.name + ' algorithm.')
            source_name = label.replace('pore.source_', '')
            if 'pore.source_linear_s1_' + source_name in self.props():
                prop1 = 'pore.source_linear_s1_' + source_name
                pores = ~sp.isnan(self[prop1])
                diag_added_data[pores] = diag_added_data[pores] + \
                    self[prop1][pores]
                prop2 = 'pore.source_linear_s2_' + source_name
                pores = ~sp.isnan(self[prop2])
                RHS_added_data[pores] = RHS_added_data[pores] + \
                    self[prop2][pores]
    # Creating A and b based on the conductance values and new linear terms
    logger.info('Creating Coefficient matrix for the algorithm')
    d = diag_added_data
    self.A = self._build_coefficient_matrix(modified_diag_pores=self.Ps,
                                            diag_added_data=d)
    logger.info('Creating RHS matrix for the algorithm')
    self.b = self._build_RHS_matrix(modified_RHS_pores=self.Ps,
                                    RHS_added_data=-RHS_added_data)
Author: TomTranter, Project: OpenPNM, Lines: 60, Source: __GenericLinearTransport__.py
Example 4: simulate
def simulate(self, X):
    """
    @arguments
      X -- 2d array of [sample_i][var_i] : float
    @return
      y -- 1d array of [sample_i] : float
    """
    op = self.nonlin_op
    ok = True
    y_lin = self.simple_base.simulate(X)
    if op == OP_ABS:
        ya = numpy.abs(y_lin)
    elif op == OP_MAX0:
        ya = numpy.clip(y_lin, 0.0, INF)
    elif op == OP_MIN0:
        ya = numpy.clip(y_lin, -INF, 0.0)
    elif op == OP_LOG10:
        # safeguard against: log() on values <= 0.0
        mn, mx = min(y_lin), max(y_lin)
        if mn <= 0.0 or scipy.isnan(mn) or mx == INF or scipy.isnan(mx):
            ok = False
        else:
            ya = numpy.log10(y_lin)
    elif op == OP_GTH:
        ya = numpy.clip(self.thr - y_lin, 0.0, INF)
    elif op == OP_LTH:
        ya = numpy.clip(y_lin - self.thr, 0.0, INF)
    else:
        raise ValueError('Unknown op %d' % op)
    if ok:  # could always do ** exp, but faster ways if exp is 0,1
        y = ya
    else:
        y = INF * numpy.ones(X.shape[0], dtype=float)
    return y
Author: pukkapies, Project: ffx, Lines: 30, Source: core.py
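A side note on the isnan guard in the OP_LOG10 branch above: Python's built-in min and max give position-dependent results on arrays containing NaN, because every comparison with NaN evaluates to False, so the explicit isnan check is genuinely needed. A small illustration:

import numpy as np

a = np.array([np.nan, 1.0, 2.0])
b = np.array([1.0, 2.0, np.nan])
print(min(a), min(b))               # nan 1.0 -- depends on where the NaN sits
print(np.nanmin(a), np.nanmin(b))   # 1.0 1.0 -- the NaN-aware alternative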
Example 5: zeroMeanUnitVarianz
def zeroMeanUnitVarianz(data=None, x=True):
    if x:
        return (data - data.mean(axis=0)) / data.std(axis=0)
    else:
        # compute mean/std over the non-NaN entries only
        mean = data[~sp.isnan(data)].mean(axis=0)
        std = data[~sp.isnan(data)].std(axis=0)
        return (data - mean) / std
Author: dominikgrimm, Project: easyGWASCore, Lines: 7, Source: transformations.py
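For illustration, a hypothetical call (not from the original project, and assuming the same legacy scipy-as-sp environment the snippet uses): with x=False the statistics come from the non-NaN entries only, so a NaN cannot poison the mean and standard deviation, although its position remains NaN in the output.

data = sp.array([1.0, 2.0, sp.nan, 4.0])
print(zeroMeanUnitVarianz(data, x=False))
# mean and std are taken from [1.0, 2.0, 4.0]; the NaN position stays NaN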
Example 6: _do_one_outer_iteration
def _do_one_outer_iteration(self, **kwargs):
    r"""
    One iteration of an outer iteration loop for an algorithm
    (e.g. time or parametric study)
    """
    # Checking for the necessary values in Picard algorithm
    nan_tol = sp.isnan(self['pore.source_tol'])
    nan_max = sp.isnan(self['pore.source_maxiter'])
    self._tol_for_all = sp.amin(self['pore.source_tol'][~nan_tol])
    self._maxiter_for_all = sp.amax(self['pore.source_maxiter'][~nan_max])
    if self._guess is None:
        self._guess = sp.zeros(self._coeff_dimension)
    t = 1
    step = 0
    # The main Picard loop
    while t > self._tol_for_all and step <= self._maxiter_for_all:
        X, t, A, b = self._do_inner_iteration_stage(guess=self._guess,
                                                    **kwargs)
        logger.info('tol for Picard source_algorithm in step ' +
                    str(step) + ' : ' + str(t))
        self._guess = X
        step += 1
    # Check for divergence
    self._steps = step
    if t >= self._tol_for_all and step > self._maxiter_for_all:
        raise Exception('Iterative algorithm for the source term reached '
                        'the maxiter: ' + str(self._maxiter_for_all) +
                        ' without achieving tol: ' +
                        str(self._tol_for_all))
    logger.info('Picard algorithm for source term converged!')
    self.A = A
    self.b = b
    self._tol_reached = t
    return X
Author: TomTranter, Project: OpenPNM, Lines: 34, Source: __GenericLinearTransport__.py
Example 7: main
def main():
    data = sp.genfromtxt('./data/web_traffic.tsv', delimiter='\t')
    x = data[:, 0]
    y = data[:, 1]
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    fp1 = sp.polyfit(x, y, 1)
    print('Model parameters for fp1 %s' % fp1)
    f1 = sp.poly1d(fp1)
    print('This is the error rate for fp1 %f' % error(f1, x, y))
    fp2 = sp.polyfit(x, y, 2)
    print('Model parameters for fp2 %s' % fp2)
    f2 = sp.poly1d(fp2)
    print('This is the error rate for fp2 %f' % error(f2, x, y))
    plt.scatter(x, y, color='pink')
    plt.title('My first impression')
    plt.xlabel('Time')
    plt.ylabel('#Hits')
    plt.xticks([w * 7 * 24 for w in range(10)], ['week %i' % w for w in range(10)])
    fx = sp.linspace(0, x[-1], 1000)
    plt.plot(fx, f1(fx), linewidth=3, color='cyan')
    plt.plot(fx, f2(fx), linewidth=3, linestyle='--', color='red')
    plt.legend(['d = %i' % f1.order, 'd = %i' % f2.order], loc='upper left')
    plt.autoscale(tight=True)
    plt.grid()
    plt.show()
Author: pombredanne, Project: PythonProjects, Lines: 30, Source: tutorial.py
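The error helper called above is defined elsewhere in the tutorial's source file; it presumably computes the summed squared residuals of a model over the samples, along these lines (a sketch, not the verbatim original):

def error(f, x, y):
    # total squared deviation between the model's predictions and the data
    return sp.sum((f(x) - y) ** 2)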
Example 8: getFluxes
def getFluxes(val_mat, direction_mat, dist_mat, duxdy_mat, out_flux, inc):
    import scipy
    import math
    speed_factor = 1
    angle_factor = 1
    inc_factor = 1
    dist_factor = 1
    strain_factor = 1
    # normalize the strain-rate matrix over its non-NaN entries
    duxdy_mat = duxdy_mat / sum(duxdy_mat[~scipy.isnan(duxdy_mat)])
    cell_angles = scipy.flipud(scipy.array([[-1 * math.pi / 4, -1 * math.pi / 2, -3 * math.pi / 4],
                                            [0, scipy.nan, math.pi],
                                            [math.pi / 4, math.pi / 2, 3 * math.pi / 4]]))
    # cell_angles = scipy.flipud(scipy.array([[3 * math.pi / 4, 1 * math.pi / 2, 1 * math.pi / 4], [math.pi, scipy.nan, 0], [-3 * math.pi / 4, -1 * math.pi / 2, -1 * math.pi / 4]]))
    cell_incs = scipy.array([[(inc**2 + inc**2)**0.5, inc, (inc**2 + inc**2)**0.5],
                             [inc, scipy.nan, inc],
                             [(inc**2 + inc**2)**0.5, inc, (inc**2 + inc**2)**0.5]])
    cell_incs = 1 / cell_incs**inc_factor
    cell_incs = cell_incs / sum(cell_incs[~scipy.isnan(cell_incs)])
    # weight incoming velocities by how well each neighbour cell points here
    vels_in = scipy.cos(cell_angles - direction_mat)
    vels_in[1, 1] = scipy.nan
    vels_in[vels_in < 0.00001] = scipy.nan
    vels_in = (vels_in**angle_factor * val_mat**speed_factor * dist_mat**dist_factor *
               (1 / duxdy_mat**strain_factor) * cell_incs)
    in_fluxes = vels_in / sum(vels_in[~scipy.isnan(vels_in)]) * out_flux
    return in_fluxes
Author: whyjz, Project: CARST, Lines: 27, Source: bedTopo5.py
Example 9: init_and_cleanup_data
def init_and_cleanup_data(path, delimiter):
    data = sp.genfromtxt(path, delimiter=delimiter)
    hours = data[:, 0]    # contains the hours
    webhits = data[:, 1]  # contains the number of web hits at a particular hour
    hours = hours[~sp.isnan(webhits)]
    webhits = webhits[~sp.isnan(webhits)]
    return (hours, webhits)
Author: ciah0704, Project: building-ml-systems-with-python, Lines: 7, Source: chapter1.py
Example 10: LDA_batch_normalization
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, ncomps):
    # this is actually the batch normalization method
    tmp_output_folder = os.path.join(output_folder, 'tmp')
    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)
    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset
    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA.
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0
    # For full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset)
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]
    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0
    # Save both the small matrix (for determining the components to remove) and the
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')
    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)
    # Map the batch to integers for matlab, and write out to a file so matlab can read
    # Note that yes, the batch_classes should match up with the filtered matrix, not
    # the full matrix
    batch_classes = get_batch_classes(dataset=[barcodes, filtered_conditions, filtered_matrix],
                                      sample_table=sample_table, batch_col=batch_col)
    class_tmp_filename = os.path.join(tmp_output_folder, 'classes.txt')
    writeList(batch_classes, class_tmp_filename)
    output_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix_lda_normalized.txt')
    runLDAMatlabFunc(filtered_matrix_filename=filtered_matrix_tmp_filename,
                     matrix_filename=full_matrix_tmp_filename,
                     class_filename=class_tmp_filename,
                     ncomps=ncomps,
                     output_filename=output_tmp_filename)
    # The X norm that is returned is the full matrix. In the future, we could add in
    # returning the components to remove so they can be visualized or applied to other
    # one-off datasets
    Xnorm = scipy.genfromtxt(output_tmp_filename)
    ## Dump the dataset out!
    #output_filename = os.path.join(mtag_effect_folder, 'scaleddeviation_full_mtag_lda_{}.dump.gz'.format(ncomps))
    #of = gzip.open(output_filename, 'wb')
    #cPickle.dump([barcodes, conditions, Xnorm], of)
    #of.close()
    return [barcodes, conditions, Xnorm]
Author: monprin, Project: BEAN-counter, Lines: 60, Source: svd_correction.py
Example 11: create_models
def create_models(self):
    import scipy, cPickle
    from stellarpop import tools
    from stellarpop.ndinterp import ndInterp

    index = {}
    shape = []
    axes = {}
    axes_index = 0
    for key in self.axes_names:
        index[key] = {}
        shape.append(self.axes[key]['points'].size)
        axes[axes_index] = self.axes[key]['eval']
        axes_index += 1
        for i in range(self.axes[key]['points'].size):
            index[key][self.axes[key]['points'][i]] = i
    models = {}
    model = scipy.empty(shape) * scipy.nan
    for f in self.filter_names:
        models[f] = {}
        for z in self.redshifts:
            models[f][z] = model.copy()
    for file in self.files:
        f = open(file, 'rb')
        data = cPickle.load(f)
        wave = cPickle.load(f)
        f.close()
        for key in data.keys():
            obj = data[key]
            jj = key
            spec = obj['sed']
            ind = []
            for key in self.axes_names:
                try:
                    ind.append([index[key][obj[key]]])
                except:
                    print key, index[key]
                    print obj
                    df  # undefined name: raises NameError, apparently a deliberate debug halt
            for f in self.filter_names:
                for i in range(len(self.redshifts)):
                    z = self.redshifts[i]
                    # correction is the units correction factor
                    correction = self.corrections[i]
                    sed = [wave, spec * correction]
                    mag = tools.ABFilterMagnitude(self.filters[f], sed, z)
                    if scipy.isnan(mag):
                        df  # undefined name: halts here on a NaN magnitude
                    models[f][z][ind] = mag
    for f in self.filter_names:
        for z in self.redshifts:
            model = models[f][z].copy()
            if scipy.isnan(model).any():
                models[f][z] = None
            else:
                models[f][z] = ndInterp(axes, model)
    return models
Author: bnord, Project: LensPop, Lines: 60, Source: spsmodel.py
Example 12: load_data
def load_data():
    datas = sp.genfromtxt("web_traffic.tsv", delimiter='\t')
    print(datas[:10])
    x = datas[:, 0]
    y = datas[:, 1]
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    return x, y
Author: xiholix, Project: buildingmachinelearning, Lines: 8, Source: first.py
Example 13: __call__
def __call__(self, x1, x2, d1=[sp.NaN], d2=[sp.NaN], gets=False):
    # encode each derivative-index list as a single integer (base-8 digits);
    # a leading NaN means "no derivative requested"
    D1 = 0 if sp.isnan(d1[0]) else int(sum([8**x for x in d1]))
    D2 = 0 if sp.isnan(d2[0]) else int(sum([8**x for x in d2]))
    self.smodel = sp.empty(1)
    r = libGP.k(x1.ctypes.data_as(ctpd), x2.ctypes.data_as(ctpd),
                cint(D1), cint(D2), cint(self.dim),
                self.ihyp.ctypes.data_as(ctpd), cint(self.Kindex),
                self.smodel.ctypes.data_as(ctpd))
    if gets:
        return [r, self.smodel[0]]
    return r
Author: markm541374, Project: GPc, Lines: 8, Source: GPdc.py
Example 14: preProcess
def preProcess(self,
               periodF0=0.06,
               deltaF_div_F0=True,
               max_threshold=None,
               min_threshold=None,
               nan_to_zeros=True,
               detrend=False,
               #~ band_filter = None,
               gaussian_filter=None,
               f1=None,
               f2=None,
               **kargs):
    images = self.images
    if deltaF_div_F0:
        # baseline F0 is the mean over the initial periodF0 window
        ind = self.t() <= self.t_start + periodF0
        m0 = mean(images[ind, :, :], axis=0)
        images = (images - m0) / m0 * 1000.
    if max_threshold is not None:
        #~ images[images>max_threshold] = max_threshold
        images[images > max_threshold] = nan
    if min_threshold is not None:
        #~ images[images<min_threshold] = min_threshold
        images[images < min_threshold] = nan
    if nan_to_zeros:
        images[isnan(images)] = 0.
    if detrend and not nan_to_zeros:
        # remember which pixels were NaN so they can be restored after detrending
        m = any(isnan(images), axis=0)
        images[isnan(images)] = 0.
        images = signal.detrend(images, axis=0)
        images[:, m] = nan
    elif detrend and nan_to_zeros:
        images = signal.detrend(images, axis=0)
    if gaussian_filter is not None:
        images = ndimage.gaussian_filter(images, (0, gaussian_filter, gaussian_filter))
    if f1 is not None or f2 is not None:
        from ..computing.filter import fft_passband_filter
        if f1 is None: f1 = 0.
        if f2 is None: f2 = inf
        nq = self.sampling_rate / 2.
        images = fft_passband_filter(images, f_low=f1 / nq, f_high=f2 / nq, axis=0)
    return images
Author: AntoineValera, Project: SynaptiQs, Lines: 58, Source: imageserie.py
Example 15: test_returns_nan_if_one_spike_train_is_empty
def test_returns_nan_if_one_spike_train_is_empty(self):
    empty = create_empty_spike_train()
    non_empty = neo.SpikeTrain(sp.array([1.0]) * pq.s, t_stop=2.0 * pq.s)
    k = sigproc.GaussianKernel()
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        actual = stm.schreiber_similarity((empty, non_empty), k)
    self.assertTrue(sp.isnan(actual[0, 0]))
    self.assertTrue(sp.isnan(actual[0, 1]))
    self.assertTrue(sp.isnan(actual[1, 0]))
Author: NeuroArchive, Project: spykeutils, Lines: 10, Source: test_spike_train_metrics.py
Example 16: get_data
def get_data():
    data = sp.genfromtxt("input/web_traffic.tsv", delimiter="\t")
    x = data[:, 0]
    y = data[:, 1]
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    return (x, y)
Author: toothywalrus, Project: nlp, Lines: 10, Source: main.py
Example 17: get_relative_prices
def get_relative_prices(walking_time, smoothed_prices):
    x = walking_time.flatten()
    y = smoothed_prices.flatten()
    # drop positions where either input is NaN before fitting the spline
    mask = sp.isnan(x) | sp.isnan(y)
    spline = sp.interpolate.UnivariateSpline(x[~mask], y[~mask], s=len(x))
    v = spline(x)
    # relative price = observed price minus the spline trend over walking time
    rel = (y - v).reshape(walking_time.shape)
    return rel
Author: andyljones, Project: house-price-map, Lines: 11, Source: main.py
Example 18: LDA_batch_normalization
def LDA_batch_normalization(dataset, sample_table, batch_col, output_folder, n_comps):
    # this is actually the batch normalization method
    tmp_output_folder = os.path.join(output_folder, 'tmp')
    if not os.path.isdir(tmp_output_folder):
        os.makedirs(tmp_output_folder)
    barcodes, filtered_conditions, filtered_matrix, conditions, matrix = dataset
    # Remove any remaining NaNs and Infs from the filtered matrix - they would screw
    # up the LDA.
    filtered_matrix[scipy.isnan(filtered_matrix)] = 0
    filtered_matrix[scipy.isinf(filtered_matrix)] = 0
    # For full matrix, also eliminate NaNs and Infs, BUT preserve the indices and values
    # so they can be added back into the matrix later (not implemented yet, and may never
    # be - there should no longer be NaNs and Infs in the dataset)
    # The NaNs and Infs will mess up the final step of the MATLAB LDA script, which uses
    # matrix multiplication to remove the specified number of components!
    matrix_nan_inds = scipy.isnan(matrix)
    matrix_nan_vals = matrix[matrix_nan_inds]
    matrix_inf_inds = scipy.isinf(matrix)
    matrix_inf_vals = matrix[matrix_inf_inds]
    matrix[matrix_nan_inds] = 0
    matrix[matrix_inf_inds] = 0
    # Save both the small matrix (for determining the components to remove) and the
    # full matrix for the matlab script
    filtered_matrix_tmp_filename = os.path.join(tmp_output_folder, 'nonreplicating_matrix.txt')
    full_matrix_tmp_filename = os.path.join(tmp_output_folder, 'full_matrix.txt')
    np.savetxt(filtered_matrix_tmp_filename, filtered_matrix)
    np.savetxt(full_matrix_tmp_filename, matrix)
    # Map batch classes to integers
    batch_classes = get_batch_classes(dataset=[barcodes, filtered_conditions, filtered_matrix],
                                      sample_table=sample_table, batch_col=batch_col)
    # Check the number of classes and limit n_comps accordingly
    a = [x > 0 for x in np.sum(np.absolute(filtered_matrix), axis=0)]
    classes = np.asarray([batch_classes[i] for i in range(len(batch_classes)) if a[i]])
    n_samples = filtered_matrix.shape[0]
    n_classes = len(np.unique(classes))
    if n_samples == n_classes:
        print("ERROR: The number of samples is equal to the number of classes. Exiting")
    if n_classes <= n_comps:
        print("Fewer classes, " + str(n_classes) + ", than components. Setting components to " + str(n_classes - 1))
        n_comps = n_classes - 1
    # Run LDA
    #Xnorm = scikit_lda(filtered_matrix, matrix, batch_classes, n_comps)
    Xnorm = outer_python_lda(filtered_matrix, matrix, batch_classes, n_comps)
    return [barcodes, conditions, Xnorm, n_comps]
Author: csbio, Project: BEAN-counter, Lines: 54, Source: batch_correction.py
Example 19: get_cleaned_data
def get_cleaned_data():
    data = sp.genfromtxt(os.path.join(DATA_DIR, 'web_traffic.tsv'), delimiter='\t')
    x = data[:, 0]
    y = data[:, 1]
    print("Number of invalid entries: {}".format(sp.sum(sp.isnan(y))))
    print("Removing invalid entries.")
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    print("Number of invalid entries: {}".format(sp.sum(sp.isnan(y))))
    return x, y
Author: achiku, Project: syakyou, Lines: 11, Source: webstats.py
Example 20: load_samples
def load_samples(fname):
    """ Load training sample dataset """
    data = sp.genfromtxt(fname, delimiter='\t')
    x = data[:, 0]
    y = data[:, 1]
    print('Totally %i entries while %i invalid entries.' % (sp.shape(data)[0], sp.sum(sp.isnan(y))))
    x = x[~sp.isnan(y)]
    y = y[~sp.isnan(y)]
    return (x, y)
Author: kamidox, Project: machine-learning, Lines: 11, Source: analyze.py
Note: The scipy.isnan examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any redistribution or reuse should follow the corresponding project's license. Please do not reproduce without permission.