This article collects typical usage examples of the numpy.float_ function in Python. If you are wondering what float_ does, how to call it, or what real-world usage looks like, the curated code samples below may help.
The following shows 20 code examples of the float_ function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
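
Before the collected excerpts, here is a minimal, self-contained sketch of what np.float_ is: an alias for the double-precision scalar type np.float64 (the alias was removed in NumPy 2.0, where np.float64 should be used directly). The values below are illustrative only.

import numpy as np

# np.float_ is an alias for np.float64 (removed in NumPy 2.0; use np.float64 there).
x = np.float_("3.14")                 # scalar conversion from a string
arr = np.float_([1, 2, 3])            # converting a list returns a float64 ndarray
zeros = np.zeros(4, dtype=np.float_)  # used as a dtype argument

print(type(x), arr.dtype, zeros.dtype)  # <class 'numpy.float64'> float64 float64
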
Example 1: assertDataAlmostEqual
def assertDataAlmostEqual(self, data, reference_filename, **kwargs):
    reference_path = self.get_result_path(reference_filename)
    if self._check_reference_file(reference_path):
        kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
        with open(reference_path, 'r') as reference_file:
            stats = json.load(reference_file)
            self.assertEqual(stats.get('shape', []), list(data.shape))
            self.assertEqual(stats.get('masked', False),
                             ma.is_masked(data))
            nstats = np.array((stats.get('mean', 0.), stats.get('std', 0.),
                               stats.get('max', 0.), stats.get('min', 0.)),
                              dtype=np.float_)
            if math.isnan(stats.get('mean', 0.)):
                self.assertTrue(math.isnan(data.mean()))
            else:
                data_stats = np.array((data.mean(), data.std(),
                                       data.max(), data.min()),
                                      dtype=np.float_)
                self.assertArrayAllClose(nstats, data_stats, **kwargs)
    else:
        self._ensure_folder(reference_path)
        stats = collections.OrderedDict([
            ('std', np.float_(data.std())),
            ('min', np.float_(data.min())),
            ('max', np.float_(data.max())),
            ('shape', data.shape),
            ('masked', ma.is_masked(data)),
            ('mean', np.float_(data.mean()))])
        with open(reference_path, 'w') as reference_file:
            reference_file.write(json.dumps(stats))
Author: marqh, Project: iris, Lines: 30, Source: __init__.py
Example 2: SmartGen
def SmartGen(device_id, k, device_user, generate_device_user_likelihood):
    # AllCookie and HandleCookie are module-level dicts in the original project.
    user_candidate = []
    precision = []
    candidates_n = estimation = candidates_l = 0.0
    users = device_user.get(device_id)
    # Fix: the original `likelihood = estimation_list = []` aliased both names to one list.
    likelihood = []
    estimation_list = []
    for user in users:
        likelihood.append(generate_device_user_likelihood.get(user))
    # Indices of users sorted by decreasing likelihood (the original used `~np.argsort(...)`,
    # which bit-flips the indices and does not select the highest-likelihood users).
    order = np.argsort(likelihood)[::-1]
    for k in range(1, len(users)):
        for i in order[:k]:
            user = users[i]
            user_candidate.append(user)
            candidates_n += len(AllCookie.get(user))
            candidates_l += generate_device_user_likelihood.get(user)
        for i in order[:k]:
            user = users[i]
            precision = np.float_(len(AllCookie.get(user))) / np.float_(candidates_n)
            estimation += (generate_device_user_likelihood.get(user)*1.0 / candidates_l) * ((1.25*precision) / (0.25*precision + 1.0))
        estimation_list.append(estimation)
    # Fix: pick the k with the largest estimation value (the original assigned an index array here,
    # which cannot be used as a slice bound).
    k_final = int(np.argmax(estimation_list)) + 1
    cookie_final = []
    for i in order[:k_final]:
        user = users[i]
        cookie_candidates = HandleCookie.get(user)
        for cookie in cookie_candidates:
            cookie_final.append(cookie)
    return cookie_final
Author: Gearchen, Project: cross_device_demo, Lines: 27, Source: data_insight.py
Example 3: compute_edf_distance
def compute_edf_distance(support1, support2):
    # Build a common set of bin edges covering both supports.
    bin_edges = numpy.empty((support1.shape[0] + support2.shape[0] + 2,))
    bin_edges[0] = -numpy.inf
    bin_edges[-1] = numpy.inf
    bin_edges[1 : 1 + support1.shape[0]] = support1
    bin_edges[1 + support1.shape[0] : 1 + support1.shape[0] + support2.shape[0]] = support2
    bin_edges = numpy.sort(bin_edges)
    bin_edges = numpy.unique(bin_edges)  # numpy.unique also sorts, so the explicit sort above is redundant but harmless
    # Histogram both samples on the shared edges and compare the cumulative distributions.
    bin_counts1_i, bins1 = numpy.histogram(support1, bin_edges)
    bin_counts2_i, bins2 = numpy.histogram(support2, bin_edges)
    bin_counts1 = numpy.float_(bin_counts1_i)
    bin_counts2 = numpy.float_(bin_counts2_i)
    sum_counts1 = numpy.cumsum(bin_counts1) / numpy.sum(bin_counts1)
    sum_counts2 = numpy.cumsum(bin_counts2) / numpy.sum(bin_counts2)
    delta = numpy.abs(sum_counts1 - sum_counts2)
    dist = numpy.max(delta)
    return dist
Author: bennihepp, Project: yaca, Lines: 32, Source: utils.py
Example 4: __init__
def __init__(self, eps_par=numpy.float_(0.),
             mask_var=numpy.float_(1), xlabel=None, ylabel=None):
    super(MyEstimator, self).__init__()
    # self.params = dict()
    # self.params['eps_par'] = eps_par
    # self.params['mask_var'] = mask_var
    # these are the parameters
    self.eps_par = eps_par
    self.mask_var = mask_var
    self.catastrophe = None
    self.dm = None
    self.max_distance = None
    self.mask_scale = None
    self.outlier_cut = 0.95
    self.optimize_frac = 0.1
    self.xlabel = xlabel
    self.ylabel = ylabel
    self.zmin = 0.6
    self.zmax = 1.6
    self.oiimin = 6e-17
Author: AlexGKim, Project: diffusionMap, Lines: 27, Source: oii.py
Example 5: Init
def Init(self):
    # boundary and domain conditions
    self.lat = io.read_PETSc_vec(self.config["-Metos3DBoundaryConditionInputDirectory"][0] + self.config["-Metos3DLatitudeFileFormat"][0])
    dz = io.read_PETSc_vec(self.config["-Metos3DDomainConditionInputDirectory"][0] + self.config["-Metos3DLayerHeightFileFormat"][0])
    z = io.read_PETSc_vec(self.config["-Metos3DDomainConditionInputDirectory"][0] + self.config["-Metos3DLayerDepthFileFormat"][0])
    self.lsm = io.read_PETSc_mat(self.config["-Metos3DProfileInputDirectory"][0] + self.config["-Metos3DProfileMaskFile"][0])
    self.fice = np.zeros((self.profiles, np.int_(self.config["-Metos3DIceCoverCount"][0])), dtype=np.float_)
    for i in range(np.int_(self.config["-Metos3DIceCoverCount"][0])):
        self.fice[:, i] = io.read_PETSc_vec(self.config["-Metos3DBoundaryConditionInputDirectory"][0] + (self.config["-Metos3DIceCoverFileFormat"][0] % i))
    self.bc = np.zeros(2, dtype=np.float_)
    self.dc = np.zeros((self.ny, 2), dtype=np.float_)
    self.dc[:, 0] = z
    self.dc[:, 1] = dz
    self.u = np.array(self.config["-Metos3DParameterValue"], dtype=np.float_)
    self.dt = np.float_(self.config["-Metos3DTimeStep"][0])
    self.nspinup = np.int_(self.config["-Metos3DSpinupCount"][0])
    self.ntimestep = np.int_(self.config["-Metos3DTimeStepCount"][0])
    self.matrixCount = np.int_(self.config["-Metos3DMatrixCount"][0])
    # POD and DEIM bases for the N and DOP tracers
    self.U_PODN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixPODFileFormat"][0])
    self.U_PODDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixPODFileFormat"][0])
    self.U_DEIMN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixDEIMFileFormat"][0])
    self.U_DEIMDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixDEIMFileFormat"][0])
    self.DEIM_IndicesN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DDEIMIndicesFileFormat"][0])
    self.DEIM_IndicesDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DDEIMIndicesFileFormat"][0])
    # reduced transport matrices
    self.AN = np.ndarray(shape=(self.matrixCount, self.U_PODN.shape[1], self.U_PODN.shape[1]), dtype=np.float_, order='C')
    self.ADOP = np.ndarray(shape=(self.matrixCount, self.U_PODDOP.shape[1], self.U_PODDOP.shape[1]), dtype=np.float_, order='C')
    for i in range(0, self.matrixCount):
        self.AN[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixReducedFileFormat"][0] % i)
        self.ADOP[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixReducedFileFormat"][0] % i)
    self.PN = np.ndarray(shape=(self.matrixCount, self.U_PODN.shape[1], self.U_DEIMN.shape[1]), dtype=np.float_, order='C')
    self.PDOP = np.ndarray(shape=(self.matrixCount, self.U_PODDOP.shape[1], self.U_DEIMDOP.shape[1]), dtype=np.float_, order='C')
    for i in range(0, self.matrixCount):
        self.PN[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixReducedDEINFileFormat"][0] % i)
        self.PDOP[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixReducedDEINFileFormat"][0] % i)
    # precompute the interpolation indices for a year
    [self.interpolation_a, self.interpolation_b, self.interpolation_j, self.interpolation_k] = util.linearinterpolation(2880, 12, 0.0003472222222222)
    self.yN = np.ones(self.ny, dtype=np.float_) * np.float_(self.config["-Metos3DTracerInitValue"])[0]
    self.yDOP = np.ones(self.ny, dtype=np.float_) * np.float_(self.config["-Metos3DTracerInitValue"])[1]
    self.y_redN = np.dot(self.U_PODN.T, self.yN)
    self.y_redDOP = np.dot(self.U_PODDOP.T, self.yDOP)
    self.qN = np.zeros(self.DEIM_IndicesN.shape[0], dtype=np.float_)
    self.qDOP = np.zeros(self.DEIM_IndicesDOP.shape[0], dtype=np.float_)
    self.J, self.PJ = util.generateIndicesForNonlinearFunction(self.lsm, self.profiles, self.ny)
    self.out_pathN = self.config["-Metos3DTracerOutputDirectory"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] + self.config["-Metos3DTracerOutputFile"][0]
    self.out_pathDOP = self.config["-Metos3DTracerOutputDirectory"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] + self.config["-Metos3DTracerOutputFile"][1]
    self.monitor_path = self.config["-Metos3DTracerMointorDirectory"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] + self.config["-Metos3DTracerOutputFile"][0]
Author: neeljp, Project: pod_deim, Lines: 60, Source: ROM_ndop.py
Example 6: rotatev_aroundz
def rotatev_aroundz(p, a):
    # rotate vector p around the z axis by angle a (radians)
    x = p[0]
    y = p[1]
    z = p[2]
    cs = np.float_(np.cos(a))
    cn = np.float_(np.sin(a))
    return np.float_(np.array([cs*x - cn*y, cn*x + cs*y, z]))
Author: anvlason, Project: dtm_py, Lines: 7, Source: retina_model_v2.py
Example 7: rotatev_aroundx
def rotatev_aroundx(p, a):
    # rotate vector p around the x axis by angle a (radians)
    x = p[0]
    y = p[1]
    z = p[2]
    cs = np.float_(np.cos(a))
    cn = np.float_(np.sin(a))
    return np.float_(np.array([x, cs*y - cn*z, cn*y + cs*z]))
Author: anvlason, Project: dtm_py, Lines: 7, Source: retina_model_v2.py
Example 8: loadDevices
def loadDevices(trainfile, DictHandle, DictDevice, DictDevType, DictDevOs, DictCountry, DictAnnC1, DictAnnC2):
    # First pass: count the rows (Python 2 csv idioms: binary mode and .next()).
    NumRows = 0
    with open(trainfile, 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        spamreader.next()  # skip the header line
        for row in spamreader:
            NumRows = NumRows + 1
    XDevices = np.zeros((NumRows, 11))
    # Second pass: encode categorical columns via the lookup dicts and cast numeric columns with np.float_.
    NumRows = 0
    with open(trainfile, 'rb') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        spamreader.next()
        for row in spamreader:
            XDevices[NumRows, 0] = DictHandle[row[0]]
            XDevices[NumRows, 1] = DictDevice[row[1]]
            XDevices[NumRows, 2] = DictDevType[row[2]]
            XDevices[NumRows, 3] = DictDevOs[row[3]]
            XDevices[NumRows, 4] = DictCountry[row[4]]
            XDevices[NumRows, 5] = np.float_(row[5])
            XDevices[NumRows, 6] = DictAnnC1[row[6]]
            XDevices[NumRows, 7] = DictAnnC2[row[7]]
            XDevices[NumRows, 8] = np.float_(row[8])
            XDevices[NumRows, 9] = np.float_(row[9])
            XDevices[NumRows, 10] = np.float_(row[10])
            NumRows = NumRows + 1
    return XDevices
Author: mlifemaker, Project: ICDM2015, Lines: 31, Source: CookieLibrary.py
Example 9: pval_KalZtest
def pval_KalZtest(n1, N1, n2, N2):
    """Compute a p-value using the Kal Z-test for count data.

    The Z statistic follows Kal et al., 1999, Mol Biol Cell 10:1859:
        Z = (p1 - p2) / sqrt( p0 * (1 - p0) * (1/N1 + 1/N2) )
    where p1 = n1/N1, p2 = n2/N2, and p0 = (n1 + n2)/(N1 + N2).
    The null hypothesis is rejected if |Z| > Z_(a/2) for significance
    level a; here the two-sided p-value itself is returned.
    """
    if n1 == 0 and n2 == 0:
        return 1.0
    n1 = np.float_(n1)
    N1 = np.float_(N1)
    n2 = np.float_(n2)
    N2 = np.float_(N2)
    p0 = (n1 + n2) / (N1 + N2)
    p1 = n1 / N1
    p2 = n2 / N2
    Z = (p1 - p2) / np.sqrt(p0 * (1 - p0) * ((1/N1) + (1/N2)))
    pval = 2 * sp.stats.norm.cdf(-1 * abs(Z))
    return pval
Author: hjanime, Project: pytools, Lines: 29, Source: countdata.py
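
As a quick illustration of the formula above, a direct call with hypothetical counts (numbers chosen only for demonstration, and assuming the function above is in scope) works out as follows:

import numpy as np
import scipy as sp
import scipy.stats

# Hypothetical counts: 30 of 1000 tags in library 1, 10 of 1000 in library 2.
# p1 = 0.03, p2 = 0.01, p0 = 0.02, so
# Z = 0.02 / sqrt(0.02 * 0.98 * 0.002) ~ 3.19 and the two-sided p-value ~ 0.0014.
p = pval_KalZtest(30, 1000, 10, 1000)
print(p)
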
Example 10: matrix_mul
def matrix_mul(X1, X2, shard_size=5000):
    """Calculate a matrix multiplication for big matrices.

    X1 and X2 are sliced into pieces with shard_size rows (columns),
    the pieces are multiplied together, and the partial results are
    concatenated back to the proper size.
    """
    X1 = np.float_(X1)
    X2 = np.float_(X2)
    X1_shape = X1.shape
    X2_shape = X2.shape
    assert X1_shape[1] == X2_shape[0]
    X1_iter = X1_shape[0] // shard_size + 1
    X2_iter = X2_shape[1] // shard_size + 1
    all_result = np.zeros((1,))
    for X1_id in range(X1_iter):
        result = np.zeros((1,))
        for X2_id in range(X2_iter):
            partial_result = np.matmul(
                X1[X1_id * shard_size:min((X1_id + 1) *
                                          shard_size, X1_shape[0]), :],
                X2[:, X2_id * shard_size:min((X2_id + 1) *
                                             shard_size, X2_shape[1])])
            # calculate matrix multiplication on slices
            if result.size == 1:
                result = partial_result
            else:
                result = np.concatenate((result, partial_result), axis=1)
            # concatenate the slices together
            del partial_result
        if all_result.size == 1:
            all_result = result
        else:
            all_result = np.concatenate((all_result, result), axis=0)
        del result
    return all_result
Author: ktaneishi, Project: deepchem, Lines: 34, Source: transformers.py
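
A minimal call under illustrative shapes (array sizes chosen only for demonstration; the function above is assumed to be in scope):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((7000, 300))
B = rng.standard_normal((300, 6000))

# With shard_size=5000 the 7000x300 @ 300x6000 product is assembled from four shards.
C = matrix_mul(A, B, shard_size=5000)
print(C.shape)                  # (7000, 6000)
print(np.allclose(C, A @ B))    # True
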
Example 11: linearinterpolation
def linearinterpolation(nstep, ndata, dt):
    """Compute weights for linear interpolation between transport matrices.

    Returns the weights alpha, beta and the indices of the matrices.

    Parameters
    ----------
    nstep : int
        Number of time steps.
    ndata : int
        Number of matrices.
    dt : float
        Length of one time step (fraction of a year).

    Returns
    -------
    alpha, beta : array
        Coefficients for the interpolation.
    jalpha, jbeta : array
        Indices of the two matrices to interpolate between.
    """
    t = np.zeros(nstep, dtype=np.float_)
    for i in range(nstep):
        t[i] = np.fmod(0 + i*dt, 1.0)
    # Note: these two assignments are immediately overwritten below and have no effect.
    beta = np.array(nstep, dtype=np.float_)
    alpha = np.array(nstep, dtype=np.float_)
    w = t * ndata + 0.5
    beta = np.float_(np.fmod(w, 1.0))
    alpha = np.float_(1.0 - beta)
    jalpha = np.fmod(np.floor(w) + ndata - 1.0, ndata).astype(int)
    jbeta = np.fmod(np.floor(w), ndata).astype(int)
    return alpha, beta, jalpha, jbeta
Author: neeljp, Project: pod_deim, Lines: 30, Source: util.py
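
For reference, Example 5 above calls this helper with 2880 steps per year, 12 monthly matrices, and dt = 1/2880; a small sanity check (output values are what the formula gives, shown here for illustration) might look like this:

import numpy as np

alpha, beta, jalpha, jbeta = linearinterpolation(2880, 12, 1.0 / 2880)

# At time step 0 the weights straddle matrices 11 and 0:
print(alpha[0], beta[0], jalpha[0], jbeta[0])           # 0.5 0.5 11 0
# At step 120 the weight falls entirely on matrix index 0:
print(alpha[120], beta[120], jalpha[120], jbeta[120])   # 1.0 0.0 0 1
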
Example 12: readSnap
def readSnap(self, f):
    # Parse one snapshot from a LAMMPS-style dump file object.
    snap = Snap()
    snap.time = 0
    snap.box = []
    snap.atoms = []
    snap.natoms = 0
    for i, line in enumerate(f):
        if i > 8 and i < 8 + snap.natoms:
            snap.atoms.append(line.split())
        elif i == 3:
            snap.natoms = int(line.split()[0])
        elif i == 5 or i == 6 or i == 7:
            snap.box.append(np.float_(line.split()))
        elif i == 4:
            if len(line.split()) == 3:
                snap.boundary = []
            else:
                snap.boundary = line.split()[3:]
        elif i == 8:
            snap.descriptor = line.split()[2:]
        elif i == 1:
            snap.time = int(line.split()[0])
        elif i == 8 + snap.natoms:
            snap.atoms.append(line.split())
            break
    snap.atoms = np.float_(snap.atoms)  # convert the list of string fields to a float64 array
    snap.box = np.array(snap.box)
    return snap
Author: anyuzx, Project: myPackage, Lines: 28, Source: dump.py
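
For illustration, here is a made-up two-atom snapshot fed through the parser above, treating the excerpt as a standalone function and using a trivial stand-in for the project's Snap container (both are assumptions for this sketch; since self is not used inside, None can be passed for it):

import io
import numpy as np

class Snap(object):
    pass

dump = """ITEM: TIMESTEP
100
ITEM: NUMBER OF ATOMS
2
ITEM: BOX BOUNDS pp pp pp
0.0 10.0
0.0 10.0
0.0 10.0
ITEM: ATOMS id type x y z
1 1 0.5 0.5 0.5
2 1 1.5 1.5 1.5
"""

snap = readSnap(None, io.StringIO(dump))
print(snap.time, snap.natoms, snap.box.shape, snap.atoms.shape)  # 100 2 (3, 2) (2, 5)
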
Example 13: slidingMax
def slidingMax(x, y, dx):
    # Sliding-window maximum of y over windows of width dx in x,
    # implemented in inlined C via weave (Python 2 / scipy.weave only).
    x = np.float_(x)
    y = np.float_(y)
    LX = len(x)
    ymax = np.ones(LX) * y.min()
    code = \
    """
    int j;
    int i;
    int j0;
    int inloop;
    j0 = 1;
    for (i=0; i<LX; i++){
        j = j0;
        inloop = 0;
        while ((x(j) <= x(i)+dx/2) && (j < LX)) {
            if ((x(j) >= x(i)-dx/2) && (x(j) <= x(i)+dx/2)) {
                if (y(j) > ymax(i)) {
                    ymax(i) = y(j);
                }
                inloop = 1;
            }
            if (inloop == 0) {
                j0 = j; // memorize where we started before
            }
            j++;
        }
    }
    """
    err = weave.inline(code,
                       ['x', 'y', 'dx', 'LX', 'ymax'],
                       type_converters=converters.blitz,
                       compiler='gcc')
    return ymax
Author: amerand, Project: PRIMA, Lines: 35, Source: slidop.py
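
scipy.weave has been removed from modern SciPy, so the inline-C version above only runs on legacy Python 2 setups. A rough pure-NumPy equivalent of the same windowed-maximum idea, written here as an illustrative sketch and not taken from the original project, could look like this:

import numpy as np

def sliding_max(x, y, dx):
    """Return, for each x[i], the maximum of y over points within x[i] +/- dx/2."""
    x = np.float_(x)
    y = np.float_(y)
    ymax = np.empty_like(y)
    for i in range(len(x)):
        in_window = np.abs(x - x[i]) <= dx / 2   # point i is always in its own window
        ymax[i] = y[in_window].max()
    return ymax

# Example: noisy samples on an unevenly spaced grid.
x = np.sort(np.random.rand(200)) * 10
y = np.sin(x) + 0.1 * np.random.randn(200)
print(sliding_max(x, y, dx=0.5)[:5])
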
Example 14: check_numpy_scalar_argument_return_generic
def check_numpy_scalar_argument_return_generic(self):
    f = PyCFunction('foo')
    f += Variable('a1', numpy.int_, 'in, out')
    f += Variable('a2', numpy.float_, 'in, out')
    f += Variable('a3', numpy.complex_, 'in, out')
    foo = f.build()
    args = 2, 1.2, 1+2j
    # numpy.complex was an alias for the builtin complex (deprecated in NumPy 1.20, removed in 1.24).
    results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1+2j]
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1, 2]
    assert_equal(foo(*args), results)

    f = PyCFunction('foo')
    f += Variable('a1', 'npy_int', 'in, out')
    f += Variable('a2', 'npy_float', 'in, out')
    f += Variable('a3', 'npy_complex', 'in, out')
    foo = f.build()
    args = 2, 1.2, 1+2j
    results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1+2j]
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1, 2]
    assert_equal(foo(*args), results)
Author: dagss, Project: f2py-g3, Lines: 26, Source: test_py_support.py
Example 15: from_edf
def from_edf(fname, compression=None, below_water=False, lon=None, lat=None):
    """
    DataFrame constructor to open XBT EDF ASCII format.

    Examples
    --------
    >>> from ctd import DataFrame
    >>> cast = DataFrame.from_edf('../test/data/XBT.EDF.gz',
    ...                           compression='gzip')
    >>> fig, ax = cast['temperature'].plot()
    >>> ax.axis([20, 24, 19, 0])
    >>> ax.grid(True)
    """
    f = read_file(fname, compression=compression)
    header, names = [], []
    for k, line in enumerate(f.readlines()):
        line = line.strip()
        if line.startswith("Serial Number"):
            serial = line.strip().split(":")[1].strip()
        elif line.startswith("Latitude"):
            hemisphere = line[-1]
            lat = line.strip(hemisphere).split(":")[1].strip()
            lat = np.float_(lat.split())
            if hemisphere == "S":
                lat = -(lat[0] + lat[1] / 60.0)
            elif hemisphere == "N":
                lat = lat[0] + lat[1] / 60.0
            else:
                raise ValueError("Latitude not recognized.")
        elif line.startswith("Longitude"):
            hemisphere = line[-1]
            lon = line.strip(hemisphere).split(":")[1].strip()
            lon = np.float_(lon.split())
            if hemisphere == "W":
                lon = -(lon[0] + lon[1] / 60.0)
            elif hemisphere == "E":
                lon = lon[0] + lon[1] / 60.0
            else:
                raise ValueError("Longitude not recognized.")
        else:
            header.append(line)
            if line.startswith("Field"):
                col, unit = [l.strip().lower() for l in line.split(":")]
                names.append(unit.split()[0])
        if line == "// Data":
            skiprows = k + 1
            break
    f.seek(0)
    cast = read_table(
        f, header=None, index_col=None, names=names, dtype=float,
        skiprows=skiprows, delim_whitespace=True
    )
    f.close()
    cast.set_index("depth", drop=True, inplace=True)
    cast.index.name = "Depth [m]"
    name = basename(fname)[1]
    if below_water:
        cast = remove_above_water(cast)
    return CTD(cast, longitude=lon, latitude=lat, serial=serial, name=name, header=header)
Author: shaunwbell, Project: python-ctd, Lines: 60, Source: ctd.py
Example 16: allan
def allan(freq, noise, t):
    # first, convert to phase noise
    phase = np.float_(noise * 1.934 * (10**14) / 2. / (freq**2.))
    # now integrate over all values to get the Allan variance
    transferFunc = np.float_(np.sin(np.pi*phase*t)**4 / (np.pi*t*phase)**2)
    out = 2 * integrate.trapz(phase*transferFunc, freq)
    return np.sqrt(out)
Author: aasgreen, Project: micropython, Lines: 7, Source: machzenanalysis.py
Example 17: smooth_spreadsheet_with_rwr
def smooth_spreadsheet_with_rwr(restart, network_sparse, run_parameters):
    """Simulate a random walk with restarts (typical defaults: alpha=0.7, max_iteration=100, tol=1.e-4).

    Args:
        restart: restart array of any size.
        network_sparse: adjacency matrix stored in sparse format.
        run_parameters: parameters dictionary with the keys "restart_probability" (alpha),
            "restart_tolerance" (convergence tolerance) and
            "number_of_iteriations_in_rwr" (maximum number of random walk steps).

    Returns:
        smooth_1: smoothed restart data.
        step: number of iterations used.
    """
    tol = np.float_(run_parameters["restart_tolerance"])
    alpha = np.float_(run_parameters["restart_probability"])
    smooth_0 = restart
    smooth_r = (1. - alpha) * restart
    for step in range(0, int(run_parameters["number_of_iteriations_in_rwr"])):
        smooth_1 = alpha * network_sparse.dot(smooth_0) + smooth_r
        deltav = LA.norm(smooth_1 - smooth_0, 'fro')
        if deltav < tol:
            break
        smooth_0 = smooth_1
    return smooth_1, step
Author: XICHEN24, Project: KnowEnG_NBS_Base, Lines: 28, Source: toolbox.py
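
A minimal call, assuming the function above is defined in the same script with numpy.linalg imported as LA, and using an illustrative 3-node network (the sparse matrix and parameter values are made up for demonstration; real use would pass a properly normalized adjacency matrix and a gene-by-sample spreadsheet):

import numpy as np
from numpy import linalg as LA
from scipy.sparse import csr_matrix

# Column-normalized adjacency of a tiny 3-node chain network.
network_sparse = csr_matrix(np.array([[0.0, 0.5, 0.0],
                                      [1.0, 0.0, 1.0],
                                      [0.0, 0.5, 0.0]]))
restart = np.array([[1.0], [0.0], [0.0]])   # one column; the walk restarts at node 0
run_parameters = {"restart_tolerance": 1.e-4,
                  "restart_probability": 0.7,
                  "number_of_iteriations_in_rwr": 100}

smoothed, steps = smooth_spreadsheet_with_rwr(restart, network_sparse, run_parameters)
print(steps, smoothed.ravel())   # highest score at the restart node, lowest at the far end of the chain
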
Example 18: read_data
def read_data(self):
    """Reads in the data required for the given input_type."""
    re_int = '\d+'
    re_ints = '\\b\d+\\b'
    re_floats = '[+-]? *(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][+-]?\d+)?'
    f_name = self.input_path + "l2errs+convergence_standard.txt"
    var_names = list()
    with open(f_name) as f:
        for line in f:
            if ('n_vars' in line):
                n_vars = np.int_(re.search(re_int, line).group())
            if ('ml_max' in line):
                ml_max = np.int_(re.search(re_int, line).group())
            if ('p_max' in line):
                p_max = np.int_(re.search(re_int, line).group())
                # Initialize np arrays now that the dimensions are known
                block_size2 = (ml_max+1, p_max+1)
                block_size3 = (ml_max+1, p_max+1, n_vars)
                cases_run = np.int_(np.zeros(block_size2))
                h = np.float_(np.zeros(block_size2))
                l2_errors = np.float_(-float('inf') + np.zeros(block_size3))
                conv_orders = np.float_(-float('inf') + np.zeros(block_size3))
            if ('Cases Run' in line):
                for i in range(0, ml_max+1):
                    line = f.readline()
                    cases_run[i, :] = np.int_([int(s) for s in re.findall(re_ints, line)])
            if ('Mesh Size' in line):
                assign_block(h, cases_run, f)
            if ('L2 Errors' in line):
                for k in range(0, n_vars):
                    skip_lines(1, f)
                    line = f.readline()
                    var_names.append(line.replace('\n', ''))
                    assign_block(l2_errors[:, :, k], cases_run, f)
            if ('Convergence Orders' in line):
                for k in range(0, n_vars):
                    skip_lines(2, f)
                    assign_block(conv_orders[:, :, k], cases_run, f)
    self.n_vars = n_vars
    self.ml_max = ml_max
    self.p_max = p_max
    self.var_names = var_names
    self.cases_run = cases_run
    self.h = h
    self.l2_errors = l2_errors
    self.conv_orders = conv_orders
Author: PhilipZwanenburg, Project: DPGSolver, Lines: 59, Source: convert_data.py
Example 19: gray2real01
def gray2real01(img):
    # Map an 8-bit grayscale image to floats in [0, 1] (xrange is Python 2; use range on Python 3).
    x = img.shape[0]
    y = img.shape[1]
    fxy = np.zeros((x, y), dtype=np.float_)
    for i in xrange(0, x):
        for j in xrange(0, y):
            fxy[i, j] = np.float_(img[i][j]) / np.float_(255)
    return fxy
Author: d-klein, Project: image-hash, Lines: 8, Source: Image.py
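
The double loop above is equivalent to a single vectorized division; a short NumPy sketch of the same conversion (illustrative, not taken from the original project):

import numpy as np

def gray2real01_vectorized(img):
    # Same mapping as above: 8-bit grayscale -> float64 in [0, 1].
    return np.float_(img) / 255.0

img = np.array([[0, 128], [255, 64]], dtype=np.uint8)
print(gray2real01_vectorized(img))   # [[0. 0.50196078] [1. 0.25098039]]
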
Example 20: initialize_constants
def initialize_constants(self):
    """
    Initialize constants that depend on the instance.
    """
    # maximum pheromone value
    self.PH_MAX = np.float_(self.num_vars / (1.0 - self.PH_REDUCE_FACTOR))
    # minimum pheromone value
    self.PH_MIN = np.float_(self.PH_MAX / self.num_lits)
Author: domoritz, Project: SoSAT, Lines: 8, Source: algorithm.py
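
A quick numeric illustration of these two constants with made-up instance values (num_vars=100, PH_REDUCE_FACTOR=0.1, num_lits=200; the names mirror the attributes used above):

import numpy as np

num_vars, num_lits, PH_REDUCE_FACTOR = 100, 200, 0.1

PH_MAX = np.float_(num_vars / (1.0 - PH_REDUCE_FACTOR))   # 111.11...
PH_MIN = np.float_(PH_MAX / num_lits)                     # 0.5555...
print(PH_MAX, PH_MIN)
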
Note: The numpy.float_ examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use should follow each project's license. Do not repost without permission.