This article collects typical usage examples of the numpy.divide function in Python. If you have been wondering what numpy.divide does, how it is used, or what real-world usage looks like, the curated examples here should help.
Below are 20 code examples of the divide function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
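Before diving in, here is a minimal, self-contained sketch (with made-up values) of the three numpy.divide calling patterns that recur throughout the examples below: plain element-wise division, in-place division via out=, and guarded division via where=:

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([4.0, 2.0, 0.0, 8.0])

# Element-wise true division; for ndarrays this is equivalent to a / b.
# The zero denominator yields inf and raises a RuntimeWarning.
print(np.divide(a, b))                    # [0.25 1.   inf  0.5 ]

# In-place form: write into an existing array (out= keyword, or a third
# positional argument, as several examples below use).
out = np.empty_like(a)
np.divide(a, b, out=out)

# Guarded form: divide only where the denominator is nonzero, keeping the
# fill value (0.0 here) elsewhere, so no warning is raised.
safe = np.divide(a, b, out=np.zeros_like(a), where=(b != 0))
print(safe)                               # [0.25 1.   0.   0.5 ]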
Example 1: multistate_distribution
def multistate_distribution(data, parameters, limit,
                            normalize_likelihood_level_cell_counts=True):
    data_grandpa, data_parent, data_children = data
    sigma, b, a_grandpa, a_parent, a_children = parameters

    normalization_factor = normalize(sigma, a_grandpa, b, limit)
    grandpa_dist = [steady_state_distribution(x, sigma, a_grandpa, b, normalization_factor) for x in data_grandpa]
    normalization_factor = normalize(sigma, a_parent, b, limit)
    parent_dist = [steady_state_distribution(x, sigma, a_parent, b, normalization_factor) for x in data_parent]
    normalization_factor = normalize(sigma, a_children, b, limit)
    children_dist = [steady_state_distribution(x, sigma, a_children, b, normalization_factor) for x in data_children]

    grandpa_dist = np.array(grandpa_dist, dtype=float)
    parent_dist = np.array(parent_dist, dtype=float)
    children_dist = np.array(children_dist, dtype=float)

    if normalize_likelihood_level_cell_counts:
        grandpa_dist = np.divide(grandpa_dist, float(data_grandpa.size))
        parent_dist = np.divide(parent_dist, float(data_parent.size))
        children_dist = np.divide(children_dist, float(data_children.size))

    return grandpa_dist, parent_dist, children_dist
Author: GGiecold | Project: PySCUBA | Lines: 25 | Source: SCUBA_core.py
Example 2: ratio_err
def ratio_err(top, bottom, top_low, top_high, bottom_low, bottom_high):
    # uses simple propagation of errors (partial derivatives);
    # note it returns error bars, not an interval
    # -- make sure input is numpy arrays --
    top = np.array(top)
    top_low = np.array(top_low)
    top_high = np.array(top_high)
    bottom = np.array(bottom)
    bottom_low = np.array(bottom_low)
    bottom_high = np.array(bottom_high)
    # -- calculate error bars --
    top_errlow = np.subtract(top, top_low)
    top_errhigh = np.subtract(top_high, top)
    bottom_errlow = np.subtract(bottom, bottom_low)
    bottom_errhigh = np.subtract(bottom_high, bottom)
    # -- calculate ratio_low --
    ratio_low = np.sqrt(np.square(np.divide(top_errlow, bottom))
                        + np.square(np.multiply(np.divide(top, np.square(bottom)), bottom_errlow)))
    # -- calculate ratio_high --
    ratio_high = np.sqrt(np.square(np.divide(top_errhigh, bottom))
                         + np.square(np.multiply(np.divide(top, np.square(bottom)), bottom_errhigh)))
    # equivalently: ratio_high = ((top_errhigh/bottom)**2.0 + ((top/bottom**2.0)*bottom_errhigh)**2.0)**0.5
    # return two vectors, err_low and err_high
    return ratio_low, ratio_high
Author: kariannfrank | Project: sn1987a | Lines: 26 | Source: spectra_results_0.py
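As a sanity check on the propagation formula above, here is a small sketch with hypothetical values (top = 10 ± 1, bottom = 5 ± 0.5) comparing the analytic error bar against a brute-force sampling estimate:

import numpy as np

top, top_err = 10.0, 1.0
bottom, bottom_err = 5.0, 0.5

# First-order propagation, one side of ratio_err above:
analytic = np.sqrt(np.square(np.divide(top_err, bottom))
                   + np.square(np.divide(top * bottom_err, bottom ** 2)))

# Brute-force check by sampling both quantities:
rng = np.random.default_rng(0)
ratios = rng.normal(top, top_err, 100_000) / rng.normal(bottom, bottom_err, 100_000)
print(analytic, ratios.std())   # ~0.283 vs ~0.29; close while relative errors are small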
Example 3: HS
def HS(im1, im2, alpha, ite):
    # set initial values for the flow vectors
    u = np.zeros([im1.shape[0], im1.shape[1]])
    v = np.zeros([im1.shape[0], im1.shape[1]])
    # Estimate derivatives
    [fx, fy, ft] = computeDerivatives(im1, im2)
    # Averaging kernel (a plain ndarray; np.matrix is deprecated and
    # cv2.filter2D accepts ordinary arrays)
    kernel = np.array([[1/12, 1/6, 1/12],
                       [1/6,  0,   1/6],
                       [1/12, 1/6, 1/12]])
    print(fx[100, 100], fy[100, 100], ft[100, 100])
    # Iteration to reduce error
    for i in range(ite):
        # Compute local averages of the flow vectors
        uAvg = cv2.filter2D(u, -1, kernel)
        vAvg = cv2.filter2D(v, -1, kernel)
        # Horn-Schunck update; the products must be element-wise
        # (the source used .dot, i.e. matrix products, and multiplied both
        # numerators by ft; the standard update multiplies by fx and fy)
        common = fx * uAvg + fy * vAvg + ft
        denom = alpha + fx ** 2 + fy ** 2
        u = uAvg - np.divide(fx * common, denom)
        v = vAvg - np.divide(fy * common, denom)
    return (u, v)
Author: alexlib | Project: Optical-Flow-LucasKanade-HornSchunck | Lines: 34 | Source: HSPY.py
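For reference, here is one step of the textbook Horn-Schunck update in isolation, using hypothetical gradient arrays (a pure-numpy sketch, not the repository's code; it uses the common alpha**2 smoothing term):

import numpy as np

rng = np.random.default_rng(1)
fx, fy, ft = rng.normal(size=(3, 4, 4))   # toy image gradients
uAvg = np.zeros((4, 4))
vAvg = np.zeros((4, 4))
alpha = 1.0

# All products element-wise; the denominator is strictly positive,
# so no division guard is needed.
common = np.divide(fx * uAvg + fy * vAvg + ft,
                   alpha ** 2 + fx ** 2 + fy ** 2)
u = uAvg - fx * common
v = vAvg - fy * common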
Example 4: visResults
def visResults(m, result_dir, varying_para_values=r_values, xlabel='Radius Scale Factor', basenum=0):
    # plot the raw results
    plotResults(m, result_dir, varying_para_values, xlabel, 'Fitted')

    # plot the normalized results
    norm_m = np.zeros(m.shape)
    for i in range(len(varying_para_values)):
        norm_m[:, :, i] = np.divide(m[:, :, i], m[:, :, basenum])  # r=1.0 is the original model fitting result
    plotResults(norm_m, result_dir, varying_para_values, xlabel, 'Normalized')

    for i in range(len(varying_para_values)):
        norm_m[:, :, i] = 100 * np.divide(m[:, :, i] - m[:, :, basenum], m[:, :, basenum])
    plotResults(norm_m, result_dir, varying_para_values, xlabel, 'Difference')

    CmTotal = np.zeros((sample_size, len(varying_para_values)))
    for i in range(len(varying_para_values)):
        CmTotal[:, i] = m[:, 4, i] * (m[:, 1, i] + m[:, 2, i])  # Cm * (A1 + A2)
    df_CmTotal = pd.DataFrame(CmTotal, columns=varying_para_values)
    my_box_plot(df_CmTotal, result_dir + "/total_capacitance.png", xlabel, 'Total Capacitance')

    RmUnit = np.zeros((sample_size, len(varying_para_values)))
    for i in range(len(varying_para_values)):
        RmUnit[:, i] = m[:, 5, i] / (m[:, 1, i] + m[:, 2, i])  # Rm / (A1 + A2)
    df_RmUnit = pd.DataFrame(RmUnit, columns=varying_para_values)
    my_box_plot(df_RmUnit, result_dir + "/total_Rm.png", xlabel, 'Unit Membrane Resistance')
Author: XiaoxiaoLiu | Project: morphology_analysis | Lines: 30 | Source: modified_morph_fitting_results_vis.py
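If any baseline entry m[:, :, basenum] can be zero, the where= form of np.divide avoids silent inf/NaN values; a small sketch with made-up numbers:

import numpy as np

m_base = np.array([2.0, 0.0, 5.0])
m_new = np.array([3.0, 1.0, 5.0])

# Percent difference relative to the baseline; zero baselines yield 0
# instead of inf/nan.
pct = 100 * np.divide(m_new - m_base, m_base,
                      out=np.zeros_like(m_base), where=(m_base != 0))
print(pct)   # [50.  0.  0.]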
Example 5: CalcGamma
def CalcGamma(MeanVec, VarVec, wVec, X):
    # responsibility matrix: one row per mixture component, one column per
    # sample (the source had this allocation commented out, but gamma is
    # used below)
    gamma = np.zeros(shape=(NoM, X.shape[0]))
    for a in range(X.shape[0]):
        summ = 0
        for i in range(NoM):
            power = np.square(np.subtract(X[a], MeanVec[i]))
            denp = np.multiply(-2, VarVec[i])
            power = np.sum(np.divide(power, denp))
            power = np.exp(power)
            prodVarVec = np.prod(VarVec[i])
            # Gaussian normalization constant (2*pi)^(d/2) * sqrt(|Sigma|);
            # the source uses NoM in the exponent, though the textbook
            # constant uses the data dimension
            den = (2 * math.pi) ** (NoM / 2) * np.sqrt(prodVarVec)
            gamma[i][a] = wVec[i] * np.divide(power, den)
            summ = summ + gamma[i][a]
        # normalize the column so the responsibilities sum to one
        for i in range(NoM):
            gamma[i][a] = gamma[i][a] / summ
    return gamma
Author: Touheed20 | Project: GMM_voice | Lines: 29 | Source: 3q.py
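The per-sample loop above can underflow once exp(power) becomes tiny. A vectorized, log-space alternative for diagonal-covariance responsibilities (a sketch of the standard technique, not the author's code):

import numpy as np

def responsibilities(X, means, variances, weights):
    """GMM responsibilities with diagonal covariances, in log space.
    X: (n, d); means, variances: (k, d); weights: (k,)."""
    diff = X[:, None, :] - means[None, :, :]                     # (n, k, d)
    log_pdf = -0.5 * (np.sum(diff ** 2 / variances, axis=2)
                      + np.sum(np.log(2 * np.pi * variances), axis=1))
    log_w = np.log(weights) + log_pdf                            # (n, k)
    # log-sum-exp trick: the subtracted row maximum cancels in the ratio
    gamma = np.exp(log_w - log_w.max(axis=1, keepdims=True))
    return np.divide(gamma, gamma.sum(axis=1, keepdims=True))

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 2))
means = np.array([[0.0, 0.0], [1.0, 1.0]])
variances = np.ones((2, 2))
weights = np.array([0.5, 0.5])
print(responsibilities(X, means, variances, weights).sum(axis=1))  # all 1.0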
Example 6: __EM
def __EM(self):
    # note: self.x, probability, etc. appear to be np.matrix objects,
    # so * below is matrix multiplication and sums stay 2-D
    old_log_like = -np.inf
    threshold = 1e-15
    probability = 0
    while True:
        # E step
        probability = self.__probability()
        expectation = np.multiply(probability, self.prior)
        expectation = np.divide(expectation, expectation.sum(axis=1))
        # M step: update parameters
        sumk = expectation.sum(axis=0)
        self.prior = sumk / self.x.shape[0]
        self.mean = np.diag(np.array(np.divide(1, sumk)).flatten()) * \
            expectation.T * self.x
        for i in range(self.k):
            x_shift = self.x - self.mean[i, :]
            self.sigma[:, :, i] = x_shift.T * \
                np.diag(np.array(expectation[:, i]).flatten()) * x_shift / \
                sumk[0, i]
        new_log_like = np.log(probability * self.prior.T).sum()
        if np.abs(new_log_like - old_log_like) < threshold:
            break
        old_log_like = new_log_like
    return probability
Author: HAN-Yuqiang | Project: MathHomework | Lines: 26 | Source: hw3.py
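One detail worth noting: expectation.sum(axis=1) only broadcasts correctly here because the code works with np.matrix objects. With plain ndarrays the row sums need keepdims=True; a minimal sketch:

import numpy as np

expectation = np.array([[0.2, 0.8, 1.0],
                        [0.5, 0.5, 1.0]])

# keepdims=True keeps the row sums as a (2, 1) column, so each row is
# divided by its own sum; a bare (2,) vector would not broadcast against (2, 3).
norm = np.divide(expectation, expectation.sum(axis=1, keepdims=True))
print(norm.sum(axis=1))   # [1. 1.]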
Example 7: sw_sums
def sw_sums(a, b):
    abw = apply_scale(w, a, b)
    np.divide(abw, 1 + abw, out=abw)
    abw[np.isnan(abw)] = 1
    swr = abw.sum(1, keepdims=True)
    swc = abw.sum(0, keepdims=True)
    return swr, swc
Author: othercriteria | Project: StochasticBlockmodel | Lines: 7 | Source: BinaryMatrix.py
Example 8: normalize
def normalize(self, mode="integral"):
    """
    Force normalization of filter kernel.

    Parameters
    ----------
    mode : {'integral', 'peak'}
        One of the following modes:
            * 'integral' (default)
                Kernel is normalized such that its integral = 1.
            * 'peak'
                Kernel is normalized such that its peak = 1.
    """
    # There are kernels that sum to zero and the user
    # should be warned in this case.
    if np.isinf(self._normalization):
        warnings.warn("Kernel cannot be normalized because the "
                      "normalization factor is infinite.", AstropyUserWarning)
        return
    if np.abs(self._normalization) > MAX_NORMALIZATION:
        warnings.warn("Normalization factor of kernel is "
                      "exceptionally large > {0}.".format(MAX_NORMALIZATION),
                      AstropyUserWarning)
    if mode == "integral":
        self._array *= self._normalization
    if mode == "peak":
        np.divide(self._array, self._array.max(), self.array)
        self._normalization = 1.0 / self._array.sum()
Author: JotMan | Project: astropy | Lines: 30 | Source: core.py
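The two normalization modes in toy form (a sketch with a hypothetical 3x3 kernel):

import numpy as np

kernel = np.array([[1.0, 2.0, 1.0],
                   [2.0, 4.0, 2.0],
                   [1.0, 2.0, 1.0]])

peak = np.divide(kernel, kernel.max())        # peak becomes 1
integral = np.divide(kernel, kernel.sum())    # entries sum to 1
print(peak.max(), integral.sum())             # 1.0 1.0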
Example 9: get_gaussian_weight_patch
def get_gaussian_weight_patch(gauss_shape=(19, 19), gauss_sigma_frac=.3,
                              gauss_norm_01=True):
    r"""
    2d gaussian image useful for plotting

    Returns:
        ndarray: patch

    CommandLine:
        python -m vtool.coverage_kpts --test-get_gaussian_weight_patch

    Example:
        >>> # ENABLE_DOCTEST
        >>> from vtool.coverage_kpts import *  # NOQA
        >>> patch = get_gaussian_weight_patch()
        >>> result = str(patch)
        >>> print(result)
    """
    # Perdoch uses roughly .95 of the radius
    radius = gauss_shape[0] / 2.0
    sigma = gauss_sigma_frac * radius
    # Similar to SIFT's computeCircularGaussMask in helpers.cpp;
    # hesaff uses smmWindowSize=19 for the patch size and 1.6 for sigma.
    # Create gaussian image to warp
    patch = ptool.gaussian_patch(shape=gauss_shape, sigma=sigma)
    if gauss_norm_01:
        np.divide(patch, patch.max(), out=patch)
    return patch
Author: Erotemic | Project: vtool | Lines: 31 | Source: coverage_kpts.py
Example 10: ndcg_multi
def ndcg_multi(X, Y, Ks):
    # zeros, argsort, sort, arange presumably come from
    # `from numpy import *` in the source module
    assert(X.size == Y.size and all(X.indices == Y.indices) and all(X.indptr == Y.indptr))
    n = Y.shape[1]
    res = zeros(len(Ks))
    nvalid = 0
    Xdata = X.data
    Ydata = Y.data
    indices = Y.indices
    indptr = Y.indptr
    for i in range(n):
        j0, j1 = indptr[i], indptr[i + 1]
        if j0 == j1:  # skip empty column
            continue
        nvalid += 1
        Xi = Xdata[j0:j1]
        Yi = Ydata[j0:j1]
        I = argsort(-Xi)
        Yi_pred = numpy.exp(Yi[I]) - 1.0
        Yi_best = numpy.exp(-(sort(-Yi))) - 1.0
        Wi = numpy.log(numpy.exp(1) + arange(j1 - j0))
        Yi_pred = numpy.divide(Yi_pred, Wi)
        Yi_best = numpy.divide(Yi_best, Wi)
        for k in range(len(Ks)):
            K = Ks[k]
            Ki = min([K, j1 - j0])
            res[k] += sum(Yi_pred[0:Ki]) / sum(Yi_best[0:Ki])
    assert(nvalid > 0)
    res /= nvalid
    return res
Author: TeweiLuo | Project: CMFTest | Lines: 29 | Source: cfeval.py
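The same gain exp(rel) - 1 and discount log(e + rank) in a dense, single-query form (a simplified sketch, not the repository's sparse implementation):

import numpy as np

def ndcg_at_k(scores_pred, relevance, k):
    order = np.argsort(-scores_pred)                       # rank by prediction
    gains = np.exp(relevance) - 1.0
    discount = np.log(np.e + np.arange(len(relevance)))
    dcg = np.divide(gains[order], discount)[:k].sum()
    ideal = np.divide(np.sort(gains)[::-1], discount)[:k].sum()
    return dcg / ideal

scores = np.array([0.9, 0.1, 0.5])
rels = np.array([2.0, 0.0, 1.0])
print(ndcg_at_k(scores, rels, 2))   # 1.0: the prediction ranks the relevant items first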
Example 11: get_summed_cohp_by_label_and_orbital_list
def get_summed_cohp_by_label_and_orbital_list(self, label_list, orbital_list, divisor=1):
    """
    Returns a COHP object that includes a summed COHP divided by divisor.

    Args:
        label_list: list of labels for the COHPs that should be included in the summed COHP
        orbital_list: list of orbitals for the COHPs that should be included in the summed COHP
            (same order as label_list)
        divisor: float/int; the summed COHP will be divided by this divisor

    Returns:
        A COHP object including a summed COHP
    """
    # check whether the COHPs are spin-polarized or not
    first_cohpobject = self.get_orbital_resolved_cohp(label_list[0], orbital_list[0])
    summed_cohp = first_cohpobject.cohp.copy()
    summed_icohp = first_cohpobject.icohp.copy()
    for ilabel, label in enumerate(label_list[1:], 1):
        cohp_here = self.get_orbital_resolved_cohp(label, orbital_list[ilabel])
        summed_cohp[Spin.up] = np.sum([summed_cohp[Spin.up], cohp_here.cohp.copy()[Spin.up]], axis=0)
        if Spin.down in summed_cohp:
            summed_cohp[Spin.down] = np.sum([summed_cohp[Spin.down], cohp_here.cohp.copy()[Spin.down]], axis=0)
        summed_icohp[Spin.up] = np.sum([summed_icohp[Spin.up], cohp_here.icohp.copy()[Spin.up]], axis=0)
        if Spin.down in summed_icohp:
            summed_icohp[Spin.down] = np.sum([summed_icohp[Spin.down], cohp_here.icohp.copy()[Spin.down]], axis=0)
    divided_cohp = {}
    divided_icohp = {}
    divided_cohp[Spin.up] = np.divide(summed_cohp[Spin.up], divisor)
    divided_icohp[Spin.up] = np.divide(summed_icohp[Spin.up], divisor)
    if Spin.down in summed_cohp:
        divided_cohp[Spin.down] = np.divide(summed_cohp[Spin.down], divisor)
        divided_icohp[Spin.down] = np.divide(summed_icohp[Spin.down], divisor)
    return Cohp(efermi=first_cohpobject.efermi, energies=first_cohpobject.energies,
                cohp=divided_cohp, are_coops=first_cohpobject.are_coops,
                icohp=divided_icohp)
Author: gmatteo | Project: pymatgen | Lines: 35 | Source: cohp.py
Example 12: _process_sample
def _process_sample(self, ap1, ap2, ap3, triple, tflags):
    """We have computed one independent phase-closure triple in one timeslot."""
    # Frequency-resolved: normalize each sample to unit modulus, in place,
    # so that only phase information remains.
    np.divide(triple, np.abs(triple), triple)
    phase = np.angle(triple)
    self.ap_spec_stats_by_ddid[self.cur_ddid].accum(ap1, phase, tflags + 0.)
    self.ap_spec_stats_by_ddid[self.cur_ddid].accum(ap2, phase, tflags + 0.)
    self.ap_spec_stats_by_ddid[self.cur_ddid].accum(ap3, phase, tflags + 0.)
    # Frequency-averaged:
    triple = np.dot(triple, tflags) / tflags.sum()
    phase = np.angle(triple)
    self.global_stats_by_time.accum(self.cur_time, phase)
    self.ap_stats_by_ddid[self.cur_ddid].accum(ap1, phase)
    self.ap_stats_by_ddid[self.cur_ddid].accum(ap2, phase)
    self.ap_stats_by_ddid[self.cur_ddid].accum(ap3, phase)
    self.bp_stats_by_ddid[self.cur_ddid].accum((ap1, ap2), phase)
    self.bp_stats_by_ddid[self.cur_ddid].accum((ap1, ap3), phase)
    self.bp_stats_by_ddid[self.cur_ddid].accum((ap2, ap3), phase)
    self.ap_time_stats_by_ddid[self.cur_ddid].accum(self.cur_time, ap1, phase)
    self.ap_time_stats_by_ddid[self.cur_ddid].accum(self.cur_time, ap2, phase)
    self.ap_time_stats_by_ddid[self.cur_ddid].accum(self.cur_time, ap3, phase)
Author: pkgw | Project: pwkit | Lines: 28 | Source: closures.py
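The three-argument np.divide(triple, np.abs(triple), triple) pattern normalizes complex samples to unit modulus in place, keeping only phase; a small sketch:

import numpy as np

z = np.array([3 + 4j, 1 - 1j, -2 + 0j])

np.divide(z, np.abs(z), z)   # in-place: the third positional argument is `out`
print(np.abs(z))             # [1. 1. 1.]
print(np.angle(z))           # only the phases survive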
Example 13: getGammaAngle
def getGammaAngle(appf, cAtom, oAtom, hAtom):
    # first determine the N atom
    aminoGroup = appf.select('resnum ' + str(cAtom.getResnum()))
    for at in aminoGroup:
        if at.getName() == 'N':
            nAtom = at
    # get coordinates
    cCoords = cAtom.getCoords()
    oCoords = oAtom.getCoords()
    hCoords = hAtom.getCoords()
    nCoords = nAtom.getCoords()
    # get necessary vectors
    oc = np.subtract(oCoords, cCoords)
    nc = np.subtract(nCoords, cCoords)
    ho = np.subtract(hCoords, oCoords)
    n1 = np.cross(oc, nc)
    n1_unit = np.divide(n1, np.linalg.norm(n1))
    # get projection of H-O in the O-C direction
    oc_unit = np.divide(oc, np.linalg.norm(oc))
    hproj = np.dot(ho, oc_unit)
    # get projection of H-O onto the N-C-O plane
    out = np.dot(ho, n1_unit)
    n2 = np.cross(np.multiply(n1_unit, out), oc)
    ho_ip = np.subtract(ho, np.multiply(n1_unit, out))
    test = np.dot(n2, ho_ip)
    # angle between the projections, in degrees
    ang = hproj / np.linalg.norm(ho_ip)
    ang = math.acos(ang)
    ang = ang * 180 / math.pi
    # if test < 0:
    #     ang = ang * -1
    return ang
Author: fedsimon | Project: DigBioProj_One | Lines: 34 | Source: main.py
Example 14: compile_stats
def compile_stats():
    logging.info('Loading data...')
    for i, filename in enumerate(os.listdir(DATA_DIR)):
        if i > MAX_FILES:
            break
        full_name = DATA_DIR + "/" + filename
        print(full_name)
        with open(full_name) as f:
            data = np.load(f)
            if len(data["input"].shape) == 3 and len(data["output"].shape) == 3:
                X = data["input"]
                y = data["output"]
                if COLLECT_ACTION_PCT:
                    probs = np.divide(np.sum(y, (0, 1)).astype(np.float64),
                                      np.sum(y, (0, 1, 2)))
                    player_to_stats[filename] = probs
                elif COLLECT_VPIP_PFR:
                    # Assumes actions are (fold, check, call, raise)
                    actions = X[:, :, 11:15]
                    if np.sum(actions, (0, 1, 2)) < 100:
                        continue
                    probs = np.divide(np.sum(actions, (0, 1)).astype(np.float64),
                                      np.sum(actions, (0, 1, 2)))
                    pfr = probs[3]
                    vpip = probs[3] + probs[2]
                    player_to_stats[filename] = np.array([vpip, pfr])
Author: session-id | Project: poker-predictor | Lines: 27 | Source: player_stats.py
Example 15: _generate
def _generate(l, k, g, beta, M, e, A, mu, intercept):
    p = beta.shape[0]
    if intercept:
        gradL1 = grad_l1(beta[1:, :])
        gradL2 = grad_l2_squared(beta[1:, :])
        gradGLmu = grad_glmu(beta[1:, :], A, mu)
    else:
        gradL1 = grad_l1(beta)
        gradL2 = grad_l2_squared(beta)
        gradGLmu = grad_glmu(beta, A, mu)

    alpha = -(l * gradL1 + k * gradL2 + g * gradGLmu)
    Mte = np.dot(M.T, e)
    if intercept:
        alpha = np.divide(alpha, Mte[1:, :])
    else:
        alpha = np.divide(alpha, Mte)

    X = np.ones(M.shape)
    if intercept:
        for i in range(p - 1):
            X[:, i + 1] = M[:, i + 1] * alpha[i, 0]
    else:
        for i in range(p):
            X[:, i] = M[:, i] * alpha[i, 0]

    y = np.dot(X, beta) - e
    return X, y
Author: irwenqiang | Project: pylearn-parsimony | Lines: 31 | Source: l1_l2_glmu.py
Example 16: minimum_pension
def minimum_pension(self, trim_wages_reg, trim_wages_all, pension_reg, pension_all):
    '''MICO of the general scheme (régime général): a differential allowance.
    Note: the ASPA and the minimum vieillesse are handled by OF.
    It is granted regardless of any income the retiree has on top of pensions:
    rent, capital income, professional activity...
    + an apportionment mechanism when contributions were paid into several schemes.
    TODO: code all the 2004/2008 changes and reversals'''
    # reduce, minimum, divide, maximum presumably come from functools/numpy
    # imports in the source module
    P = reduce(getattr, self.param_name.split('.'), self.P)
    # pension_RG, pension, trim_RG, trim_cot, trim
    trimesters = trim_wages_reg['trimesters']
    trim_regime = trimesters['regime'].sum() + sum(trim_wages_reg['maj'].values())
    coeff = minimum(1, divide(trim_regime, P.prorat.n_trim))
    if P.mico.dispositif == 0:
        # Before 1 January 1983, compared with the AVTS
        min_pension = self.P.common.avts
        return maximum(min_pension - pension_reg, 0) * coeff
    elif P.mico.dispositif == 1:
        # TODO: see how to handle the fairly complex cumulation cap (COR Doc No. 5)
        mico = P.mico.entier
        return maximum(mico - pension_reg, 0) * coeff
    elif P.mico.dispositif == 2:
        # From 1 January 2004, contributed periods are taken into account
        # (+ the 2004 transitional scheme)
        nb_trim = P.prorat.n_trim
        trim_regime = trimesters['regime'].sum()  # + sum(trim_wages_regime['maj'].values())
        trim_cot_regime = sum(trimesters[key].sum() for key in trimesters.keys() if 'cot' in key)
        mico_entier = P.mico.entier * minimum(divide(trim_regime, nb_trim), 1)
        maj = (P.mico.entier_maj - P.mico.entier) * divide(trim_cot_regime, nb_trim)
        mico = mico_entier + maj * (trim_cot_regime >= P.mico.trim_min)
        return (mico - pension_reg) * (mico > pension_reg) * (pension_reg > 0)
Author: simonrabate | Project: Til-Pension | Lines: 28 | Source: regime_prive.py
Example 17: mc2mvsk
def mc2mvsk(args):
    '''convert central moments to mean, variance, skew, kurtosis'''
    mc, mc2, mc3, mc4 = args
    skew = np.divide(mc3, mc2 ** 1.5)
    kurt = np.divide(mc4, mc2 ** 2.0) - 3.0
    return (mc, mc2, skew, kurt)
Author: Code-fish | Project: statsmodels | Lines: 7 | Source: moment_helpers.py
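A quick check of these formulas on a standard-normal sample, where both skew and excess kurtosis should come out near zero:

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=100_000)

mc = x.mean()
mc2 = ((x - mc) ** 2).mean()
mc3 = ((x - mc) ** 3).mean()
mc4 = ((x - mc) ** 4).mean()

skew = np.divide(mc3, mc2 ** 1.5)
kurt = np.divide(mc4, mc2 ** 2.0) - 3.0   # excess kurtosis
print(skew, kurt)   # both close to 0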
Example 18: basicconn
def basicconn(skf, X, y):
    total_score = 0
    for train_index, test_index in skf:
        # alternative feature-selection and classifier choices, kept from the source:
        # selectf = SelectFpr().fit(X[train_index], y[train_index])
        # selectf = SelectKBest(f_classif, k=750).fit(X[train_index], y[train_index])
        # tmp_x = selectf.transform(X[train_index])
        # clf = RandomForestClassifier(n_estimators=20).fit(tmp_x, y[train_index])
        # clf = svm.LinearSVC() / svm.SVC(); clf.fit(tmp_x, y[train_index])
        # Train
        clf = plib.classif(X[train_index], y[train_index])
        # Test
        pred = clf.predict(X[test_index])
        print("Target     : ", y[test_index])
        print("Prediction : ", pred)
        matchs = np.equal(pred, y[test_index])
        score = np.divide(np.sum(matchs), np.float64(matchs.size))
        total_score = score + total_score
    return np.divide(total_score, skf.n_folds)
Author: cdansereau | Project: Proteus | Lines: 27 | Source: prediction.py
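The np.float64 cast above guards against Python 2 integer division; np.divide itself always performs true division, so under Python 3 the cast is redundant:

import numpy as np

matches = np.array([True, True, False, True])

# np.divide is true division regardless of integer inputs.
accuracy = np.divide(np.sum(matches), matches.size)
print(accuracy)   # 0.75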
Example 19: getRadiationAtLatLong
def getRadiationAtLatLong(path, latitude, longitude):
    # open the file and read header info: corner coordinates, cell size,
    # and the number of rows and columns
    bottomLeftLatLong = getBottomLeftLatLong(path)
    cellSize = getCellSize(path)
    colsRows = getColsRows(path)
    # (np.float was removed from modern numpy; the builtin float works here)
    minLatitude = float(bottomLeftLatLong[0] - np.multiply(cellSize, float(colsRows[0])))
    maxLongitude = float(bottomLeftLatLong[1] + np.multiply(cellSize, float(colsRows[1])))
    maxLatitude = bottomLeftLatLong[0]
    minLongitude = bottomLeftLatLong[1]
    topRightLatitude = bottomLeftLatLong[0] + np.multiply(cellSize, (colsRows[1] - 1))
    topRightLongitude = bottomLeftLatLong[1]
    print("Minimum lat, long = (" + str(minLatitude) + "," + str(minLongitude) + ")")
    print("Top right lat, long = (" + str(topRightLatitude) + "," + str(topRightLongitude) + ")")
    print("Requested lat, long = (" + str(latitude) + "," + str(longitude) + ")")
    # Verify that lat and long are reasonable so we don't index outside the grid.
    if minLatitude <= latitude <= maxLatitude and minLongitude <= longitude <= maxLongitude:
        xcoord = int(np.round(np.divide(float(topRightLongitude - longitude), cellSize)))
        ycoord = int(np.round(np.divide(float(topRightLatitude - latitude), cellSize)))
        data = np.loadtxt(path, dtype='float', skiprows=6, usecols=None, unpack=False)
        print("Table coordinates: " + str(ycoord) + "," + str(xcoord))
        print("Size of table: " + str(data.shape[0]) + "," + str(data.shape[1]))
        radiation = data[ycoord, xcoord]
    else:
        print("Latitude and longitude are invalid for this file.")
        radiation = -1
    return radiation
Author: lukasmarshall | Project: solar-data-processing | Lines: 32 | Source: timeSeries.py
Example 20: sampleNextInternal_bak
def sampleNextInternal_bak(self, variables):
    y_tilde = self.samplerEngine.getVariable('nrl').varYtilde
    beta = (y_tilde * y_tilde).sum(0) / 2
    gammaSamples = np.random.gamma((self.ny - 1.) / 2, 1, self.nbVox)
    np.divide(beta, gammaSamples, self.currentValue)
Author: Solvi | Project: pyhrf | Lines: 7 | Source: noise.py
Note: The numpy.divide examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any further use should follow the corresponding project's license. Please do not republish without permission.