This article collects and summarizes typical usage examples of the scipy.ceil function in Python. If you are wondering what exactly scipy.ceil does and how to use it, the curated code examples below may help.
Twenty code examples of the ceil function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
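Before the examples: scipy.ceil is simply a re-exported alias of numpy.ceil, and recent SciPy releases have deprecated and removed these NumPy aliases, so new code should call numpy.ceil directly. The minimal sketch below (not taken from any of the projects cited here) shows the most common pattern in the examples that follow: rounding a ratio up and casting to int before using it as a count or an index.

import numpy as np

n_items, per_page = 13, 4
n_pages = int(np.ceil(n_items / per_page))  # ceil returns a float, so cast before using it as a count/index
print(n_pages)  # 4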
Example 1: rescale_target_superpixel_resolution
def rescale_target_superpixel_resolution(E_target):
    '''Rescale the target field to the superpixel resolution (currently only 4x4 superpixels implemented)'''
    superpixelSize = 4
    ny, nx = scipy.shape(E_target)

    maskCenterX = scipy.ceil((nx+1)/2)
    maskCenterY = scipy.ceil((ny+1)/2)

    nSuperpixelX = int(nx/superpixelSize)
    nSuperpixelY = int(ny/superpixelSize)

    FourierMaskSuperpixelResolution = fourier_mask(ny, nx, superpixelSize)

    E_target_ft = fft.fftshift(fft.fft2(fft.ifftshift(E_target)))

    # Apply mask
    E_target_ft = FourierMaskSuperpixelResolution*E_target_ft

    # Remove zeros outside of mask
    E_superpixelResolution_ft = E_target_ft[(maskCenterY - scipy.ceil((nSuperpixelY-1)/2)-1):(maskCenterY + scipy.floor((nSuperpixelY-1)/2)),
                                            (maskCenterX - scipy.ceil((nSuperpixelX-1)/2)-1):(maskCenterX + scipy.floor((nSuperpixelX-1)/2))]

    # Add phase gradient to compensate for anomalous 1.5 pixel shift in real plane
    phaseFactor = [[(scipy.exp(2*1j*pi*((k+1)/nSuperpixelY+(j+1)/nSuperpixelX)*3/8)) for j in range(nSuperpixelX)] for k in range(nSuperpixelY)]  # QUESTION
    E_superpixelResolution_ft = E_superpixelResolution_ft*phaseFactor

    # Fourier transform back to DMD plane
    E_superpixelResolution = fft.fftshift(fft.ifft2(fft.ifftshift(E_superpixelResolution_ft)))
    return E_superpixelResolution
Author: mmcqed, Project: DMD, Lines: 34, Source: miscDMD.py
Example 2: process
def process(y, eeg, EPOCH_LENGTH, EPOCH_OFFSET, NUM_FOLDS, p=None):
    sr = eeg['sample_rate']
    events = eeg['events']
    event_types = events['uniqueLabel']
    ns = int(ceil(EPOCH_LENGTH*sr))

    # Identify artifacts
    artifact_indexes = zeros((y.shape[0], 1))
    artifact_indexes[eeg['artifact_indexes']] = 1
    num_occurances, events = remove_corrupted_events(event_types, events, artifact_indexes, ns)

    # Shift signal to account for negative response
    zpadpre = zeros((int(ceil(EPOCH_OFFSET*sr)), 1))
    zpadpost = zeros((int(ceil((EPOCH_LENGTH-EPOCH_OFFSET)*sr)), 1))
    y = concatenate((zpadpre, y, zpadpost))
    artifact_indexes = concatenate((zpadpre, artifact_indexes, zpadpost))

    result = np.empty((2, NUM_FOLDS, len(event_types), 2))
    if p is not None:
        reg_parent_conn, reg_child_conn = mp.Pipe()
        av_parent_conn, av_child_conn = mp.Pipe()
        these_args = (y, events, artifact_indexes, ns, num_occurances, NUM_FOLDS,)
        res_reg = p.apply_async(cross_validate_regression, these_args)
        res_av = p.apply_async(cross_validate_average, these_args)
        result[0, :, :, :] = res_av.get()
        result[1, :, :, :] = res_reg.get()
    else:
        result[0, :, :, :] = cross_validate_average(y, events, artifact_indexes, ns, num_occurances, NUM_FOLDS)
        result[1, :, :, :] = cross_validate_regression(y, events, artifact_indexes, ns, num_occurances, NUM_FOLDS)
    return result
Author: ankaniisc, Project: EEGLAB2Hadoop, Lines: 35, Source: process.py
Example 3: cmd_ylim
def cmd_ylim(mu):
    if scipy.ceil(mu) - mu < mu - scipy.floor(mu):
        cmax = scipy.ceil(mu) + 1
    else:
        cmax = scipy.ceil(mu)
    cmin = cmax - 3
    return cmin, cmax
Author: cristobal-sifon, Project: astro, Lines: 7, Source: redsequence_v1.py
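A standalone sketch of the cmd_ylim logic above, using numpy.ceil/numpy.floor instead of the deprecated scipy aliases: the upper limit is padded by one unit when mu is closer to its ceiling than to its floor, and the limits always span three units.

import numpy as np

def cmd_ylim(mu):
    if np.ceil(mu) - mu < mu - np.floor(mu):
        cmax = np.ceil(mu) + 1
    else:
        cmax = np.ceil(mu)
    return cmax - 3, cmax

print(cmd_ylim(5.7))  # (4.0, 7.0): 5.7 is closer to 6, so the top is padded
print(cmd_ylim(5.2))  # (3.0, 6.0)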
Example 4: estimated_waiting_time
def estimated_waiting_time(self):
    deltatime = timedelta(days=7)
    t1 = datetime.now() - deltatime
    possible_hit_jobs_total = Session.query(Job)\
        .filter(Job.date_submitted > t1)\
        .filter(Job.failed == False)\
        .filter(Job.date_completed == None)\
        .filter(Job.batchid == None)\
        .order_by('-id')\
        .limit(100)\
        .all()
    ids = [i.id for i in possible_hit_jobs_total]
    if self.id not in ids:
        return 24*60*60
    else:
        m1 = ids.index(self.id)
        m2 = len(possible_hit_jobs_total)
        N = 12
        if (N - 1) <= m1:
            if m1 >= 50:
                p1 = 0.0
                p2 = 0.0
            else:
                p1 = 1.0/2.0*round(sum([scipy.misc.comb(m1, ii) for ii in range(0, N-1 + 1)]))/scipy.power(2, m1)
                p2 = 0.0
        else:
            if (m2-m1) >= 50:
                p1 = 0.5
                p2 = 0.0
            else:
                p1 = 0.5
                p2 = 1.0/2.0*round(sum([scipy.misc.comb(m2-m1, ii) for ii in range(0, N-1-m1 + 1)]))/scipy.power(2, m2-m1)
        p = p1 + p2
        print m1, m2, p1, p2, p
        return int(ceil((self.n_spacers - self.n_completed_spacers)/2.0) * ceil(1.0/p * 60.0))
Author: yinqingl, Project: zlab_crisprtool, Lines: 35, Source: interactive_database.py
Example 5: plot_gridsearch_scores_per_metric
def plot_gridsearch_scores_per_metric(self, grid_scores):
    cols = int(sp.ceil(sp.sqrt(len(self.metrics_to_use))))
    rows = int(sp.ceil(len(self.metrics_to_use) / cols))
    for i, metric in enumerate(self.metrics_to_use):
        plt.subplot(rows, cols, i + 1)
        self.plot_gridsearch_scores(grid_scores, {self.metric_key: metric})
        plt.title(metric_defs[metric][0])
Author: jgosmann, Project: spyke-metrics-extra, Lines: 7, Source: classification.py
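The cols/rows calculation above is a common way to lay out n subplots on a near-square grid: take ceil(sqrt(n)) columns, then as many rows as needed. Note that the rows line relies on true division (Example 8 below casts to float explicitly, which is the safer spelling under Python 2). A self-contained sketch with numpy:

import numpy as np

def grid_shape(n):
    cols = int(np.ceil(np.sqrt(n)))  # near-square: choose columns first
    rows = int(np.ceil(n / cols))    # then enough rows to hold all n panels
    return rows, cols

print(grid_shape(7))   # (3, 3)
print(grid_shape(10))  # (3, 4)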
Example 6: __init__
def __init__(self, centerFrequency=440.2*1e6, bMag=0.4e-4, nspec=64, sampfreq=50e3,
             collfreqmin=1e-2, alphamax=30.0, dFlag=False, f=None):
    """ Constructor for the class.
    Inputs:
        centerFrequency: The radar center frequency in Hz.
        bMag: The magnetic field magnitude in Teslas.
        nspec: The number of points of the spectrum.
        sampfreq: The sampling frequency of the A/Ds in Hz.
        collfreqmin: (Default 1e-2) The minimum collision frequency needed to incorporate it into the Gordeyev
            integral calculations, in units of K*sqrt(Kb*Ts/ms) for each ion species.
        alphamax: (Default 30) The maximum magnetic aspect angle at which the B-field will be taken into account.
        dFlag: A debug flag; if set to True, debug text will be output. Default is False.
        f: A numpy array of frequency points, in Hz, over which the spectrum will be formed. Default is
            None, in which case the frequency vector is formed from the number of points of the spectrum
            and the sampling frequency, giving a linearly sampled frequency vector. """
    self.bMag = bMag
    self.dFlag = dFlag
    self.collfreqmin = collfreqmin
    self.alphamax = alphamax
    self.K = 2.0*sp.pi*2*centerFrequency/v_C_0  # The Bragg scattering vector; corresponds to half the radar wavelength.
    if f is None:
        minfreq = -sp.ceil((nspec-1.0)/2.0)
        maxfreq = sp.floor((nspec-1.0)/2.0+1)
        self.f = sp.arange(minfreq, maxfreq)*(sampfreq/(2*sp.ceil((nspec-1.0)/2.0)))
    else:
        self.f = f
    self.omeg = 2.0*sp.pi*self.f
Author: zhufengGNSS, Project: ISRSpectrum, Lines: 27, Source: ISRSpectrum.py
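The ceil/floor pair in the branch above builds a symmetric frequency axis: for nspec points the integer grid runs from -ceil((nspec-1)/2) to +floor((nspec-1)/2), which is then scaled by the sampling frequency. A small numpy-only sketch with assumed values (not the ISRSpectrum class itself):

import numpy as np

nspec, sampfreq = 8, 50e3
minfreq = -np.ceil((nspec - 1.0)/2.0)
maxfreq = np.floor((nspec - 1.0)/2.0 + 1)
f = np.arange(minfreq, maxfreq)*(sampfreq/(2*np.ceil((nspec - 1.0)/2.0)))
print(f)  # 8 points from -25000.0 Hz to 18750.0 Hz in steps of 6250.0 Hz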
Example 7: getRegion
def getRegion(self, size=3e4, min_nSNPs=1, chrom_i=None, pos_min=None, pos_max=None):
    """
    Sample a region from the piece of genotype X, chrom, pos
    minSNPnum: minimum number of SNPs contained in the region
    Ichrom: restrict X to chromosome Ichrom before taking the region
    cis: bool vector that marks the sorted region
    region: vector that contains chrom and init and final position of the region
    """
    if (self.chrom is None) or (self.pos is None):
        bim = plink_reader.readBIM(self.bfile, usecols=(0, 1, 2, 3))
        chrom = SP.array(bim[:, 0], dtype=int)
        pos = SP.array(bim[:, 3], dtype=int)
    else:
        chrom = self.chrom
        pos = self.pos

    if chrom_i is None:
        n_chroms = chrom.max()
        chrom_i = int(SP.ceil(SP.rand()*n_chroms))

    pos = pos[chrom == chrom_i]
    chrom = chrom[chrom == chrom_i]

    ipos = SP.ones(len(pos), dtype=bool)
    if pos_min is not None:
        ipos = SP.logical_and(ipos, pos_min < pos)
    if pos_max is not None:
        ipos = SP.logical_and(ipos, pos < pos_max)
    pos = pos[ipos]
    chrom = chrom[ipos]

    if size == 1:
        # select single SNP
        idx = int(SP.ceil(pos.shape[0]*SP.rand()))
        cis = SP.arange(pos.shape[0]) == idx
        region = SP.array([chrom_i, pos[idx], pos[idx]])
    else:
        while 1:
            idx = int(SP.floor(pos.shape[0]*SP.rand()))
            posT1 = pos[idx]
            posT2 = pos[idx] + size
            if posT2 <= pos.max():
                cis = chrom == chrom_i
                cis *= (pos > posT1)*(pos < posT2)
                if cis.sum() > min_nSNPs:
                    break
        region = SP.array([chrom_i, posT1, posT2])

    start = SP.nonzero(cis)[0].min()
    nSNPs = cis.sum()
    if self.X is None:
        rv = plink_reader.readBED(self.bfile, useMAFencoding=True, start=start, nSNPs=nSNPs, bim=bim)
        Xr = rv['snps']
    else:
        Xr = self.X[:, start:start+nSNPs]
    return Xr, region
Author: PMBio, Project: limix, Lines: 59, Source: simulator.py
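In the example above, int(SP.ceil(SP.rand()*n_chroms)) draws a chromosome index in 1..n_chroms: rand() lies in [0, 1), so after scaling and rounding up the result is 1..n (only the measure-zero draw rand() == 0.0 would give 0). A quick empirical check, independent of the limix simulator:

import numpy as np

n_chroms = 22
draws = np.ceil(np.random.rand(10000)*n_chroms).astype(int)
print(draws.min(), draws.max())  # 1 22 (with overwhelming probability)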
Example 8: plot_benchmark_results
def plot_benchmark_results(self):
    plt.figure()
    cols = int(sp.ceil(sp.sqrt(len(self.results))))
    rows = int(sp.ceil(float(len(self.results)) / cols))
    for i, m in enumerate(self.results):
        plt.subplot(rows, cols, i + 1)
        self.plot_times(self.results[m])
        plt.title(metrics[m][0])
Author: jgosmann, Project: spyke-metrics-extra, Lines: 8, Source: benchmark_spike_train_metrics.py
Example 9: get_cb_ticks
def get_cb_ticks(values):
    min_tick = sp.nanmin(values)
    max_tick = sp.nanmax(values)
    med_tick = min_tick + (max_tick - min_tick) / 2.0
    if max_tick > 1.0:
        min_tick = sp.ceil(min_tick)
        max_tick = sp.floor(max_tick)
        med_tick = sp.around(med_tick)
    else:
        min_tick = sp.ceil(min_tick * 100.0) / 100.0
        max_tick = sp.floor(max_tick * 100.0) / 100.0
        med_tick = sp.around(med_tick, 2)
    return [min_tick, med_tick, max_tick]
Author: jgosmann, Project: spyke-metrics-extra, Lines: 13, Source: section3.1.py
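The ceil(x*100)/100 and floor(x*100)/100 expressions above round a tick up or down to two decimal places, so the end ticks stay inside the data range; a standalone illustration:

import numpy as np

x = 0.12345
print(np.ceil(x*100.0)/100.0)   # 0.13 -- rounded up to 2 decimals
print(np.floor(x*100.0)/100.0)  # 0.12 -- rounded down to 2 decimals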
Example 10: fourier_mask
def fourier_mask(ny, nx, resolution):
    # Create circular aperture around the center
    maskCenterX = int(scipy.ceil((nx+1)/2))
    maskCenterY = int(scipy.ceil((ny+1)/2))
    ### Code optimization purposes
    angle = ny/nx
    nres = (ny/resolution/2)**2
    ###
    return [[((i+1 - maskCenterY)**2 + (angle*(j+1 - maskCenterX))**2 < nres) for j in range(nx)] for i in range(ny)]
Author: mmcqed, Project: DMD, Lines: 13, Source: miscDMD.py
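Under Python 3's true division, the ceil((n+1)/2) expression used above (and in Example 1) gives the 1-based index of the centre pixel for both even and odd sizes, the same value as floor(n/2)+1; a quick check:

import numpy as np

for n in (4, 5, 8, 9):
    print(n, int(np.ceil((n + 1)/2)))  # prints 3, 3, 5, 5 -- same as floor(n/2)+1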
Example 11: traj_ensemble_quantiles
def traj_ensemble_quantiles(traj_set, quantiles=(0.025, 0.5, 0.975)):
    """
    Return a list of trajectories, each one corresponding to a given passed-in
    quantile.
    """
    all_values = scipy.array([traj.values for traj in traj_set])
    sorted_values = scipy.sort(all_values, 0)
    q_trajs = []
    for q in quantiles:
        # Calculate the index corresponding to this quantile. The q is because
        # Python arrays are 0 indexed
        index = q * (len(sorted_values) - 1)
        below = int(scipy.floor(index))
        above = int(scipy.ceil(index))
        if above == below:
            q_values = sorted_values[below]
        else:
            # Linearly interpolate...
            q_below = (1.0*below)/(len(sorted_values)-1)
            q_above = (1.0*above)/(len(sorted_values)-1)
            q_values = sorted_values[below] + (q - q_below)*(sorted_values[above] - sorted_values[below])/(q_above - q_below)
        q_traj = copy.deepcopy(traj_set[0])
        q_traj.values = q_values
        q_trajs.append(q_traj)
    return q_trajs
Author: Colbert-Sesanker, Project: Networks, Lines: 27, Source: Ensembles.py
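The floor/ceil pair above brackets the fractional index q*(len-1) so the two neighbouring sorted samples can be interpolated linearly. For a plain array the same value comes out of numpy.quantile with its default linear interpolation; a sketch (not using the project's trajectory objects):

import numpy as np

values = np.array([3.0, 1.0, 4.0, 1.0, 5.0, 9.0, 2.0, 6.0])
q = 0.975
s = np.sort(values)
index = q*(len(s) - 1)
below, above = int(np.floor(index)), int(np.ceil(index))
manual = s[below] + (index - below)*(s[above] - s[below])  # linear interpolation between neighbours
print(manual, float(np.quantile(values, q)))  # both ~8.475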
Example 12: eliminateSmallClusters
def eliminateSmallClusters(self, mskDds, clusterSzThreshFraction):
    """
    Performs a labelling of non-mask-value connected components of
    the :samp:`mskDds` image and eliminates clusters/objects
    whose number of voxels is less
    than :samp:`clusterSzThreshFraction*mango.count_non_masked(mskDds)`.

    :type mskDds: :obj:`mango.Dds`
    :param mskDds: This image is modified by eliminating small clusters/objects
        (by setting small-cluster voxels to the value :samp:`mskDds.mtype.maskValue()`).
    :type clusterSzThreshFraction: :obj:`float`
    :param clusterSzThreshFraction: Value in the interval :samp:`[0,1]`. Threshold fraction of
        total non-masked :samp:`mskDds` voxels for eliminating small clusters/objects.
    """
    elimClustersSmallerThan = int(sp.ceil(mango.count_non_masked(mskDds)*clusterSzThreshFraction))
    segDds = mango.ones_like(mskDds, mtype="segmented")
    mango.copy_masked(mskDds, segDds)
    rootLogger.info("eliminateSmallClusters: Labeling mskDds masked connected components...")
    lblDds = mango.image.label(segDds, 1)
    rootLogger.info("eliminateSmallClusters: Done labeling mskDds masked connected components...")
    self.writeIntermediateDds("_111MskDdsLabels", lblDds)
    rootLogger.info("eliminateSmallClusters: Eliminating clusters of size range [%s, %s]..." % (0, elimClustersSmallerThan))
    lblDds = mango.image.eliminate_labels_by_size(lblDds, val=lblDds.mtype.maskValue(), minsz=0, maxsz=elimClustersSmallerThan)
    rootLogger.info("eliminateSmallClusters: Done eliminating clusters in size range [%s, %s]." % (0, elimClustersSmallerThan))
    rootLogger.info("eliminateSmallClusters: Copying small-cluster mask to mskDds...")
    mango.copy_masked(lblDds, mskDds)
    rootLogger.info("eliminateSmallClusters: Done copying small-cluster mask to mskDds.")
Author: pymango, Project: pymango, Lines: 26, Source: label_spherical_cavities.py
Example 13: _hough_transform
def _hough_transform(img, angles):
    rows, cols = img.shape

    # determine the number of bins
    d = sp.ceil(sp.hypot(*img.shape))
    nr_bins = 2 * d
    bins = sp.linspace(-d, d, nr_bins)

    # create the accumulator
    out = sp.zeros((nr_bins, len(angles)), dtype=sp.float64)

    # compute the sines/cosines
    cos_theta = sp.cos(angles)
    sin_theta = sp.sin(angles)

    # construct the x and y values
    y = []
    x = []
    for i in xrange(rows):
        y += [i] * cols
        x += range(cols)
    y = sp.array(y)
    x = sp.array(x)

    # flatten image
    flattened_img = img.flatten()

    for i, (c, s) in enumerate(zip(cos_theta, sin_theta)):
        distances = x * c + y * s
        bin_indices = (sp.round_(distances) - bins[0]).astype(sp.uint8)
        bin_sums = sp.bincount(bin_indices, flattened_img)
        out[:len(bin_sums), i] = bin_sums

    return out
Author: sunnyrjuneja, Project: ai_tidbits, Lines: 34, Source: detect_skew2.py
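In the example above the number of distance bins is derived from the image diagonal, rounded up with ceil so every possible rho value has a bin (one set for each sign of rho); a quick check of that arithmetic:

import numpy as np

shape = (480, 640)
d = np.ceil(np.hypot(*shape))  # image diagonal: exactly 800.0 for a 480x640 image
nr_bins = int(2*d)             # bins for both negative and positive rho
print(d, nr_bins)              # 800.0 1600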
Example 14: fileLog
def fileLog(self):
    if self.ui.tabWidget.currentIndex():
        self.oktoLoad()
        return
    else:
        dir = (os.path.dirname(self.filename)
               if self.filename is not None else ".")
        self.filetuple = QFileDialog.getOpenFileName(self, "Open Log File", dir,
                                                     "Data (*.log)\nAll Files (*.*)")
        self.filename = self.filetuple[0]
        fname = self.filename
        if fname:
            self.logProcessor.processLog(fname)
            self.loadFile(fname)
            self.updateStatus('New Log file opened.')
            [self.logProcessor.timeS, self.logProcessor.av, self.logProcessor.error] = \
                self.logProcessor.Allan.allanDevMills(self.logProcessor.offsets)
            self.type = 3
            self.sizeOff = len(self.logProcessor.offsets)
            if self.sizeOff % 84 != 0:
                self.exceeds = self.sizeOff % 84
            self.numberOFTicks = scipy.ceil(float(self.sizeOff)/84)
            self.ui.spinBox.setRange(1, self.numberOFTicks)
Author: thiagodefreitas, Project: NetworkTime, Lines: 25, Source: ntpstats.py
Example 15: stability_selection
def stability_selection(X, K, y, mu, n_reps, f_subset, **kwargs):
    """
    run stability selection

    Input:
    X: SNP matrix: n_s x n_f
    y: phenotype: n_s x 1
    K: kinship matrix: n_s x n_s
    mu: l1-penalty
    n_reps: number of repetitions
    f_subset: fraction of the dataset that is used for creating one bootstrap

    output:
    selection frequency for all SNPs: n_f x 1
    """
    time_start = time.time()
    [n_s, n_f] = X.shape
    n_subsample = scipy.ceil(f_subset * n_s)
    freq = scipy.zeros(n_f)

    for i in range(n_reps):
        print 'Iteration %d' % i
        idx = scipy.random.permutation(n_s)[:n_subsample]
        res = train(X[idx], K[idx][:, idx], y[idx], mu, **kwargs)
        snp_idx = (res['weights'] != 0).flatten()
        freq[snp_idx] += 1.

    freq /= n_reps

    time_end = time.time()
    time_diff = time_end - time_start
    print '... finished in %.2fs' % (time_diff)

    return freq
Author: yangta1995, Project: LinearMixedModel, Lines: 33, Source: lmm_lasso.py
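Above, scipy.ceil(f_subset * n_s) sizes each bootstrap subsample. Note that ceil returns a float, and the original (Python 2 era) code passes it straight to a slice; under Python 3 / current NumPy it must be cast to int first, as in this minimal sketch:

import numpy as np

n_s, f_subset = 250, 0.7
n_subsample = int(np.ceil(f_subset*n_s))        # 175 samples per bootstrap
idx = np.random.permutation(n_s)[:n_subsample]  # row indices of one subsample
print(n_subsample, idx.shape)                   # 175 (175,)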
Example 16: _msge_with_gradient_overdetermined
def _msge_with_gradient_overdetermined(data, delta, xvschema, skipstep, p):
    """ Calculate the mean squared generalization error and its gradient for an
    overdetermined equation system.
    """
    (l, m, t) = data.shape
    d = None
    l, k = 0, 0
    nt = sp.ceil(t / skipstep)
    for trainset, testset in xvschema(t, skipstep):
        (a, b) = _construct_var_eqns(sp.atleast_3d(data[:, :, trainset]), p)
        (c, d) = _construct_var_eqns(sp.atleast_3d(data[:, :, testset]), p)

        e = sp.linalg.inv(sp.eye(a.shape[1]) * delta ** 2 +
                          a.transpose().dot(a))

        ba = b.transpose().dot(a)
        dc = d.transpose().dot(c)
        bae = ba.dot(e)
        baee = bae.dot(e)
        baecc = bae.dot(c.transpose().dot(c))

        l += sp.sum(baecc * bae - 2 * bae * dc) + sp.sum(d ** 2)
        k += sp.sum(baee * dc - baecc * baee) * 4 * delta

    return l / (nt * d.size), k / (nt * d.size)
Example 17: load_network
def load_network(N=10, k=6, network='Random'):
    netdata = []
    if network != 'Random':
        for line in file(network):
            if line[0] != '#':
                line = line.split()
                if len(line) == 3:
                    netdata.append(line)
        data_array = sp.array(netdata)
        N = max(max(map(int, data_array[:, 0])), max(map(int, data_array[:, 1])))
    else:
        for n1 in range(N):
            for nei in range(k):
                n2 = n1
                nvals = [n1]
                while n2 in nvals:
                    n2 = int(sp.ceil(N*sp.random.random()))
                    nvals.append(n2)
                netdata.append([n1, n2, 1.0])
        data_array = sp.array(netdata)

    A_matrix = sp.mat(sp.zeros((int(N), int(N))))
    for row in data_array:
        A_matrix[int(row[0])-1, int(row[1])-1] = float(row[2])
    return A_matrix
Author: sideshownick, Project: NetWorks, Lines: 25, Source: network_functions.py
Example 18: f_PSD_from_file
def f_PSD_from_file(filename, fLow, fNyq, deltaF):
    """
    Read a detector ascii ASD file and return the PSD and frequency vector
    for use with ffts.
    """
    f_in, S_in = numpy.loadtxt(filename, unpack=True)
    f = numpy.linspace(fLow, fNyq, scipy.ceil((fNyq-fLow)/deltaF)+1)
    S = pylab.interp(f, f_in, S_in)

    # packing is of the form:
    # [0 deltaF 2*deltaF ... fNyquist-deltaF fNyquist -fNyquist+deltaF ... -2*deltaF -deltaF]
    PSD = scipy.zeros(2*(fNyq/deltaF), dtype='float') + scipy.inf
    PSD[round(fLow/deltaF):fNyq/deltaF+1] = S**2
    if -round(fLow/deltaF) == 0:
        PSD[fNyq/deltaF+1:] = S[-2:0:-1]**2
    else:
        PSD[fNyq/deltaF+1:-round(fLow/deltaF)] = S[-2:0:-1]**2

    f = f_for_fft(fLow, fNyq, deltaF)
    nNyq = round(fNyq/deltaF)
    nLow = round(fLow/deltaF)
    PSD = scipy.zeros(2*nNyq) + scipy.inf
    PSD[nLow:nNyq+1] = S**2
    if -nLow == 0:
        PSD[nNyq+1:] = S[-2:0:-1]**2
    else:
        PSD[nNyq+1:-nLow] = S[-2:0:-1]**2

    return f, PSD
Author: Solaro, Project: lalsuite, Lines: 26, Source: coherent_inspiral_metric_detector_details.py
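Above, scipy.ceil((fNyq-fLow)/deltaF)+1 counts the frequency samples needed to cover [fLow, fNyq] at spacing deltaF; a small check of that arithmetic with numpy:

import numpy as np

fLow, fNyq, deltaF = 10.0, 1024.0, 0.25
n = int(np.ceil((fNyq - fLow)/deltaF) + 1)
f = np.linspace(fLow, fNyq, n)
print(n, f[1] - f[0])  # 4057 0.25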
Example 19: plotopticsonly
def plotopticsonly(allsky_data, plotdir, m, ax, fig, latlim, lonlim):
    """ Make a set of plots when only all-sky data is available."""
    maxplot = len(allsky_data.times)
    strlen = int(sp.ceil(sp.log10(maxplot))+1)
    fmstr = '{0:0>'+str(strlen)+'}_'
    optictimes = allsky_data.times
    plotnum = 0
    firstbar = True
    optbnds = [300, 1100]
    for iop in range(len(optictimes)):
        (slice3, cbar3) = slice2DGD(allsky_data, 'alt', 150, optbnds, title='',
                                    time=iop, cmap='gray', gkey='image', fig=fig, ax=ax, cbar=True, m=m)
        slice3.set_norm(colors.PowerNorm(gamma=0.6, vmin=optbnds[0], vmax=optbnds[1]))
        if firstbar:
            firstbar = False
            cbaras = plt.colorbar(slice3, ax=ax, orientation='horizontal')
            cbaras.set_label('All Sky Scale')
        plt.title(insertinfo('All Sky $tmdy $thmsehms', posix=allsky_data.times[iop, 0], posixend=allsky_data.times[iop, 1]))
        print('Plotting {0} of {1} plots'.format(plotnum, maxplot))
        plt.savefig(os.path.join(plotdir, fmstr.format(plotnum)+'ASonly.png'))
        plotnum += 1
        slice3.remove()
Author: jswoboda, Project: MahaliPlotting, Lines: 25, Source: plotdata.py
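Above, strlen = ceil(log10(maxplot)) + 1 picks a zero-padding width wide enough for every frame number, and the format string then left-pads the index with zeros; a standalone sketch:

import numpy as np

maxplot = 137
strlen = int(np.ceil(np.log10(maxplot)) + 1)  # 4 digits of padding for up to 137 frames
fmstr = '{0:0>' + str(strlen) + '}_'
print(fmstr.format(5), fmstr.format(99))  # 0005_ 0099_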
Example 20: _msge_with_gradient_overdetermined
def _msge_with_gradient_overdetermined(self, data, delta, xvschema, skipstep):
    """ Calculate the mean squared generalization error and its gradient for an overdetermined equation system.
    """
    (l, m, t) = data.shape
    d = None
    l, k = 0, 0
    nt = sp.ceil(t / skipstep)
    for s in range(0, t, skipstep):
        #print(s,drange)
        trainset, testset = xvschema(s, t)

        (a, b) = self._construct_eqns(sp.atleast_3d(data[:, :, trainset]))
        (c, d) = self._construct_eqns(sp.atleast_3d(data[:, :, testset]))

        #e = sp.linalg.inv(np.eye(a.shape[1])*delta**2 + a.transpose().dot(a), overwrite_a=True, check_finite=False)
        e = sp.linalg.inv(sp.eye(a.shape[1]) * delta ** 2 + a.transpose().dot(a))

        ba = b.transpose().dot(a)
        dc = d.transpose().dot(c)
        bae = ba.dot(e)
        baee = bae.dot(e)
        baecc = bae.dot(c.transpose().dot(c))

        l += sp.sum(baecc * bae - 2 * bae * dc) + sp.sum(d ** 2)
        k += sp.sum(baee * dc - baecc * baee) * 4 * delta

    return l / (nt * d.size), k / (nt * d.size)
Author: BioinformaticsArchive, Project: SCoT, Lines: 27, Source: var.py
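In both versions of _msge_with_gradient_overdetermined, nt = ceil(t / skipstep) is (under Python 3's true division) the number of train/test splits produced by stepping through t trials with stride skipstep, matching len(range(0, t, skipstep)); a quick check:

import numpy as np

t, skipstep = 100, 7
nt = int(np.ceil(t/skipstep))
print(nt, len(range(0, t, skipstep)))  # 15 15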
Note: The scipy.ceil examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.