This article collects typical usage examples of the numpy.log2 function in Python: what log2 does, how to call it, and how it is used in real projects. Twenty code examples of log2 are shown below, sorted by popularity by default. The snippets are excerpts from larger source files, so module-level imports (e.g. import numpy as np, import math) and project-internal helpers are assumed to come from the original modules.
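As a quick refresher before the examples, np.log2 computes the base-2 logarithm elementwise; a minimal sketch:

import numpy as np

x = np.array([1.0, 2.0, 8.0, 1024.0])
print(np.log2(x))           # [ 0.  1.  3. 10.]
print(2.0 ** np.log2(5.0))  # 5.0 -- log2 is the inverse of 2**x
print(np.log2(0.0))         # -inf, with a divide-by-zero RuntimeWarning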
Example 1: Classification
def Classification(theta, tag_dict, feature_dict, features):
    """
    Predict the tag for a naive Bayes model, using theta as the
    decision threshold on the likelihood ratio between the two tags.
    """
    total = sum(tag_dict.values())
    prob = []
    for tag in range(2):
        tag = str(tag)
        # log prior for this tag
        probability = np.log2(tag_dict[tag] / total)
        searchFlag = 1
        for featureID in range(len(features)):
            feature = features[featureID]
            item = (tag, feature, featureID)
            if item not in feature_dict:
                searchFlag = 0
                break
            # accumulate the log conditional probabilities
            tmpProb = np.log2(feature_dict[item] / tag_dict[tag])
            probability = probability + tmpProb
        prob.append(probability)
        if searchFlag == 0:
            break
    if searchFlag == 0:
        # unseen feature: fall back to the opposite tag
        if tag == '1':
            predict_tag = '0'
        else:
            predict_tag = '1'
    else:
        ratio = 2.0 ** (prob[1] - prob[0])
        if ratio >= theta:
            predict_tag = '1'
        else:
            predict_tag = '0'
    return predict_tag
Author: JoshuaW1990, Project: simpleDNAandProt, Lines: 35, Source: nbm1.py
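A hypothetical call, with toy count dictionaries invented purely for illustration (Python 3 true division assumed):

tag_dict = {'0': 6, '1': 4}
feature_dict = {('0', 'A', 0): 3, ('0', 'G', 1): 2,
                ('1', 'A', 0): 1, ('1', 'G', 1): 3}
print(Classification(1.0, tag_dict, feature_dict, ['A', 'G']))
# the likelihood ratio 2**(prob[1]-prob[0]) works out to about 0.75 here,
# below theta=1.0, so this should print '0'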
Example 2: mutual_info
def mutual_info(X, Y):
    """ Calculate the mutual information between X and Y.
    Note: The input X, Y may look like this:
        X = [event1, event2, event3, ..., eventn]
        Y = [event1, event2, event3, ..., eventn]
    Every event_i represents the outcome of that random variable.
    Args:
        X: An array-like random variable of binary outcomes.
        Y: An array-like random variable of binary outcomes.
    Returns:
        The mutual information score between X and Y.
    """
    X_np = np.array(X)
    Y_np = np.array(Y)
    # marginal probabilities
    P_t1 = np.average(X_np)
    P_t0 = 1 - P_t1
    P_c1 = np.average(Y_np)
    P_c0 = 1 - P_c1
    # joint probabilities; the bitwise ~ assumes boolean entries
    # (with plain 0/1 integers, ~ yields -1/-2 and the t0c0 term breaks)
    P_t1c1 = np.average(X_np & Y_np)
    P_t1c0 = np.average(X_np & ~Y_np)
    P_t0c1 = np.average(~X_np & Y_np)
    P_t0c0 = np.average(~X_np & ~Y_np)
    mi = P_t1c1 * np.log2(P_t1c1 / (P_t1 * P_c1)) + \
         P_t1c0 * np.log2(P_t1c0 / (P_t1 * P_c0)) + \
         P_t0c1 * np.log2(P_t0c1 / (P_t0 * P_c1)) + \
         P_t0c0 * np.log2(P_t0c0 / (P_t0 * P_c0))
    return mi if not np.isnan(mi) else 0
Author: rickchung, Project: topic-modeling-study, Lines: 35, Source: mi_word_tag.py
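A small check with boolean arrays (note that all four joint cells must be non-empty, otherwise a 0*log2(0) term produces NaN and the guard returns 0):

import numpy as np
X = np.array([1, 1, 1, 0, 0, 0, 1, 0], dtype=bool)
Y = np.array([1, 1, 0, 0, 0, 1, 1, 0], dtype=bool)
print(mutual_info(X, Y))  # about 0.189 bits for this partially correlated pair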
Example 3: __cqt_filter_fft
def __cqt_filter_fft(sr, fmin, n_bins, bins_per_octave, tuning,
                     filter_scale, norm, sparsity, hop_length=None,
                     window='hann'):
    '''Generate the frequency domain constant-Q filter basis.'''
    basis, lengths = filters.constant_q(sr,
                                        fmin=fmin,
                                        n_bins=n_bins,
                                        bins_per_octave=bins_per_octave,
                                        tuning=tuning,
                                        filter_scale=filter_scale,
                                        norm=norm,
                                        pad_fft=True,
                                        window=window)
    # Filters are padded up to the nearest integral power of 2
    n_fft = basis.shape[1]
    if (hop_length is not None and
            n_fft < 2.0**(1 + np.ceil(np.log2(hop_length)))):
        n_fft = int(2.0 ** (1 + np.ceil(np.log2(hop_length))))
    # re-normalize bases with respect to the FFT window length
    basis *= lengths[:, np.newaxis] / float(n_fft)
    # FFT and retain only the non-negative frequencies
    fft_basis = fft.fft(basis, n=n_fft, axis=1)[:, :(n_fft // 2) + 1]
    # sparsify the basis
    fft_basis = util.sparsify_rows(fft_basis, quantile=sparsity)
    return fft_basis, n_fft, lengths
Author: baifengbai, Project: librosa, Lines: 33, Source: constantq.py
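The power-of-two padding rule in the middle of this snippet is easy to check in isolation; for hop_length=512 the FFT size is bumped to 1024:

import numpy as np
hop_length = 512
print(int(2.0 ** (1 + np.ceil(np.log2(hop_length)))))
# 1024 -- at least twice the hop length, rounded up to a power of two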
Example 4: getBits
def getBits(self, cell):
    zero = [-self.markerArea[i]/2. for i in [0, 1]]
    # reversed binary digits of the cell coordinates ([:-2] strips the reversed '0b' prefix)
    bitx = [int(i) for i in bin(int(cell[0]))[::-1][:-2]]
    bity = [int(i) for i in bin(int(cell[1]))[::-1][:-2]]
    s0 = int(np.log2(self.cellsPerBlock[0]*self.noBlocks[0]))
    s1 = int(np.log2(self.cellsPerBlock[1]*self.noBlocks[1]))
    # zero-pad the bit lists up to s0/s1 bits
    for i in range(s0 - len(bitx)):
        bitx.append(0)
    for i in range(s1 - len(bity)):
        bity.append(0)
    tx = np.zeros(s0, dtype=bool)  # plain bool: np.bool is deprecated in modern NumPy
    ty = np.zeros(s1, dtype=bool)
    px = np.empty((s0, 2))
    py = np.empty((s1, 2))
    # mod is presumably numpy's mod; // preserves the original Python 2 integer division
    for i, b in enumerate(bitx):
        x = zero[0] + mod(i+1, self.noBitsX)*self.bitDistance
        y = zero[1] + ((i+1)//self.noBitsY)*self.bitDistance
        px[i] = (x, y)
        tx[i] = b
    for i, b in enumerate(bity):
        x = zero[0] + (self.noBitsX - mod(i+1, self.noBitsX) - 1)*self.bitDistance
        y = zero[1] + (self.noBitsY - (i+1)//self.noBitsY - 1)*self.bitDistance
        py[i] = (x, y)
        ty[i] = b
    return px, py, tx, ty
Author: drueffer, Project: apage_rom, Lines: 25, Source: PatternGenerator.py
Example 5: hurstexp_welchper
def hurstexp_welchper(data, samp=1.05, f_max=0, consider_fBm=False):
    """
    Compute the Hurst exponent of a signal using the Welch periodogram.
    data : your signal
    samp : sampling rate in Hz, 1 for an fMRI series
    f_max: the highest frequency you want to take into account
    """
    # TimeSeries and SpectralAnalyzer come from the nitime package.
    # data can be two-dimensional (but no more); in that case time should
    # be on the second axis.
    time_series = TimeSeries(data=data, sampling_rate=samp)
    spectral_analysis = SpectralAnalyzer(time_series)
    frq, pwr = spectral_analysis.psd
    # We need to keep only the small frequencies, but the exact choice is a
    # bit arbitrary; we need to have alpha between 0 and 1
    if f_max == 0:
        masker = frq > 0
    else:
        masker = np.all([(frq > 0), (frq < f_max)], axis=0)
    log2frq = np.log2(frq[masker])
    log2pwr = np.log2(pwr.T[masker])
    # the Hurst exponent is read off the slope of the log-log spectrum
    tmp = np.polyfit(log2frq, log2pwr, deg=1)
    if consider_fBm:
        return (1 - tmp[0]) / 4, {'aest': tmp[1], 'log2frq': log2frq, 'log2pwr': log2pwr}
    return (1 - tmp[0]) / 2, {'aest': tmp[1], 'log2frq': log2frq, 'log2pwr': log2pwr}
Author: JFBazille, Project: ICode, Lines: 25, Source: hexp_welchp.py
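A hedged usage sketch, assuming the nitime package supplies TimeSeries and SpectralAnalyzer as in the original module:

import numpy as np
noise = np.random.randn(4096)
H, regression_info = hurstexp_welchper(noise, samp=1.0)
print(H)  # white noise has a flat spectrum (slope ~0), so H should land near 0.5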
Example 6: prepare_logged
def prepare_logged(x, y):
    """
    Transform `x` and `y` to a log scale while dealing with zeros.
    This function scales `x` and `y` such that the points that are zero in one
    array are set to the min of the other array.
    When plotting expression data, frequently one sample will have reads in
    a particular feature but the other sample will not. Expression data also
    tends to look better on a log scale, but log(0) is undefined and therefore
    cannot be shown on a plot. This function allows these points to be shown,
    piled up along one side of the plot.
    :param x,y: NumPy arrays
    """
    xi = np.log2(x)
    yi = np.log2(y)
    # finite masks: log2(0) is -inf, and those entries are replaced below
    xv = np.isfinite(xi)
    yv = np.isfinite(yi)
    global_min = min(xi[xv].min(), yi[yv].min())
    global_max = max(xi[xv].max(), yi[yv].max())  # computed but unused in this excerpt
    xi[~xv] = global_min
    yi[~yv] = global_min
    return xi, yi
Author: rbeagrie, Project: metaseq, Lines: 28, Source: plotutils.py
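A tiny worked example showing how the zeros get pinned:

import numpy as np
x = np.array([0.0, 1.0, 4.0])
y = np.array([2.0, 0.0, 8.0])
xi, yi = prepare_logged(x, y)
print(xi, yi)  # [0. 0. 2.] [1. 0. 3.] -- zeros land on the global finite minimum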
Example 7: rms_total
def rms_total(x, window_size=256):
    # number of complete windows: len(x) / window_size, computed via log2
    n_windows = int(pow(2, numpy.log2(len(x)) - numpy.log2(window_size)))
    rms_tot = numpy.zeros(n_windows)
    for i in range(n_windows):
        w = x[i*window_size:(i+1)*window_size]
        # rms() is a helper defined elsewhere in rms.py
        rms_tot[i] = rms(w, window_size)
    return rms_tot
Author: Lathomas42, Project: Envelope_Detection, Lines: 7, Source: rms.py
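The rms() helper lives elsewhere in rms.py; a minimal stand-in under that assumption:

import numpy
def rms(w, window_size):
    # stand-in for the helper defined elsewhere in rms.py
    return numpy.sqrt(numpy.mean(w ** 2))

x = numpy.random.randn(1024)
print(rms_total(x, window_size=256))  # four values, each near 1.0 for unit-variance noise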
Example 8: DFA
def DFA(indata, scale, q, m):
    y = np.cumsum(indata - indata.mean())  # Equation 1 in paper
    RMSt = []  # Temporary RMS variable: contains the F(s,v) values
    F = []     # F: Fluctuation function
    N = len(indata)
    print('len indata: ', N)
    for i in range(len(scale)):
        ns = int(np.floor(len(y)/scale[i]))  # number of segments: Ns = int(N/s)
        for v in range(2*ns):
            # segments run forwards for v < ns and backwards from the end otherwise
            if v < ns:
                index_start = v*scale[i]
                index_end = (v+1)*scale[i]
            else:
                index_start = N - (v-ns)*scale[i] - scale[i]
                index_end = N - (v-ns)*scale[i]
            index = range(index_start, index_end)  # calculate index for each segment
            yv = y[index_start:index_end]  # extract the time series values of each segment
            c = np.polyfit(index, yv, m)
            fit = np.polyval(c, index)
            # Equation 2, but storing F(v,s) rather than F(v,s)**2
            RMSt.append(math.sqrt(np.mean((yv-fit)**2)))
        RMS = np.asarray(RMSt)  # convert RMSt to an array
        qRMS = RMS**q
        F.append(np.mean(qRMS)**(1.0/q))  # Equation 4
        del RMSt[:]  # reset RMSt
    C = np.polyfit(np.log2(scale), np.log2(F), 1)
    H = C[0]  # Hurst parameter
    return (H, scale, F)
Author: atamazian, Project: traffic-proc-tools, Lines: 27, Source: DFA_2Ns.py
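A hedged sanity check (seeded for repeatability): applied to uncorrelated noise, DFA should recover a Hurst exponent near 0.5:

import numpy as np
np.random.seed(0)
H, scale, F = DFA(np.random.randn(4096), [16, 32, 64, 128, 256], q=2, m=1)
print(round(H, 2))  # should come out close to 0.5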
Example 9: _hist_bin_doane
def _hist_bin_doane(x):
    """
    Doane's histogram bin estimator.
    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            #   g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return x.ptp() / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0
Author: aragilar, Project: numpy, Lines: 32, Source: histograms.py
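Dividing the data range by Doane's bin width gives the suggested bin count; for near-normal data the skewness term stays small:

import numpy as np
np.random.seed(1)
x = np.random.randn(1000)
h = _hist_bin_doane(x)
print(int(np.ceil(x.ptp() / h)))  # roughly 11-12 bins for n=1000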
Example 10: CostFunction
def CostFunction(self, X, y, regularization_parameter, count_sigmas=False):
    X = np.matrix(X)
    y = np.matrix(y)
    outs = self.FeedForward(X)
    J = 0
    # regularization term (note: upstream sums squares of column 1 only, not thetas[:, 1:])
    for thetas in self.Thetas:
        J = J + np.sum(np.square(thetas[:, 1]))
    J = J * regularization_parameter
    #linear
    #J = J + np.sum(np.square(outs - y))
    #logistic
    J = J + np.sum(-(np.multiply(y, np.log2(outs)) + np.multiply((1 - y), np.log2(1 - outs))))
    J = J / len(X)
    if not count_sigmas:
        return J
    #linear
    #self.Sigmas[-1] = np.multiply(np.multiply((outs - y), outs), 1 - outs) * 2
    #logistic
    # math.log1p(1) == ln(2): converts the base-2 log in the cost into natural-log units
    self.Sigmas[-1] = (outs - y) / math.log1p(1)
    for i in reversed(range(0, self.n_hidden_layers)):
        self.Sigmas[i] = np.multiply(self.activations[i + 1][:, 1:], 1 - self.activations[i + 1][:, 1:])
        self.Sigmas[i] = np.multiply(self.Sigmas[i], self.Sigmas[i + 1] * self.Thetas[i + 1][:, 1:])
    return J
Author: avyzainis, Project: Neural-Network, Lines: 29, Source: NeuralNetworkLayered.py
Example 11: center_zoom
def center_zoom(Lngs, Lats):
    # Find the bounding box
    minLon, minLat, maxLon, maxLat = min(Lngs), min(Lats), max(Lngs), max(Lats)
    deltaLon, deltaLat = (maxLon - minLon), (maxLat - minLat)
    centerLon = minLon + .5*deltaLon
    centerLat = minLat + .5*deltaLat
    zoomxfac = 3600.
    zoomyfac = 2925.
    if deltaLon != 0:
        pixXperdeg = (512.0/deltaLon)
    else:
        pixXperdeg = 1.
    if deltaLat != 0:
        pixYperdeg = (512.0/deltaLat)
    else:
        pixYperdeg = 1.
    # conversion to zoom (note: upstream divides both axes by zoomyfac, leaving zoomxfac unused)
    dx = pixXperdeg/zoomyfac
    dy = pixYperdeg/zoomyfac
    zx = np.floor(12 + np.log2(dx))
    zy = np.floor(12 + np.log2(dy))
    # take the coarser of the two zooms and clamp it to [10, 19]
    zoom = min(zx, zy)
    if zoom < 10:
        zoom = 10
    if zoom > 19:
        zoom = 19
    return centerLon, centerLat, zoom
Author: ssalha, Project: craigslistxx, Lines: 30, Source: mapHelper.py
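A quick call with a toy bounding box, with the expected zoom worked out from the formula above:

import numpy as np
lngs = [-122.5, -122.3]
lats = [37.7, 37.8]
print(center_zoom(lngs, lats))  # approximately (-122.4, 37.75, 11.0)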
Example 12: smartirs_wglobal
def smartirs_wglobal(docfreq, totaldocs, global_scheme):
    """Calculate global document weight based on the weighting scheme specified in `global_scheme`.
    Parameters
    ----------
    docfreq : int
        Document frequency.
    totaldocs : int
        Total number of documents.
    global_scheme : {'n', 't', 'p'}
        Global transformation scheme.
    Returns
    -------
    float
        Calculated global weight.
    """
    if global_scheme == "n":
        return 1.
    elif global_scheme == "t":
        return np.log2(1.0 * totaldocs / docfreq)
    elif global_scheme == "p":
        return max(0, np.log2((1.0 * totaldocs - docfreq) / docfreq))
Author: RaRe-Technologies, Project: gensim, Lines: 25, Source: tfidfmodel.py
Example 13: smartirs_wlocal
def smartirs_wlocal(tf, local_scheme):
    """Calculate local term weight for a term using the weighting scheme specified in `local_scheme`.
    Parameters
    ----------
    tf : int or numpy.ndarray
        Term frequency (the 'a', 'b' and 'L' schemes expect an array).
    local_scheme : {'n', 'l', 'a', 'b', 'L'}
        Local transformation scheme.
    Returns
    -------
    float
        Calculated local weight.
    """
    if local_scheme == "n":
        return tf
    elif local_scheme == "l":
        return 1 + np.log2(tf)
    elif local_scheme == "a":
        return 0.5 + (0.5 * tf / tf.max(axis=0))
    elif local_scheme == "b":
        return tf.astype('bool').astype('int')
    elif local_scheme == "L":
        return (1 + np.log2(tf)) / (1 + np.log2(tf.mean(axis=0)))
Author: RaRe-Technologies, Project: gensim, Lines: 26, Source: tfidfmodel.py
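A few calls exercising both the local scheme above and the global scheme from Example 12:

import numpy as np
tf = np.array([1, 2, 4])
print(smartirs_wlocal(tf, 'l'))       # [1. 2. 3.] -- 1 + log2(tf)
print(smartirs_wglobal(4, 100, 't'))  # log2(100/4), about 4.64
print(smartirs_wglobal(4, 100, 'p'))  # log2(96/4), about 4.58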
Example 14: plot_profiles
def plot_profiles(prots, eluts, sp='Hs', plot_sums=True, shape=None,
                  min_count=1):
    """
    shape: (m,n) = m rows, n columns
    eluts: [el.NormElut(f, sp, norm_rows=False, norm_cols=False) for f in fs]
    """
    import plotting as pl
    gt = seqs.GTrans()
    use_eluts = elutions_containing_prots(eluts, sp, seqs.names2ids(prots),
                                          min_count)
    shape = shape if shape else ut.sqrt_shape(len(use_eluts)+1)
    fig = pl.figure()
    for i, e in enumerate(use_eluts):
        sp_target = ut.shortname(e.filename)[:2]
        pl.subplot(shape[0], shape[1], i+1)
        pl.title(ut.shortname(e.filename))
        pids = [gt.name2id[p] for p in prots]
        protsmax = max([np.max(e.normarr[r]) for p in pids if p in e.baseid2inds
                        for r in e.baseid2inds[p]])
        plot_prots(e, pids, e.baseid2inds, protsmax)
        if plot_sums:
            # plot total spectral counts normalized to match biggest peak
            sums = np.sum(e.normarr, axis=0)
            fmax = np.max(sums)
            pl.plot(range(sums.shape[1]),
                    np.log2(sums[0, :]).T*np.log2(protsmax)*len(pids)/np.log2(fmax),
                    color='k', linestyle='-', linewidth=.5)
    # make legend with all prots (note: subplot indices are 1-based in modern matplotlib)
    pl.subplot(shape[0], shape[1], 0)
    for p in prots:
        pl.plot(0, label=p)
    pl.legend()
Author: marcottelab, Project: infer_complexes, Lines: 32, Source: evidence.py
Example 15: means2idxarrays
def means2idxarrays(g1, g2, i_bins, c_bins, difference):
    '''take two arrays of values and return the initial values
    and differences as numpy digitised arrays'''
    if difference == "relative":
        # calculate difference between mean values for group1 and group2
        # g1 and g2 always the same length
        change = [g2[x] - g1[x] for x in range(0, len(g1))]
        initial = g1
    elif difference == "logfold":
        change = [np.log2((g2[x] + 1.0) / (g1[x] + 1.0))
                  for x in range(0, len(g1))]
        initial = [np.log2(g1[x] + 1.0) for x in range(0, len(g1))]
    elif difference == "abs_logfold":
        change = [abs(np.log2((g2[x] + 1.0) / (g1[x] + 1.0)))
                  for x in range(0, len(g1))]
        initial = [max(np.log2(g1[x] + 1.0), np.log2(g2[x] + 1.0))
                   for x in range(0, len(g1))]
    # return arrays of len(change) with the index position in c_bins
    # corresponding to the bin in which the value of change falls
    change_idx = np.digitize(change, c_bins, right=True)
    initial_idx = np.digitize(initial, i_bins, right=True)
    return (change_idx, initial_idx)
Author: CGATOxford, Project: cgat, Lines: 27, Source: Counts.py
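A sketch with made-up bin edges, using the logfold mode:

import numpy as np
g1, g2 = [1.0, 3.0], [7.0, 3.0]
i_bins = np.linspace(0, 4, 9)    # edges for the initial values
c_bins = np.linspace(-4, 4, 17)  # edges for the change values
change_idx, initial_idx = means2idxarrays(g1, g2, i_bins, c_bins, "logfold")
print(change_idx, initial_idx)  # [12  8] [2 4] -- log2 fold changes of 2 and 0, digitised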
Example 16: TtoJ
def TtoJ(T, Q=1, B=None, phibwratio=None):
    """
    Compute the maximal wavelet scale J such that for a filter bank
    the largest wavelet is of bandwidth approximately T.
    Parameters:
    -----------
    T: int
        Time bandwidth for window
    Q: int
        Number of wavelets per octave
    B: int
        The reciprocal per-octave bandwidth of the wavelets
    phibwratio: float
        Ratio between the lowpass filter phi and the lowest-frequency wavelet.
        Defaults to 2 if Q is 1, and 1 otherwise.
    Returns
    --------
    J: int
        Number of logarithmically spaced wavelets
    """
    if B is None:
        B = Q
    if phibwratio is None:
        if type(Q) == np.ndarray:
            phibwratio = 1. + (Q == 1).astype(int)
        else:
            phibwratio = 1 + int(Q == 1)
    if type(Q) == np.ndarray:
        return 1 + (np.log2(T/(4*B/phibwratio))*Q + .5).astype(int)
    else:
        return 1 + int(np.log2(T/(4*B/phibwratio))*Q + .5)
Author: markstoehr, Project: phoneclassification, Lines: 33, Source: filterbank.py
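For scalar Q the arithmetic is easy to follow by hand:

print(TtoJ(512, Q=8))  # 1 + int(log2(512/(4*8))*8 + .5) = 1 + 32 = 33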
Example 17: stop
def stop(k, k_l, k_r):
    # E, T, T_min, N and measure() are free variables from the enclosing scope;
    # Counter comes from the collections module.
    gain = E - T[T_min]
    def count(lst): return len(Counter(lst).keys())
    # MDL-style penalty term for the candidate split
    delta = np.log2(float(3**count(k) - 2)) - (
        count(k)*measure(k) - count(k_l)*measure(k_l) - count(k_r)*measure(k_r))
    # print(gain, (np.log2(N-1)+delta)/N)
    return gain < (np.log2(N-1) + delta)/N or T_min == 0
Author: rahlk, Project: RAAT, Lines: 7, Source: Discretize.py
Example 18: reconstructWPT
def reconstructWPT(self, new_wp, wavelet, listleaves):
    """ Create a new wavelet packet tree by copying in the data for the leaves and then performing
    the idwt up the tree to the root.
    Assumes that listleaves is top-to-bottom, so just reverses it.
    """
    # Sort the list of leaves into order bottom-to-top, left-to-right
    working = listleaves.copy()
    working = working[-1::-1]
    level = int(np.floor(np.log2(working[0] + 1)))
    while level > 0:
        first = 2 ** level - 1
        while working[0] >= first:
            # Note that it assumes that the whole list is backwards
            parent = (working[0] - 1) // 2
            p = self.ConvertWaveletNodeName(parent)
            # inverse DWT of the two children, trimmed to the parent's length
            new_wp[p].data = pywt.idwt(new_wp[self.ConvertWaveletNodeName(working[1])].data,
                                       new_wp[self.ConvertWaveletNodeName(working[0])].data,
                                       wavelet)[:len(new_wp[p].data)]
            # Delete these two nodes from working
            working = np.delete(working, 1)
            working = np.delete(working, 0)
            # Insert parent into the list of nodes at the next level
            ins = np.where(working > parent)
            if len(ins[0]) > 0:
                ins = ins[0][-1] + 1
            else:
                ins = 0
            working = np.insert(working, ins, parent)
        level = int(np.floor(np.log2(working[0] + 1)))
    return new_wp
Author: smarsland, Project: birdscape, Lines: 31, Source: WaveletFunctions.py
Example 19: __init__
def __init__(self, data, noDataValue):
    #dataValues = [x[0] for x in dataTable if x[0] != noDataValue]
    #data = sorted(dataValues)
    d = data[data["DataValue"] != noDataValue].describe(percentiles=[.10, .25, .5, .75, .90])
    count = self.NumberofObservations = d["DataValue"]["count"]
    self.NumberofCensoredObservations = data[data["CensorCode"] != "nc"].count()
    self.ArithemticMean = round(d["DataValue"]["mean"], 5)
    # geometric mean via the mean of log2 values; the sign flag handles negative
    # data, and log2(1) == 0 makes zeros drop out of the sum
    sumval = 0
    sign = 1
    for dv in data["DataValue"]:
        if dv == 0:
            sumval = sumval + numpy.log2(1)
        else:
            if dv < 0:
                sign = sign * -1
            sumval = sumval + numpy.log2(numpy.absolute(dv))
    if count > 0:
        self.GeometricMean = round(sign * (2 ** float(sumval / float(count))), 5)
    self.Maximum = round(d["DataValue"]["max"], 5)
    self.Minimum = round(d["DataValue"]["min"], 5)
    self.StandardDeviation = round(d["DataValue"]["std"], 5)
    # note: this stores the variance; a coefficient of variation would be std/mean
    self.CoefficientofVariation = round(data[data["DataValue"] != noDataValue].var(), 5)
    ## Percentiles
    self.Percentile10 = round(d["DataValue"]["10%"], 5)
    self.Percentile25 = round(d["DataValue"]["25%"], 5)
    self.Percentile50 = round(d["DataValue"]["50%"], 5)
    self.Percentile75 = round(d["DataValue"]["75%"], 5)
    self.Percentile90 = round(d["DataValue"]["90%"], 5)
Author: elmachine8, Project: ODMToolsPython, Lines: 33, Source: logicPlotOptions.bak.py
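For positive data the sign/zero bookkeeping above reduces to the classic log-domain geometric mean:

import numpy as np
values = np.array([2.0, 8.0])
print(2 ** np.mean(np.log2(values)))  # 4.0 -- the geometric mean of 2 and 8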
Example 20: test_quasigraph
def test_quasigraph(self, plot=False):
    # plt is matplotlib.pyplot, nt is nose.tools, and order is an odelab helper module
    sol = self.solver
    errz = []
    errl = []
    ks = np.arange(1, 5)
    for k in ks:
        # halve the step size at every iteration
        self.scheme.h = pow(2, -k)
        sol.initialize(u0=self.u0, time=1, name='{0}_{1}'.format(type(self).__name__, k))
        sol.run()
        zexact = sol.system.exact(sol.final_time(), self.u0)[0]
        lexact = sol.system.exact(sol.final_time(), self.u0)[2]
        df = sol.final()[0] - zexact
        # log2 errors, so the convergence order is the slope of a linear fit
        logerrz = np.log2(np.abs(df))
        logerrl = np.log2(np.abs(sol.final()[2] - lexact))
        errz.append(logerrz)
        errl.append(logerrl)
    plt.clf()
    plt.subplot(1, 2, 1)
    plt.title('z')
    regz = order.linear_regression(ks, errz, do_plot=True)
    plt.plot(ks, errz, 'o-')
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.title(u'λ')
    regl = order.linear_regression(ks, errl, do_plot=True)
    plt.plot(ks, errl, 'o-')
    plt.legend()
    oz = -regz[0]
    ol = -regl[0]
    nt.assert_greater(ol, self.expected_orders[0] - self.tol)
    nt.assert_greater(oz, self.expected_orders[1] - self.tol)
    return sol
Author: LongyanU, Project: odelab, Lines: 32, Source: test_rkdae.py
Note: the numpy.log2 examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets come from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or reuse should follow each project's license. Please do not reproduce without permission.