This article collects typical usage examples of the scipy.diff function in Python. If you are wondering how to use the Python diff function, how to call it, or what real-world usage looks like, the curated code samples below may help.
The following presents 20 code examples of the diff function, ordered roughly by popularity.
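Before the examples, a note on the function itself: in older SciPy releases scipy.diff was simply a re-export of numpy.diff, which computes the discrete difference a[1:] - a[:-1] along an axis; the alias has since been deprecated and removed, so new code should call numpy.diff directly. A minimal sketch of the behavior the examples below rely on:

    import numpy as np

    # scipy.diff in these examples is the old alias of numpy.diff:
    # it returns the discrete difference a[1:] - a[:-1] along an axis.
    a = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
    print(np.diff(a))        # [1. 2. 3. 4.]
    print(np.diff(a, n=2))   # second-order difference: [1. 1. 1.]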
Example 1: diff
def diff(self, n=1):
    """Calculate the n-th order differential of the function."""
    # 2012-06-27 - 2012-07-11
    x, y = self.diff(n=n-1).xy() if n > 1 else self.xy()
    x = self._filter_double(x)
    return type(self)(x=.5*(x[1:]+x[:-1]),
                      y=scipy.diff(y)/scipy.diff(x))
Author: proggy, Project: cofunc, Lines: 7, Source: __init__.py
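Example 1 approximates the derivative of a sampled function by dividing diff(y) by diff(x) and re-sampling the result at the midpoints of x. A minimal stand-alone sketch of the same finite-difference pattern with plain NumPy arrays (not the cofunc class used above):

    import numpy as np

    x = np.linspace(0.0, np.pi, 100)
    y = np.sin(x)

    # Same pattern as example 1: derivative sampled at the midpoints of x.
    x_mid = 0.5 * (x[1:] + x[:-1])
    dydx = np.diff(y) / np.diff(x)
    print(np.allclose(dydx, np.cos(x_mid), atol=1e-3))  # True: approximately cos(x)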
Example 2: gradient2
def gradient2(f):
    dM = g.grid_M[1] - g.grid_M[0]
    dD = g.grid_D[1] - g.grid_D[0]
    g1 = scipy.diff(f, 1, 0) / dM
    g2 = scipy.diff(f, 1, 1) / dD
    g3 = addNanRow(g1)
    g4 = addNanCol(g2)
    return [g3, g4]
Author: Twizanex, Project: bellman, Lines: 8, Source: ponzi4.py
Example 3: entropy2
def entropy2(values):
    """Calculate the entropy of vector values.
    values will be flattened to a 1d ndarray."""
    values = sp.asarray(values).flatten()
    p = sp.diff(sp.c_[0, sp.diff(sp.sort(values)).nonzero(), values.size])/float(values.size)
    H = (p*sp.log2(p)).sum()
    return -H
Author: KathleenF, Project: numerical_computing, Lines: 9, Source: Vectorization.py
Example 4: entropy2
def entropy2(values):
    """Calculate the entropy of vector values.
    values will be flattened to a 1d ndarray."""
    values = values.flatten()
    M = len(sp.unique(values))
    p = sp.diff(sp.c_[sp.diff(sp.sort(values)).nonzero(), len(values)])/float(len(values))
    H = -((p*sp.log2(p)).sum())
    return H
Author: KathleenF, Project: numerical_computing, Lines: 10, Source: entropy.py
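Both entropy examples rely on the same idiom: after sorting, sp.diff(...).nonzero() marks the boundaries between runs of identical values, and a second diff over those boundary positions yields the count of each distinct value. The sketch below re-implements that idea with NumPy for clarity; it is an illustration of the idiom under that interpretation, not a copy of either function above.

    import numpy as np

    def entropy_via_diff(values):
        """Shannon entropy (bits) using the sort/diff run-length trick
        from examples 3 and 4 (a sketch, not the original code)."""
        v = np.sort(np.asarray(values).ravel())
        # indices where the sorted values change, i.e. boundaries between runs
        boundaries = np.nonzero(np.diff(v))[0] + 1
        edges = np.concatenate(([0], boundaries, [v.size]))
        counts = np.diff(edges)                 # run lengths = counts per distinct value
        p = counts / float(v.size)
        return -(p * np.log2(p)).sum()

    print(entropy_via_diff([0, 0, 1, 1, 2, 2, 2, 2]))  # 1.5 bits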
Example 5: test_respects_refractory_period
def test_respects_refractory_period(self):
    refractory = 100 * pq.ms
    st = self.invoke_gen_func(
        self.highRate, max_spikes=1000, refractory=refractory)
    self.assertGreater(
        sp.amax(sp.absolute(sp.diff(st.rescale(pq.s).magnitude))),
        refractory.rescale(pq.s).magnitude)
    st = self.invoke_gen_func(
        self.highRate, t_stop=10 * pq.s, refractory=refractory)
    self.assertGreater(
        sp.amax(sp.absolute(sp.diff(st.rescale(pq.s).magnitude))),
        refractory.rescale(pq.s).magnitude)
Author: NeuroArchive, Project: spykeutils, Lines: 12, Source: test_spike_train_generation.py
Example 6: qrs_detect
def qrs_detect(self, qrslead=0):
    """Detect QRS onsets using modified PT algorithm
    """
    # If ecg is a vector, it will be used for qrs detection.
    # If it is a matrix, use qrslead (default 0)
    if len(self.data.shape) == 1:
        self.raw_ecg = self.data
    else:
        self.raw_ecg = self.data[:, qrslead]
    # butterworth bandpass filter 5 - 15 Hz
    self.filtered_ecg = self._bpfilter(self.raw_ecg)
    # differentiate
    self.diff_ecg = scipy.diff(self.filtered_ecg)
    # take absolute value (was square in original PT implementation)
    self.abs_ecg = abs(self.diff_ecg)
    # integrate
    self.int_ecg = self._mw_integrate(self.abs_ecg)
    # Construct buffers with last 8 values
    self._initializeBuffers(self.int_ecg)
    # collect all unique local peaks in the integrated ecg
    peaks = self.peakDetect(self.int_ecg)
    # classify each peak as QRS or noise
    self.checkPeaks(peaks, self.int_ecg)
    # compensate for delay during integration
    self.QRSpeaks -= 40 * (self.samplingrate / 1000)
    return self.QRSpeaks
Author: Basildcruz, Project: ecgtk, Lines: 33, Source: ecgtk.py
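Examples 6 and 13 follow the classic Pan-Tompkins pipeline: band-pass filter, differentiate with scipy.diff, rectify, and integrate over a moving window before peak classification. The sketch below reproduces only those signal-conditioning stages with public SciPy calls; the 2nd-order Butterworth design and the 150 ms integration window are assumptions for illustration, not the class's private _bpfilter/_mw_integrate helpers.

    import numpy as np
    from scipy.signal import butter, filtfilt

    def pan_tompkins_feature(ecg, fs):
        """Sketch of the pre-detection stages used in examples 6 and 13:
        band-pass 5-15 Hz, differentiate, rectify, moving-window integrate.
        Filter order and window length are assumptions, not the original code."""
        b, a = butter(2, [5.0 / (fs / 2.0), 15.0 / (fs / 2.0)], btype='band')
        filtered = filtfilt(b, a, ecg)
        rectified = np.abs(np.diff(filtered))        # |derivative| instead of square
        window = int(0.150 * fs)                     # ~150 ms integration window
        integrated = np.convolve(rectified, np.ones(window) / window, mode='same')
        return integrated

Peaks of the returned envelope would then be classified as QRS or noise, as in the methods above.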
Example 7: detect_signals
def detect_signals():
    vector, label = weeklydataset_sg_ndata(
        "/media/4AC0AB31C0AB21E5/Documents and Settings/Claudio/Documenti/Thesis/Workloads/MSClaudio/ews/access_log-20110805.csv",
        [],
    )
    x, target = aggregatebymins_sg_ndata(vector[1])

    starttime = time.time()
    y = array(target)
    t = array(x)
    thr = max(y) * 2 / 3
    print thr
    I = pylab.find(y > thr)
    # print I
    # pylab.plot(t,y, 'b',label='signal')
    # pylab.plot(t[I], y[I],'ro',label='detections')
    # pylab.plot([0, t[len(t)-1]], [thr,thr], 'g--')
    J = pylab.find(diff(I) > 1)
    argpeak = []
    targetpeak = []
    for K in split(I, J + 1):
        ytag = y[K]
        peak = pylab.find(ytag == max(ytag))
        # pylab.plot(peak+K[0],ytag[peak],'sg',ms=7)
        argpeak.append(peak + K[0])
        targetpeak.append(ytag[peak])
    eta = time.time() - starttime
    print "time elapsed %f" % eta
    return list(itertools.chain(*argpeak)), list(itertools.chain(*targetpeak))
Author: pchronz, Project: GenericWorkloadModeler, Lines: 31, Source: spike.py
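The peak-grouping trick in example 7 is worth isolating: after thresholding, diff(I) > 1 marks the gaps between contiguous runs of above-threshold indices, and splitting at those gaps yields one candidate burst per run. A small self-contained sketch on synthetic data, using NumPy instead of the deprecated pylab.find:

    import numpy as np

    y = np.array([0, 1, 5, 6, 2, 0, 0, 7, 8, 3, 0])
    thr = 4

    I = np.where(y > thr)[0]               # indices above threshold: [2 3 7 8]
    J = np.where(np.diff(I) > 1)[0]        # gaps between contiguous runs
    runs = np.split(I, J + 1)              # [[2 3], [7 8]]
    peaks = [run[np.argmax(y[run])] for run in runs]
    print(peaks)                           # [3, 8] -> one peak index per burst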
Example 8: execute
def execute(self):
    self.power_mat, self.thermal_expectation = self.full_calculation()
    n_chan = self.power_mat.shape[1]
    n_freq = self.power_mat.shape[0]
    # Calculate the mean channel correlations at low frequencies.
    low_f_mat = sp.mean(self.power_mat[1:4 * n_chan + 1, :, :], 0).real
    # Factorize it into principal components.
    e, v = linalg.eigh(low_f_mat)
    self.low_f_mode_values = e
    # Make sure the eigenvalues are sorted.
    if sp.any(sp.diff(e) < 0):
        raise RuntimeError("Eigenvalues not sorted.")
    self.low_f_modes = v
    # Now subtract out the noisiest channel modes and see what is left.
    n_modes_subtract = 10
    mode_subtracted_power_mat = sp.copy(self.power_mat.real)
    mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
    for ii in range(n_modes_subtract):
        mode = v[:, -ii]
        amp = sp.sum(mode[:, None] * mode_subtracted_power_mat, 1)
        amp = sp.sum(amp * mode, 1)
        to_subtract = amp[:, None, None] * mode[:, None] * mode
        mode_subtracted_power_mat -= to_subtract
        auto_power = mode_subtracted_power_mat.view()
        auto_power.shape = (n_freq, n_chan**2)
        auto_power = auto_power[:, ::n_chan + 1]
        mode_subtracted_auto_power[ii, :] = sp.mean(auto_power, -1)
    self.subtracted_auto_power = mode_subtracted_auto_power
Author: OMGitsHongyu, Project: analysis_IM, Lines: 28, Source: noise_power.py
Example 9: scanSound
def scanSound(self, source, minnotel):
    binarized = source
    scale = 60. / self.wavetempo * (binarized[0].size / self.duration)
    noise_length = scale*minnotel

    antinoised = sp.zeros_like(binarized)

    for i in range(sp.shape(binarized)[0]):
        new_line = binarized[i, :].copy()
        diffed = sp.diff(new_line)
        ones_keys = sp.where(diffed == 1)[0]
        minus_keys = sp.where(diffed == -1)[0]

        if(ones_keys.size != 0 and minus_keys.size != 0):
            if(ones_keys[0] > minus_keys[0]):
                new_line = self.cutNoise(
                    (0, minus_keys[0]), noise_length, new_line)
                minus_keys = sp.delete(minus_keys, 0)

            if(ones_keys[-1] > minus_keys[-1]):
                new_line = self.cutNoise(
                    (ones_keys[-1], new_line.size-1), noise_length, new_line)
                ones_keys = sp.delete(ones_keys, -1)

            for j in range(sp.size(ones_keys)):
                new_line = self.cutNoise(
                    (ones_keys[j], minus_keys[j]), noise_length, new_line)

        antinoised[i, :] = new_line

    return antinoised
Author: mackee, Project: utakata, Lines: 31, Source: utakata_time_freq.py
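Example 9 applies sp.diff to a binarized (0/1) row so that note onsets appear as +1 and offsets as -1; the positions of those transitions delimit each run of ones, whose length is then compared against noise_length. The sketch below shows only that run-extraction step on a toy array; the +1 index offsets are a convention chosen here for clarity and are not part of the original code.

    import numpy as np

    line = np.array([0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0])
    d = np.diff(line)

    starts = np.where(d == 1)[0] + 1       # first index of each run of ones
    ends = np.where(d == -1)[0] + 1        # one past the last index of each run
    lengths = ends - starts
    print(list(zip(starts, ends, lengths)))  # [(2, 5, 3), (6, 7, 1), (9, 11, 2)]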
Example 10: get_fft
def get_fft(self, fs, taps, Npts):
    Ts = 1.0/fs
    fftpts = fftpack.fft(taps, Npts)
    self.freq = scipy.arange(0, fs, 1.0/(Npts*Ts))
    self.fftdB = 20.0*scipy.log10(abs(fftpts))
    self.fftDeg = scipy.unwrap(scipy.angle(fftpts))
    self.groupDelay = -scipy.diff(self.fftDeg)
Author: GREO, Project: gnuradio-git, Lines: 7, Source: gr_filter_design.py
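Example 10 approximates the group delay as the negative first difference of the unwrapped phase; because it does not divide by the frequency step, the result is proportional to the group delay rather than expressed in seconds. For comparison, SciPy also ships a direct routine; a short sketch, with the FIR design chosen purely for illustration:

    import numpy as np
    from scipy.signal import firwin, group_delay

    taps = firwin(31, 0.3)                     # example 31-tap low-pass FIR
    w, gd = group_delay((taps, 1), w=512)      # group delay in samples
    print(gd[:5])                              # ~15 samples for a linear-phase FIR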
Example 11: get_indices
def get_indices(arr, vals, disp=False):
    """
    Get the indices of all the elements between vals[0] and vals[1].
    Alternatively also between vals[2] and vals[3] if they are given.

    Input:
        arr  : the array in which to look for the elements
        vals : a list with either 2 or 4 values that correspond to the
               limits in between which the indices are returned

    Optional argument(s):
        disp : Boolean parameter; if True it displays the start and end
               index and the number of channels in between. Only works
               for value lists of length 2.

    Assumes the values in 'arr' are the mid values and that the array is
    evenly spaced for all values.

    ********************** Important! **********************************
    The output indices are Python friendly, i.e. they are 0-based. Take
    care when using the indices in other software, e.g. GILDAS, MIRIAD,
    which are 1-based.

    --------------------------------------------------------------------

                            oOO Changelog OOo

    *2012/02
        Added more documentation, "important" notice about indexing
    *2011/07
        Removed +1 in the output indices to be compatible with the rest of
        the module, where Python's 0-based indexing is used.
    *2010/12
        Doc written
    *2010/06
        Function created
    """
    from scipy import concatenate, where, array, diff
    dx = abs(0.5 * diff(arr)[0])
    if len(vals) == 4:
        v1, v2, v3, v4 = vals + array([-1, 1, -1, 1]) * dx
        # if the user wants two velocity areas to calculate noise
        low = where((arr >= v1) * (arr <= v2))[0]
        high = where((arr >= v3) * (arr <= v4))[0]
        channels = concatenate((low, high))
    elif len(vals) == 2:
        v1, v2 = vals + array([-1, 1]) * dx
        # channels = where((arr>=v1)*(arr<v2))[0]+1
        # this is because if +1 it is FITS/Fortran safe
        # changed: removed +1 for consistency in program
        channels = where((arr >= v1) * (arr <= v2))[0]
    #
    if disp and len(vals) == 2:
        first, last = channels.min(), channels.max()
        n = last - first + 1
        print "\nFirst: %d,\n Last: %d\n Nchan: %d\n" % (first, last, n)
    return channels
Author: vilhelmp, Project: adapy, Lines: 60, Source: helpers.py
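A hypothetical usage sketch for get_indices, assuming the function is importable from its module and using a synthetic, evenly spaced velocity axis:

    import numpy as np

    # hypothetical velocity axis from -50 to 50 km/s, 1 km/s channel width
    varr = np.linspace(-50, 50, 101)

    # channels between -10 and 10 km/s (single interval)
    channels = get_indices(varr, [-10, 10])
    print(channels.min(), channels.max())     # 40 60

    # two line-free windows, e.g. for noise estimation
    noise_channels = get_indices(varr, [-45, -25, 25, 45])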
Example 12: decode
def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind+1, 0] - 3
    d_bottom = q[ind-1, 1] - 3

    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left)/s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top)/s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s))+1

    region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))

    return hamming.bin_to_str(str2)
Author: aroundnothing, Project: optar, Lines: 30, Source: picture.py
Example 13: qrsDetect
def qrsDetect(self, qrslead=0):
    """Detect QRS onsets using modified PT algorithm
    """
    # If ecg is a vector, it will be used for qrs detection.
    # If it is a matrix, use qrslead (default 0)
    if len(self.data.shape) == 1:
        self.raw_ecg = self.data
    else:
        self.raw_ecg = self.data[:, qrslead]

    self.filtered_ecg = self.bpfilter(self.raw_ecg)
    self.diff_ecg = scipy.diff(self.filtered_ecg)
    self.sq_ecg = abs(self.diff_ecg)
    self.int_ecg = self.mw_integrate(self.sq_ecg)

    # Construct buffers with last 8 values
    self._initializeBuffers(self.int_ecg)

    peaks = self.peakDetect(self.int_ecg)
    self.checkPeaks(peaks, self.int_ecg)

    # compensate for delay during integration
    self.QRSpeaks = self.QRSpeaks - 40 * (self.samplingrate / 1000)

    #print ("length of qrs peaks and ecg", len(self.QRSpeaks), len(self.raw_ecg))
    #print(self.QRSpeaks)
    return self.QRSpeaks
Author: likeMyCode, Project: ECGDiagnose, Lines: 27, Source: qrsdetect.py
Example 14: continuous_phase
def continuous_phase(phase, axis=0, center=False):
    """Add and subtract 2 pi such that the phase in the array is
    as continuous as possible, along first or given axis. Optionally,
    it also centers the phase data so that the average is smallest."""

    phase = _n.array(phase, copy=0)
    rowshape = list(phase.shape)

    if len(rowshape) > 0:
        rowshape[axis] = 1

        slip = _n.concatenate([_n.zeros(rowshape),
                               scipy.diff(phase, axis=axis)],
                              axis=axis)
        slip = _n.around(slip/(2*_n.pi))
        cumslip = scipy.cumsum(slip, axis=axis)

        phase = phase - 2*_n.pi*cumslip
    else:
        pass

    if center:
        offset = _n.around(scipy.average(phase, axis=axis)/(2*_n.pi))
        offset = _n.reshape(offset, rowshape)
        offset = _n.repeat(offset, cumslip.shape[axis], axis=axis)
        phase = phase - 2*_n.pi*offset

    return phase
Author: chandranorth, Project: usadelRicatti, Lines: 29, Source: util.py
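Example 14 counts 2π slips in the diff of the phase and subtracts their cumulative sum, which is essentially what numpy.unwrap does for the default 2π period. A minimal sketch of the equivalent NumPy call on synthetic data:

    import numpy as np

    # a smooth phase ramp, folded into (-pi, pi] to create artificial 2*pi slips
    true_phase = np.linspace(0, 4 * np.pi, 200)
    wrapped = np.angle(np.exp(1j * true_phase))

    recovered = np.unwrap(wrapped)                 # same idea: undo 2*pi jumps
    print(np.allclose(recovered, true_phase))      # True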
Example 15: whittaker
def whittaker(inY, inL=15, inD=2):
    """
    Smooth the input signal with a Whittaker filter.
    ref: Eilers, P.H.C. (2003) "A perfect smoother", Analytical Chemistry, 75, 3631-3636.

    Input:
        inY: the signal to smooth
        inL: the smoothing parameter; the larger it is, the stronger the
             smoothing. Defaults to 15, as in the article:
             Geng, L.; Ma, M.; Wang, X.; Yu, W.; Jia, S.; Wang, H. Comparison of Eight Techniques
             for Reconstructing Multi-Satellite Sensor Time-Series NDVI Data Sets in the Heihe River Basin, China.
             Remote Sens. 2014, 6, 2024-2049.
        inD: order of the penalty differences
    """
    m = sp.size(inY)
    E = sp.eye(m)
    D = sp.diff(E, inD)
    Z = E + (inL*sp.dot(D, sp.transpose(D)))
    ws = sp.linalg.solve(Z, inY)
    return ws
Author: Xdarii, Project: QGIS_Traitement_and_Pheno, Lines: 26, Source: whittaker.py
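A hypothetical usage sketch for whittaker, assuming the function is importable from its module (and that scipy.linalg is available there, since the body calls sp.linalg.solve); the test signal is synthetic:

    import numpy as np

    # a hypothetical noisy NDVI-like series
    t = np.linspace(0, 1, 200)
    signal = np.sin(2 * np.pi * t)
    noisy = signal + 0.2 * np.random.randn(t.size)

    smooth = whittaker(noisy, inL=15, inD=2)   # larger inL -> stronger smoothing
    # the smoothed series is usually closer to the clean signal than the noisy one
    print(np.std(smooth - signal) < np.std(noisy - signal))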
Example 16: add_boundaries
def add_boundaries(self):
    r'''
    This method uses ``clone_pores`` to clone the surface pores (labeled
    'left', 'right', etc), then shifts them to the periphery of the domain,
    and gives them the label 'right_face', 'left_face', etc.
    '''
    x, y, z = self['pore.coords'].T
    Lc = sp.amax(sp.diff(x))  # this currently works but is very fragile

    offset = {}
    offset['front'] = offset['left'] = offset['bottom'] = [0, 0, 0]
    offset['back'] = [x.max()+Lc/2, 0, 0]
    offset['right'] = [0, y.max()+Lc/2, 0]
    offset['top'] = [0, 0, z.max()+Lc/2]

    scale = {}
    scale['front'] = scale['back'] = [0, 1, 1]
    scale['left'] = scale['right'] = [1, 0, 1]
    scale['bottom'] = scale['top'] = [1, 1, 0]

    for label in ['front', 'back', 'left', 'right', 'bottom', 'top']:
        ps = self.pores(label)
        self.clone_pores(pores=ps, apply_label=[label+'_boundary', 'boundary'])
        # Translate cloned pores
        ind = self.pores(label+'_boundary')
        coords = self['pore.coords'][ind]
        coords = coords*scale[label] + offset[label]
        self['pore.coords'][ind] = coords
Author: Maggie1988, Project: OpenPNM, Lines: 29, Source: __Cubic__.py
Example 17: getStepWindow
def getStepWindow(t, v):
    # return time and voltage vectors during the stimulus period only

    # find the point of maximum voltage, and cut off everything afterwards
    maxInd, maxV = max(enumerate(v), key=lambda x: x[1])
    minInd, minV = min(enumerate(v), key=lambda x: x[1])
    if maxV - v[0] > v[0] - minV:
        # this is a positive step
        t = t[:maxInd]
        v = scipy.array(v[:maxInd])
    else:
        # this is a negative step, flip it for now
        t = t[:minInd]
        v = v[0] - scipy.array(v[:minInd])

    # re-center time to start at the point of maximum voltage change
    diffV = diff(v)
    dVInd, maxDV = max(enumerate(diffV), key=lambda x: x[1])
    dVInd -= 1
    while diffV[dVInd] > 0:
        dVInd -= 1
    dVInd += 1

    t -= t[dVInd]
    v -= v[dVInd]

    return t, v, dVInd
Author: CosmoJG, Project: quantifying-morphology, Lines: 27, Source: peelLength.py
Example 18: setUp
def setUp(self):
    # Read in just to figure out the band structure.
    this_test_file = 'testdata/testfile_guppi_rotated.fits'
    Reader = fitsGBT.Reader(this_test_file, feedback=0)
    Blocks = Reader.read((0,), ())
    bands = ()
    for Data in Blocks:
        n_chan = Data.dims[3]
        Data.calc_freq()
        freq = Data.freq
        delta = abs(sp.mean(sp.diff(freq)))
        centre = freq[n_chan//2]
        band = int(centre/1e6)
        bands += (band,)
        map = sp.zeros((n_chan, 15, 11))
        map = algebra.make_vect(map, axis_names=('freq', 'ra', 'dec'))
        map.set_axis_info('freq', centre, -delta)
        map.set_axis_info('ra', 218, -0.2)
        map.set_axis_info('dec', 2, 0.2)
        algebra.save('./testout_clean_map_I_' + str(band) + '.npy', map)
    self.params = {'sm_input_root' : 'testdata/',
                   'sm_file_middles' : ("testfile",),
                   'sm_input_end' : "_guppi_rotated.fits",
                   'sm_output_root' : "./testout_",
                   'sm_output_end' : "_sub.fits",
                   'sm_solve_for_gain' : True,
                   'sm_gain_output_end' : 'gain.pickle',
                   'sm_map_input_root' : './testout_',
                   'sm_map_type' : 'clean_map_',
                   'sm_map_polarizations' : ('I',),
                   'sm_map_bands' : bands
                   }
Author: OMGitsHongyu, Project: analysis_IM, Lines: 33, Source: test_subtract_map_data.py
Example 19: dw
def dw(self):
    """Calculates the Durbin-Watson statistic
    """
    de = diff(self.e, 1)
    dw = dot(de, de) / dot(self.e, self.e)
    return dw
Author: strategist922, Project: qikify, Lines: 7, Source: OLS.py
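Example 19 computes the Durbin-Watson statistic d = Σ_t (e_t − e_{t−1})² / Σ_t e_t², which is close to 2 when the residuals show no first-order autocorrelation. A self-contained sketch with NumPy:

    import numpy as np

    def durbin_watson(e):
        """Durbin-Watson statistic of a residual vector (sketch of example 19)."""
        de = np.diff(e)
        return np.dot(de, de) / np.dot(e, e)

    rng = np.random.default_rng(0)
    e = rng.standard_normal(500)              # white-noise residuals
    print(durbin_watson(e))                   # close to 2 for uncorrelated residuals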
Example 20: fitLength
def fitLength(t, v, startInd, startModel, vErr=0):
    expInd = max(enumerate(diff(v)), key=lambda x: x[1])[0] + 1
    startInd = max(expInd, startInd)

    # only fit the exponential part of the traces
    t, v = t[startInd:], v[startInd:]

    # start with initial guess and fit parameters of model
    startParams = [vErr] + [p for pair in startModel for p in pair]
    try:
        params, pCov = optimize.curve_fit(expSumParams, t, v, p0=startParams,
                                          maxfev=500)
    except RuntimeError as err:
        if 'Number of calls to function has reached maxfev' in err.message:
            print(err.message)
            return [], float('Inf'), float('inf')
        else:
            raise

    fitModel = [(tau, dV) for tau, dV in zip(params[1::2], params[2::2])]
    vErr = params[0]
    fitV = expSum(t, fitModel, vErr=vErr)
    vResid = sqrt(sum((vn - fitVn)**2 for vn, fitVn in zip(v, fitV)) / len(fitV))
    return fitModel, vErr, vResid
Author: CosmoJG, Project: quantifying-morphology, Lines: 26, Source: peelLength.py
Note: The scipy.diff function examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other source-code and documentation hosting platforms. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.