This article collects typical usage examples of the numpy.diff function in Python. If you have been wondering what numpy.diff does, how to call it, or what real code that uses it looks like, the curated examples below may help.
A total of 20 code examples of the diff function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
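Before diving into the examples, here is a minimal, self-contained sketch of what numpy.diff itself computes: the discrete difference out[i] = a[i+1] - a[i] along the last axis by default, with n controlling how many times the difference is applied and axis selecting the direction in a multi-dimensional array.

import numpy as np

a = np.array([1, 2, 4, 7, 0])
print(np.diff(a))        # [ 1  2  3 -7]  -> a[i+1] - a[i]
print(np.diff(a, n=2))   # [  1   1 -10]  -> difference applied twice

m = np.array([[1, 3, 6, 10],
              [0, 5, 6, 8]])
print(np.diff(m, axis=0))  # difference between the two rows
print(np.diff(m, axis=1))  # differences within each row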
Example 1: draw
def draw(self, event):
    """Draw the widget.

    Parameters
    ----------
    event : instance of Event
        The draw event.
    """
    super(Console, self).draw(event)
    if event is None:
        raise RuntimeError('Event cannot be None')
    xform = event.get_full_transform()
    tr = (event.document_to_framebuffer *
          event.framebuffer_to_render)
    logical_scale = np.diff(tr.map(([0, 1], [1, 0])), axis=0)[0, :2]
    tr = event.document_to_framebuffer
    log_to_phy = np.mean(np.diff(tr.map(([0, 1], [1, 0])), axis=0)[0, :2])
    n_pix = (self.font_size / 72.) * 92.  # number of pixels tall
    # The -2 here is because the char_height has a gap built in
    font_scale = max(n_pix / float((self._char_height - 2)), 1)
    self._resize_buffers(font_scale)
    self._do_pending_writes()
    self._program['u_origin'] = xform.map((0, 0, 0, 1))
    self._program.prepare()
    self._program['u_logical_scale'] = font_scale * logical_scale
    self._program['u_color'] = self.text_color.rgba
    self._program['u_physical_scale'] = font_scale * log_to_phy
    self._program['a_position'] = self._position
    self._program['a_bytes_012'] = VertexBuffer(self._bytes_012)
    self._program['a_bytes_345'] = VertexBuffer(self._bytes_345)
    set_state(depth_test=False, blend=True,
              blend_func=('src_alpha', 'one_minus_src_alpha'))
    self._program.draw('points')
Author: astrofrog, Project: vispy, Lines: 33, Source: console.py
Example 2: getOmega
def getOmega(dels):
    # for k in range(1, dels.delta_d.shape[0])
    N = dels.delta_d.shape[1]
    delta_t = dels.delta_t
    delta_d = dels.delta_d
    a_t = np.diff(delta_t)
    a_t = a_t[:, 0:-1]
    a_d = np.diff(delta_t[:, ::-1])
    a_d = a_d[:, ::-1]
    a_d = a_d[:, 1:]
    b_t = np.diff(delta_d)
    b_t = b_t[:, 0:-1]
    b_d = np.diff(delta_d[:, ::-1])
    b_d = b_d[:, ::-1]
    b_d = b_d[:, 1:]
    c_t = 0.25 * (np.abs(a_t) + np.abs(b_t)) * np.sign(a_t) * np.sign(b_t) * (np.sign(a_t) * np.sign(b_t) - 1)
    c_d = 0.25 * (np.abs(a_d) + np.abs(b_d)) * np.sign(a_d) * np.sign(b_d) * (np.sign(a_d) * np.sign(b_d) - 1)
    Omega = 1.0 / (2 * N) * (c_t.mean(axis=0) + c_d.mean(axis=0))
    return Omega
Author: oewhien, Project: Python-Collection, Lines: 25, Source: Huett_stoch_res_basics.py
Example 3: compute_metric
def compute_metric(self):
    gfloprate = 0
    if self.ts.pmc_type == 'amd64':
        gfloprate += self.arc(self.ts.data[0])
    if self.ts.pmc_type == 'intel_hsw':
        # print "Haswell chips do not have FLOP counters"
        return
    if self.ts.pmc_type == 'intel_snb':
        schema = self.ts.j.get_schema('intel_snb')
        if 'ERROR' in schema: return
        data = self.ts.j.aggregate_stats('intel_snb')
        try:
            flops = numpy.diff(data[0][:, schema['SSE_DOUBLE_SCALAR'].index] +
                               2 * data[0][:, schema['SSE_DOUBLE_PACKED'].index] +
                               4 * data[0][:, schema['SIMD_DOUBLE_256'].index]) / numpy.diff(self.ts.t)
        except:
            flops = numpy.diff(data[0][:, schema['SSE_D_ALL'].index] +
                               4 * data[0][:, schema['SIMD_D_256'].index]) / numpy.diff(self.ts.t)
        flops = flops / data[1]
        self.metric = tmean(flops) / 1.0e9
    return
Author: jgentle, Project: tacc_stats, Lines: 26, Source: lowflops.py
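Example 3 above relies on a common idiom with cumulative hardware counters: dividing the element-wise differences of a counter by the differences of the corresponding timestamps turns accumulated counts into a rate. A minimal sketch with invented numbers (the counter values and times below are hypothetical, not TACC Stats data):

import numpy as np

t = np.array([0.0, 10.0, 20.0, 30.0])          # sample times in seconds (hypothetical)
flop_counter = np.array([0.0, 2e9, 5e9, 9e9])  # cumulative FLOP counter (hypothetical)

flop_rate = np.diff(flop_counter) / np.diff(t)  # FLOPs per second in each interval
print(flop_rate / 1.0e9)                        # [0.2 0.3 0.4] GFLOP/s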
Example 4: sample_line_segment_mm_s
def sample_line_segment_mm_s(start_xy_mm, end_xy_mm, dt_s, mW=None, max_mm=5.0):
    """Given a line segment in mm space, map it to galvo space.

    To make the line straight in mm space, samples may be added to
    more closely approximate a straight line.

    Returns: An array of shape nx3 (if mW is None) or nx4 (if mW is not None)
        of points and time deltas in mm and seconds,
        excluding start_xy_mm and including end_xy_mm,
        possibly including samples along the way.
    """
    import FLP
    from numpy.linalg import norm
    dist_mm = norm(np.asarray(end_xy_mm) - start_xy_mm)
    if dist_mm <= max_mm:
        if mW is None:
            return np.array((tuple(end_xy_mm) + (dt_s,),))  # Just the end sample.
        else:
            return np.array((tuple(end_xy_mm) + (dt_s, mW),))  # Just the end sample.
    # np.linspace needs an integer sample count, so cast the ceiling to int.
    samples_s = np.linspace(0, dt_s, int(np.ceil(dist_mm / max_mm)) + 1)
    timeRange_s = (0, dt_s)
    if mW is None:
        return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                             np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                             np.diff(samples_s)])
    else:
        return np.transpose([np.interp(samples_s[1:], timeRange_s, (start_xy_mm[0], end_xy_mm[0])),
                             np.interp(samples_s[1:], timeRange_s, (start_xy_mm[1], end_xy_mm[1])),
                             np.diff(samples_s),
                             mW * np.ones_like(samples_s[1:])])
Author: cooptechnodent, Project: OpenFL, Lines: 28, Source: Printer.py
Example 5: test_mean_std_12bit
def test_mean_std_12bit(self):
    # Input 12-bit, with an 8-bit color target
    input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    color_target = np.tile(np.arange(256)[:, None, None], (1, 1, 3))
    luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                            color_target.astype(np.uint8))
    np.testing.assert_array_equal(luts[0], luts[1])
    np.testing.assert_array_equal(luts[1], luts[2])
    lut = luts[0]
    assert np.all(lut[:8] == 0)
    assert np.all(lut[-8:] == 4096)
    assert np.diff(lut[8:-8]).min() == 1
    assert np.diff(lut[8:-8]).max() == 2
    # Input 12-bit, with a 12-bit color target
    input_scene = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    color_target = np.tile(np.arange(4096)[:, None, None], (1, 1, 3))
    luts = hm.mean_std_luts(input_scene.astype(np.uint16),
                            color_target.astype(np.uint16))
    # Should be a 1-to-1 look-up table...
    np.testing.assert_array_equal(luts[0], np.arange(4097))
Author: huleg, Project: color_balance, Lines: 26, Source: histogram_match_tests.py
Example 6: __init__
def __init__(self, turn, elem, single, name, s, x, xp, y, yp, pc, de, tau, **args):
    apc = float(pc[0]) * 1e9
    ade = float(de[0])
    self.m0 = self.pmass
    en = np.sqrt(apc**2 + self.pmass**2)
    self.e0 = en - ade
    self.p0c = np.sqrt(self.e0**2 - self.m0**2)
    # structure: use integer division so the reshape below gets integer dimensions
    self.elem = np.array(elem, dtype=int)
    self.turn = np.array(turn, dtype=int)
    d0 = np.where(np.diff(self.elem) != 0)[0][0] + 1
    d1 = (np.where(np.diff(self.turn) != 0)[0][0] + 1) // d0
    d2 = len(self.turn) // d1 // d0
    self.single = np.array(single, dtype=int)
    self.name = np.array(name, dtype=str)
    self.s = np.array(s, dtype=float)
    self.x = np.array(x, dtype=float)
    self.y = np.array(y, dtype=float)
    self.tau = -np.array(tau, dtype=float) * self.clight
    opd = np.array(pc, dtype=float) * (1e9 / self.p0c)
    self.delta = opd - 1
    self.pt = np.array(de, dtype=float) / self.p0c
    self.px = np.array(xp, dtype=float) * opd
    self.py = np.array(yp, dtype=float) * opd
    for nn, vv in self.__dict__.items():
        if hasattr(vv, '__len__') and len(vv) == d0 * d1 * d2:
            setattr(self, nn, vv.reshape(d2, d1, d0))
Author: vrosnet, Project: SixTrackLib, Lines: 27, Source: sixdump.py
Example 7: get_resampling_matrix
def get_resampling_matrix(global_grid, local_grid):
    """Build the rectangular matrix that linearly resamples from the global grid to a local grid.

    The local grid range must be contained within the global grid range.

    Args:
        global_grid(numpy.ndarray): Sorted array of n global grid wavelengths.
        local_grid(numpy.ndarray): Sorted array of m local grid wavelengths.

    Returns:
        numpy.ndarray: Array of (m,n) matrix elements that perform the linear resampling.
    """
    assert np.all(np.diff(global_grid) > 0), 'Global grid is not strictly increasing.'
    assert np.all(np.diff(local_grid) > 0), 'Local grid is not strictly increasing.'
    # Locate each local wavelength in the global grid.
    global_index = np.searchsorted(global_grid, local_grid)
    assert local_grid[0] >= global_grid[0], 'Local grid extends below global grid.'
    assert local_grid[-1] <= global_grid[-1], 'Local grid extends above global grid.'
    # Lookup the global-grid bracketing interval (xlo,xhi) for each local grid point.
    # Note that this gives xlo = global_grid[-1] if local_grid[0] == global_grid[0]
    # but this is fine since the coefficient of xlo will be zero.
    global_xhi = global_grid[global_index]
    global_xlo = global_grid[global_index - 1]
    # Create the rectangular interpolation matrix to return.
    alpha = (local_grid - global_xlo) / (global_xhi - global_xlo)
    local_index = np.arange(len(local_grid), dtype=int)
    matrix = np.zeros((len(local_grid), len(global_grid)))
    matrix[local_index, global_index] = alpha
    matrix[local_index, global_index - 1] = 1 - alpha
    return matrix
Author: desihub, Project: desispec, Lines: 30, Source: coaddition.py
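The assertions at the top of Example 7 use np.diff to verify that a wavelength grid is strictly increasing: all consecutive differences must be positive. The same check can stand alone; the helper name below is mine, not part of desispec:

import numpy as np

def is_strictly_increasing(grid):
    # True only if every element is larger than the one before it
    return bool(np.all(np.diff(grid) > 0))

print(is_strictly_increasing([1.0, 2.0, 3.5]))  # True
print(is_strictly_increasing([1.0, 1.0, 2.0]))  # False (repeated value)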
Example 8: create_grid_polygons
def create_grid_polygons(x, y):
    """
    Creates a list of grid polygons (rectangles) in well-known text (WKT) format from evenly spaced x and y vectors.

    Args:
        x (1d numpy array): vector of x-values
        y (1d numpy array): vector of y-values

    Returns:
        list: grid polygons in WKT format
    """
    import numpy as np
    import pdb
    xdiff = np.diff(x)
    if np.std(xdiff) > 1e-10:
        raise ValueError('Uneven longitude spacing.')
    dx = np.mean(xdiff)
    ydiff = np.diff(y)
    if np.std(ydiff) > 1e-10:
        raise ValueError('Uneven latitude spacing.')
    dy = np.mean(ydiff)
    logger.debug('Spacing is ({},{})'.format(dx, dy))
    xmatr, ymatr = np.meshgrid(x, y)
    rows = []
    for (i, j), x_ij in np.ndenumerate(xmatr):
        y_ij = ymatr[i, j]
        x1, y1 = x_ij - dx/2., y_ij - dy/2.
        x2, y2 = x_ij + dx/2., y_ij + dy/2.
        rows.append((i, j, x_ij, y_ij,
                     'POLYGON(({x1} {y1},{x1} {y2},{x2} {y2},{x2} {y1},{x1} {y1}))'.format(
                         x1=x1, y1=y1, x2=x2, y2=y2)))
    return rows
Author: joelgoop, Project: weather-data-processing, Lines: 35, Source: calculations.py
Example 9: test_power
def test_power():
    a = 5.  # shape
    samples = 10000
    s1 = np.random.power(a, samples)
    s2 = common.rand_pow_array(a, samples)
    plt.figure('power test')
    count1, bins1, ignored1 = plt.hist(s1,
                                       bins=30,
                                       label='numpy',
                                       histtype='step')
    x = np.linspace(0, 1, 100)
    y = a * x**(a - 1.0)
    normed_y1 = samples * np.diff(bins1)[0] * y
    plt.plot(x, normed_y1, label='numpy.random.power fit')
    count2, bins2, ignored2 = plt.hist(s2,
                                       bins=30,
                                       label='joinmarket',
                                       histtype='step')
    normed_y2 = samples * np.diff(bins2)[0] * y
    plt.plot(x, normed_y2, label='common.rand_pow_array fit')
    plt.title('testing power distribution')
    plt.legend(loc='upper left')
    plt.show()
Author: AdamISZ, Project: joinmarket, Lines: 25, Source: randomfunc-test.py
Example 10: xCoordinates
def xCoordinates(sobel_img):
    num_rows = float(len(sobel_img))  # get number of x values
    # sum along y axis
    vert_sum = np.sum(sobel_img, axis=0)
    # make it an average value (divide by # of x values)
    vert_sum = np.divide(vert_sum, num_rows)
    x = np.arange(0, len(vert_sum))         # for graphing
    xnew = np.arange(0, len(vert_sum), 50)  # for smoothing
    # smooth
    y_smooth = spline(x, vert_sum, xnew)
    # make a sin curve 1/3 of the width of image
    img_width, img_height = sobel_img.shape
    z = np.arange(0, int(img_width / 3), 1)

    def f(x):
        return np.sin(x / 90) * -15 + 25
    f = [f(i) for i in z]  # make sine into an array
    # convolve sine and the vertical sum
    y_conv = np.convolve(vert_sum, f, 'same')
    # detect local minima
    mins = (np.diff(np.sign(np.diff(y_conv))) > 0).nonzero()[0] + 1
    return mins
Author: saraoswald, Project: Tree-Re-ID, Lines: 28, Source: detect.py
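Examples 10 and 11 both end with the expression (np.diff(np.sign(np.diff(y))) > 0).nonzero()[0] + 1. The inner diff gives the local slope, sign reduces it to rising/falling, and a positive jump in that sign marks a valley. A sketch on an invented signal:

import numpy as np

y = np.array([3.0, 1.0, 2.0, 4.0, 0.5, 2.5])
# slopes: [-2, 1, 2, -3.5, 2] -> signs: [-1, 1, 1, -1, 1]
# a -1 -> +1 transition in the slope sign is a local minimum
minima = (np.diff(np.sign(np.diff(y))) > 0).nonzero()[0] + 1
print(minima)  # [1 4] -> y[1] = 1.0 and y[4] = 0.5 are local minima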
Example 11: yCoordinates
def yCoordinates(sobel_img):
    num_col = float(len(sobel_img[0]))  # number of y values
    # sum along x axis
    horiz_sum = np.sum(sobel_img, axis=1)
    # average value
    horiz_sum = np.divide(horiz_sum, num_col)
    y = np.arange(0, len(horiz_sum))
    ynew = np.arange(0, len(horiz_sum))
    x_smooth = spline(y, horiz_sum, ynew)
    # make a sin curve 1/3 of the height
    img_width, img_height = sobel_img.shape
    z = np.arange(0, int(img_height / 3), 1)

    def f(x):
        return np.sin(x / 90) * -15 + 25
    f = [f(i) for i in z]  # make sine into an array
    # convolve sine and the horizontal sum
    y_conv = np.convolve(horiz_sum, f, 'same')
    # detect local minima
    mins = (np.diff(np.sign(np.diff(y_conv))) > 0).nonzero()[0] + 1
    return mins
Author: saraoswald, Project: Tree-Re-ID, Lines: 26, Source: detect.py
Example 12: check_obs_scheme
def check_obs_scheme(self):
    """Check the internal validity of the provided observation schemes."""
    # check sub_pops
    idx_union = np.sort(self._sub_pops[0])
    i = 1
    while idx_union.size < self._p and i < len(self._sub_pops):
        idx_union = np.union1d(idx_union, self._sub_pops[i])
        i += 1
    if idx_union.size != self._p or np.any(idx_union != np.arange(self._p)):
        raise Exception(('all subpopulations together have to cover '
                         'exactly all included observed variables y_i in y. '
                         'This is not the case. Change the definition of '
                         'subpopulations in variable sub_pops or reduce '
                         'the number of observed variables p. '
                         'The union of indices of all subpopulations is'),
                        idx_union)
    # check obs_time
    if not self._obs_time[-1] == self._T:
        raise Exception(('Entries of obs_time give the respective ends of '
                         'the periods of observation for any '
                         'subpopulation. Hence the last entry of obs_time '
                         'has to be the full recording length. The last '
                         'entry of obs_time is '), self._obs_time[-1])
    if np.any(np.diff(self._obs_time) < 1):
        raise Exception(('lengths of observation have to be at least 1. '
                         'Minimal observation time for a subpopulation: '),
                        np.min(np.diff(self._obs_time)))
    # check obs_pops
    if not self._obs_time.size == self._obs_pops.size:
        raise Exception(('each entry of obs_pops gives the index of the '
                         'subpopulation observed up to the respective '
                         'time given in obs_time. Thus the sizes of the '
                         'two arrays have to match. They do not. '
                         'No. of subpop. switch points and no. of '
                         'subpopulations observed up to switch points '
                         'are '), (self._obs_time.size, self._obs_pops.size))
    idx_pops = np.sort(np.unique(self._obs_pops))
    if not np.min(idx_pops) == 0:
        raise Exception(('first subpopulation has to have index 0, but '
                         'is given the index '), np.min(idx_pops))
    elif not idx_pops.size == len(self._sub_pops):
        raise Exception(('number of specified subpopulations in variable '
                         'sub_pops does not meet the number of '
                         'subpopulations indexed in variable obs_pops. '
                         'Delete subpopulations that are never observed, '
                         'or change the observed subpopulations in '
                         'variable obs_pops accordingly. The number of '
                         'indexed subpopulations is '),
                        len(self._sub_pops))
    elif not np.all(np.diff(idx_pops) == 1):
        raise Exception(('subpopulation indices have to be consecutive '
                         'integers from 0 to the total number of '
                         'subpopulations. This is not the case. '
                         'Given subpopulation indices are '),
                        idx_pops)
Author: mackelab, Project: pyLDS_dev, Lines: 60, Source: obs_scheme.py
Example 13: _buildInterp
def _buildInterp(self, x, y, z, pot):
    """ Private function to build interpolation arrays using potential
    array `pot`. Assumes that only the positive part of z is in the array,
    so reflects the array in the (x, y) plane.
    """
    self.xmin = x[0]
    self.xmax = x[-1]
    self.ymin = y[0]
    self.ymax = y[-1]
    self.zmin = -z[-1]
    self.zmax = z[-1]
    # Field in negative z direction. Reverse the order in this axis.
    potNeg = pot[..., -1:0:-1]
    # Concatenate positive and negative z direction arrays.
    _z = np.hstack((-z[-1:0:-1], z))
    _pot = np.dstack((potNeg, pot))
    self.bInterpolator = Interpolator((x, y, _z), _pot)
    # Build difference derivative arrays
    self.dx = x[1] - x[0]
    self.dy = y[1] - y[0]
    self.dz = z[1] - z[0]
    dbdx = np.diff(_pot, axis=0) / self.dx
    dbdy = np.diff(_pot, axis=1) / self.dy
    dbdz = np.diff(_pot, axis=2) / self.dz
    x_dbdx = x[:-1] + self.dx / 2
    y_dbdy = y[:-1] + self.dy / 2
    z_dbdz = _z[:-1] + self.dz / 2
    self.dBdxInterp = Interpolator((x_dbdx, y, _z), dbdx)
    self.dBdyInterp = Interpolator((x, y_dbdy, _z), dbdy)
    self.dBdzInterp = Interpolator((x, y, z_dbdz), dbdz)
Author: softleygroup, Project: zflyer, Lines: 34, Source: hexapole.py
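Example 13 builds finite-difference derivatives by applying np.diff along one axis at a time and dividing by the grid spacing; the output is one sample shorter along that axis, which is why the interpolation grids above are shifted by half a step. A sketch on a toy potential (not the hexapole field of the original code):

import numpy as np

x = np.linspace(0.0, 1.0, 5)
y = np.linspace(0.0, 2.0, 4)
X, Y = np.meshgrid(x, y, indexing='ij')
pot = X**2 + 3.0 * Y                 # toy potential on the (x, y) grid, shape (5, 4)

dx = x[1] - x[0]
dy = y[1] - y[0]
dpot_dx = np.diff(pot, axis=0) / dx  # derivative w.r.t. x, shape (4, 4)
dpot_dy = np.diff(pot, axis=1) / dy  # derivative w.r.t. y, shape (5, 3)
print(dpot_dx.shape, dpot_dy.shape)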
Example 14: get_blotter_pnl
def get_blotter_pnl(order_qty, filled_qty, filled_price, cum_position, data, drawdown):
    # import pdb; pdb.set_trace()
    mid = midpoint(data)
    cash = np.sum(filled_qty * filled_price) * (-1.0)
    open_cash = cum_position[-1] * mid[-1]
    pnl = cash + open_cash
    pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
    spread = np.cumsum((mid - filled_price) * filled_qty)
    pnl_t = spread[1:] + pnl_t
    assert abs(pnl - pnl_t[-1]) < 0.01
    running_max = np.maximum.accumulate(pnl_t)
    idx = np.where(pnl_t - running_max < drawdown)[0]
    if len(idx) > 0:
        stop_idx = np.min(idx)
        cum_position[(stop_idx + 1):] = 0.0
        pnl_t = np.cumsum(cum_position[:-1] * np.diff(mid))
        order_qty[(stop_idx + 1):] = 0.0
        filled_qty[(stop_idx + 1):] = 0.0
        spread = np.cumsum((mid - filled_price) * filled_qty)
        pnl_t = spread[1:] + pnl_t
    order_volume = np.sum(np.abs(order_qty))
    trade_volume = np.sum(np.abs(filled_qty))
    result = np.array([(pnl_t[-1], np.min(pnl_t), np.max(pnl_t),
                        np.min(cum_position), np.max(cum_position), trade_volume,
                        order_volume, trade_volume * 1.0 / order_volume)],
                      dtype=[('total_pnl', 'f'), ('min_pnl', 'f'),
                             ('max_pnl', 'f'), ('min_position', int),
                             ('max_position', int), ('volume', int),
                             ('order_volume', int), ('fill_ratio', float)])
    return result
Author: anlovescat, Project: scratch_space0, Lines: 34, Source: hft_strat_lib.py
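The heart of Example 14 is marking a position to market: each interval's profit is the position held times the price change, so np.cumsum(cum_position[:-1] * np.diff(mid)) is the running P&L. A stripped-down sketch with invented prices and positions (it omits the execution-spread term the full example adds):

import numpy as np

mid = np.array([100.0, 100.5, 100.2, 101.0])  # mid prices (hypothetical)
position = np.array([0, 10, 10, 0])           # contracts held entering each interval

pnl_t = np.cumsum(position[:-1] * np.diff(mid))
print(pnl_t)  # [ 0. -3.  5.] -> running mark-to-market P&L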
Example 15: zero_crossings
def zero_crossings(y_axis, window=11):
    """
    Algorithm to find zero crossings. Smooths the curve and finds the
    zero-crossings by looking for a sign change.

    keyword arguments:
    y_axis -- A list containing the signal over which to find zero-crossings
    window -- the dimension of the smoothing window; should be an odd integer
        (default: 11)

    return -- the index for each zero-crossing
    """
    # smooth the curve
    length = len(y_axis)
    x_axis = np.asarray(range(length), int)
    # discard tail of smoothed signal
    y_axis = _smooth(y_axis, window)[:length]
    zero_crossings = np.where(np.diff(np.sign(y_axis)))[0]
    indices = [x_axis[index] for index in zero_crossings]
    # check if zero-crossings are valid
    diff = np.diff(indices)
    if diff.std() / diff.mean() > 0.2:
        print(diff.std() / diff.mean())
        print(np.diff(indices))
        raise ValueError(
            "False zero-crossings found, indicates problem {0} or {1}".format(
                "with smoothing window", "with offset"))
    # check if any zero crossings were found
    if len(zero_crossings) < 1:
        raise ValueError("No zero crossings found")
    return indices
Author: MonsieurV, Project: py-findpeaks, Lines: 35, Source: peakdetect.py
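The key line in Example 15 is np.where(np.diff(np.sign(y_axis)))[0]: wherever the sign of the signal changes between two consecutive samples, the difference of the signs is non-zero, and np.where returns the index just before the crossing. A standalone sketch on a made-up signal:

import numpy as np

y = np.array([-2.0, -0.5, 1.0, 2.0, -1.0, -3.0])
crossings = np.where(np.diff(np.sign(y)))[0]
print(crossings)  # [1 3] -> sign flips between indices 1-2 and 3-4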
Example 16: _compute_divided_differences
def _compute_divided_differences(xvals, fvals, N=None, full=True, forward=True):
    """Return a matrix of divided differences for the xvals, fvals pairs

    DD[i, j] = f[x_{i-j}, ..., x_i] for 0 <= j <= i

    If full is False, just return the main diagonal (or last row):
    f[a], f[a, b] and f[a, b, c].
    If forward is False, return f[c], f[b, c], f[a, b, c]."""
    if full:
        if forward:
            xvals = np.asarray(xvals)
        else:
            xvals = np.array(xvals)[::-1]
        M = len(xvals)
        N = M if N is None else min(N, M)
        DD = np.zeros([M, N])
        DD[:, 0] = fvals[:]
        for i in range(1, N):
            DD[i:, i] = np.diff(DD[i - 1:, i - 1]) / (xvals[i:] - xvals[:M - i])
        return DD

    xvals = np.asarray(xvals)
    dd = np.array(fvals)
    row = np.array(fvals)
    idx2Use = (0 if forward else -1)
    dd[0] = fvals[idx2Use]
    for i in range(1, len(xvals)):
        denom = xvals[i:i + len(row) - 1] - xvals[:len(row) - 1]
        row = np.diff(row)[:] / denom
        dd[i] = row[idx2Use]
    return dd
Author: ElDeveloper, Project: scipy, Lines: 31, Source: zeros.py
Example 17: _get_ladder_adjustment
def _get_ladder_adjustment(self, time, betas0, ratios):
    """
    Execute temperature adjustment according to dynamics outlined in
    `arXiv:1501.05823 <http://arxiv.org/abs/1501.05823>`_.
    """
    # Some sanity checks on the ladder...
    assert np.all(np.diff(betas0) < 1), \
        'Temperatures should be in ascending order.'
    assert betas0[0] == 1, \
        'Bottom temperature should be 1.'
    betas = betas0.copy()
    # Modulate temperature adjustments with a hyperbolic decay.
    decay = self.adaptation_lag / (time + self.adaptation_lag)
    kappa = decay / self.adaptation_time
    # Construct temperature adjustments.
    dSs = kappa * (ratios[:-1] - ratios[1:])
    # Compute new ladder (hottest and coldest chains don't move).
    deltaTs = np.diff(1 / betas[:-1])
    deltaTs *= np.exp(dSs)
    betas[1:-1] = 1 / (np.cumsum(deltaTs) + 1 / betas[0])
    assert np.all(np.diff(betas) < 0), \
        'Temperatures not correctly ordered following temperature dynamics: {:}'.format(betas)
    # Don't mutate the ladder here; let the client code do that.
    return betas - betas0
Author: farr, Project: emcee, Lines: 32, Source: ptsampler.py
Example 18: test_volatility
def test_volatility():
    # Test volatility
    G = np.zeros([3, 3, 3])
    G[0, 1, [0, 1, 2]] = 1
    G[0, 2, 1] = 1
    G[1, 2, 2] = 1
    G = G + G.transpose([1, 0, 2])
    # global volatility
    v_global = teneto.networkmeasures.volatility(G)
    # volatility per time point
    v_time = teneto.networkmeasures.volatility(G, calc='time')
    v_tr = np.array([2/6, 4/6])
    if not v_global == np.mean(v_tr):
        raise AssertionError()
    if not all(v_time == v_tr):
        raise AssertionError()
    # event displacement
    v_er_tr = np.array([0, 2/6, 2/6])
    v_er = teneto.networkmeasures.volatility(
        G, calc='event_displacement', event_displacement=0)
    if not all(v_er == v_er_tr):
        raise AssertionError()
    # volatility per edge and per node
    v_edge = teneto.networkmeasures.volatility(G, calc='edge')
    if not np.all(v_edge == np.mean(np.abs(np.diff(G)), axis=-1)):
        raise AssertionError()
    v_node = teneto.networkmeasures.volatility(G, calc='node')
    if not np.all(v_node == np.mean(
            np.mean(np.abs(np.diff(G)), axis=-1), axis=-1)):
        raise AssertionError()
Author: wiheto, Project: teneto, Lines: 30, Source: test_volatility.py
Example 19: findspikes
def findspikes(t, x, dxdt):
    """Searches for spikes in a TODL LTC2442 data series. The algorithm
    looks for the given threshold. If it is exceeded and the subsequent
    data point exceeds the threshold as well with a negative sign, the
    point is flagged as a spike.

    Args:
        t: time
        x: data
        dxdt: Threshold for rejection; a working rejection for FP07 is 0.1 [V/s]
    """
    # print('Despiking')
    dt = np.diff(t)
    dx = np.diff(x)
    spikes = np.zeros(np.shape(t))
    for i in range(1, len(dt) - 1):
        dxdt1 = dx[i] / dt[i]
        dxdt2 = dx[i + 1] / dt[i + 1]
        if abs(dxdt1) > dxdt:
            # the following step must also exceed the threshold, with opposite sign
            if abs(dxdt2) > dxdt:
                if np.sign(dxdt1) == -np.sign(dxdt2):
                    spikes[i + 1] = 1
    # print('Done despiking')
    return spikes
Author: MarineDataTools, Project: pymqdatastream, Lines: 26, Source: todl_data_processing.py
Example 20: plot_thist
def plot_thist(self, ax, index, xscale=1.0, yscale=1.0, xlabel='', ylabel='',
               do_rate=False):
    d = []
    for k in self.ts.j.hosts.keys():
        v = self.ts.assemble(index, k, 0)
        if do_rate:
            d.append(numpy.divide(numpy.diff(v), numpy.diff(self.ts.t)))
        else:
            d.append((v[:-1] + v[1:]) / 2.0)
    a = numpy.array(d)
    h = []
    mn = numpy.min(a)
    mn = min(0., mn)
    mx = numpy.max(a)
    n = float(len(self.ts.j.hosts.keys()))
    for i in range(len(self.ts.t) - 1):
        hist = numpy.histogram(a[:, i], 30, (mn, mx))
        h.append(hist[0])
    h2 = numpy.transpose(numpy.array(h))
    ax.pcolor(self.ts.t / xscale, hist[1] / yscale, h2,
              edgecolors='none', rasterized=True, cmap='spectral')
    self.setlabels(ax, self.ts, index, xlabel, ylabel, yscale)
    ax.autoscale(tight=True)
Author: vishnu2, Project: tacc_stats, Lines: 26, Source: plots.py
Note: The numpy.diff examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.