This article collects typical usage examples of Python's numpy.around function. If you are wondering what numpy.around does or how to use it, the curated code examples below may help.
A total of 20 code examples of the around function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
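
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects listed below) of numpy.around's basic behavior: it rounds array-like input to the given number of decimals, negative decimals round to tens and hundreds, and ties are resolved to the nearest even value.

import numpy as np

# Default is decimals=0: round to the nearest integer.
print(np.around([0.37, 1.64]))                 # [0. 2.]

# Round to one decimal place.
print(np.around([0.37, 1.64], decimals=1))     # [0.4 1.6]

# Negative decimals round to tens, hundreds, and so on.
print(np.around([1, 5, 6, 11], decimals=-1))   # [ 0  0 10 10]

# Ties go to the nearest even value ("banker's rounding").
print(np.around([0.5, 1.5, 2.5]))              # [0. 2. 2.]
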
Example 1: plot_density

def plot_density(count_trips, count, title):
    grid = np.zeros((config.bins, config.bins))
    for (i, j), z in np.ndenumerate(grid):
        try:
            grid[j, i] = float(count[(i, j)]) / float(count_trips[(i, j)])
        except:
            grid[j, i] = 0
        #print "----"
        #print grid[i,j], i, j
        #print count[(i,j)]
        #print count_trips[(i,j)]
    grid = np.flipud(grid)  # to counter matshow vertical flip
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.matshow(grid, cmap='spectral')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    xticks = np.linspace(config.minlong, config.maxlong, num=round(config.bins/2))
    yticks = np.linspace(config.minlat, config.maxlat, num=round(config.bins/2))
    yticks = yticks[::-1]
    xticks = np.around(xticks, decimals=1)
    yticks = np.around(yticks, decimals=1)
    xspace = np.linspace(0, config.bins-1, config.bins/2)
    yspace = np.linspace(0, config.bins-1, config.bins/2)
    plt.xticks(xspace, xticks)
    plt.yticks(yspace, yticks)
    for (i, j), z in np.ndenumerate(grid):
        ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
    plt.title(title)
    plt.show()

Author: od0, Project: HW2, Lines: 30, Source: A.py
Example 2: test_logvecadd

def test_logvecadd(self):
    vec1 = log(array([1, 2, 3, 4]))
    vec2 = log(array([5, 6, 7, 8]))
    sumvec = array([1.79175947, 2.07944154, 2.30258509, 2.48490665])
    self.assertTrue(
        array_equal(around(MarkovModel._logvecadd(vec1, vec2), decimals=3),
                    around(sumvec, decimals=3)))

Author: BioGeek, Project: biopython, Lines: 7, Source: test_MarkovModel.py
Example 3: _get_initial_classes

def _get_initial_classes(self):
    images = map(lambda f: cv2.imread(path.join(self._root, f)), self._files)
    self._avg_pixels = np.array([], dtype=np.uint8)
    # extract parts from each image for all of our 6 categories
    for i in range(0, self._n_objects):
        rects = self._rects[:, i]
        # compute maximum rectangle
        rows = np.max(rects['f2'] - rects['f0'])
        cols = np.max(rects['f3'] - rects['f1'])
        # extract annotated rectangles
        im_rects = map(lambda (im, r): im[r[0]:r[2], r[1]:r[3], :], zip(images, rects))
        # resize all rectangles to the max size & average all the rectangles
        im_rects = np.array(map(lambda im: cv2.resize(im, (cols, rows)), im_rects), dtype=np.float)
        avgs = np.around(np.average(im_rects, axis=0))
        # average the resulting rectangle to compute
        mn = np.around(np.array(cv2.mean(avgs), dtype='float'))[:-1].astype('uint8')
        if self._avg_pixels.size == 0:
            self._avg_pixels = mn
        else:
            self._avg_pixels = np.vstack((self._avg_pixels, mn))

Author: fierval, Project: retina, Lines: 26, Source: regions_detect_knn.py
Example 4: test_make_tone_irregular

def test_make_tone_irregular():
    fq = 15066
    db = 82
    fs = 200101
    dur = 0.7
    risefall = 0.0015
    calv = 0.888
    caldb = 99
    npts = int(fs*dur)
    tone, timevals = tools.make_tone(fq, db, dur, risefall, fs, caldb, calv)
    print 'lens', npts, len(tone), len(timevals)
    assert len(tone) == npts
    assert len(timevals) == npts
    spectrum = np.fft.rfft(tone)
    peak_idx = (abs(spectrum - max(spectrum))).argmin()
    freq_idx = np.around(fq*(float(npts)/fs))
    assert peak_idx == freq_idx
    print 'intensities', (20 * np.log10(tools.signal_amplitude(tone, fs)/calv)) + caldb, db
    assert np.around((20 * np.log10(tools.signal_amplitude(tone, fs)/calv)) + caldb, 1) == db
    print 'durs', np.around(timevals[-1], 5), dur - (1./fs)
    assert dur - 2*(1./fs) < timevals[-1] <= dur - (1./fs)

Author: boylea, Project: sparkle, Lines: 26, Source: test_audiotools.py
Example 5: test_metrics_correctness_with_iterator

def test_metrics_correctness_with_iterator(self):
    layers = [
        keras.layers.Dense(8, activation='relu', input_dim=4,
                           kernel_initializer='ones'),
        keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
    ]
    model = testing_utils.get_model_from_layers(layers, (4,))
    model.compile(
        loss='binary_crossentropy',
        metrics=['accuracy', metrics_module.BinaryAccuracy()],
        optimizer='rmsprop',
        run_eagerly=testing_utils.should_run_eagerly())
    np.random.seed(123)
    x = np.random.randint(10, size=(100, 4)).astype(np.float32)
    y = np.random.randint(2, size=(100, 1)).astype(np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(np.around(outs[1], decimals=1), 0.5)
    self.assertEqual(np.around(outs[2], decimals=1), 0.5)

    y = np.zeros((100, 1), dtype=np.float32)
    dataset = dataset_ops.Dataset.from_tensor_slices((x, y))
    dataset = dataset.repeat(100)
    dataset = dataset.batch(10)
    iterator = dataset_ops.make_one_shot_iterator(dataset)
    outs = model.evaluate(iterator, steps=10)
    self.assertEqual(outs[1], 0.)
    self.assertEqual(outs[2], 0.)

Author: adit-chandra, Project: tensorflow, Lines: 33, Source: training_dataset_test.py
Example 6: save2mat

def save2mat(self, i):
    global samples0, samples1
    std0 = np.around(np.std(samples0), 5)
    std1 = np.around(np.std(samples1), 5)
    print 'std0:', std0, 'std1', std1
    scipy.io.savemat('./data/'+folder_name+'/'+filename+'_'+str(i)+'.mat',
                     mdict={'s0': samples0, 's1': samples1, 'timestamp': self.timestamp,
                            'fs': self.sdr.sample_rate, 'ref_addr': ref_addr})

Author: TuringTW, Project: SoftwareRadar, Lines: 7, Source: go_mat.py
Example 7: test_circmean_against_scipy

def test_circmean_against_scipy():
    # testing against scipy.stats.circmean function
    # the data is the same as the test before, but in radians
    data = np.array([0.89011792, 1.1693706, 0.6981317, 1.90240888, 0.54105207,
                     6.24827872])
    answer = scipy.stats.circmean(data)
    assert_equal(np.around(answer, 2), np.around(circmean(data), 2))

Author: bernie-simon, Project: astropy, Lines: 7, Source: test_circstats.py
Example 8: receivers_setup

def receivers_setup(self, pr, pz, Tiempo):
    self.receiver_r = np.int32(np.around(np.array(pr)/self.dr))
    self.receiver_z = np.int32(np.around(np.array(pz)/self.dz))
    self.N_z = np.size(self.receiver_z, 0)
    self.receivers_signals = np.zeros((Tiempo, self.N_z), dtype=np.float32)

Author: cageo, Project: Iturraran-Viveros-2013, Lines: 7, Source: FD25D_CL.py
Example 9: test_obtain_shape_from_bb

def test_obtain_shape_from_bb():
    s = sdm2.obtain_shape_from_bb(np.array([[26, 49], [350, 400]]))
    assert ((np.around(s.points) == np.around(initial_shape[3].points)).
            all())
    assert (s.n_dims == 2)
    assert (s.n_landmark_groups == 0)
    assert (s.n_points == 68)

Author: yymath, Project: menpo, Lines: 7, Source: sdm_test.py
Example 10: us_grid

def us_grid(resolution=.5, sparse=True):
    resolution = .5
    bounds = USA.bounds
    # Grid boundaries are determined by nearest degree.
    min_long = np.floor(bounds[0])
    min_lat = np.floor(bounds[1])
    max_long = np.ceil(bounds[2])
    max_lat = np.ceil(bounds[3])
    # Division should be close to an integer.
    # Add one to number of points to include the end.
    # This is robust only to resolutions that "evenly" divide the range.
    nPointsLong = np.around((max_long - min_long) / resolution) + 1
    nPointsLat = np.around((max_lat - min_lat) / resolution) + 1
    long_points = np.linspace(min_long, max_long, nPointsLong)
    lat_points = np.linspace(min_lat, max_lat, nPointsLat)
    outline = contiguous_outline2('../tiger/cb_2013_us_nation_20m.shp')
    for i, (xi, yi) in enumerate(product(range(len(long_points)-1),
                                         range(len(lat_points)-1))):
        cell = box(long_points[xi], lat_points[yi],
                   long_points[xi+1], lat_points[yi+1])
        if sparse:
            # Add cell only if it intersects contiguous USA
            if cell.intersects(outline):
                yield cell
        else:
            yield cell

Author: chebee7i, Project: twitter, Lines: 28, Source: usoutline.py
Example 11: test_pmf_accuracy

def test_pmf_accuracy():
    """Compare accuracy of the probability mass function.

    Compare the results with the accuracy check proposed in [Hong2013]_,
    equation (15).
    """
    [p1, p2, p3] = np.around(np.random.random_sample(size=3), decimals=2)
    [n1, n2, n3] = np.random.random_integers(1, 10, size=3)
    nn = n1 + n2 + n3
    l1 = [p1 for i in range(n1)]
    l2 = [p2 for i in range(n2)]
    l3 = [p3 for i in range(n3)]
    p = l1 + l2 + l3
    b1 = binom(n=n1, p=p1)
    b2 = binom(n=n2, p=p2)
    b3 = binom(n=n3, p=p3)
    k = np.random.randint(0, nn + 1)
    chi_bn = 0
    for j in range(0, k+1):
        for i in range(0, j+1):
            chi_bn += b1.pmf(i) * b2.pmf(j - i) * b3.pmf(k - j)
    pb = PoiBin(p)
    chi_pb = pb.pmf(k)
    assert np.all(np.around(chi_bn, decimals=10) == np.around(chi_pb,
                                                              decimals=10))

Author: tsakim, Project: poibin, Lines: 25, Source: test_poibin.py
Example 12: _symmetrical_uncertainty

def _symmetrical_uncertainty(X, Y):
    """Symmetrical uncertainty, Press et al., 1988."""
    from Orange.preprocess._relieff import contingency_table
    X, Y = np.around(X), np.around(Y)
    cont = contingency_table(X, Y)
    ig = InfoGain().from_contingency(cont, 1)
    return 2 * ig / (_entropy(cont.sum(0)) + _entropy(cont.sum(1)))

Author: 675801717, Project: orange3, Lines: 7, Source: score.py
Example 13: _plotRound

def _plotRound(self, values):
    """
    A function to round an array-like object while maintaining the
    amount of entries. This is needed for the isolines since we
    want the labels to look pretty (=rounding), but we do not
    know the spacing of the lines. A fixed number of digits after
    rounding might lead to reduced array size.
    """
    inVal = numpy.unique(numpy.sort(numpy.array(values)))
    output = inVal[1:] * 0.0
    digits = -1
    limit = 10
    lim = inVal * 0.0 + 10
    # remove less from the numbers until same length,
    # more than 10 significant digits does not really
    # make sense, does it?
    while len(inVal) > len(output) and digits < limit:
        digits += 1
        val = (numpy.around(numpy.log10(numpy.abs(inVal))) * -1) + digits + 1
        val = numpy.where(val < lim, val, lim)
        val = numpy.where(val > -lim, val, -lim)
        output = numpy.zeros(inVal.shape)
        for i in range(len(inVal)):
            output[i] = numpy.around(inVal[i], decimals=int(val[i]))
        output = numpy.unique(output)
    return output

Author: TimHarvey2, Project: CoolProp, Lines: 26, Source: Plots.py
Example 14: __init__

def __init__(self, parser, k, startIndex=-1, parallel=True, batch=True):
    if startIndex == -1:
        startIndex = k
    self.Data = parser
    self.names = self.Data.getNames()
    self.k = k
    self.clusters = KMeans(k, n_jobs=1 - 2*(not parallel), n_init=10)
    self.props = self.Data.getProperties()
    self.artefacts = np.atleast_2d(self.Data.getList(self.props[0]))
    for attr in self.Data.getProperties()[1:]:
        self.artefacts = np.append(self.artefacts, np.atleast_2d(self.Data.getList(attr)), axis=0)
    self.artefacts = self.artefacts.T
    self.times = self.Data.getList()
    zipped = zip(self.times, self.artefacts, self.names)
    zipped = sorted(zipped, key=lambda x: x[0])
    unzipped = zip(*zipped)
    self.times = list(unzipped[0])
    self.artefacts = np.array(unzipped[1])
    self.names = list(unzipped[2])
    if batch:
        self.trainAll()
        self.currentIndex = len(self.names)-1
    else:
        self.currentIndex = startIndex
        self.noveltyList = np.zeros(len(self.artefacts))
        while self.currentIndex+1 < len(self.names) and self.times[self.currentIndex+1] == self.times[self.currentIndex]:
            self.currentIndex += 1
        while self.currentIndex < len(self.names):
            self.train()
            newArtefacts = [self.currentIndex+1]
            while newArtefacts[-1]+1 < len(self.names) and self.times[newArtefacts[-1]+1] == self.times[newArtefacts[0]]:
                newArtefacts.append(newArtefacts[-1]+1)
            novelties = []
            for i, a in enumerate(self.names[newArtefacts[0]:newArtefacts[-1]+1]):
                dist, cluster = self.novelty(a, normedDistance=False)
                time = self.times[self.names.index(a)]
                novelties.append((dist/self.sizes[cluster], cluster, time, a))
                self.noveltyList[self.currentIndex+i] = novelties[-1][0]
            novelties = sorted(novelties, key=lambda x: x[0])
            scales = {}
            translates = {}
            for k in self.Data.pastCalc.keys():
                if k in self.props:
                    scales[k] = self.Data.pastCalc[k]['std']
                    translates[k] = self.Data.pastCalc[k]['avg']
            for n in novelties[::-1]:
                cent = np.copy(self.centroids[n[1]])
                art = np.copy(self.artefacts[self.names.index(n[3])])
                c = self.clusters.predict(art)[0]
                for i, v in enumerate(self.props):
                    cent[i] = np.around(cent[i] * scales[v] + translates[v], decimals=1)
                    art[i] = np.around(art[i] * scales[v] + translates[v], decimals=1)
                print 'Closest cluster to', n[3], '(released', str(n[2])+') was #'+str(n[1]), 'with distance', str(n[0])+'. Actual cluster was', str(c)+'.'
                if n[0] > 1:
                    print 'Attrs:   RAM ROM CPU DDia DWid DLen Wid Len Dep Vol Mass DPI'
                    print 'Cluster:', cent
                    print 'Design: ', art
                    print 'Diff:   ', art-cent
            self.increment(len(newArtefacts))

Author: Kazjon, Project: SurpriseEval, Lines: 60, Source: Novelty.py
Example 15: print_errors

def print_errors(self):
    """
    Print all error metrics.

    Note:
        For better printing format, install :mod:`prettytable`.
    """
    self.calc_metrics()
    try:
        from prettytable import PrettyTable
        table = PrettyTable(["Error", "Value"])
        table.align["Error"] = "l"
        table.align["Value"] = "l"
        for error in sorted(self.dict_errors.keys()):
            table.add_row([error, np.around(self.dict_errors[error], decimals=8)])
        print()
        print(table.get_string(sortby="Error"))
        print()
    except ImportError:
        print("For better table format install 'prettytable' module.")
        print()
        for error in sorted(self.dict_errors.keys()):
            print(error, np.around(self.dict_errors[error], decimals=8))
        print()

Author: ExtremeLearningMachines, Project: acba.elm, Lines: 32, Source: mltools.py
Example 16: generate_dataset

def generate_dataset(first_pathway_id, first_pathway_genes, second_pathway_id, second_pathway_genes, proteomics, POSITIVE_SAMPLES=100, NEGATIVE_SAMPLES=100):
    means = proteomics.mean(axis=0)
    variances = proteomics.var(axis=0)

    negatives = sample_cov(50, proteomics)
    negatives = np.around(negatives + means.values, 6)
    negatives = pd.DataFrame(negatives, columns=proteomics.columns, index=['negative']*50)

    first_new_pathway_means = pd.Series(np.random.normal(0, variances), index=variances.index)[first_pathway_genes].fillna(0)
    second_new_pathway_means = pd.Series(np.random.normal(0, variances), index=variances.index)[second_pathway_genes].fillna(0)
    first_new_means = pd.concat([means, first_new_pathway_means], axis=1).fillna(0).sum(axis=1).reindex(means.index)
    second_new_means = pd.concat([means, second_new_pathway_means], axis=1).fillna(0).sum(axis=1).reindex(means.index)
    both_new_means = pd.concat([means, first_new_pathway_means, second_new_pathway_means], axis=1).fillna(0).sum(axis=1).reindex(means.index)

    first = sample_cov(50, proteomics)
    first = np.around(first + first_new_means.values, 6)
    first = pd.DataFrame(first, columns=proteomics.columns, index=[first_pathway_id]*50)

    second = sample_cov(50, proteomics)
    second = np.around(second + second_new_means.values, 6)
    second = pd.DataFrame(second, columns=proteomics.columns, index=[second_pathway_id]*50)

    both = sample_cov(50, proteomics)
    both = np.around(both + both_new_means.values, 6)
    both = pd.DataFrame(both, columns=proteomics.columns, index=['negative']*50)

    dataset = pd.concat([negatives, first, second, both]).sample(frac=1)  # shuffle
    filename = './xor_ludwig_svd_normals/'+first_pathway_id+'_'+second_pathway_id+'_inbiomap_exp.csv'
    return dataset.to_csv(filename, index=True, header=True)

Author: codealphago, Project: GSLR, Lines: 32, Source: xor_kegg_pathways.py
Example 17: solve_LP_problem

def solve_LP_problem(self):
    (f_coef_matrix, f_column_vector) = self.build_function_coef_matrix_and_column_vector()
    (d_coef_matrix, d_column_vector) = self.build_derivative_coef_matrix_and_column_vector()

    # Solve the LP problem by combining constraints for both function and derivative info.
    objective_function_vector = matrix(list(itertools.repeat(1.0, self.no_vars)))
    coef_matrix = sparse([f_coef_matrix, d_coef_matrix])
    column_vector = matrix([f_column_vector, d_column_vector])
    min_sol = solvers.lp(objective_function_vector, coef_matrix, column_vector)
    is_consistent = min_sol['x'] is not None

    # Print the LP problem for debugging purposes.
    if self.verbose:
        self.display_LP_problem(coef_matrix, column_vector)

    if is_consistent:
        self.min_heights = np.array(min_sol['x']).reshape(self.no_points_per_axis)
        print np.around(self.min_heights, decimals=2)

        # Since consistency has been established, solve the converse LP problem to get the
        # maximal bounding surface.
        max_sol = solvers.lp(-objective_function_vector, coef_matrix, column_vector)
        self.max_heights = np.array(max_sol['x']).reshape(self.no_points_per_axis)
        print np.around(self.max_heights, decimals=2)

        if self.plot_surfaces:
            self.plot_3D_objects_for_2D_case()
    else:
        print 'No witness for consistency found.'

    return is_consistent

Author: costika1234, Project: PiecewiseLinear, Lines: 33, Source: consistency.py
Example 18: resample

def resample(self, dx, dy, method='nearest'):
    """ Resample array to have spacing `dx`, `dy`. The grid origin remains
    in the same position.

    Parameters
    ----------
    dx : float
        cell dimension 1
    dy : float
        cell dimension 2
    method : str, optional
        interpolation method, currently only 'nearest' supported
    """
    ny, nx = self.bands[0].size
    dx0, dy0 = self._transform[2:4]
    xllcenter, yllcenter = self.center_llref()

    if method == 'nearest':
        rx, ry = dx / dx0, dy / dy0
        I = np.around(np.arange(ry/2, ny, ry)-0.5).astype(int)
        J = np.around(np.arange(rx/2, nx, rx)-0.5).astype(int)
        if I[-1] == ny:
            I = I[:-1]
        if J[-1] == nx:
            J = J[:-1]
        JJ, II = np.meshgrid(J, I)
        values = self[:,:][II, JJ]
    else:
        raise NotImplementedError('method "{0}" not '
                                  'implemented'.format(method))

    t = self._transform
    tnew = (t[0], t[1], dx, dy, t[4], t[5])
    return RegularGrid(tnew, values=values, crs=self.crs,
                       nodata_value=self.nodata)

Author: ivn888, Project: karta, Lines: 35, Source: grid.py
Example 19: nlfer

def nlfer(signal, pitch, parameters):
    #---------------------------------------------------------------
    # Set parameters.
    #---------------------------------------------------------------
    N_f0_min = np.around((parameters['f0_min']*2/float(signal.new_fs))*pitch.nfft)
    N_f0_max = np.around((parameters['f0_max']/float(signal.new_fs))*pitch.nfft)

    window = hanning(pitch.frame_size+2)[1:-1]
    data = np.zeros((signal.size))  # Needs other array, otherwise stride and
    data[:] = signal.filtered       # windowing will modify signal.filtered

    #---------------------------------------------------------------
    # Main routine.
    #---------------------------------------------------------------
    samples = np.arange(int(np.fix(float(pitch.frame_size)/2)),
                        signal.size-int(np.fix(float(pitch.frame_size)/2)),
                        pitch.frame_jump)

    data_matrix = np.empty((len(samples), pitch.frame_size))
    data_matrix[:, :] = stride_matrix(data, len(samples),
                                      pitch.frame_size, pitch.frame_jump)
    data_matrix *= window

    specData = np.fft.rfft(data_matrix, pitch.nfft)

    frame_energy = np.abs(specData[:, N_f0_min-1:N_f0_max]).sum(axis=1)
    pitch.set_energy(frame_energy, parameters['nlfer_thresh1'])
    pitch.set_frames_pos(samples)

Author: Parakrant, Project: AMFM_decompy, Lines: 29, Source: pYAAPT.py
Example 20: image

def image(img, cmap='gray', bar=False, nans=True, clim=None, size=7, ax=None):
    """
    Streamlined display of images using matplotlib.

    Parameters
    ----------
    img : ndarray, 2D or 3D
        The image to display

    cmap : str or Colormap, optional, default = 'gray'
        A colormap to use, for non RGB images

    bar : boolean, optional, default = False
        Whether to append a colorbar

    nans : boolean, optional, default = True
        Whether to replace NaNs, if True, will replace with 0s

    clim : tuple, optional, default = None
        Limits for scaling image

    size : scalar, optional, default = 9
        Size of the figure

    ax : matplotlib axis, optional, default = None
        An existing axis to plot into
    """
    from matplotlib.pyplot import axis, colorbar, figure, gca

    img = asarray(img)

    if (nans is True) and (img.dtype != bool):
        img = nan_to_num(img)

    if ax is None:
        f = figure(figsize=(size, size))
        ax = gca()

    if img.ndim == 3:
        if bar:
            raise ValueError("Cannot show meaningful colorbar for RGB images")
        if img.shape[2] != 3:
            raise ValueError("Size of third dimension must be 3 for RGB images, got %g" % img.shape[2])
        mn = img.min()
        mx = img.max()
        if mn < 0.0 or mx > 1.0:
            raise ValueError("Values must be between 0.0 and 1.0 for RGB images, got range (%g, %g)" % (mn, mx))
        im = ax.imshow(img, interpolation='nearest', clim=clim)
    else:
        im = ax.imshow(img, cmap=cmap, interpolation='nearest', clim=clim)

    if bar is True:
        cb = colorbar(im, fraction=0.046, pad=0.04)
        rng = abs(cb.vmax - cb.vmin) * 0.05
        cb.set_ticks([around(cb.vmin + rng, 1), around(cb.vmax - rng, 1)])
        cb.outline.set_visible(False)

    axis('off')

    return im

Author: freeman-lab, Project: showit, Lines: 60, Source: showit.py
Note: The numpy.around examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Consult each project's License before redistributing or reusing the code, and do not republish without permission.