This article collects typical usage examples of Python's numpy.max function. If you have been wondering what numpy.max does, how to call it, or what real-world uses look like, the curated code examples below may help.
The 20 examples shown below are drawn from open-source projects and are ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
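Before the project excerpts, here is a minimal, self-contained sketch of the basic API (my own illustration, not one of the 20 examples below): np.max reduces an array to its largest element, either globally or along an axis, keepdims=True preserves the reduced axis, and the related np.maximum compares arrays element-wise.

import numpy as np

a = np.array([[3, 7, 1],
              [4, 2, 9]])

print(np.max(a))                         # 9: maximum over the flattened array
print(np.max(a, axis=0))                 # [4 7 9]: column-wise maxima
print(np.max(a, axis=1, keepdims=True))  # [[7] [9]]: row maxima, shape (2, 1)

# np.maximum is the element-wise counterpart (compares two arrays/broadcastable values):
print(np.maximum(a, 5))                  # [[5 7 5] [5 5 9]]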
Example 1: max
def max(self, axis=None, out=None, keepdims=False):
    self._prepare_out(out=out)
    try:
        value = np.max(self.value, axis=axis, out=out, keepdims=keepdims)
    except:  # numpy < 1.7
        value = np.max(self.value, axis=axis, out=out)
    return self.__quantity_instance__(value, self.unit, copy=False)
Author: astrosilverio | Project: astropy | Lines: 7 | Source: quantity.py
Example 2: cfl_superbee_theta
def cfl_superbee_theta(r,cfl,theta=0.95):
    r"""
    CFL-Superbee (Roe's Ultrabee) with theta parameter
    """
    a = np.empty((2,len(r)))
    b = np.zeros((2,len(r)))

    a[0,:] = 0.001
    a[1,:] = cfl
    cfmod1 = np.max(a,axis=0)
    a[0,:] = 0.999
    cfmod2 = np.min(a,axis=0)

    s1 = theta * 2.0 / cfmod1
    phimax = theta * 2.0 / (1.0 - cfmod2)

    a[0,:] = s1*r
    a[1,:] = phimax
    b[1,:] = np.min(a,axis=0)
    ultra = np.max(b,axis=0)

    a[0,:] = ultra
    b[0,:] = 1.0
    b[1,:] = r
    a[1,:] = np.max(b,axis=0)
    return np.min(a,axis=0)
Author: tareqmalas | Project: pyclaw | Lines: 26 | Source: tvd.py
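A side note on the pattern above (my illustration, not from the pyclaw source): stacking candidate values into the rows of a scratch array and reducing with np.max(..., axis=0) / np.min(..., axis=0) is equivalent to the element-wise np.maximum / np.minimum, which avoids the temporary array. A minimal sketch:

import numpy as np

r = np.array([-0.5, 0.3, 1.2, 2.5])
cfl = np.array([0.4, 0.6, 0.8, 0.2])

# Row-stacking followed by a reduction along axis 0 ...
a = np.empty((2, len(r)))
a[0, :] = 0.001
a[1, :] = cfl
cfmod1_stacked = np.max(a, axis=0)

# ... is the same as the element-wise maximum against a scalar floor:
cfmod1_elementwise = np.maximum(0.001, cfl)
assert np.allclose(cfmod1_stacked, cfmod1_elementwise)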
Example 3: cada_torrilhon_limiter
def cada_torrilhon_limiter(r,cfl,epsilon=1.0e-3):
    r"""
    Cada-Torrilhon modified

    Additional Input:
     - *epsilon* =
    """
    a = np.ones((2,len(r))) * 0.95
    b = np.empty((3,len(r)))

    a[0,:] = cfl
    cfl = np.min(a)
    a[1,:] = 0.05
    cfl = np.max(a)

    # Multiply all parts except b[0,:] by (1.0 - epsilon) as well
    b[0,:] = 1.0 + (1+cfl) / 3.0 * (r - 1)
    b[1,:] = 2.0 * np.abs(r) / (cfl + epsilon)
    b[2,:] = (8.0 - 2.0 * cfl) / (np.abs(r) * (cfl - 1.0 - epsilon)**2)
    b[1,::2] *= (1.0 - epsilon)
    a[0,:] = np.min(b)
    a[1,:] = (-2.0 * (cfl**2 - 3.0 * cfl + 8.0) * (1.0-epsilon)
              / (np.abs(r) * (cfl**3 - cfl**2 - cfl + 1.0 + epsilon)))
    return np.max(a)
Author: tareqmalas | Project: pyclaw | Lines: 25 | Source: tvd.py
Example 4: work
def work(self):
    self.worked = True
    kwargs = dict(
        weights=self.weights,
        mus=self.mus,
        sigmas=self.sigmas,
        low=self.low,
        high=self.high,
        q=self.q,
    )
    samples = GMM1(rng=self.rng,
                   size=(self.n_samples,),
                   **kwargs)
    samples = np.sort(samples)
    edges = samples[::self.samples_per_bin]
    #print samples

    pdf = np.exp(GMM1_lpdf(edges[:-1], **kwargs))
    dx = edges[1:] - edges[:-1]
    y = 1 / dx / len(dx)

    if self.show:
        plt.scatter(edges[:-1], y)
        plt.plot(edges[:-1], pdf)
        plt.show()
    err = (pdf - y) ** 2
    print np.max(err)
    print np.mean(err)
    print np.median(err)
    if not self.show:
        assert np.max(err) < .1
        assert np.mean(err) < .01
        assert np.median(err) < .01
Author: AshBT | Project: hyperopt | Lines: 33 | Source: test_tpe.py
Example 5: Haffine_from_points
def Haffine_from_points(fp, tp):
    '''Compute the homography H of an affine transform, such that tp is fp transformed by H.'''
    if fp.shape != tp.shape:
        raise RuntimeError('number of points do not match')

    # Condition (normalize) the points
    # -- source points --
    m = numpy.mean(fp[:2], axis=1)
    maxstd = numpy.max(numpy.std(fp[:2], axis=1)) + 1e-9
    C1 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C1[0, 2] = -m[0] / maxstd
    C1[1, 2] = -m[1] / maxstd
    fp_cond = numpy.dot(C1, fp)

    # -- corresponding (target) points --
    m = numpy.mean(tp[:2], axis=1)
    maxstd = numpy.max(numpy.std(tp[:2], axis=1)) + 1e-9
    C2 = numpy.diag([1/maxstd, 1/maxstd, 1])
    C2[0, 2] = -m[0] / maxstd
    C2[1, 2] = -m[1] / maxstd
    tp_cond = numpy.dot(C2, tp)

    # After conditioning the points have zero mean, so the translation is zero
    A = numpy.concatenate((fp_cond[:2], tp_cond[:2]), axis=0)
    U, S, V = numpy.linalg.svd(A.T)

    # Build matrices B and C
    tmp = V[:2].T
    B = tmp[:2]
    C = tmp[2:4]

    tmp2 = numpy.concatenate((numpy.dot(C, numpy.linalg.pinv(B)), numpy.zeros((2, 1))), axis=1)
    H = numpy.vstack((tmp2, [0, 0, 1]))

    H = numpy.dot(numpy.linalg.inv(C2), numpy.dot(H, C1))  # decondition
    return H / H[2, 2]  # normalize, then return
Author: MarkPrecursor | Project: Programming-Computer-Vision-with-python | Lines: 35 | Source: homography.py
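A hedged usage sketch (my own, not from the book's repository; the point coordinates and the transform are made up): build four source points in homogeneous coordinates, map them with a known affine matrix, and check that the H returned by Haffine_from_points (defined above) reproduces the mapping.

import numpy

# Four source points in homogeneous coordinates, shape (3, n_points).
fp = numpy.array([[0.0, 1.0, 1.0, 0.0],
                  [0.0, 0.0, 1.0, 1.0],
                  [1.0, 1.0, 1.0, 1.0]])

# A known affine transform: 2x2 linear part plus a translation, last row [0, 0, 1].
H_true = numpy.array([[1.2, -0.3,  2.0],
                      [0.4,  0.9, -1.0],
                      [0.0,  0.0,  1.0]])
tp = numpy.dot(H_true, fp)

H_est = Haffine_from_points(fp, tp)
print(numpy.abs(numpy.dot(H_est, fp) - tp).max())  # residual; expected to be close to 0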
Example 6: viterbi_decode
def viterbi_decode(score, transition_params):
    """Decode the highest scoring sequence of tags outside of TensorFlow.

    This should only be used at test time.

    Args:
      score: A [seq_len, num_tags] matrix of unary potentials.
      transition_params: A [num_tags, num_tags] matrix of binary potentials.

    Returns:
      viterbi: A [seq_len] list of integers containing the highest scoring tag
          indices.
      viterbi_score: A float containing the score for the Viterbi sequence.
    """
    trellis = np.zeros_like(score)
    backpointers = np.zeros_like(score, dtype=np.int32)
    trellis[0] = score[0]

    for t in range(1, score.shape[0]):
        v = np.expand_dims(trellis[t - 1], 1) + transition_params
        trellis[t] = score[t] + np.max(v, 0)
        backpointers[t] = np.argmax(v, 0)

    viterbi = [np.argmax(trellis[-1])]
    for bp in reversed(backpointers[1:]):
        viterbi.append(bp[viterbi[-1]])
    viterbi.reverse()

    viterbi_score = np.max(trellis[-1])
    return viterbi, viterbi_score
Author: AlbertXiebnu | Project: tensorflow | Lines: 30 | Source: crf.py
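For context (my illustration, not part of the TensorFlow source): np.max(v, 0) collapses the [num_tags, num_tags] matrix of candidate transitions column-wise, keeping the best previous tag for each current tag. A toy call, assuming numpy and the viterbi_decode definition above:

import numpy as np

# 3 time steps, 2 tags: unary score per (step, tag).
score = np.array([[4.0, 1.0],
                  [1.0, 3.0],
                  [2.0, 2.5]])
# transition_params[i, j] = score of moving from tag i to tag j.
transition_params = np.array([[ 0.5, -0.5],
                              [-1.0,  1.0]])

viterbi, viterbi_score = viterbi_decode(score, transition_params)
print(viterbi, viterbi_score)  # tag path [0, 1, 1] with total score 10.0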
Example 7: _crinfo_from_specific_data
def _crinfo_from_specific_data(self, data, margin):
    # Find the automatic crop; nonzero gives the indices
    nzi = np.nonzero(data)

    x1 = np.min(nzi[0]) - margin[0]
    x2 = np.max(nzi[0]) + margin[0] + 1
    y1 = np.min(nzi[1]) - margin[0]
    y2 = np.max(nzi[1]) + margin[0] + 1
    z1 = np.min(nzi[2]) - margin[0]
    z2 = np.max(nzi[2]) + margin[0] + 1

    # Clamp to the array bounds
    if x1 < 0:
        x1 = 0
    if y1 < 0:
        y1 = 0
    if z1 < 0:
        z1 = 0

    if x2 > data.shape[0]:
        x2 = data.shape[0] - 1
    if y2 > data.shape[1]:
        y2 = data.shape[1] - 1
    if z2 > data.shape[2]:
        z2 = data.shape[2] - 1

    # Crop
    crinfo = [[x1, x2], [y1, y2], [z1, z2]]
    #dataout = self._crop(data, crinfo)
    #dataout = data[x1:x2, y1:y2, z1:z2]
    return crinfo
Author: mjirik | Project: pycat | Lines: 31 | Source: pycat1.py
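The np.min/np.max over np.nonzero indices above is a common bounding-box idiom. A standalone 2-D sketch of the same idea (my illustration, independent of the pycat code):

import numpy as np

mask = np.zeros((8, 10), dtype=bool)
mask[2:5, 3:7] = True                               # a rectangular blob of foreground pixels

rows, cols = np.nonzero(mask)
r1, r2 = int(np.min(rows)), int(np.max(rows)) + 1   # half-open row bounds [r1, r2)
c1, c2 = int(np.min(cols)), int(np.max(cols)) + 1   # half-open column bounds

print((r1, r2), (c1, c2))                           # (2, 5) (3, 7)
print(mask[r1:r2, c1:c2].shape)                     # (3, 4): the tight crop around the blob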
Example 8: _get_initial_classes
def _get_initial_classes(self):
    images = map(lambda f: cv2.imread(path.join(self._root, f)), self._files)
    self._avg_pixels = np.array([], dtype=np.uint8)

    # extract parts from each image for all of our 6 categories
    for i in range(0, self._n_objects):
        rects = self._rects[:, i]

        # compute maximum rectangle
        rows = np.max(rects['f2'] - rects['f0'])
        cols = np.max(rects['f3'] - rects['f1'])

        # extract annotated rectangles
        im_rects = map(lambda (im, r): im[r[0]:r[2],r[1]:r[3],:], zip(images, rects))

        # resize all rectangles to the max size & average all the rectangles
        im_rects = np.array(map(lambda im: cv2.resize(im, (cols, rows)), im_rects), dtype=np.float)
        avgs = np.around(np.average(im_rects, axis = 0))

        # average the resulting rectangle to compute
        mn = np.around(np.array(cv2.mean(avgs), dtype='float'))[:-1].astype('uint8')

        if(self._avg_pixels.size == 0):
            self._avg_pixels = mn
        else:
            self._avg_pixels = np.vstack((self._avg_pixels, mn))
Author: fierval | Project: retina | Lines: 26 | Source: regions_detect_knn.py
Example 9: get_batch
def get_batch(self, model, batch_size):
    len_memory = len(self.memory)
    num_actions = 6
    encouraged_actions = np.zeros(num_actions, dtype=np.int)
    predicted_actions = np.zeros(num_actions, dtype=np.int)
    inputs = np.zeros((min(len_memory, batch_size), 4, 80, 74))
    targets = np.zeros((inputs.shape[0], num_actions))
    q_list = np.zeros(inputs.shape[0])

    for i, idx in enumerate(np.random.randint(0, len_memory, size=inputs.shape[0])):
        input_t, action_t, reward_t, input_tp1 = self.memory[idx][0]
        terminal = self.memory[idx][1]

        inputs[i] = input_t
        targets[i] = model.predict(input_t.reshape(1, 4, 80, 74))[0]

        q_next = np.max(model.predict(input_tp1.reshape(1, 4, 80, 74))[0])
        q_list[i] = np.max(targets[i])

        predicted_actions[np.argmax(targets[i])] += 1

        targets[i, action_t] = (1. - terminal) * self.discount * q_next + reward_t

        if reward_t > 0. or terminal:
            print "Action %d rewarded with %f (sample #%d)"%(action_t, targets[i, action_t], idx)

        encouraged_actions[np.argmax(targets[i])] += 1

    return inputs, targets, encouraged_actions, predicted_actions, np.average(q_list)
Author: blazer82 | Project: ai | Lines: 28 | Source: atari.py
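The np.max call above implements the usual Q-learning target: the next state's value is the maximum predicted Q over all actions, zeroed out at terminal states. A minimal standalone sketch of that one line (my numbers, not from the repository):

import numpy as np

q_next_all = np.array([0.2, 1.3, 0.7, -0.1, 0.4, 0.9])  # Q(s', a) for 6 actions
reward_t, terminal, discount = 1.0, 0.0, 0.99

q_next = np.max(q_next_all)                              # best achievable future value
target = (1.0 - terminal) * discount * q_next + reward_t
print(target)                                            # approximately 2.287 (1.0 + 0.99 * 1.3)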
Example 10: quantify
def quantify(self):
    """Quantify shape of the contours."""
    four_pi = 4. * np.pi
    for edge in self.edges:
        # Positions
        x = edge['x']
        y = edge['y']

        A, perimeter, x_center, y_center, distances = \
            self.get_shape_factor(x, y)

        # Set values.
        edge['area'] = A
        edge['perimeter'] = perimeter
        edge['x_center'] = x_center
        edge['y_center'] = y_center
        # Circle is 1. Rectangle is 0.78. Thread-like is close to zero.
        edge['shape_factor'] = four_pi * edge['area'] / \
            edge['perimeter'] ** 2.
        # We take the radius of the edge to be
        # the median of the distances from the center.
        radius = np.median(distances)
        edge['radius_deviation'] = np.std(distances - radius) / radius

        edge['x_min'] = np.min(x)
        edge['x_max'] = np.max(x)
        edge['y_min'] = np.min(y)
        edge['y_max'] = np.max(y)
Author: dwkim78 | Project: ASTRiDE | Lines: 29 | Source: edge.py
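The comment "Circle is 1." can be checked numerically: for a polygon that closely approximates a circle, 4πA/P² is essentially 1. A small self-contained check (my own, independent of ASTRiDE's get_shape_factor):

import numpy as np

# Sample a unit circle as a 400-gon and compute its shape factor 4*pi*A / P**2.
theta = np.linspace(0, 2 * np.pi, 400, endpoint=False)
x, y = np.cos(theta), np.sin(theta)

# Shoelace formula for the polygon area, plus the polygon perimeter.
A = 0.5 * np.abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))
P = np.sum(np.hypot(np.diff(np.r_[x, x[0]]), np.diff(np.r_[y, y[0]])))

print(4 * np.pi * A / P ** 2)   # about 0.99998: effectively 1 for a circle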
Example 11: get_num_samples
def get_num_samples(self, idx):
    """
    Number of samples needed to estimate the population variance within the tolerance limit
    Sample variance is normally distributed http://stats.stackexchange.com/a/105338/71884
    (see warning below).
    Var(s^2) \approx 1/n * (\mu_4 - \sigma^4)
    Adjust n as per the tolerance needed to estimate the sample variance
    warning: does not work for some distributions like bernoulli - https://stats.stackexchange.com/a/104911
    use the min_samples for explicitly controlling the number of samples to be drawn
    """
    if self.min_samples:
        return self.min_samples
    min_samples = 1000
    tol = 10.0
    required_precision = self.prec / tol
    if not self.scipy_dist:
        return min_samples
    args, kwargs = self.scipy_arg_fn(**self.get_dist_params(idx, wrap_tensor=False))
    try:
        fourth_moment = np.max(self.scipy_dist.moment(4, *args, **kwargs))
        var = np.max(self.scipy_dist.var(*args, **kwargs))
        min_computed_samples = int(math.ceil((fourth_moment - math.pow(var, 2)) / required_precision))
    except (AttributeError, ValueError):
        return min_samples
    return max(min_samples, min_computed_samples)
Author: Magica-Chen | Project: pyro | Lines: 25 | Source: dist_fixture.py
Example 12: gm_assign_to_cluster
def gm_assign_to_cluster(X, center_list, cov_list, p_k):
    """Assigns each sample to one of the Gaussian clusters given.

    Returns an array with numbers, 0 corresponding to the first cluster in the
    cluster list.
    """
    # Reused code from E-step, should be unified somehow:
    samples = X.shape[0]
    K = len(center_list)
    log_p_Xn_mat = np.zeros((samples, K))
    for k in range(K):
        log_p_Xn_mat[:, k] = logmulnormpdf(X, center_list[k], cov_list[k]) + np.log(p_k[k])
    pmax = np.max(log_p_Xn_mat, axis=1)
    log_p_Xn = pmax + np.log(np.sum(np.exp(log_p_Xn_mat.T - pmax), axis=0).T)
    logL = np.sum(log_p_Xn)

    log_p_nk = np.zeros((samples, K))
    for k in range(K):
        # log_p_nk[:,k] = logmulnormpdf(X, center_list[k], cov_list[k]) + np.log(p_k[k]) - log_p_Xn
        log_p_nk[:, k] = log_p_Xn_mat[:, k] - log_p_Xn

    print log_p_nk

    # Assign to cluster:
    maxP_k = np.c_[np.max(log_p_nk, axis=1)] == log_p_nk
    # print np.max(log_p_nk, axis=1)
    maxP_k = maxP_k * (np.array(range(K)) + 1)
    return np.sum(maxP_k, axis=1) - 1
Author: kslin | Project: CS181 | Lines: 27 | Source: gmm.py
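The pmax/log_p_Xn lines above are the log-sum-exp trick, with np.max supplying the stabilizing constant: subtracting the per-row maximum before exponentiating prevents the exponentials from vanishing, and the maximum is added back outside the log. A self-contained sketch of just that trick (my illustration, not from gmm.py):

import numpy as np

log_p = np.array([[-1000.0, -1001.0, -1003.0],
                  [   -2.0,    -1.0,    -0.5]])

# Naive: the exponentials in the first row underflow to 0, so the log gives -inf.
naive = np.log(np.sum(np.exp(log_p), axis=1))              # [-inf, ~0.104] plus a warning

# Stabilized: factor the row-wise maximum out of the log.
pmax = np.max(log_p, axis=1)
stable = pmax + np.log(np.sum(np.exp(log_p - pmax[:, None]), axis=1))
print(stable)                                              # [~-999.65, ~0.104]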
Example 13: makeThresholdMap
def makeThresholdMap(image, findCars, scales=[1.5], percentOfHeapmapToToss=.5):
    print("scales:", scales, ", type:", type(scales), "image.shape:", image.shape, ", dtype:", image.dtype, ", percentOfHeapmapToToss:", percentOfHeapmapToToss)
    boundingBoxList=[]
    boundingBoxWeights=[]
    for scale in scales:
        listOfBoundingBoxes, listOfWeights = findCars(image, scale)
        boundingBoxList+=listOfBoundingBoxes
        boundingBoxWeights+=listOfWeights
    if USEBOUNDINGBOXWEIGHTS:
        unNormalizedHeatMap=addWeightedHeat(image.shape, boundingBoxList, boundingBoxWeights)
    else:
        unNormalizedHeatMap=addHeat(image.shape, boundingBoxList)
    if USESTACKOFHEATMAPS:
        unNormalizedHeatMap,_=totalHeatmapStack(unNormalizedHeatMap)
    unNormalizedHeatMapCounts=np.unique(unNormalizedHeatMap, return_counts=True)
    if TESTING: print("makeThresholdMap-unNormalizedHeatMapCounts:", unNormalizedHeatMapCounts, ", len(unNormalizedHeatMapCounts):", len(unNormalizedHeatMapCounts), ", len(unNormalizedHeatMapCounts[0]):", len(unNormalizedHeatMapCounts[0]))
    unNormalizedHeatMapMidpoint=unNormalizedHeatMapCounts[0][int(round(len(unNormalizedHeatMapCounts[0])*percentOfHeapmapToToss))]
    thresholdMap=applyThreshold(unNormalizedHeatMap, unNormalizedHeatMapMidpoint)
    print("makeThresholdMap-max(thresholdMap):", np.max(thresholdMap), ", min(thresholdMap):", np.min(thresholdMap))
    if TESTING: print("makeThresholdMap-thresholdMap counts:", (np.unique(thresholdMap, return_counts=True)), ", len(thresholdMap):", len(thresholdMap), ", len(thresholdMap[0]):", len(thresholdMap[0]))
    normalizedMap=normalizeMap(thresholdMap)
    if TESTING: print("makeThresholdMap-normalizedMap counts:", (np.unique(normalizedMap, return_counts=True)), ", len(normalizedMap):", len(normalizedMap), ", len(normalizedMap[0]):", len(normalizedMap[0]))
    print("makeThresholdMap-max(normalizedMap):", np.max(normalizedMap), ", min(normalizedMap):", np.min(normalizedMap))
    return normalizedMap, boundingBoxList, unNormalizedHeatMap, boundingBoxWeights
Author: autohandle | Project: CarNdVehicleDetection- | Lines: 28 | Source: FindCars.py
Example 14: diff_dist_matrix
def diff_dist_matrix(self, res_range=None, scaled=False):
    if res_range != None: assert(len(res_range) == 2)

    dist_matrices = []
    for pdb in self.get_next_pdb():
        ca_xyz = pdb.get_ca_xyz_matrix()
        if res_range != None: ca_xyz = ca_xyz[res_range[0]-1:res_range[1], :]
        dist_matrix = calc_distance_matrix(ca_xyz)
        dist_matrices.append(dist_matrix)

    scaled_diff_dist_matrix = num.zeros(dist_matrices[0].shape, 'd')
    count = 0
    for i in range(len(dist_matrices)):
        for j in range(i+1, len(dist_matrices)):
            diff_dist_matrix = num.abs(dist_matrices[i] - dist_matrices[j])
            if scaled:
                scale = num.max(diff_dist_matrix)
                if scale == 0: continue
                diff_dist_matrix /= scale
            scaled_diff_dist_matrix += diff_dist_matrix
            count += 1
    #print >> sys.stderr, count
    scaled_diff_dist_matrix /= count
    if scaled:
        scaled_diff_dist_matrix /= num.max(scaled_diff_dist_matrix)

    return scaled_diff_dist_matrix
Author: chris-lee-mc | Project: MutInf | Lines: 26 | Source: PDBlite.py
Example 15: grid_xyz
def grid_xyz(xyz, n_x, n_y, **kwargs):
    """ Grid data as a list of X,Y,Z coords into a 2D array

    Parameters
    ----------
    xyz: np.array
        Numpy array of X,Y,Z values, with shape (n_points, 3)
    n_x: int
        Number of points in x direction (fastest varying!)
    n_y: int
        Number of points in y direction

    Returns
    -------
    gridded_data: np.array
        2D array of gridded data, with shape (n_x, n_y)

    Notes
    -----
    'x' is the inner dimension, i.e. image dimensions are (n_y, n_x). This is
    counterintuitive (to me at least) but in line with numpy definitions.
    """
    x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
    x_ax = np.linspace(np.min(x), np.max(x), n_x)
    y_ax = np.linspace(np.min(y), np.max(y), n_y)
    xg, yg = np.meshgrid(x_ax, y_ax)

    data = griddata(xyz[:, :2], z, (xg, yg), **kwargs)
    return data
Author: telegraphic | Project: lwa_ant | Lines: 30 | Source: grid_utils.py
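A hedged usage sketch (mine; it assumes the griddata used in grid_utils.py is scipy.interpolate.griddata, as the call signature suggests, and that the grid_xyz definition above is in scope): np.min/np.max bound the regular grid, and the scattered values are interpolated onto it.

import numpy as np
from scipy.interpolate import griddata  # assumed to match the module's own import

rng = np.random.default_rng(0)
xy = rng.uniform(0.0, 10.0, size=(500, 2))
z = np.sin(xy[:, 0]) * np.cos(xy[:, 1])
xyz = np.column_stack([xy, z])

gridded = grid_xyz(xyz, n_x=100, n_y=80, method='linear')
print(gridded.shape)  # (80, 100): n_y rows, n_x columns, since 'x' is the inner dimension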
Example 16: update_im_clim
def update_im_clim(self, val, im, slider):
    if np.mean(self.data[self.frame_slice]) < 0:
        self.im.set_clim(np.min(self.data[self.frame_slice]) * (self.sliders[-1]._slider.val / 100),
                         np.max(self.data[self.frame_slice]) * (self.sliders[-2]._slider.val / 100))
    else:
        self.im.set_clim(np.max(self.data[self.frame_slice]) * (self.sliders[-1]._slider.val / 100),
                         np.max(self.data[self.frame_slice]) * (self.sliders[-2]._slider.val / 100))
Author: sunpy | Project: sunkit-sst | Lines: 7 | Source: cube_explorer.py
Example 17: zplane
def zplane(self, title="", fontsize=18):
""" Display filter in the complex plane
Parameters
----------
"""
rb = self.z
ra = self.p
t = np.arange(0, 2 * np.pi + 0.1, 0.1)
plt.plot(np.cos(t), np.sin(t), "k")
plt.plot(np.real(ra), np.imag(ra), "x", color="r")
plt.plot(np.real(rb), np.imag(rb), "o", color="b")
M1 = -10000
M2 = -10000
if len(ra) > 0:
M1 = np.max([np.abs(np.real(ra)), np.abs(np.imag(ra))])
if len(rb) > 0:
M2 = np.max([np.abs(np.real(rb)), np.abs(np.imag(rb))])
M = 1.6 * max(1.2, M1, M2)
plt.axis([-M, M, -0.7 * M, 0.7 * M])
plt.title(title, fontsize=fontsize)
plt.show()
Author: tattoxcm | Project: pylayers | Lines: 25 | Source: DF.py
Example 18: max
def max(self, axis=None, out=None, keepdims=False):
    self._prepare_out(out=out)
    try:
        value = np.max(self.value, axis=axis, out=out, keepdims=keepdims)
    except:  # numpy < 1.7
        value = np.max(self.value, axis=axis, out=out)
    return self._new_view(value)
Author: kapiV | Project: astropy | Lines: 7 | Source: quantity.py
Example 19: draw_ohl_graph
def draw_ohl_graph(ax, data):
    # sort data along args.x_column and make it np.array again
    all_data = sorted(data, key=itemgetter(args.x_column))

    scores = list({e[0] for e in all_data})
    scores.sort()
    print("scores=", scores)

    np_all_data = np.array(all_data)
    all_x = np_all_data[:, args.x_column]
    all_y = np_all_data[:, args.y_column]
    x_max = np.max(all_x)
    x_min = np.min(all_x)
    y_max = np.max(all_y)
    y_min = np.min(all_y)
    # print("ymax=", y_max, "ymin=", y_min)
    y_width = y_max - y_min
    if y_width == 0:
        if y_max == 0:
            y_width = 1.0
        else:
            y_min = 0
            y_width = y_max

    ax.set_xlim(xmax = x_max / args.scale)
    ax.set_xlim(xmin = 0)
    ax.set_ylim(ymax = y_max + y_width * 0.05)
    ax.set_ylim(ymin = y_min - y_width * 0.05)

    for score in scores:
        # print("score=", score)
        data = list(filter(lambda e: e[0] == score, all_data))
        data = np.array(data)
        x = data[:, args.x_column]
        y = data[:, args.y_column]
        x = x / args.scale

        ans = args.ans
        if len(data) < 5:
            ax.plot(x, y, '.', label=str(score))
            continue
        elif len(data) * 0.1 < args.ans:
            ans = int(len(data) * 0.1)
            if ans < 4:
                ans = 4
        # print("ans=", ans)

        weight = np.ones(ans, dtype=np.float)/ans
        y_average = np.convolve(y, weight, 'valid')
        rim = ans - 1
        rim_l = rim // 2
        rim_r = rim - rim_l
        ax.plot(x[rim_l:-rim_r], y_average, label=str(score))

    ax.legend(loc=2)
    ax.set_xlabel(args.xlabel)
    ax.set_ylabel(args.ylabel)
    ax.grid(linewidth=1, linestyle="-", alpha=0.1)
Author: Itsukara | Project: async_deep_reinforce | Lines: 60 | Source: plot.py
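The smoothing step above is a plain moving average: np.convolve with a uniform weight vector in 'valid' mode, with the x-axis trimmed by the window's rim so the lengths match. A minimal standalone sketch (my own numbers, not from plot.py):

import numpy as np

y = np.array([1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0])
window = 4
weight = np.ones(window) / window            # uniform weights: the mean of each window
y_average = np.convolve(y, weight, 'valid')  # length len(y) - window + 1 = 4

rim = window - 1                             # samples lost at the edges
rim_l, rim_r = rim // 2, rim - rim // 2
x = np.arange(len(y))
print(y_average)                             # the window means: 3.75, 7.5, 15.0, 30.0
print(len(x[rim_l:-rim_r]) == len(y_average))  # True: trimmed x lines up with smoothed y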
Example 20: test_zernike_get_opd
def test_zernike_get_opd():
    zernike_optic = wfe.ZernikeWFE(coefficients=[NWAVES * WAVELENGTH,], radius=RADIUS)
    opd_map = zernike_optic.get_opd(WAVELENGTH, units='meters')
    assert np.max(opd_map) == NWAVES * WAVELENGTH

    opd_map_waves = zernike_optic.get_opd(WAVELENGTH, units='waves')
    assert np.max(opd_map_waves) == NWAVES
Author: mperrin | Project: poppy | Lines: 7 | Source: test_wfe.py
Note: the numpy.max examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright remains with the original authors, and redistribution or reuse should follow each project's license. Do not reproduce without permission.