This article collects typical usage examples of the numpy.fmax function in Python. If you have been wondering what exactly Python's fmax does, how to call it, or what real code using it looks like, the curated examples here may help.
The sections below show 20 code examples of the fmax function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
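Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what sets np.fmax apart from np.maximum: both compute the element-wise maximum, but where exactly one of the two operands is NaN, np.fmax returns the non-NaN value.

import numpy as np

a = np.array([1.0, np.nan, 3.0, np.nan])
b = np.array([2.0, 0.5, np.nan, np.nan])
print(np.fmax(a, b))     # [2.  0.5 3.  nan]  NaN only where both inputs are NaN
print(np.maximum(a, b))  # [2.  nan nan nan]  np.maximum propagates NaN instead
print(np.fmax(a, 0.0))   # [1.  0.  3.  0. ]  scalars broadcast; a common clamping idiom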
Example 1: run
def run(self):
    """ inputs[0] = ERROR AXIS, i.e. it stores all possible error values
        inputs[1] = DEL_ERROR AXIS, likewise
        inputs[2] = CONTROL_OUTPUT AXIS, likewise

        ERROR                 DEL_ERROR             CONTROL_OUTPUT           m_value for crisp e and delta_e values
        b[0][0] -ve Medium || b[1][0] -ve Medium || b[2][0] -ve Medium   ..  f[0] | f_d[0]
        b[0][1] -ve small  || b[1][1] -ve small  || b[2][1] -ve small    ..  f[1] | f_d[1]
        b[0][2] zero       || b[1][2] zero       || b[2][2] zero         ..  f[2] | f_d[2]
        b[0][3] +ve small  || b[1][3] +ve small  || b[2][3] +ve small    ..  f[3] | f_d[3]
        b[0][4] +ve Medium || b[1][4] +ve Medium || b[2][4] +ve Medium   ..  f[4] | f_d[4]

        f_mat is the fuzzy matrix
    """
    inputs = [np.arange(var[0], var[1]+1, 1) for var in self.var_ranges]  # step size = 1, third dimension of the b matrix. As of now, an assumption.
    b = []
    output = [0, 0, 0, 0, 0]
    out_final = []
    for i in range(3):
        b.append([membership_f(self.mu[i], inputs[i], a) for a in self.d_mu[i]])
    # To visualize the membership functions, call visualize_mf(b, inputs)
    f, f_d = error_fuzzify(inputs, b, self.error, self.delta_e)
    f_mat = fuzzy_matrix(f, f_d)
    output = rule_base(b, f_mat, output)
    print 'output : ', output
    aggregated = np.fmax(output[0], np.fmax(output[1], np.fmax(output[2], np.fmax(output[3], output[4]))))
    out_final = fuzz.defuzz(inputs[2], aggregated, 'centroid')
    out_activation = fuzz.interp_membership(inputs[2], aggregated, out_final)  # for plot
    visualize.visualize_mf(b, inputs, output, out_final, out_activation, aggregated)
    visualize.visualize_output(b, inputs, out_final, out_activation, aggregated)
    plt.show()
Developer ID: icyflame, Project: fuzzy-control, Lines of code: 34, Source file: fuzzy.py
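A side note, not part of the original project: the nested np.fmax calls used above to aggregate the five rule outputs can be written as a single reduction. A small self-contained sketch with made-up membership arrays:

from functools import reduce
import numpy as np

rule_outputs = [np.array([0.0, 0.2, 0.5]),   # hypothetical clipped membership functions
                np.array([0.1, 0.4, 0.3]),
                np.array([0.0, 0.1, 0.6])]
agg_nested = np.fmax(rule_outputs[0], np.fmax(rule_outputs[1], rule_outputs[2]))
agg_reduced = reduce(np.fmax, rule_outputs)     # same result, works for any number of rules
print(np.array_equal(agg_nested, agg_reduced))  # True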
Example 2: __init__
def __init__(self):
    # Generate universe functions
    self.distance = np.arange(0., 181., 1.)
    self.acceleration = np.arange(0., 0.1, 0.01)
    # Generate Distance membership functions
    self.near = fuzz.trapmf(self.distance, (-1., -1., 20., 65.))
    self.medium = fuzz.trapmf(self.distance, (35., 80., 120., 135.))
    self.far = fuzz.trapmf(self.distance, (105., 170., 180., 200.))
    # Generate Acceleration membership functions
    self.slow = fuzz.trimf(self.acceleration, (-1., 0., 0.05))
    self.normal = fuzz.trapmf(self.acceleration, (0.02, 0.035, 0.04, 0.07))
    self.fast = fuzz.trapmf(self.acceleration, (0.06, 0.085, 0.1, 0.2))
    # Fuzzy relation
    self.R1 = fuzz.relation_product(self.near, self.slow)
    self.R2 = fuzz.relation_product(self.medium, self.normal)
    self.R3 = fuzz.relation_product(self.far, self.fast)
    # Combine the fuzzy relations
    self.R_combined = np.fmax(self.R1, np.fmax(self.R2, self.R3))
    self.thetaOne = 0.0
    self.thetaTwo = 0.0
    self.InputDistanceAngle = 0.0
    self.OutputAcceleration = 0.0
    self.visualize = True
Developer ID: ansrivas, Project: gameailab, Lines of code: 32, Source file: fuzzyControl.py
Example 3: clip_to_window
def clip_to_window(boxlist, window):
    """Clip bounding boxes to a window.

    This op clips input bounding boxes (represented by bounding box
    corners) to a window, optionally filtering out boxes that do not
    overlap at all with the window.

    Args:
        boxlist: BoxList holding M_in boxes
        window: a numpy array of shape [4] representing the
            [y_min, x_min, y_max, x_max] window to which the op
            should clip boxes.

    Returns:
        a BoxList holding M_out boxes where M_out <= M_in
    """
    y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
    win_y_min = window[0]
    win_x_min = window[1]
    win_y_max = window[2]
    win_x_max = window[3]
    y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
    y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
    x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
    x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
    clipped = np_box_list.BoxList(
        np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped]))
    clipped = _copy_extra_fields(clipped, boxlist)
    areas = area(clipped)
    nonzero_area_indices = np.reshape(np.nonzero(np.greater(areas, 0.0)),
                                      [-1]).astype(np.int32)
    return gather(clipped, nonzero_area_indices)
Developer ID: ucsky, Project: ActivityNet, Lines of code: 32, Source file: np_box_list_ops.py
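A side note, not from the original project: each np.fmax(np.fmin(value, upper), lower) line above is an element-wise clamp of a coordinate into [lower, upper]; for NaN-free inputs it gives the same result as np.clip. A minimal check:

import numpy as np

y = np.array([-0.3, 0.2, 0.9, 1.4])
lower, upper = 0.0, 1.0
clamped = np.fmax(np.fmin(y, upper), lower)
print(clamped)                                            # [0.  0.2 0.9 1. ]
print(np.array_equal(clamped, np.clip(y, lower, upper)))  # True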
Example 4: TR
def TR(HIGH, LOW, CLOSE):
    CLOSELAG = LAG(CLOSE, 1)
    range1 = HIGH - LOW
    range2 = np.abs(HIGH - CLOSELAG)
    range3 = np.abs(LOW - CLOSELAG)
    out = np.fmax(np.fmax(range1, range2), range3)
    return out
Developer ID: Quantiacs, Project: HenryCarstens-101-Trading-Ideas, Lines of code: 7, Source file: Carstens_Illustration3.py
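A quick check with hypothetical toy prices (not part of the project): the three candidate ranges can also be combined in one reduction, and fmax conveniently ignores the NaN produced by the missing previous close on the first bar.

import numpy as np

high      = np.array([10.0, 11.0, 12.0])
low       = np.array([ 9.0, 10.5, 10.0])
close_lag = np.array([np.nan, 9.5, 11.5])   # no previous close for the first bar
tr = np.fmax.reduce([high - low,
                     np.abs(high - close_lag),
                     np.abs(low - close_lag)])
print(tr)   # [1.  1.5 2. ]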
Example 5: get_output
def get_output(self, current_temp, target_temp, rate_of_change_per_minute):
    temp_err_in = self.temp_error_category(current_temp, target_temp, rate_of_change_per_minute)
    print "Temp Error", temp_err_in
    # What is the temperature doing?
    mf_temp_too_cold = temp_err_in['too_cold']
    mf_temp_cold = temp_err_in['cold']
    mf_temp_optimal = temp_err_in['optimal']
    mf_temp_hot = temp_err_in['hot']
    mf_temp_too_hot = temp_err_in['too_hot']
    mf_cooling_quickly = temp_err_in['cooling_quickly']
    # Then:
    when_too_cold = np.fmin(mf_temp_too_cold, self.ho_high)
    when_cold = np.fmin(mf_temp_cold, self.ho_low)
    when_optimal = np.fmin(mf_temp_optimal, self.co_off)
    when_hot = np.fmin(mf_temp_hot, self.co_low)
    when_too_hot = np.fmin(mf_temp_too_hot, self.co_high)
    # If the temperature is temp_hot AND cooling_quickly, SET chiller off
    when_hot_and_cooling_quickly = np.fmin(np.fmin(mf_temp_hot, mf_cooling_quickly), self.co_off)
    aggregate_membership = np.fmax(when_hot_and_cooling_quickly, np.fmax(when_too_cold, np.fmax(when_cold, np.fmax(when_optimal, np.fmax(when_hot, when_too_hot)))))
    result = fuzz.defuzz(self.chill_out, aggregate_membership, 'centroid')
    return result
Developer ID: chrisdpa, Project: brumulus, Lines of code: 27, Source file: ControlTemperature.py
Example 6: kramer_unsoldt_opacity
def kramer_unsoldt_opacity(dens, Z, A, Zbar, Te, lmbda):
    """
    Computes the Kramer-Unsoldt opacity [Zel’dovich & Raizer 1967, p. 27]

    Parameters:
    -----------
    dens:  [ndarray] density (g.cm⁻³)
    Z:     [ndarray] atomic number
    A:     [ndarray] atomic mass
    Zbar:  [ndarray] ionization
    Te:    [ndarray] electron temperature (eV)
    lmbda: [ndarray] wavelength (nm)

    Returns:
    --------
    out: [ndarray] of the same shape as the input containing the opacity [cm⁻¹]
    """
    # check sign here
    Ibar = 10.4*Z**(4./3) * (Zbar/Z)**2 / (1 - Zbar/Z)**(2./3)
    Ibar = np.fmax(Ibar, 6.0)
    y = 1240./(lmbda * Te)
    y1 = Ibar / Te
    Ni = dens * cst.N_A / A
    #print Ibar, y, y1, Ni
    return np.fmax(7.13e-16 * Ni * (Zbar + 1)**2 * np.exp(y - y1) / (Te**2 * y**3), 1e-16)
Developer ID: fimay, Project: hedp, Lines of code: 25, Source file: krammer.py
Example 7: proj_weights
def proj_weights(W, correlation=False):
    # From Chen, Y. & Ye, X. (2011). Projection onto a simplex.
    if correlation:
        k, n = W.shape
        W_proj = empty((k, n))
        for col_idx in range(n):
            w = sort(W[:, col_idx])
            idx = k - 2
            while(True):
                t_idx = (sum(w[idx + 1:]) - 1) / (k - idx - 1)
                if t_idx >= w[idx]:
                    W_proj[:, col_idx] = fmax(W[:, col_idx] - t_idx, 0)
                    break
                else:
                    idx = idx - 1
                    if idx < 0:
                        t_idx = (sum(w) - 1) / k
                        W_proj[:, col_idx] = fmax(W[:, col_idx] - t_idx, 0)
                        break
        return W_proj
    else:
        return fmax(W, 0)
Developer ID: clarafj, Project: covariance-dictionary, Lines of code: 27, Source file: covdict.py
Example 8: pitchComparison
def pitchComparison(oriF0Array, singerF0Array, bestCorrespondingOriIndexList):
    lenSinger = len(singerF0Array)
    # Turn the F0 arrays into log-F0 arrays.
    # Take a max with 0 here so that log2 of non-positive values does not leave NaN behind.
    oriLogF0Array = np.fmax(0, np.log2(oriF0Array))
    singerLogF0Array = np.fmax(0, np.log2(singerF0Array))
    subrating = [None for i in range(lenSinger)]
    rating = 0.0
    numRated = 0
    for i in range(lenSinger):
        if bestCorrespondingOriIndexList[i] != None and singerLogF0Array[i] != 0.0:
            # Check whether they're off by more than half an octave.
            # If so, move the note up/down so that it stays within the same octave.
            currSingerLogF0 = singerLogF0Array[i]
            currOriLogF0 = oriLogF0Array[bestCorrespondingOriIndexList[i]]
            currSingerLogF0 = currSingerLogF0 + math.floor(currOriLogF0 - currSingerLogF0 + 0.5)
            # Triangle filter. Notes that are perfectly on pitch get score 1;
            # notes that are half an octave off get score 0. Everything in between is scored linearly.
            subrating[i] = (1.0 - abs(currOriLogF0 - currSingerLogF0) / 0.5) * 100  # *100 to put everything on a 100-point scale
            rating += subrating[i]
            numRated += 1
        else:
            continue  # The subrating stays None for notes that have no correspondence.
    # Divide by the number of scores to get the average score.
    rating /= numRated
    return rating, subrating
Developer ID: jerryli27, Project: songJudging, Lines of code: 34, Source file: pitchComparison.py
Example 9: _cmeans0_kth
def _cmeans0_kth(data, u_old, c, m, *para):
    """
    Single step in generic fuzzy c-means clustering algorithm.

    data2 is for intersect counting
    """
    k = para[0]
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(np.float64).eps)
    um = u_old ** m
    # keep only membership rates >= the k-th largest value of each cluster in um_c
    filter_k = lambda row: row < sorted(row, reverse=True)[k-1]
    fail_indices = np.apply_along_axis(filter_k, axis=1, arr=u_old)
    um[fail_indices] = 0
    # Calculate cluster centers
    # data1: 2861,2; um: 30,2861
    data = data.T
    cntr = um.dot(data) / (np.ones((data.shape[1], 1)).dot(np.atleast_2d(um.sum(axis=1))).T)
    d = cdistance.get_center_distance(data, cntr)
    d = np.fmax(d, np.finfo(np.float64).eps)
    jm = (um * d ** 2).sum()
    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
    return cntr, u, jm, d
Developer ID: amy22292003, Project: Liu, Lines of code: 30, Source file: _cmeans_location.py
Example 10: run_simulation
def run_simulation(self, dt, timesteps, c, h, init_cond=np.zeros((2, 75, 75))):
    r_E = np.zeros((timesteps, self.N_pairs, self.N_pairs))
    r_I = np.copy(r_E)
    # add initial conditions:
    r_E[0,:,:] = init_cond[0]
    r_I[0,:,:] = init_cond[1]
    I_E = np.zeros((timesteps, self.N_pairs, self.N_pairs))
    I_I = np.copy(I_E)
    # rSS_E = np.copy(I_E)
    # rSS_I = np.copy(I_I)
    for t in range(1, timesteps):
        # Input drive from external input and network
        I_E[t,:,:] = c*h + np.sum( np.sum( self.W_EE * r_E[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T - np.sum( np.sum( self.W_EI * r_I[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T
        I_I[t,:,:] = c*h + np.sum( np.sum( self.W_IE * r_E[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T - np.sum( np.sum( self.W_II * r_I[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T
        # steady state firing rates - power law I/O
        rSS_E = np.multiply(self.k, np.power(np.fmax(0, I_E[t,:,:]), self.n_E))
        rSS_I = np.multiply(self.k, np.power(np.fmax(0, I_I[t,:,:]), self.n_I))
        # set negative steady state rates to zero
        rSS_E[rSS_E < 0] = 0
        rSS_I[rSS_I < 0] = 0
        # instantaneous firing rates approaching steady state
        r_E[t,:,:] = r_E[t-1,:,:] + dt*(np.divide(-r_E[t-1,:,:] + rSS_E, self.tau_E))
        r_I[t,:,:] = r_I[t-1,:,:] + dt*(np.divide(-r_I[t-1,:,:] + rSS_I, self.tau_I))
    return [r_E, r_I, I_E, I_I]
Developer ID: benselby, Project: v1_modelling, Lines of code: 31, Source file: ssn.py
Example 11: _cmeans0_2distw
def _cmeans0_2distw(distance1, u_old, c, m, *para):
    # the kth for each cluster
    k = para[0]
    distance2 = para[1]
    w = para[2]
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(np.float64).eps)
    um = u_old ** m
    # keep only membership rates >= the k-th largest value of each cluster in um_c
    filter_k = lambda row: row >= sorted(row, reverse=True)[k-1]
    large_k_indices = np.apply_along_axis(filter_k, axis=1, arr=um)
    # Calculate the average distance from entity to cluster
    d1 = large_k_indices.dot(distance1) / np.ones((distance1.shape[1], 1)).dot(np.atleast_2d(large_k_indices.sum(axis=1))).T
    d1 = d1 / np.std(d1)
    #print("d1:", d1[0:3, 0:5], " max:", np.amax(d1), " min:", np.amin(d1))
    # Get the distance from data2
    d2 = large_k_indices.dot(distance2) / np.ones((distance2.shape[1], 1)).dot(np.atleast_2d(large_k_indices.sum(axis=1))).T
    d2 = d2 / np.std(d2)
    #print("d2:", d2[0:3, 0:5], " max:", np.amax(d2), " min:", np.amin(d2))
    d = w * d1 + (1 - w) * d2
    #print("d:", d[0:3, 0:5], " max:", np.amax(d), " min:", np.amin(d))
    d = np.fmax(d, np.finfo(np.float64).eps)
    jm = (um * d ** 2).sum()
    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
    return u, jm, d
Developer ID: amy22292003, Project: Liu, Lines of code: 35, Source file: _cmeans_nocenter.py
Example 12: centroid
def centroid(x, mfx):
    """
    Defuzzification using centroid (`center of gravity`) method.

    Parameters
    ----------
    x : 1d array, length M
        Independent variable
    mfx : 1d array, length M
        Fuzzy membership function

    Returns
    -------
    u : 1d array, length M
        Defuzzified result

    See also
    --------
    skfuzzy.defuzzify.defuzz, skfuzzy.defuzzify.dcentroid
    """
    '''
    As we suppose linearity between each pair of points of x, we can calculate
    the exact area of the figure (a triangle or a rectangle).
    '''
    sum_moment_area = 0.0
    sum_area = 0.0

    # If the membership function is a singleton fuzzy set:
    if len(x) == 1:
        return x[0]*mfx[0] / np.fmax(mfx[0], np.finfo(float).eps).astype(float)

    # else return the sum of moment*area/sum of area
    for i in range(1, len(x)):
        x1 = x[i - 1]
        x2 = x[i]
        y1 = mfx[i - 1]
        y2 = mfx[i]

        # if y1 == y2 == 0.0 or x1 == x2: --> rectangle of zero height or width
        if not(y1 == y2 == 0.0 or x1 == x2):
            if y1 == y2:  # rectangle
                moment = 0.5 * (x1 + x2)
                area = (x2 - x1) * y1
            elif y1 == 0.0 and y2 != 0.0:  # triangle, height y2
                moment = 2.0 / 3.0 * (x2 - x1) + x1
                area = 0.5 * (x2 - x1) * y2
            elif y2 == 0.0 and y1 != 0.0:  # triangle, height y1
                moment = 1.0 / 3.0 * (x2 - x1) + x1
                area = 0.5 * (x2 - x1) * y1
            else:
                moment = (2.0 / 3.0 * (x2 - x1) * (y2 + 0.5*y1)) / (y1 + y2) + x1
                area = 0.5 * (x2 - x1) * (y1 + y2)

            sum_moment_area += moment * area
            sum_area += area

    return sum_moment_area / np.fmax(sum_area,
                                     np.finfo(float).eps).astype(float)
Developer ID: boton-rojo-ml, Project: scikit-fuzzy, Lines of code: 60, Source file: defuzz.py
Example 13: _cmeans_predict0
def _cmeans_predict0(test_data, cntr, u_old, c, m):
    """
    Single step in fuzzy c-means prediction algorithm. Clustering algorithm
    modified from Ross, Fuzzy Logic w/Engineering Applications (2010)
    p.352-353, equations 10.28 - 10.35, but this method to generate fuzzy
    predictions was independently derived by Josh Warner.

    Parameters inherited from cmeans()

    Very similar to initial clustering, except `cntr` is not updated, thus
    the new test data are forced into known (trained) clusters.
    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)
    um = u_old ** m
    test_data = test_data.T

    # For prediction, we do not recalculate cluster centers. The test_data is
    # forced to conform to the prior clustering.
    d = _distance(test_data, cntr)
    d = np.fmax(d, np.finfo(float).eps)
    jm = (um * d ** 2).sum()
    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
    return u, jm, d
Developer ID: timesofbadri, Project: scikit-fuzzy, Lines of code: 32, Source file: _cmeans.py
Example 14: _cmeans0
def _cmeans0(data, u_old, c, m, metric):
    """
    Single step in generic fuzzy c-means clustering algorithm.

    Modified from Ross, Fuzzy Logic w/Engineering Applications (2010),
    pages 352-353, equations 10.28 - 10.35.

    Parameters inherited from cmeans()
    """
    # Normalizing, then eliminating any potential zero values.
    u_old = normalize_columns(u_old)
    u_old = np.fmax(u_old, np.finfo(np.float64).eps)
    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / np.atleast_2d(um.sum(axis=1)).T
    d = _distance(data, cntr, metric)
    d = np.fmax(d, np.finfo(np.float64).eps)
    jm = (um * d ** 2).sum()
    u = normalize_power_columns(d, - 2. / (m - 1))
    return cntr, u, jm, d
Developer ID: JDWarner, Project: scikit-fuzzy, Lines of code: 27, Source file: _cmeans.py
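A recurring pattern in the c-means and defuzzification examples above and below (9, 11-14, 16 and 20) is np.fmax(arr, np.finfo(float).eps): every entry of a membership or distance matrix is floored at machine epsilon before it is used as a divisor or raised to a negative power, so the later steps never divide by zero. A tiny illustration, not tied to any particular project:

import numpy as np

d = np.array([0.0, 2.0, 0.5])
d = np.fmax(d, np.finfo(np.float64).eps)  # floor at ~2.2e-16 instead of 0
print(d ** (-2.0))                        # large but finite values instead of inf and a RuntimeWarning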
Example 15: parker
def parker(rbar, C, vbar_guess):
    tol = 1.0e-10
    # handle bifurcation point at r = r_c
    if rbar > 1.0:
        vbar = np.fmax(vbar_guess, 1.0 + tol)
    else:
        # also can't have vbar = 0 (log(0) = bad)
        vbar = np.fmax(vbar_guess, tol)
    print vbar
    it = 0
    while parker_err(vbar, rbar, C) > tol:
        dvbar = - parker_f(vbar, rbar, C) / \
                parker_dfdvbar(vbar)
        # Limit changes of vbar to be no larger
        # than 20% per iteration step.
        # This turns out to be necessary to
        # keep the solution from wandering off into
        # bad territory (like vbar < 0).
        fac1 = np.fmin(0.2 / np.abs(dvbar / vbar), 1.0)
        vbar = vbar + fac1 * dvbar
        # debug output
        # print it, fac1, parker_f(vbar,rbar,C), parker_err(vbar,rbar,C), vbar
        it = it + 1
    return vbar
Developer ID: pointofnoreturn, Project: astro_work, Lines of code: 29, Source file: parker.py
Example 16: _cmeans0
def _cmeans0(data, u_old, c, m):
    """
    Single step in generic fuzzy c-means clustering algorithm. Modified from
    Ross, Fuzzy Logic w/Engineering Applications (2010) p.352-353, equations
    10.28 - 10.35.

    Parameters inherited from cmeans()

    This algorithm is a ripe target for Cython.
    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)
    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / (np.ones((data.shape[1],
                                    1)).dot(np.atleast_2d(um.sum(axis=1))).T)
    d = _distance(data, cntr)
    d = np.fmax(d, np.finfo(float).eps)
    jm = (um * d ** 2).sum()
    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
    return cntr, u, jm, d
Developer ID: timesofbadri, Project: scikit-fuzzy, Lines of code: 31, Source file: _cmeans.py
Example 17: _get_ln_y_ref
def _get_ln_y_ref(self, rup, dists, C):
    """
    Get an intensity on a reference soil.

    Implements eq. 13a.
    """
    # reverse faulting flag
    Frv = 1. if 30 <= rup.rake <= 150 else 0.
    # normal faulting flag
    Fnm = 1. if -120 <= rup.rake <= -60 else 0.
    # hanging wall flag
    Fhw = np.zeros_like(dists.rx)
    idx = np.nonzero(dists.rx >= 0.)
    Fhw[idx] = 1.

    # a part in eq. 11
    mag_test1 = np.cosh(2. * max(rup.mag - 4.5, 0))

    # centered DPP
    centered_dpp = self._get_centered_cdpp(dists)
    # centered_ztor
    centered_ztor = self._get_centered_ztor(rup, Frv)

    ln_y_ref = (
        # first part of eq. 11
        C['c1']
        + (C['c1a'] + C['c1c'] / mag_test1) * Frv
        + (C['c1b'] + C['c1d'] / mag_test1) * Fnm
        + (C['c7'] + C['c7b'] / mag_test1) * centered_ztor
        + (C['c11'] + C['c11b'] / mag_test1) *
        np.cos(math.radians(rup.dip)) ** 2
        # second part
        + C['c2'] * (rup.mag - 6)
        + ((C['c2'] - C['c3']) / C['cn'])
        * np.log(1 + np.exp(C['cn'] * (C['cm'] - rup.mag)))
        # third part
        + C['c4']
        * np.log(dists.rrup + C['c5']
                 * np.cosh(C['c6'] * max(rup.mag - C['chm'], 0)))
        + (C['c4a'] - C['c4'])
        * np.log(np.sqrt(dists.rrup ** 2 + C['crb'] ** 2))
        # fourth part
        + (C['cg1'] + C['cg2'] / (np.cosh(max(rup.mag - C['cg3'], 0))))
        * dists.rrup
        # fifth part
        + C['c8'] * np.fmax(1 - (np.fmax(dists.rrup - 40,
                                         np.zeros_like(dists)) / 30.),
                            np.zeros_like(dists))[0]
        * min(max(rup.mag - 5.5, 0) / 0.8, 1.0)
        * np.exp(-1 * C['c8a'] * (rup.mag - C['c8b']) ** 2) * centered_dpp
        # sixth part
        + C['c9'] * Fhw * np.cos(math.radians(rup.dip)) *
        (C['c9a'] + (1 - C['c9a']) * np.tanh(dists.rx / C['c9b']))
        * (1 - np.sqrt(dists.rjb ** 2 + rup.ztor ** 2)
           / (dists.rrup + 1.0))
    )
    return ln_y_ref
Developer ID: STRES-T, Project: oq-hazardlib, Lines of code: 60, Source file: chiou_youngs_2014.py
Example 18: test_fmax
def test_fmax(self):
    from numpy import fmax, array
    nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
    a = array((complex(ninf, 10), complex(10, ninf),
               complex( inf, 10), complex(10, inf),
               5+5j, 5-5j, -5+5j, -5-5j,
               0+5j, 0-5j, 5, -5,
               complex(nan, 0), complex(0, nan)), dtype=complex)
    b = [ninf]*a.size
    res = [a[0 ], a[1 ], a[2 ], a[3 ],
           a[4 ], a[5 ], a[6 ], a[7 ],
           a[8 ], a[9 ], a[10], a[11],
           b[12], b[13]]
    assert (fmax(a, b) == res).all()
    b = [inf]*a.size
    res = [b[0 ], b[1 ], a[2 ], b[3 ],
           b[4 ], b[5 ], b[6 ], b[7 ],
           b[8 ], b[9 ], b[10], b[11],
           b[12], b[13]]
    assert (fmax(a, b) == res).all()
    b = [0]*a.size
    res = [b[0 ], a[1 ], a[2 ], a[3 ],
           a[4 ], a[5 ], b[6 ], b[7 ],
           a[8 ], b[9 ], a[10], b[11],
           b[12], b[13]]
    assert (fmax(a, b) == res).all()
Developer ID: abhinavthomas, Project: pypy, Lines of code: 26, Source file: test_complex.py
Example 19: test_half_ufuncs
def test_half_ufuncs(self):
    """Test the various ufuncs"""
    a = np.array([0, 1, 2, 4, 2], dtype=float16)
    b = np.array([-2, 5, 1, 4, 3], dtype=float16)
    c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

    assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
    assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
    assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
    assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
    assert_equal(np.equal(a, b), [False, False, False, True, False])
    assert_equal(np.not_equal(a, b), [True, True, True, False, True])
    assert_equal(np.less(a, b), [False, True, False, False, True])
    assert_equal(np.less_equal(a, b), [False, True, False, True, True])
    assert_equal(np.greater(a, b), [True, False, True, False, False])
    assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
    assert_equal(np.logical_and(a, b), [False, True, True, True, True])
    assert_equal(np.logical_or(a, b), [True, True, True, True, True])
    assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
    assert_equal(np.logical_not(a), [True, False, False, False, False])
    assert_equal(np.isnan(c), [False, False, False, True, False])
    assert_equal(np.isinf(c), [False, False, True, False, False])
    assert_equal(np.isfinite(c), [True, True, False, False, True])
    assert_equal(np.signbit(b), [True, False, False, False, False])
    assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
    assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
    x = np.maximum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [0, 5, 1, 0, 6])
    assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
    x = np.minimum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [-2, -1, -np.inf, 0, 3])
    assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
    assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
    assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
    assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
    assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
    assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
    assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
    assert_equal(np.square(b), [4, 25, 1, 16, 9])
    assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
    assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
    assert_equal(np.conjugate(b), b)
    assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
    assert_equal(np.negative(b), [2, -5, -1, -4, -3])
    assert_equal(np.positive(b), b)
    assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
    assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
    assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
    assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Developer ID: AlerzDev, Project: Brazo-Proyecto-Final, Lines of code: 59, Source file: test_half.py
Example 20: _cmeans0_kth
def _cmeans0_kth(data1, similarity2, u_old, c, w, m, *para):
    """
    Single step in generic fuzzy c-means clustering algorithm.

    data2 is for intersect counting
    """
    k = para[0]
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(np.float64).eps)
    um = u_old ** m
    # calculating u_c
    u_c = u_old / u_old.sum(axis=1)[:, None]
    # keep only membership rates >= the k-th largest value of each cluster in um_c
    filter_k = lambda row: row < sorted(row, reverse=True)[k-1]
    fail_indices = np.apply_along_axis(filter_k, axis=1, arr=u_c)
    um[fail_indices] = 0
    # Calculate cluster centers
    # data1: 2861,2; um: 30,2861
    d1 = 0
    if data1 is not None:
        data1 = data1.T
        cntr1 = um.dot(data1) / (np.ones((data1.shape[1],
                                          1)).dot(np.atleast_2d(um.sum(axis=1))).T)
        d1 = _distance(data1, cntr1)  # euclidean distance
        #print("b4-- d1:", d1[0:5,0],d1[0:5,1])
        #print(" min d1:", np.min(d1), ";max d1:", np.max(d1), " std 1:", np.std(d1))
        d1 = d1 / np.std(d1)
    # data2
    d2 = 0
    if similarity2 is not None:
        #print("similarity2", similarity2[0:5,0])
        #d2 = um_c.dot(similarity2)
        d2 = um.dot(1 - similarity2) / np.ones((similarity2.shape[1], 1)).dot(np.atleast_2d(um.sum(axis=1))).T
        #print("b4--d2:", d2[0:5,0],d2[0:5,1])
        #print(" d2.shape", d2.shape, ",std2:", np.std(d2))
        d2 = d2 / np.std(d2)
    # combined distance and similarity of the two data sources
    d = w * d1 + (1 - w) * d2
    #print("-- d1:", d1[0:6,0],d1[0:6,1], " \n ,d2:", d2[0:6,0],d2[0:6,1],d2[0:6,2], " \n ,d:", d[0:5,0],d[0:5,1])
    #print(" min d1:", np.min(d1), ";max d1:", np.max(d1), ",max d2:", np.max(d2))
    #print(" std 1:", np.std(d1), " ,2:", np.std(d2), " ,d:", np.std(d))
    d = np.fmax(d, np.finfo(np.float64).eps)
    jm = (um * d ** 2).sum()
    u = d ** (- 2. / (m - 1))
    #print("end u.sum:", u.sum(axis=0), u.sum(axis=1), "\nu[:,0]:", u[:,0])
    #print("/:", np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0))))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
    #print("end u.sum:", u.sum(axis=0), u.sum(axis=1), "\nu:", u[:,0])
    return cntr1, u, jm, d1, d2, d
Developer ID: amy22292003, Project: Liu, Lines of code: 59, Source file: _cmeans_intersect.py
Note: The numpy.fmax examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to the corresponding project's License before distributing or reusing the code, and do not reproduce this page without permission.