This article collects and summarizes typical usage examples of the Python function scipy.repeat. If you have been wondering what scipy.repeat does, how to call it, or what real-world uses look like, the curated code examples below should help.
Listed below are 20 code examples of the repeat function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
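Before the examples, a quick orientation (a minimal sketch added here, not part of the original collection): scipy.repeat is the NumPy function numpy.repeat re-exported under the scipy namespace, so the two names behave identically; recent SciPy releases deprecate the scipy-level alias, which makes numpy.repeat the safer spelling today. The call takes an array, a repeat count, and an optional axis; the array values below are purely illustrative.

import numpy as np   # scipy.repeat is the same function as numpy.repeat

a = np.array([[1, 2],
              [3, 4]])

print(np.repeat(a, 3))          # no axis: flatten first -> [1 1 1 2 2 2 3 3 3 4 4 4]
print(np.repeat(a, 2, axis=0))  # repeat each row twice   -> shape (4, 2)
print(np.repeat(a, 2, axis=1))  # repeat each column twice -> shape (2, 4)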
Example 1: plot_disc_policy
def plot_disc_policy():
    #First compute policy function...==========================================
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)

    m = 20
    v = 200
    f = discretelognorm(w,m,v)

    VEprime = sp.zeros((N,1))
    VUprime = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9

    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1

        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0)

        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)

        delta = sp.linalg.norm(psiprime - psi)

    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    print w[250],wr[250]

    #Then plot=================================================================
    plt.plot(w,psiprime[250,:])
    plt.ylim([-.5,1.5])
    plt.xlabel(r'$w\prime$')
    plt.yticks([0,1])
    plt.savefig('disc_policy.pdf')
Developer: byuimpactrevisions | Project: numerical_computing | Lines of code: 60 | Source file: job_plots.py
Example 2: _init_arrays
def _init_arrays(self):
    self.D = sp.repeat(self.u_gnd_l.D, self.N + 2)
    self.q = sp.repeat(self.u_gnd_l.q, self.N + 2)

    #Make indicies correspond to the thesis
    #Deliberately add a None to the end to catch [-1] indexing!
    self.K = sp.empty((self.N + 3), dtype=sp.ndarray) #Elements 1..N
    self.C = sp.empty((self.N + 2), dtype=sp.ndarray) #Elements 1..N-1
    self.A = sp.empty((self.N + 3), dtype=sp.ndarray) #Elements 1..N

    self.r = sp.empty((self.N + 3), dtype=sp.ndarray) #Elements 0..N
    self.l = sp.empty((self.N + 3), dtype=sp.ndarray)

    self.eta = sp.zeros((self.N + 1), dtype=self.typ)

    if (self.D.ndim != 1) or (self.q.ndim != 1):
        raise NameError('D and q must be 1-dimensional!')

    #Don't do anything pointless
    self.D[0] = self.u_gnd_l.D
    self.D[self.N + 1] = self.u_gnd_l.D

    self.l[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
    self.r[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)
    self.K[0] = sp.zeros((self.D[0], self.D[0]), dtype=self.typ, order=self.odr)

    self.C[0] = sp.empty((self.q[0], self.q[1], self.D[0], self.D[1]), dtype=self.typ, order=self.odr)
    self.A[0] = sp.empty((self.q[0], self.D[0], self.D[0]), dtype=self.typ, order=self.odr)

    for n in xrange(1, self.N + 2):
        self.K[n] = sp.zeros((self.D[n-1], self.D[n-1]), dtype=self.typ, order=self.odr)
        self.r[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
        self.l[n] = sp.zeros((self.D[n], self.D[n]), dtype=self.typ, order=self.odr)
        self.A[n] = sp.empty((self.q[n], self.D[n-1], self.D[n]), dtype=self.typ, order=self.odr)
        if n < self.N + 1:
            self.C[n] = sp.empty((self.q[n], self.q[n+1], self.D[n-1], self.D[n+1]), dtype=self.typ, order=self.odr)
Developer: bcriger | Project: evoMPS | Lines of code: 35 | Source file: tdvp_sandwich.py
Example 3: Problem6Real
def Problem6Real():
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)

    m = 20
    v = 200
    f = discretelognorm(w,m,v)

    VEprime = sp.zeros((N,1))
    VUprime = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9

    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1

        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0)

        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)

        delta = sp.linalg.norm(psiprime - psi)
        #print(delta)

    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    plt.plot(w,wr)
    plt.show()
    return wr
Developer: davidreber | Project: Labs | Lines of code: 55 | Source file: solutionstester.py
Example 4: __init__
def __init__(self, N, uni_ground):
    self.odr = 'C'
    self.typ = sp.complex128

    self.zero_tol = sp.finfo(self.typ).resolution
    """Tolerance for detecting zeros. This is used when (pseudo-) inverting
       l and r."""

    self._sanity_checks = False

    self.N = N
    """The number of sites. Do not change after initializing."""

    self.N_centre = N / 2
    """The 'centre' site. This affects the gauge-fixing and canonical
       form. It is the site between the left-gauge parts and the
       right-gauge parts."""

    self.D = sp.repeat(uni_ground.D, self.N + 2)
    """Vector containing the bond-dimensions. A[n] is a
       q[n] x D[n - 1] x D[n] tensor."""

    self.q = sp.repeat(uni_ground.q, self.N + 2)
    """Vector containing the site Hilbert space dimensions. A[n] is a
       q[n] x D[n - 1] x D[n] tensor."""

    self.S_hc = sp.repeat(sp.NaN, self.N + 1)
    """Vector containing the von Neumann entropy S_hc[n] corresponding to
       splitting the state between sites n and n + 1. Available only
       after performing update(restore_CF=True) or restore_CF()."""

    self.uni_l = copy.deepcopy(uni_ground)
    self.uni_l.symm_gauge = False
    self.uni_l.sanity_checks = self.sanity_checks
    self.uni_l.update()

    self.uni_r = copy.deepcopy(uni_ground)
    self.uni_r.sanity_checks = self.sanity_checks
    self.uni_r.symm_gauge = False
    self.uni_r.update()

    self.grown_left = 0
    self.grown_right = 0
    self.shrunk_left = 0
    self.shrunk_right = 0

    self._init_arrays()

    for n in xrange(self.N + 2):
        self.A[n][:] = self.uni_l.A

    self.r[self.N] = self.uni_r.r
    self.r[self.N + 1] = self.r[self.N]
    self.l[0] = self.uni_l.l
Developer: fgrosshans | Project: evoMPS | Lines of code: 54 | Source file: mps_sandwich.py
Example 5: adj_loglikelihood_gradient
def adj_loglikelihood_gradient(xVec, lenSampleRibo, lenSampleRna, X, y, mu, sign):
    disp = sp.hstack([sp.repeat(xVec[0], lenSampleRibo), sp.repeat(xVec[1], lenSampleRna)])
    Gradient = sp.zeros_like(xVec)
    for i in range(len(xVec)):
        f1 = (digamma((1 / xVec[i])) - digamma( (1 / xVec[i]) + y[i])) / (xVec[i] ** 2)
        f2 = -((xVec[i] * mu[i] + (1 + xVec[i] * mu[i]) * sp.log(1 / (1 + xVec[i] * mu[i]))) / ((xVec[i] ** 2) * (1 + xVec[i] * mu[i])))
        f3 = y[i] / (xVec[i] + (xVec[i] ** 2) * mu[i])
        f4 = 0.5 * X.shape[1] * (mu[i] / (1 + sp.dot(mu.transpose(), disp)))
        Gradient[i] = f1 + f2 + f3 + f4
    return Gradient
Developer: ratschlab | Project: spladder | Lines of code: 13 | Source file: likelihood.py
Example 6: ausw_poly2
def ausw_poly2(a,x):
    """ ausw_poly evaluates the polynomial
        p(x) = a_1 + a_2*x + a_3*x**2 + ... + a_n*x**(n-1)
        INPUT:  a  vector of coefficients
                x  vector of points at which to evaluate p
        OUTPUT: y  vector of function values (y = p(x))"""
    n = len(a)
    k = len(x)
    xm = np.array([x])
    A = sp.repeat(xm.T, n, 1)
    B = sp.repeat(np.array([range(0,n)]), k, 0)
    return dot(A**B, a)
Developer: laevar | Project: mapy | Lines of code: 13 | Source file: poly.py
Example 7: __init__
def __init__(self, N, uni_ground):
    self.odr = 'C'
    self.typ = sp.complex128

    self.zero_tol = sp.finfo(self.typ).resolution
    """Tolerance for detecting zeros. This is used when (pseudo-) inverting
       l and r."""

    self._sanity_checks = False

    self.N = N
    """The number of sites. Do not change after initializing."""

    self.N_centre = N / 2
    """The 'centre' site. This affects the gauge-fixing and canonical
       form. It is the site between the left-gauge parts and the
       right-gauge parts."""

    self.D = sp.repeat(uni_ground.D, self.N + 2)
    """Vector containing the bond-dimensions. A[n] is a
       q[n] x D[n - 1] x D[n] tensor."""

    self.q = sp.repeat(uni_ground.q, self.N + 2)
    """Vector containing the site Hilbert space dimensions. A[n] is a
       q[n] x D[n - 1] x D[n] tensor."""

    self.uni_l = copy.deepcopy(uni_ground)
    self.uni_l.symm_gauge = True
    self.uni_l.sanity_checks = self.sanity_checks
    self.uni_l.update()

    if not N % self.uni_l.L == 0:
        print "Warning: Length of nonuniform window is not a multiple of the uniform block size."

    self.uni_r = copy.deepcopy(self.uni_l)

    self.grown_left = 0
    self.grown_right = 0
    self.shrunk_left = 0
    self.shrunk_right = 0

    self._init_arrays()

    for n in xrange(1, self.N + 1):
        self.A[n][:] = self.uni_l.A[(n - 1) % self.uni_l.L]

    for n in xrange(self.N + 2):
        self.r[n][:] = sp.asarray(self.uni_l.r[(n - 1) % self.uni_l.L])
        self.l[n][:] = sp.asarray(self.uni_l.l[(n - 1) % self.uni_l.L])
Developer: hariseldon99 | Project: evoMPS | Lines of code: 49 | Source file: mps_sandwich.py
Example 8: MakeTestIonoclass
def MakeTestIonoclass(testv=False,testtemp=False,N_0=1e11,z_0=250.0,H_0=50.0,coords=None,times =sp.array([[0,1e6]])):
    """ This function will create a test ionoclass with an electron density that
        follows a chapman function"""
    if coords is None:
        xvec = sp.arange(-250.0,250.0,20.0)
        yvec = sp.arange(-250.0,250.0,20.0)
        zvec = sp.arange(50.0,900.0,2.0)
        # Mesh grid is set up in this way to allow for use in MATLAB with a simple reshape command
        xx,zz,yy = sp.meshgrid(xvec,zvec,yvec)
        coords = sp.zeros((xx.size,3))
        coords[:,0] = xx.flatten()
        coords[:,1] = yy.flatten()
        coords[:,2] = zz.flatten()
        zzf = zz.flatten()
    else:
        zzf = coords[:,2]
    # H_0 = 50.0 #km scale height
    # z_0 = 250.0 #km
    # N_0 = 10**11

    # Make electron density
    Ne_profile = Chapmanfunc(zzf,H_0,z_0,N_0)
    # Make temperture background
    if testtemp:
        (Te,Ti) = TempProfile(zzf)
    else:
        Te = np.ones_like(zzf)*2000.0
        Ti = np.ones_like(zzf)*2000.0

    # set up the velocity
    (Nlocs,ndims) = coords.shape
    Ntime = len(times)
    vel = sp.zeros((Nlocs,Ntime,ndims))
    if testv:
        vel[:,:,2] = sp.repeat(zzf[:,sp.newaxis],Ntime,axis=1)/5.0
    species = ['O+','e-']

    # put the parameters in order
    params = sp.zeros((Ne_profile.size,len(times),2,2))
    params[:,:,0,1] = sp.repeat(Ti[:,sp.newaxis],Ntime,axis=1)
    params[:,:,1,1] = sp.repeat(Te[:,sp.newaxis],Ntime,axis=1)
    params[:,:,0,0] = sp.repeat(Ne_profile[:,sp.newaxis],Ntime,axis=1)
    params[:,:,1,0] = sp.repeat(Ne_profile[:,sp.newaxis],Ntime,axis=1)

    Icont1 = IonoContainer(coordlist=coords,paramlist=params,times = times,sensor_loc = sp.zeros(3),ver =0,coordvecs =
        ['x','y','z'],paramnames=None,species=species,velocity=vel)
    return Icont1
Developer: hhuangmeso | Project: RadarDataSim | Lines of code: 48 | Source file: IonoContainer.py
Example 9: ecdf
def ecdf(data,weighted=False,alpha=0.05):
    """
    Given an array and an alpha, returns
        (x,p,a_n)
    x and p are arrays, where x are values in the sample space and p
    is the corresponding cdf value.
    a_n is the margin of error according to the DKWM theorem using
    the supplied value of alpha. Interpreted, this means:
        P( {|cdf(x) - ecdf(x)| > a_n} ) <= alpha
    If weighted=True, then data should be a N-by-2 matrix,
    where each row contains
        (data_pt, weight)
    where weight is as defined in the Horvitz-Thompson estimate.
    """
    cdf = {}
    if not weighted:
        # give all elements weight of 1
        data = concatenate( (data.reshape(len(data),1),ones((len(data),1))), axis=1 )

    def helper((x,w)):
        cdf[x] = cdf.get(x,0.0) + w

    print "  Uniqueifying..."
    map(helper,data)

    # data now has unique values
    print "  Arraying..."
    data = array(cdf.items())

    print "  Weighting..."
    w_total = data[:,1].sum()

    print "  Sorting..."
    sort_order = data[:,0].argsort()
    sorted = data[sort_order]

    print "  Summing..."
    ret_x = repeat(sorted[:,0],2)
    ret_p = concatenate(( [0.0],
                          repeat(1./w_total * cumsum(sorted[:-1,1]),2),
                          [1.0] ))
    a_n = sqrt( 1./(2*w_total) * log(2./alpha) )

    return ret_x,ret_p,a_n
Developer: cbick | Project: gps2gtfs | Lines of code: 48 | Source file: Stats.py
Example 10: ddpsimul
def ddpsimul(pstar, s, N, x):
    """ Monte-Carlo simulation of discrete-state/action controlled Markov process

    Parameters
    -------------
    pstar : array, shape (n, n) or (n, n, T)
        Optimal state transition matrix. Usually returned by one of the methods of
        `Dpsolve`. The array has shape (n, n) for infinite horizon processes,
        and (n, n, T) for finite horizon processes.
    s : array, shape (k, )
        Initial states
    N : int
        Number of simulations
    x : array, shape (n, ) or (n, T)
        Optimal controls

    Returns
    ---------
    spath : array, shape (k, N + 1)
        Simulated states
    """
    infinite = (len(pstar.shape) == 2)
    n = pstar.shape[1]
    k = len(s)
    spath = sp.zeros((k, N+1), int)
    if infinite:
        ## Row cumulative sum
        cp = pstar.cumsum(1)
        spath[:, 0] = s
        for t in range(1, N + 1):
            ## Draws the column from a categorical distribution
            rdraw = random.rand(k, 1)
            s = (sp.repeat(rdraw, n, 1) > cp[s, ]).sum(1)
            spath[:, t] = s
    else:
        T = pstar.shape[2]
        if N > T:
            print("Simulations greater than the time horizon are ignored.")
        N = min(N, T)
        spath[:, 0] = s
        for t in range(N + 1):
            cp = pstar[...,t].cumsum(1)
            rdraw = random.rand(k, 1)
            s = (sp.repeat(rdraw, n, 1) > cp[s, ]).sum(1)
    xpath = x[spath]
    return (spath, xpath)
Developer: jrnold | Project: psc585 | Lines of code: 48 | Source file: dp.py
Example 11: __init__
def __init__(self, covars, names=[], *args, **kw_args):
    #1. check that all covars are covariance functions
    #2. get number of params
    super(SumCF, self).__init__(*args, **kw_args)
    self.n_params_list = []
    self.covars = []
    self.covars_theta_I = []
    self.covars_covar_I = []

    self.covars = covars
    if names and len(names) == len(self.covars):
        self.names = names
    elif names:
        self.names = []
        print "names provided, but shapes not matching (names:{}) (covars:{})".format(len(names), len(covars))
    else:
        self.names = []

    i = 0
    for nc in xrange(len(covars)):
        covar = covars[nc]
        assert isinstance(covar, CovarianceFunction), 'SumCF: SumCF is constructed from a list of covaraince functions'
        Nparam = covar.get_number_of_parameters()
        self.n_params_list.append(Nparam)
        self.covars_theta_I.append(sp.arange(i, i + covar.get_number_of_parameters()))
        self.covars_covar_I.extend(sp.repeat(nc, Nparam))
        i += covar.get_number_of_parameters()

    self.n_params_list = sp.array(self.n_params_list)
    self.n_hyperparameters = self.n_params_list.sum()
Developer: sg3510 | Project: home-automation-yr3proj | Lines of code: 31 | Source file: combinators.py
Example 12: make_batches
def make_batches(data, labels=None, batch_size=100):
    if labels is not None:
        num_labels = labels.shape[1]
        cls_data = [data[find(labels[:,i] == 1)] for i in range(num_labels)]
        cls_sizes = [d.shape[0] for d in cls_data]
        cls_sels = [permutation(range(s)) for s in cls_sizes]
        n = min(cls_sizes) * len(cls_sizes)
        batch_size = min(n, batch_size)
        lpb = batch_size / num_labels
        new_dat = []
        for i in range(n/batch_size):
            for sel, cd in zip(cls_sels, cls_data):
                new_dat.append(cd[sel[i*lpb:(i+1)*lpb]])
        if sparse.issparse(data):
            data = sparse.vstack(new_dat).tocsr()
        else:
            data = np.vstack(new_dat)
        labels = np.tile(np.repeat(np.eye(num_labels),lpb,0), (n/batch_size,1))
        n = len(labels)
        perm = range(n)
    else:
        n = data.shape[0]
        perm = permutation(range(n))
    i = 0
    while i < n:
        batch = perm[i:i+batch_size]
        i += batch_size
        yield (data[batch], None) if labels is None else (data[batch], labels[batch])
Developer: ageek | Project: sandbox | Lines of code: 28 | Source file: trainer.py
Example 13: make_y0
def make_y0(model):
    """ Make y0 """
    def mu_ij(i, j):
        return -sp.sqrt(uij[j, i] + (model.c / (1 - model.p[j]))
                        - (1 - model.d) * ubar[j]
                        - model.d * v0[j])
    # \bar{u} : status quo payoffs
    ubar = -(model.ideals ** 2).sum(1) + model.K
    # TODO: where did plus 10 come from?
    uij = (-(model.ideals[:, 0] - model.ideals[:, 0][:, sp.newaxis])**2 +
           -(model.ideals[:, 1] - model.ideals[:, 1][:, sp.newaxis])**2 + model.K)
    # v_0
    v0 = (uij * model.p[:, sp.newaxis]).sum(1) + model.c
    ## \lambda_0
    lam0 = sp.ones((5, 6)) * -sp.sqrt(model.c)
    # if m_i = i
    lam0[sp.r_[0:5], sp.r_[0:5]] = 1
    lam0 = reshape(lam0, (lam0.size, ))
    # x_0
    x0 = sp.reshape(sp.repeat(model.ideals, 6, axis=0), (60, ))
    # \mu_0
    mu0 = sp.zeros((5, 6, 2))
    # For players
    for i in range(5):
        # For coalitions
        for mi in range(6):
            # for each other player in the coalition
            ii = i * 6 + mi
            mu0[i, mi, 0] = mu_ij(i, model.part1[ii])
            mu0[i, mi, 1] = mu_ij(i, model.part2[ii])
    mu0 = sp.ravel(mu0)
    # y_0
    y0 = sp.concatenate((v0, lam0, x0, mu0))
    return y0
Developer: jrnold | Project: psc585 | Lines of code: 35 | Source file: ps3.py
Example 14: globs
def globs(globs):
    # setup mock urllib2 module to avoid downloading from mldata.org
    mock_datasets = {
        'mnist-original': {
            'data': sp.empty((70000, 784)),
            'label': sp.repeat(sp.arange(10, dtype='d'), 7000),
        },
        'iris': {
            'data': sp.empty((150, 4)),
        },
        'datasets-uci-iris': {
            'double0': sp.empty((150, 4)),
            'class': sp.empty((150,)),
        },
    }

    global custom_data_home
    custom_data_home = tempfile.mkdtemp()
    makedirs(join(custom_data_home, 'mldata'))
    globs['custom_data_home'] = custom_data_home

    global _urllib2_ref
    _urllib2_ref = datasets.mldata.urllib2
    globs['_urllib2_ref'] = _urllib2_ref
    globs['mock_urllib2'] = mock_urllib2(mock_datasets)
    return globs
Developer: Yangqing | Project: scikit-learn | Lines of code: 26 | Source file: mldata_fixture.py
Example 15: getBrownianIncrement
def getBrownianIncrement(self,N):
    K = self.param['nb_grid']
    M = N/K
    dW = scipy.zeros((1,M))
    dW[0,:] = self.rand.normal(loc=0.,scale=1.,size=M)
    dWW = scipy.repeat(dW,K,axis=0)
    return dWW.flatten()
Developer: pvnuffel | Project: riskmodel | Lines of code: 7 | Source file: inv_transform_double.py
Example 16: ground_motion_sample
def ground_motion_sample(self, log_mean, log_sigma):
    """
    Like .sample_for_eqrm() but adds spawn and recurrence_model dimensions.

    log_mean, log_sigma: ndarray[gmm, site, event, period]. These
        represent the mean and standard deviation of the predicted
        spectral accelerations (indexed by period) at a site due to an
        event, as calculated by the attenuation model indexed by
        gmm. See the ground_motion_interface module for more
        information.

    Returns: ndarray[spawn, GMmodel, rec_model, site, event,
        period] spectral accelerations, measured in G.
    """
    assert log_mean.ndim == 4

    if self.var_method == SPAWN:
        s = self._spawn(log_mean, log_sigma)
    else:
        s = self.sample_for_eqrm(log_mean, log_sigma, self.var_in_last_axis)[newaxis, ...]

    # monte_carlo has added and populated the recurrence model dimension
    if self.var_method == RANDOM_SAMPLING:
        return s

    # Add the recurrence model dimension and "manually" broadcast
    # it so that our caller doesn't have to treat this as a
    # special case.
    return repeat(s[:, :, newaxis, :, :, :],
                  self.n_recurrence_models,
                  2)
Developer: dynaryu | Project: eqrm | Lines of code: 32 | Source file: ground_motion_distribution.py
Example 17: spindens
def spindens(self,lrgm_out):
    from scipy import split,pi,complex128,zeros,repeat
    number_of_lattice_points = self.canvas.shape[0]*self.canvas.shape[1]
    number_of_nodes = len(self.tuple_canvas_coordinates)
    if number_of_nodes == number_of_lattice_points:
        if self.order == 'even':
            Gup, Gdown = split(lrgm_out.reshape(self.canvas.shape[0],self.canvas.shape[1]*2),2,axis=1)
        if self.order == 'odd':
            Gup, Gdown = split(lrgm_out.reshape(-1,2),2,axis=1)
            Gup, Gdown = Gup.reshape(self.canvas.shape), Gdown.reshape(self.canvas.shape)
        else:
            print "Please specify order of Nodes, i.e 'even' for allspinup-allspindown per sclice or odd for spinup-spindown-spinup-..."
        Sz = self.p.upar.hbar/(4*pi*1j*self.p.upar.a**2)*(Gup-Gdown)
    elif number_of_nodes < number_of_lattice_points:
        Sz = zeros(self.canvas.shape,dtype=complex128)
        print 'calculating spin density for sparse structure'
        lrgm_out = self.p.hbar/(4*pi*1j*self.p.upar.a**2)*lrgm_out
        expanded_array_of_coords = repeat(self.tuple_canvas_coordinates,2,axis=0)
        for index,node_name in enumerate(self.nodelist):
            if node_name % 2 == 0:
                sign = 1
            else:
                sign = -1
            Sz[tuple(expanded_array_of_coords[node_name])] += sign * lrgm_out[index]
    else:
        print 'Number of nodes larger than canvas, something is wrong!'
    print 'max Spin Split: ', Sz.real.max()-Sz.real.min()
    #Realteil scheint wahrscheinlicher, imag oszilliert wie bloed
    return Sz.real
Developer: DrBones | Project: greentransport | Lines of code: 29 | Source file: operators.py
Example 18: adj_loglikelihood
def adj_loglikelihood(xVec, lenSampleRibo, lenSampleRna, X, y, mu, sign):
    disp = sp.hstack([sp.repeat(xVec[0], lenSampleRibo), sp.repeat(xVec[1], lenSampleRna)])
    n = 1 / disp
    p = n / (n + mu)
    loglik = sum(nbinom.logpmf(y, n, p))

    diagVec = mu / (1 + sp.dot(mu.transpose(), disp))
    diagWM = sp.diagflat(diagVec)
    xtwx = sp.dot(sp.dot(sp.transpose(X), diagWM), X)
    coxreid = 0.5 * sp.log(sp.linalg.det(xtwx))
    ret = (loglik - coxreid) * sign
    #print "return value is " + str(ret)
    if isinstance(ret, complex):
        raise complexException()
    return ret
Developer: ratschlab | Project: spladder | Lines of code: 17 | Source file: likelihood.py
Example 19: Euclidean_DML
def Euclidean_DML(feat, M, query=None,
                  is_sparse=False, is_trans=False):
    """ Euclidean distance with DML.
    """
    (N, D) = feat.shape
    dotprod = feat.dot(M).dot(feat.T)
    l2norm = sp.repeat(dotprod.diagonal().reshape(1, -1), N, 0)
    return l2norm + l2norm.T - 2 * dotprod
Developer: RowenaWong | Project: hdidx | Lines of code: 8 | Source file: distance.py
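A side note on Example 19, added here for clarity (not part of the original collection): the repeat call broadcasts the diagonal of the Gram matrix G = X·M·Xᵀ into full rows, so that l2norm + l2norm.T - 2*G equals the squared metric distance (x_i - x_j)ᵀ M (x_i - x_j) for every pair of rows. A small self-contained check of that identity, using hypothetical random data and plain NumPy:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))           # five feature vectors in R^3 (made up for the check)
A = rng.normal(size=(3, 3))
M = A @ A.T                           # a symmetric positive semi-definite metric

G = X @ M @ X.T                       # Gram matrix, G[i, j] = x_i^T M x_j
l2norm = np.repeat(G.diagonal().reshape(1, -1), X.shape[0], 0)
D2 = l2norm + l2norm.T - 2 * G        # D2[i, j] = (x_i - x_j)^T M (x_i - x_j)

diff = X[0] - X[3]
print(np.allclose(D2[0, 3], diff @ M @ diff))   # True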
Example 20: Problem5Real
def Problem5Real():
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)

    m = 20
    v = 200
    f = discretelognorm(w,m,v)

    VEprime = sp.zeros((N,1))
    VUprime = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    gamma = 0.1
    beta = 0.9

    tol = 10**-9
    delta1 = 1+tol
    delta2 = 1+tol
    it = 0
    while ((delta1 >= tol) or (delta2 >= tol)):
        it += 1

        VE = VEprime.copy()
        VU = VUprime.copy()
        EVU = EVUprime.copy()

        VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
        arg1 = sp.repeat(sp.transpose(VE),N,0)
        arg2 = sp.repeat(EVU,N,1)
        arg = sp.array([arg2,arg1])
        VUprime = alpha_util_grid + beta*sp.amax(arg,axis = 0)
        psi = sp.argmax(arg,axis = 0)
        EVUprime = sp.dot(VUprime,f)

        delta1 = sp.linalg.norm(VEprime - VE)
        delta2 = sp.linalg.norm(VUprime - VU)
        #print(delta1)

    wr_ind = sp.argmax(sp.diff(psi), axis = 1)
    wr = w[wr_ind]
    return wr
Developer: davidreber | Project: Labs | Lines of code: 45 | Source file: solutionstester.py
Note: the scipy.repeat examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation hosting platforms. The snippets are taken from open-source projects contributed by their respective developers, and copyright of the code remains with the original authors. For redistribution and use, please refer to the license of the corresponding project; do not repost without permission.