This article collects typical usage examples of Python's scipy.copy function. If you have been wondering what scipy.copy does, how to call it, or what it looks like in real code, the curated examples below should help.
A total of 20 code examples of the copy function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
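Before the examples, a quick note on the function itself: in the SciPy versions these snippets target, scipy.copy is simply a re-exported alias of numpy.copy, so it returns a new array with the same contents rather than a view (newer SciPy releases have removed these NumPy aliases, in which case numpy.copy or ndarray.copy should be used instead). A minimal sketch, assuming such an older SciPy:

import scipy as sp

a = sp.array([1.0, 2.0, 3.0])   # sp.array is likewise a re-exported NumPy function
b = sp.copy(a)                  # an independent copy of the data, not a view
b[0] = 99.0
print(a)                        # [1. 2. 3.]  -- the original is untouched
print(b)                        # [99.  2.  3.]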
Example 1: initialize
def initialize(self, data, random=False):
    self.data = data
    self.n_dim = data.shape[1]
    if random:
        mins = sp.zeros(self.n_dim)
        maxes = sp.zeros(self.n_dim)
        sds = sp.zeros(self.n_dim)
        centers = sp.zeros((self.n_components, self.n_dim))
        for i in xrange(self.n_dim):
            mins[i] = min(self.data[:, i])
            maxes[i] = max(self.data[:, i])
            sds[i] = sp.std(self.data[:, i])
            centers[:, i] = sp.random.uniform(mins[i], maxes[i], self.n_components)
        self.comp = sp.ones(self.n_components)/float(self.n_components) + sp.random.uniform(-1./self.n_components, 1./self.n_components, self.n_components)
        self.comp /= sp.sum(self.comp)
        covars = sp.array([sp.diag(sds**2) for i in xrange(self.n_components)])
        self.centers = centers
        self.covars = covars
    else:
        # k-means initialization: use the cluster centers and hard assignments
        clust = cluster.KMeans(self.n_components)
        clust.fit(self.data)
        self.centers = sp.copy(clust.cluster_centers_)
        labels = sp.copy(clust.labels_)
        self.covars = sp.zeros((self.n_components, self.n_dim, self.n_dim))
        self.comp = sp.zeros(self.n_components)
        for i in xrange(self.n_components):
            inds = labels == i
            temp = self.data[inds, :]
            self.covars[i, :, :] = sp.dot(temp.T, temp)
            self.comp[i] = sum(inds)/float(self.data.shape[0])
Developer: KathleenF | Project: numerical_computing | Lines: 30 | Source: gmm.py
Example 2: NumpyTensorInitializerForVacancy
def NumpyTensorInitializerForVacancy(gridShape, filename, vacancyfile=None):
    """
    Initialize a 10 component plasticity state by reading from a numpy "tofile" type file or two files.
    """
    dict = {('x','x') : (0,0), ('x','y') : (0,1), ('x','z') : (0,2),
            ('y','x') : (1,0), ('y','y') : (1,1), ('y','z') : (1,2),
            ('z','x') : (2,0), ('z','y') : (2,1), ('z','z') : (2,2)}
    data = fromfile(filename)
    if vacancyfile is None:
        data = data.reshape([10] + list(gridShape))
    else:
        data = data.reshape([3,3] + list(gridShape))
        dataV = fromfile(vacancyfile)
        dataV = dataV.reshape(list(gridShape))
    state = VacancyState.VacancyState(gridShape)
    field = state.GetOrderParameterField()
    if vacancyfile is None:
        i = 0
        for component in field.components:
            field[component] = copy(data[i])
            i += 1
    else:
        for component in field.components:
            if component[0] not in ['x', 'y', 'z']:
                # the vacancy component comes from the separate file
                field[component] = copy(dataV)
            else:
                field[component] = copy(data[dict[component]])
    return state
Developer: mattbierbaum | Project: cuda-plasticity | Lines: 28 | Source: FieldInitializer.py
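The initializer above reads flat binary data produced by NumPy's tofile, which stores no shape or dtype metadata; that is why the code must reshape with a known gridShape. As a hedged aside (the file name and shapes below are purely illustrative, not from the original project), the round trip it relies on looks like this:

import numpy as np

a = np.arange(24.0).reshape(2, 3, 4)
a.tofile('state.bin')            # raw bytes only; shape and dtype are not stored
b = np.fromfile('state.bin')     # comes back as a flat float64 array of 24 values
b = b.reshape([2, 3] + [4])      # the caller must supply the shape, as the initializer does with gridShape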
Example 3: __init__
def __init__(self, fitness_func, npop=20, w=0.5, c1=2.01, c2=2.02, debug=False):
    seed()
    self.debug = debug
    self.c1 = c1
    self.c2 = c2
    self.w = w
    self.ns = int(npop)
    self.fitness_func = fitness_func
    # generate the initial population (resume from a dump file if one exists)
    if os.path.isfile("dump_pso.pkl"):
        dump_fd = open("dump_pso.pkl", 'r')
        self.pop = cPickle.load(dump_fd)
        self.fit = cPickle.load(dump_fd)
        self.v = cPickle.load(dump_fd)
        self.bfg = cPickle.load(dump_fd)
        self.bfg_fitness = cPickle.load(dump_fd)
        self.bfp = cPickle.load(dump_fd)
        self.bfp_fitness = cPickle.load(dump_fd)
    else:
        self.pop = scipy.array([self.gera_individuo() for i in scipy.arange(self.ns)])
        self.fit = scipy.zeros(self.ns)
        # evaluate the fitness of the whole population
        for i in scipy.arange(self.ns):
            self.fit[i], self.pop[i] = self.avalia_aptidao(self.pop[i])
        # initialize the velocities
        self.v = scipy.zeros((self.ns, Dim))
        # keep the best position found by each particle
        self.bfp = scipy.copy(self.pop)
        self.bfp_fitness = scipy.copy(self.fit)
        # keep the global best position
        self.bfg = self.pop[self.bfp_fitness.argmin()].copy()
        self.bfg_fitness = self.bfp_fitness.min().copy()
Developer: mmssouza | Project: coevol | Lines: 32 | Source: optimize.py
Example 4: __init__
def __init__(self, field, system_dir, nprocs=4, **kwargs):
    #
    super().__init__()
    #
    # field attributes that are copied over
    field.create_point_data()
    self.nx = field.nx
    self.nz = field.nz
    self.data_vector = field.data_vector
    self.data_map = field.data_map
    self.point_data = field.point_data
    self._field = field.clone()
    self._mask = sp.ones(self.data_map.shape, dtype=bool)
    #
    self.offset_map = sp.zeros(self.data_map.shape)
    self.offset_points = sp.zeros(self.point_data.shape)
    if kwargs.get('offset_field', None):
        kwargs['offset_field'].create_point_data()
        self.offset_map = sp.copy(kwargs['offset_field'].data_map)
        self.offset_points = sp.copy(kwargs['offset_field'].point_data)
    #
    self.system_dir = system_dir
    self.nprocs = nprocs
    self.avg_fact = kwargs.get('avg_fact', 1.0)
    self.mesh_params = kwargs.get('mesh_params', {})
    self.merge_groups = []
Developer: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 26 | Source: __ParallelMeshGen__.py
Example 5: _read_sky_logfile
def _read_sky_logfile(self):
    #TODO : expand to read errors, msgs etc
    # read in the whole sky log file, shouldn't be big
    f = open(self.skylogfile)
    lines = f.readlines()
    f.close()
    dust = [line.split()[1:] for line in lines if line.startswith('dtau_dust')]
    line = [line.split()[1:] for line in lines if line.startswith('dtau_line')]
    dust = _sp.array(dust, dtype='float')
    line = _sp.array(line, dtype='float')
    transitions = _sp.unique(dust[:, 0])
    shells = _sp.unique(dust[:, 1])
    dtau_dust = dict()
    dtau_line = dict()
    dtau_tot = dict()
    for t in transitions:
        d = []
        l = []
        for s in shells:
            d.append(_sp.mean([i[2] for i in dust if ((i[0] == t) * (i[1] == s))]))
            l.append(_sp.mean([i[2] for i in line if ((i[0] == t) * (i[1] == s))]))
        dtau_dust[t] = _sp.copy(d)
        dtau_line[t] = _sp.copy(l)
        dtau_tot[t] = _sp.array(d) + _sp.array(l)
    # create object to store in main class
    class Tau(object): pass
    Tau.dtau_dust = dtau_dust
    Tau.dtau_line = dtau_line
    Tau.dtau_tot = dtau_tot
    Tau.transitions = transitions
    Tau.shells = shells
    self.Tau = Tau
Developer: vilhelmp | Project: ratran_python | Lines: 32 | Source: ratout.py
Example 6: __init__
def __init__(self, n_components, comp=None, centers=None, covars=None):
    self.n_components = n_components
    self.comp = sp.copy(comp)
    self.centers = sp.copy(centers)
    self.covars = sp.copy(covars)
    if centers is not None:
        self.n_dim = centers.shape[1]
Developer: KathleenF | Project: numerical_computing | Lines: 7 | Source: gmm.py
Example 7: copy_data
def copy_data(self, obj):
    r"""
    Copies data properties of the field onto another object
    """
    obj.nx = self.nx
    obj.nz = self.nz
    obj.data_map = sp.copy(self.data_map)
    obj.data_vector = sp.copy(self.data_vector)
    obj.point_data = sp.copy(self.point_data)
Developer: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 9 | Source: __core__.py
Example 8: wigner
def wigner(psi, xvec, yvec, g=sqrt(2)):
    """Wigner function for a state vector or density matrix
    at points xvec+i*yvec.

    Parameters
    ----------
    state : qobj
        A state vector or density matrix.
    xvec : array_like
        x-coordinates at which to calculate the Wigner function.
    yvec : array_like
        y-coordinates at which to calculate the Wigner function.
    g : float
        Scaling factor for a = 0.5*g*(x+iy), default g=sqrt(2).

    Returns
    -------
    W : array
        Values representing the Wigner function calculated over the specified range [xvec,yvec].
    """
    if psi.type == 'ket' or psi.type == 'oper':
        M = prod(psi.shape[0])
    elif psi.type == 'bra':
        M = prod(psi.shape[1])
    else:
        raise TypeError('Input state is not a valid operator.')
    X, Y = meshgrid(xvec, yvec)
    amat = 0.5 * g * (X + 1.0j * Y)
    wmat = zeros(shape(amat))
    Wlist = array([zeros(shape(amat), dtype=complex) for k in range(M)])
    Wlist[0] = exp(-2.0 * abs(amat)**2) / pi
    if psi.type == 'ket' or psi.type == 'bra':
        psi = ket2dm(psi)
    wmat = real(psi[0, 0]) * real(Wlist[0])
    for n in range(1, M):
        Wlist[n] = (2.0 * amat * Wlist[n-1]) / sqrt(n)
        wmat += 2.0 * real(psi[0, n] * Wlist[n])
    for m in range(M-1):
        temp = copy(Wlist[m+1])
        Wlist[m+1] = (2.0 * conj(amat) * temp - sqrt(m+1) * Wlist[m]) / sqrt(m+1)
        for n in range(m+1, M-1):
            temp2 = (2.0 * amat * Wlist[n] - sqrt(m+1) * temp) / sqrt(n+1)
            temp = copy(Wlist[n+1])
            Wlist[n+1] = temp2
        wmat += real(psi[m+1, m+1] * Wlist[m+1])
        for k in range(m+2, M):
            wmat += 2.0 * real(psi[m+1, k] * Wlist[k])
    return 0.5 * wmat * g**2
Developer: niazalikhan87 | Project: qutip | Lines: 53 | Source: wigner.py
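As a rough usage sketch (not part of the original source; it assumes an old QuTiP installation where the bare-name imports used above, such as prod, meshgrid and ket2dm, resolve, and that the function above is importable as wigner), the Wigner function of a coherent state could be evaluated on a grid like this:

import numpy as np
from qutip import coherent

psi = coherent(20, 1.5)             # coherent state with alpha = 1.5 in a 20-level Fock space
xvec = np.linspace(-5.0, 5.0, 101)  # grid used for both quadratures
W = wigner(psi, xvec, xvec)         # 101 x 101 array of Wigner-function values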
Example 9: clone
def clone(self):
    r"""
    Creates a fully qualified DataField object from the existing one.
    """
    # instantiating class and adding attributes
    clone = DataField(None)
    #
    self.copy_data(clone)
    clone._raw_data = sp.copy(self._raw_data)
    clone._cell_interfaces = sp.copy(self._cell_interfaces)
    #
    return clone
Developer: stadelmanma | Project: netl-AP_MAP_FLOW | Lines: 12 | Source: __core__.py
Example 10: run
def run():
    data = sp.copy(housing_data)
    x = data[:, [0, 1]]
    y = data[:, [2]]
    m = sp.shape(y)[0]
    # Normalize the x values
    (x, mu, sigma) = graddesc.featureNormalize(x)
    # Add intercept term to x
    x = sp.concatenate((sp.ones((m, 1)), x), axis=1)
    # Init Theta and run Gradient Descent
    num_iters = 400
    # Choose some alpha value
    alphas = [0.01, 0.03, 0.1, 0.3, 1.0]
    for alpha in alphas:
        theta = sp.zeros((3, 1))
        (theta, J_history) = graddesc.gradientDescent(x, y, theta, alpha, num_iters)
        # Plot the value of J by number of iterations
        plt.plot(range(1, J_history.size+1), J_history, '-b')
        plt.title('Alpha = %f' % (alpha))
        plt.xlabel('Number of iterations')
        plt.ylabel('J')
        plt.xlim([0, 50])
        plt.show(block=True)
        # Estimate the price of a 1650 sq-ft, 3 br house
        price = 0
        house = sp.array([[1.0, 1650.0, 3.0]])
        # Normalize the features
        house[0, 1:] = (house[0, 1:] - mu) / sigma
        price = house.dot(theta)
        print('The estimated price with alpha', alpha, 'is', price[0, 0])
    # Reload the data
    data = sp.copy(housing_data)
    x = data[:, [0, 1]]
    y = data[:, [2]]
    # Add intercept term to x
    x = sp.concatenate((sp.ones((m, 1)), x), axis=1)
    # Calculate the normal equation
    theta = graddesc.normalEqn(x, y)
    print('Theta computed from the normal equations:')
    print(theta)
Developer: DarinM223 | Project: machine-learning-coursera-python | Lines: 50 | Source: ex1_multi.py
Example 11: run
def run():
    theta = sp.zeros((3, 1))
    data = sp.copy(admission_data)
    X = data[:, [0, 1]]
    y = data[:, [2]]
    m = sp.shape(y)[0]
    # Add intercept term to x
    X = sp.concatenate((sp.ones((m, 1)), X), axis=1)

    """
    Part 1: Plotting
    """
    print('Plotting data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
    logres.plotData(data)
    plt.xlabel('Exam 1 score')
    plt.ylabel('Exam 2 score')
    plt.legend('Admitted', 'Not admitted')
    plt.show()
    print('Program paused. Press enter to continue.')
    raw_input()

    """
    Part 2: Compute Cost and Gradient
    """
    (m, n) = X.shape
    initial_theta = sp.zeros((n, 1))
    (cost, grad) = logres.costFunction(initial_theta, X, y)
    print('Cost at initial theta (zeros): ', cost)
    print('Gradient at initial theta (zeros): ', grad)
    print('Program paused. Press enter to continue.')
    raw_input()

    """
    Part 3: Optimizing using fminunc
    """
    (theta, cost) = logres.find_minimum_theta(theta, X, y)
    print('Cost at theta found by fmin: ', cost)
    print('Theta: ', theta)
    logres.plotDecisionBoundary(data, X, theta)
    plt.show()

    """
    Part 4: Predict and Accuracies
    """
    prob = logres.sigmoid(sp.asmatrix([1, 45, 85]).dot(theta))
    print('For a student with scores 45 and 85, we predict an admission probability of ', prob[0, 0])
    print('Program paused. Press enter to continue.')
Developer: DarinM223 | Project: machine-learning-coursera-python | Lines: 60 | Source: ex2.py
Example 12: getX
def getX(self, standardized=True, maf=None):
    """
    return SNPs, if necessary standardize them
    """
    X = SP.copy(self.X)
    # test for missing values
    isnan = SP.isnan(X)
    for i in isnan.sum(0).nonzero()[0]:
        # set to mean
        X[isnan[:, i], i] = X[~isnan[:, i], i].mean()
    if maf != None:
        LG.debug('filter SNPs')
        LG.debug('... number of SNPs(before filtering): %d' % X.shape[1])
        idx_snps = SP.logical_and(X[self.idx_samples].mean(0) > 0.1, X[self.idx_samples].mean(0) < 0.9)
        LG.debug('... number of SNPs(after filtering) : %d' % idx_snps.sum())
    else:
        idx_snps = SP.ones(self.n_f, dtype=bool)
    if standardized:
        LG.debug('standardize SNPs')
        X = X[self.idx_samples][:, idx_snps]
        X -= X.mean(0)
        X /= X.std(0, dtype=NP.float32)
        X /= SP.sqrt(X.shape[1])
        return X
    return X[self.idx_samples][:, idx_snps]
Developer: PMBio | Project: pygp_kronsum | Lines: 29 | Source: data.py
Example 13: GP_train
def GP_train(x, y, cov_par, cov_func=None, cov_typ='SE',
             cov_fixed=None, prior=None,
             MF=None, MF_par=None, MF_args=None,
             MF_fixed=None):
    '''
    Max likelihood optimization of GP hyper-parameters. Calls
    GP_negloglik. Takes care of merging / splitting the fixed /
    variable and cov / MF parameters.
    '''
    if MF != None:
        merged_par = scipy.append(cov_par, MF_par)
        n_MF_par = len(MF_par)
        fixed = scipy.append(scipy.zeros(len(cov_par), 'bool'),
                             scipy.zeros(n_MF_par, 'bool'))
        if (cov_fixed != None): fixed[0:-n_MF_par] = cov_fixed
        if (MF_fixed != None): fixed[-n_MF_par:] = MF_fixed
        if MF_args == None: MF_args = x[:]
    else:
        merged_par = cov_par[:]
        n_MF_par = 0
        fixed = scipy.zeros(len(cov_par), 'bool')
        if cov_fixed != None: fixed[:] = cov_fixed
    var_par_in = merged_par[fixed == False]
    fixed_par = merged_par[fixed == True]
    args = (x, y, cov_func, cov_typ, MF, n_MF_par, MF_args, fixed,
            fixed_par, prior)
    var_par_out = sop.fmin(GP_negloglik, var_par_in, args, disp=0)
    par_out = scipy.copy(merged_par)
    par_out[fixed == False] = var_par_out
    par_out[fixed == True] = fixed_par
    if MF != None:
        return par_out[:-n_MF_par], par_out[-n_MF_par:]
    else:
        return par_out
Developer: EdGillen | Project: SuzPyUtils | Lines: 35 | Source: GPSuz.py
Example 14: sqrtm3
def sqrtm3(X):
    M = sp.copy(X)
    m, fb, fe = block_structure(M)
    n = M.shape[0]
    for i in range(0, m):
        M[fb[i]:fe[i], fb[i]:fe[i]] = twobytworoot(M[fb[i]:fe[i], fb[i]:fe[i]])
    #print M
    for j in range(1, m):
        for i in range(0, m-j):
            #print M[fb[i]:fe[i],fb[JJ]:fe[JJ]]
            JJ = i + j
            Tnoto = M[fb[i]:fe[i], fb[JJ]:fe[JJ]]  # drop the copy later
            #print "Tnot: "
            #print Tnoto
            for k in range(i+1, JJ):
                Tnoto -= (M[fb[i]:fe[i], fb[k]:fe[k]]).dot(M[fb[k]:fe[k], fb[JJ]:fe[JJ]])
                #print M[fb[i]:fe[i],fb[k]:fe[k]]
                #print M[fb[k]:fe[k],fb[JJ]:fe[JJ]]
            if (M[fb[i]:fe[i], fb[JJ]:fe[JJ]]).shape == (1, 1):
                # 1x1 blocks reduce to a scalar equation
                #print M[fb[i]:fe[i],fb[JJ]:fe[JJ]]    # Uij
                #print M[fb[i]:fe[i],fb[i]:fe[i]]      # Uii
                #print M[fb[JJ]:fe[JJ],fb[JJ]:fe[JJ]]  # Ujj
                M[fb[i]:fe[i], fb[JJ]:fe[JJ]] = Tnoto/(M[fb[i]:fe[i], fb[i]:fe[i]] + M[fb[JJ]:fe[JJ], fb[JJ]:fe[JJ]])
            else:
                Uii = M[fb[i]:fe[i], fb[i]:fe[i]]
                Ujj = M[fb[JJ]:fe[JJ], fb[JJ]:fe[JJ]]
                shapeUii = Uii.shape[0]
                shapeUjj = Ujj.shape[0]
                """
                print "------------"
                print Tnoto
                print Tnoto.shape
                print sp.kron(sp.eye(shapeUjj),Uii)
                print sp.kron(Ujj.T,sp.eye(shapeUii))
                print Tnoto
                """
                #M[fb[i]:fe[i],fb[JJ]:fe[JJ]] = sp.linalg.solve_sylvester(Uii, Ujj, Tnoto)
                """
                x, scale, info = dtrsyl(Uii, Ujj, Tnoto)
                if (scale==1.0):
                    M[fb[i]:fe[i],fb[JJ]:fe[JJ]] = x
                else:
                    M[fb[i]:fe[i],fb[JJ]:fe[JJ]] = x*scale
                    print "scale!=0"
                """
                # solve the Sylvester equation Uii*Uij + Uij*Ujj = Tnoto via a Kronecker system
                Tnoto = Tnoto.reshape((shapeUii*shapeUjj), 1, order="F")
                M[fb[i]:fe[i], fb[JJ]:fe[JJ]] = \
                    linalg.solve(sp.kron(sp.eye(shapeUjj), Uii) +
                                 sp.kron(Ujj.T, sp.eye(shapeUii)),
                                 Tnoto).reshape(shapeUii, shapeUjj, order="F")
    return M
Developer: sn1p3r46 | Project: Tiro | Lines: 60 | Source: sqrtm3.py
Example 15: update_rule
def update_rule(Asp, states0, parameters, scale=0.0):
    thresh, personal, a, b, c, scale0 = parameters  # ignore scale ( = 0 )
    states = sp.copy(states0)
    # states is a list of states for all N individuals
    nei_sum = Asp * states
    degrees = Asp * sp.ones(len(states))
    ## get average of all neighbours, i.e. s
    nei_av = []
    for i in range(0, len(nei_sum)):
        if degrees[i] > 0:
            nei_av.append(nei_sum[i]/degrees[i])
        else:
            nei_av.append(0.0)
    totav = sum(states)/len(states)  # this is m
    for n in range(0, len(states)):  # len means length, i.e. number of individuals
        utility = a[n]*personal[n] + b[n]*nei_av[n] + c[n]*totav
        if states[n] < 1.0:  # if state == 0
            if utility <= thresh[n]:
                states[n] = 0.0  # scale*utility, i.e. zero if scale=0
            else:
                states[n] = 1.0
    return states
Developer: sideshownick | Project: NetWorks | Lines: 25 | Source: dynamics.py
Example 16: errorApproximation
def errorApproximation(self, ratio, dim=20):
    self.buildMatrix()
    sumNonzeros = (self.vxm != 0).sum()
    numTest = int(ratio*sumNonzeros)
    elementList = []
    nonZeroTuple = sp.nonzero(self.vxm)
    # randomly pick known entries to hold out as a test set
    for x in range(int(numTest)):
        rInt = sp.random.randint(0, nonZeroTuple[0].size)
        randrow = nonZeroTuple[0][rInt]
        randcolumn = nonZeroTuple[1][rInt]
        valElementIndex = [randrow, randcolumn]
        elementList.append(valElementIndex)
    self.modvxm = sp.copy(self.vxm)
    for x in elementList:
        self.modvxm[x[0], x[1]] = 0
    self.modvmx = self.fillAverages(vxm=self.modvxm)
    self.newmodvxm = self.predict(dim, vxm=self.modvxm)
    # RMSE between the predictions and the held-out ratings
    sqDiff = 0
    for x in elementList:
        sqDiff += sp.square(self.newmodvxm[x[0], x[1]] - self.vxm[x[0], x[1]])
    self.rmse = sp.sqrt(sqDiff/len(elementList))
Developer: DmitriyLeybel | Project: SVD_Movie_Ratings | Lines: 31 | Source: Movies.py
Example 17: interp2d
def interp2d(qx, qy, qz):
    Vandermonde = sp.zeros((4, 4))
    Vandermonde[:, 0] = 1
    Vandermonde[:, 1] = qx
    Vandermonde[:, 2] = qy
    Vandermonde[:, 3] = qx*qy
    Vinv = sp.linalg.inv(Vandermonde)
    print 'Vandermonde\n', Vandermonde
    print
    print 'Vandermonde inverse official \n', Vinv
    Vinv = inverse(Vandermonde, 4)
    print 'Vandermonde inverse Gauss \n', Vinv
    V22 = sp.copy(Vinv.T)
    print 'Identity check'
    print sp.dot(Vinv, Vandermonde)
    print 'Transpose official'
    print V22
    # transpose Vinv in place by swapping the off-diagonal entries
    for i in range(3):
        for j in range(i+1, 4):
            d = Vinv[i, j]
            Vinv[i, j] = Vinv[j, i]
            Vinv[j, i] = d
    print 'Index transpose\n', Vinv
    print 'Check transpose\n', Vinv - V22
    def SU2(x, y):
        RHS = sp.array([1, x, y, x*y])
        b = sp.dot(Vinv, RHS)
        return sp.dot(b, qz.T)
    SU2 = sp.vectorize(SU2)
    return SU2
Developer: MatejKosec | Project: LUTStandAlone | Lines: 33 | Source: SU2_interp.py
Example 18: normalizeLength
def normalizeLength(self, noteOns, factor):
    #shibu = 60. / self.wavetempo * (self.binarized_data[0].size / self.duration)
    shibu = (self.fs/10.) / (self.wavetempo/60.)
    fixToResolution = noteOns/shibu*480.
    fixToResolution[:, 2] = noteOns[:, 2]
    # MIDI_Res (resolution) = 480
    MIDI_Res = 480.
    minnotel = 1./4.*MIDI_Res
    # rate (allowed quantization error)
    rate = 0.5
    # sort rows in descending order of note number
    fixToResolution = self.rowsort(fixToResolution)
    self.oldFixToResolution = sp.copy(fixToResolution)
    # list of LilyPond notation symbols
    book = [[] for i in range(fixToResolution.shape[0])]
    for n in range(fixToResolution.shape[0]):
        x_cor = fixToResolution[n, 0] + minnotel*rate - 1
        #x_cor = fixToResolution[n, 0] + minnotel - 1
        x_cor = (sp.floor(x_cor/minnotel))*minnotel
        if(x_cor == 0):
            x_cor = 1
        fixToResolution[n, 0] = x_cor
        fixToResolution[n, 3], book[n] = self.normalizeNoteLength(fixToResolution[n, 3] + factor)
        book[n] = self.convertNoteNo(fixToResolution[n, 2]) + book[n]
        fixToResolution[n, 1] = fixToResolution[n, 3] + fixToResolution[n, 0] - 1
    self.book = book
    return fixToResolution
Developer: mackee | Project: utakata | Lines: 32 | Source: utakata_time_freq.py
Example 19: _site_percolation
def _site_percolation(self, pmask):
    r"""
    This private method is called by 'find_clusters2'
    """
    # Find throats that produce site percolation
    conns = sp.copy(self['throat.conns'])
    conns[:, 0] = pmask[conns[:, 0]]
    conns[:, 1] = pmask[conns[:, 1]]
    # Only if both pores are True is the throat set to True
    tmask = sp.all(conns, axis=1)
    # Perform the clustering using scipy.csgraph
    csr = self.create_adjacency_matrix(data=tmask,
                                       sprsfmt='csr',
                                       dropzeros=True)
    clusters = sprs.csgraph.connected_components(csgraph=csr,
                                                 directed=False)[1]
    # Adjust cluster numbers such that non-invaded pores are labelled -1
    # Note: The following line also takes care of assigning cluster numbers
    # to single isolated invaded pores
    p_clusters = (clusters + 1)*(pmask) - 1
    # Label invaded throats with their neighboring pore's label
    t_clusters = clusters[self['throat.conns']]
    ind = (t_clusters[:, 0] == t_clusters[:, 1])
    t_clusters = t_clusters[:, 0]
    # Label non-invaded throats with -1
    t_clusters[~ind] = -1
    return (p_clusters, t_clusters)
Developer: TomTranter | Project: OpenPNM | Lines: 30 | Source: __GenericNetwork__.py
Example 20: execute
def execute(self):
    self.power_mat, self.thermal_expectation = self.full_calculation()
    n_chan = self.power_mat.shape[1]
    n_freq = self.power_mat.shape[0]
    # Calculate the mean channel correlations at low frequencies.
    low_f_mat = sp.mean(self.power_mat[1:4*n_chan + 1, :, :], 0).real
    # Factorize it into principal components.
    e, v = linalg.eigh(low_f_mat)
    self.low_f_mode_values = e
    # Make sure the eigenvalues are sorted.
    if sp.any(sp.diff(e) < 0):
        raise RuntimeError("Eigenvalues not sorted.")
    self.low_f_modes = v
    # Now subtract out the noisiest channel modes and see what is left.
    n_modes_subtract = 10
    mode_subtracted_power_mat = sp.copy(self.power_mat.real)
    mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
    for ii in range(n_modes_subtract):
        mode = v[:, -ii]
        amp = sp.sum(mode[:, None] * mode_subtracted_power_mat, 1)
        amp = sp.sum(amp * mode, 1)
        to_subtract = amp[:, None, None] * mode[:, None] * mode
        mode_subtracted_power_mat -= to_subtract
        auto_power = mode_subtracted_power_mat.view()
        auto_power.shape = (n_freq, n_chan**2)
        auto_power = auto_power[:, ::n_chan + 1]
        mode_subtracted_auto_power[ii, :] = sp.mean(auto_power, -1)
    self.subtracted_auto_power = mode_subtracted_auto_power
Developer: OMGitsHongyu | Project: analysis_IM | Lines: 28 | Source: noise_power.py
Note: the scipy.copy examples in this article were collected by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the license of the corresponding project. Please do not republish without permission.