This article collects typical usage examples of the Python function scipy.concatenate. If you are wondering what exactly scipy.concatenate does, how to call it, or simply want to see it used in real code, the hand-picked examples below should help.
Twenty code examples of the concatenate function are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; those votes help the site recommend better Python code examples.
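Before the collected examples, a minimal sketch of what the call itself does. In the SciPy versions these projects target, scipy.concatenate is simply a re-export of numpy.concatenate (newer SciPy releases drop these NumPy aliases), so the sketch below uses NumPy; the behaviour is the same.

import numpy as np  # scipy.concatenate in these examples is the same function as np.concatenate

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

# default axis=0: stack rows
print(np.concatenate((a, b)))            # [[1 2] [3 4] [5 6]]
# axis=1: stack columns (shapes must match along the other axis)
print(np.concatenate((a, b.T), axis=1))  # [[1 2 5] [3 4 6]]
# 1-D arrays are simply joined end to end
print(np.concatenate(([0], [1.5, 2.5], [9])))  # [0.  1.5  2.5  9.]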
Example 1: __init__
def __init__(self, layers, gridOpts):
    ''' Initialize the grid using the given layers and grid options.
    '''
    segments = []
    qStart = scipy.inf
    qEnd = -scipy.inf
    for layer in layers:
        if layer.isQuantum:
            d1 = dn = gridOpts.dzQuantum
            segments += [self.get_dz_segment(d1, dn, layer.thickness)]
            qStart = min(qStart, sum([len(seg) for seg in segments[:-1]]))
            qEnd = max(qEnd, sum([len(seg) for seg in segments]))
        elif gridOpts.useFixedGrid:
            d1 = dn = gridOpts.dz
            segments += [self.get_dz_segment(d1, dn, layer.thickness)]
        elif layer.thickness * gridOpts.dzCenterFraction > gridOpts.dzEdge:
            d1 = dn = gridOpts.dzEdge
            dc = gridOpts.dzCenterFraction * layer.thickness
            segments += [self.get_dz_segment(d1, dc, layer.thickness / 2),
                         self.get_dz_segment(dc, dn, layer.thickness / 2)]
        else:
            d1 = dn = gridOpts.dzEdge
            segments += [self.get_dz_segment(d1, dn, layer.thickness)]
    self.dz = scipy.concatenate(segments)
    self.z = scipy.concatenate(([0], scipy.cumsum(self.dz)))
    self.zr = (self.z[:-1] + self.z[1:]) / 2
    self.znum = len(self.z)
    self.rnum = len(self.zr)
    self.gridOpts = gridOpts
    self.qIndex = scipy.arange(qStart, qEnd + 1)  # Wavefunction index
    self.qrIndex = scipy.arange(qStart, qEnd)     # Quantum region index
Author: puluoning, Project: ledsim, Lines of code: 31, Source file: ledsim.py
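The concatenation pattern in Example 1 (join per-layer spacing segments, prepend a zero, cumulative-sum to get node positions, then average neighbours for midpoints) can be tried in isolation. get_dz_segment belongs to ledsim, so fixed-spacing arrays stand in for it in this sketch; everything else follows the code above.

import numpy as np

# hypothetical per-layer spacing segments standing in for self.get_dz_segment(...)
segments = [np.full(4, 0.25), np.full(10, 0.05), np.full(4, 0.25)]

dz = np.concatenate(segments)             # all grid spacings in one array
z = np.concatenate(([0], np.cumsum(dz)))  # node positions, starting at 0
zr = (z[:-1] + z[1:]) / 2                 # midpoints of each interval

print(len(z), len(zr))   # len(zr) == len(dz) == len(z) - 1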
Example 2: _initParams_fast
def _initParams_fast(self):
    """
    initialize the gp parameters
    1) project Y onto the known factor X0 -> Y0;
       the average variance of Y0 is used to initialize the variance explained by X0
    2) consider the residual Y1 = Y - Y0 (this is equivalent to regressing out X0)
    3) perform PCA on cov(Y1) and use the first k PCs to initialize X
    4) the variance of all other PCs is used to initialize the noise
    5) the variance explained by the interaction is set to a small random number
    """
    Xd = LA.pinv(self.X0)
    Y0 = self.X0.dot(Xd.dot(self.Y))
    Y1 = self.Y - Y0
    YY = SP.cov(Y1)
    S, U = LA.eigh(YY)
    X = U[:, -self.k:] * SP.sqrt(S[-self.k:])
    a = SP.array([SP.sqrt(Y0.var(0).mean())])
    b = 1e-3 * SP.randn(1)
    c = SP.array([SP.sqrt((YY - SP.dot(X, X.T)).diagonal().mean())])
    # gp hyper params
    params = limix.CGPHyperParams()
    if self.interaction:
        params['covar'] = SP.concatenate([a, X.reshape(self.N * self.k, order='F'), SP.ones(1), b])
    else:
        params['covar'] = SP.concatenate([a, X.reshape(self.N * self.k, order='F')])
    params['lik'] = c
    return params
Author: Shicheng-Guo, Project: scLVM, Lines of code: 27, Source file: gp_clvm.py
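The initialization recipe in the docstring of Example 2 can be reproduced with plain NumPy: project Y onto the known factor, eigendecompose the covariance of the residual, keep the top k components, and treat the remaining variance as noise. The shapes and names below are made up for illustration and are not the scLVM API.

import numpy as np

rng = np.random.default_rng(0)
N, G, k = 50, 20, 3                      # samples, traits, latent factors (made-up sizes)
X0 = rng.normal(size=(N, 2))             # known factor
Y = rng.normal(size=(N, G))

Y0 = X0 @ np.linalg.pinv(X0) @ Y         # projection of Y onto X0
Y1 = Y - Y0                              # residual after regressing out X0
S, U = np.linalg.eigh(np.cov(Y1))        # eigendecomposition, eigenvalues ascending
X = U[:, -k:] * np.sqrt(S[-k:])          # top-k principal directions, scaled

a = np.array([np.sqrt(Y0.var(0).mean())])                          # variance explained by X0
c = np.array([np.sqrt((np.cov(Y1) - X @ X.T).diagonal().mean())])  # leftover variance -> noise
covar_params = np.concatenate([a, X.reshape(-1, order='F')])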
Example 3: dwt_2d
def dwt_2d(image, poly, l=1):
    """
    Computes the discrete wavelet transform for a 2D input image
    :param image: input image to be processed
    :param poly: polyphase filter matrix containing the lowpass and highpass coefficients
    :param l: number of transform levels to be applied
    :return: the transformed image
    """
    assert max(mod(image.shape, 2**l)) == 0, \
        'image dimension ({}) does not allow for a {}-level decomposition'.format(image.shape, l)
    image_ = image.copy()
    for level in range(l):
        sub_image = image_[:(image.shape[0]/(2**level)), :(image.shape[1]/(2**level))]
        for row in range(sub_image.shape[0]):
            s = sub_image[row, :]
            a, d = dwt(s, poly)
            sub_image[row, :] = concatenate((a[newaxis, :], d[0][newaxis, :]), axis=1)
        for col in range(sub_image.shape[1]):
            s = sub_image[:, col]
            a, d = dwt(s, poly)
            sub_image[:, col] = concatenate((a, d[0]), axis=0)
    return image_
Author: liangz0707, Project: Lasagne-learn, Lines of code: 27, Source file: dwtmanege.py
Example 4: idwt
def idwt(a, d, poly, l=1):
    """
    Computes the inverse discrete wavelet transform for a 1D signal
    :param a: the approximation coefficients at the deepest level
    :param d: a list of detail coefficients for each level
    :param poly: polyphase filter matrix containing the lowpass and highpass coefficients
    :param l: number of transform levels to be inverted
    :return: the reconstructed signal
    """
    assert len(d) == l, 'insufficient detail coefficients provided for reconstruction depth {}'.format(l)
    if len(a.shape) == 1:
        a = a[newaxis, :]
    for level in reversed(range(l)):
        decomposition = concatenate((a, d[level][newaxis, :]), axis=0)
        reconstruction = zeros_like(decomposition, dtype=float)
        for z in range(poly.shape[1]/2):
            reconstruction += dot(poly[:, 2*z:2*z+2].transpose(), concatenate(
                (decomposition[:, decomposition.shape[1]-z:], decomposition[:, :decomposition.shape[1]-z]), axis=1))
        a = reconstruction.transpose().reshape(1, 2*a.shape[1])
    return a
Author: liangz0707, Project: Lasagne-learn, Lines of code: 25, Source file: dwtmanege.py
Example 5: mlr
def mlr(x, y, order):
    """Multiple linear regression fit of the columns of matrix x
    (independent variables) to vector y (dependent variable)
    order - order of a smoothing polynomial, which can be included
            in the set of independent variables. If order is
            not specified, no background will be included.
    b - fit coefficients
    f - fit result (m x 1 column vector)
    r - residual (m x 1 column vector)
    """
    if order > 0:
        s = scipy.ones((len(y), 1))
        for j in range(order):
            # nA is presumably the module-level alias for scipy.newaxis in the original source
            s = scipy.concatenate((s, (scipy.arange(0, 1 + (1.0/(len(y)-1)), 1.0/(len(y)-1))**j)[:, nA]), 1)
        X = scipy.concatenate((x, s), 1)
    else:
        X = x
    # calculate the fit: b = fit coefficients
    b = scipy.dot(scipy.dot(scipy.linalg.pinv(scipy.dot(scipy.transpose(X), X)), scipy.transpose(X)), y)
    f = scipy.dot(X, b)
    r = y - f
    return b, f, r
Author: jikhanjung, Project: modan, Lines of code: 26, Source file: chemometrics.py
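The concatenate calls in mlr only append polynomial background columns to the design matrix before a pseudo-inverse fit. A self-contained NumPy version of the same fit is sketched below; the data sizes and coefficient values are invented for illustration.

import numpy as np

def mlr_fit(x, y, order=2):
    """Least-squares fit of y on the columns of x plus a polynomial background."""
    m = len(y)
    t = np.linspace(0.0, 1.0, m)[:, np.newaxis]                          # 0..1 grid, column vector
    background = np.concatenate([t ** j for j in range(order)], axis=1)  # columns 1, t, t^2, ...
    X = np.concatenate((x, background), axis=1)
    b = np.linalg.pinv(X.T @ X) @ X.T @ y   # fit coefficients
    f = X @ b                               # fitted values
    return b, f, y - f                      # coefficients, fit, residual

x = np.random.randn(100, 3)
y = x @ np.array([1.0, -2.0, 0.5]) + 0.1 * np.random.randn(100)
b, f, r = mlr_fit(x, y)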
Example 6: invert_epochs
def invert_epochs(epochs, end=None):
    """inverts an epoch set

    The first epoch will be mapped to [0, start] and the last will be mapped
    to [end of last epoch, :end:]. Epochs that accidentally become negative
    or zero-length will be omitted.

    :type epochs: ndarray
    :param epochs: epoch set to invert
    :type end: int
    :param end: If not None, it is taken for the end of the last epoch,
        else max(index-dtype) is taken instead.
        Default=None
    :returns: ndarray - inverted epoch set
    """

    # checks
    if end is None:
        end = sp.iinfo(INDEX_DTYPE).max
    else:
        end = INDEX_DTYPE.type(end)

    # flip them
    rval = sp.vstack((sp.concatenate(([0], epochs[:, 1])),
                      sp.concatenate((epochs[:, 0], [end])))).T
    return (rval[rval[:, 1] - rval[:, 0] > 0]).astype(INDEX_DTYPE)
Author: rproepp, Project: BOTMpy, Lines of code: 25, Source file: funcs_spike.py
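A concrete run of the inversion in Example 6, rewritten with plain NumPy so it runs without BOTMpy's INDEX_DTYPE: the gap before the first epoch, the gaps between epochs, and the tail up to end become the new epochs.

import numpy as np

epochs = np.array([[2, 5], [8, 10]])   # two [start, end) epochs
end = 12

inv = np.vstack((np.concatenate(([0], epochs[:, 1])),
                 np.concatenate((epochs[:, 0], [end])))).T
inv = inv[inv[:, 1] - inv[:, 0] > 0]   # drop empty or negative intervals

print(inv)   # [[ 0  2] [ 5  8] [10 12]]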
Example 7: ar_model_check_stable
def ar_model_check_stable(A):
    """check if this AR model is stable

    :Parameters:
        A : ndarray
            The coefficient matrix of the model
    """

    # inits and checks
    m, p = A.shape
    p /= m
    if p != round(p):
        raise ValueError('bad inputs!')

    # check for stable model
    A1 = N.concatenate((
        A,
        N.concatenate((
            N.eye((p - 1) * m),
            N.zeros(((p - 1) * m, m))
        ), axis=1)
    ))
    lambdas = NL.eigvals(A1)
    rval = True
    if (N.absolute(lambdas) > 1).any():
        rval = False
    del A1, lambdas
    return rval
Author: mtambos, Project: Neural-Simulation, Lines of code: 28, Source file: ar_model.py
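Example 7 builds the companion matrix of the AR(p) model and tests whether its eigenvalues lie inside the unit circle. A standalone NumPy sketch for a made-up scalar AR(2) process makes the same construction explicit.

import numpy as np

# scalar AR(2): x[t] = 1.2*x[t-1] - 0.4*x[t-2] + noise  ->  A = [a1, a2], m=1, p=2
A = np.array([[1.2, -0.4]])
m, p = A.shape[0], A.shape[1] // A.shape[0]

companion = np.concatenate((
    A,
    np.concatenate((np.eye((p - 1) * m), np.zeros(((p - 1) * m, m))), axis=1),
))
stable = bool((np.abs(np.linalg.eigvals(companion)) < 1).all())
print(companion)   # [[ 1.2 -0.4] [ 1.   0. ]]
print(stable)      # True: the roots of z^2 - 1.2 z + 0.4 lie inside the unit circle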
Example 8: _update_6
def _update_6(self):
    # construct system
    Ax = scipy.zeros((len(self.data), 6))
    Ax[:, 0] = 1.0
    Ax[:, 2] = self.data[:, 0] - self.center[0]
    Ax[:, 3] = self.data[:, 1] - self.center[1]
    Ay = scipy.zeros((len(self.data), 6))
    Ay[:, 1] = 1.0
    Ay[:, 4] = self.data[:, 0] - self.center[0]
    Ay[:, 5] = self.data[:, 1] - self.center[1]
    A = scipy.concatenate((Ax, Ay), axis=0)
    del Ax, Ay
    b = scipy.concatenate((self.data[:, 2], self.data[:, 3]))

    # solve for parameters
    parameters, residual, rank, sigma = scipy.linalg.lstsq(A, b)
    self.tx = parameters[0]
    self.ty = parameters[1]
    self.exx = parameters[2]
    self.exy = parameters[3]
    self.eyx = parameters[4]
    self.eyy = parameters[5]
    del parameters

    # compute residuals
    self.residuals[:, 2] = self.data[:, 2] - self.tx - self.exx * (self.data[:, 0] - self.center[0]) - self.exy * (self.data[:, 1] - self.center[1])
    self.residuals[:, 3] = self.data[:, 3] - self.ty - self.eyx * (self.data[:, 0] - self.center[0]) - self.eyy * (self.data[:, 1] - self.center[1])
Author: demanasta, Project: GeoToolbox, Lines of code: 25, Source file: strain.py
Example 9: shift_row
def shift_row(row, shift):
    if shift == 0:
        return row
    if shift > 0:
        return sp.concatenate(([0] * shift, row[:-shift]))
    else:
        return sp.concatenate((row[-shift:], [0] * -shift))
Author: christiando, Project: BOTMpy, Lines of code: 7, Source file: alignment.py
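shift_row pads with zeros on the side the data is pushed away from. A quick standalone check of both directions, written with NumPy so it runs without the rest of BOTMpy:

import numpy as np

def shift_row(row, shift):
    # same logic as above, with np.concatenate instead of sp.concatenate
    if shift == 0:
        return row
    if shift > 0:
        return np.concatenate(([0] * shift, row[:-shift]))
    return np.concatenate((row[-shift:], [0] * -shift))

row = np.array([1, 2, 3, 4, 5])
print(shift_row(row, 2))    # [0 0 1 2 3]
print(shift_row(row, -2))   # [3 4 5 0 0]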
Example 10: ideal_data
def ideal_data(num, dimU, dimY, dimX, noise=1):
    """Linear system data"""
    # generate randomized linear system matrices
    A = randn(dimX, dimX)
    B = randn(dimX, dimU)
    C = randn(dimY, dimX)
    D = randn(dimY, dimU)

    # make sure state evolution is stable
    U, S, V = svd(A)
    A = dot(U, dot(diag(S / max(S)), V))
    U, S, V = svd(B)
    S2 = zeros((size(U, 1), size(V, 0)))
    S2[:, :size(U, 1)] = diag(S / max(S))
    B = dot(U, dot(S2, V))

    # random input
    U = randn(num, dimU)

    # initial state
    X = reshape(randn(dimX), (1, -1))

    # initial output
    Y = reshape(dot(C, X[-1]) + dot(D, U[0]), (1, -1))

    # generate next state
    X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, U[0]), (1, -1))))

    # and so forth
    for u in U[1:]:
        Y = concatenate((Y, reshape(dot(C, X[-1]) + dot(D, u), (1, -1))))
        X = concatenate((X, reshape(dot(A, X[-1]) + dot(B, u), (1, -1))))

    return U, Y + randn(num, dimY) * noise
Author: riscy, Project: mllm, Lines of code: 34, Source file: system_identifier.py
Example 11: roc
def roc(labels, predictions):
    """roc - calculate receiver operator curve
    labels: true labels (>0 : True, else False)
    predictions: the ranking generated from whatever predictor is used"""

    # 1. convert to arrays
    labels = S.array(labels).reshape([-1])
    predictions = S.array(predictions).reshape([-1])

    # threshold
    t = labels > 0

    # get the order implied by the predictor (descending) and reorder the truth
    Ix = S.argsort(predictions)[::-1]
    t = t[Ix]

    # compute true positive and false positive rates
    tp = S.double(N.cumsum(t)) / t.sum()
    fp = S.double(N.cumsum(~t)) / (~t).sum()

    # add end points
    tp = S.concatenate(([0], tp, [1]))
    fp = S.concatenate(([0], fp, [1]))
    return [tp, fp]
Author: cyversewarwick, Project: gp2s, Lines of code: 26, Source file: ROC.py
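The two concatenate calls at the end of roc only pin the curve at (0, 0) and (1, 1). A self-contained NumPy run on a toy ranking (invented labels and scores) shows the resulting rates:

import numpy as np

labels = np.array([1, 1, 0, 1, 0, 0])           # ground truth (>0 means positive)
predictions = np.array([0.9, 0.8, 0.7, 0.4, 0.3, 0.1])

t = labels[np.argsort(predictions)[::-1]] > 0   # truth reordered by descending score
tp = np.cumsum(t) / t.sum()                     # true positive rate at each cut-off
fp = np.cumsum(~t) / (~t).sum()                 # false positive rate at each cut-off
tp = np.concatenate(([0], tp, [1]))             # pin the curve at (0,0) and (1,1)
fp = np.concatenate(([0], fp, [1]))

print(fp)   # 0, 0, 0, 1/3, 1/3, 2/3, 1, 1
print(tp)   # 0, 1/3, 2/3, 2/3, 1, 1, 1, 1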
Example 12: run_interact
def run_interact(Y, intA, intB, covs, K):
    """ Calculate pvalues for the nested model of including a multiplicative term between intA and intB into the additive model """

    [N, Ny] = Y.shape
    Na = intA.shape[1]  # number of interaction terms 1
    Nb = intB.shape[1]  # number of interaction terms 2

    S, U = LA.eigh(K)
    UY = SP.dot(U.T, Y)
    UintA = SP.dot(U.T, intA)
    UintB = SP.dot(U.T, intB)
    Ucovs = SP.dot(U.T, covs)

    # for each snp/gene/factor combination, run a lod
    # snps need to be diced bc of missing values - iterate over them, else in arrays
    lods = SP.zeros([Na, Nb, Ny])

    # add mean column:
    if covs is None:
        covs = SP.ones([Ny, 1])

    # for each pair of interacting terms
    for a in range(Na):
        for b in range(Nb):
            # calculate additive and interaction terms
            C = SP.concatenate((Ucovs, UintA[:, a:a+1], UintB[:, b:b+1]), axis=1)
            X = intA[:, a:a+1] * intB[:, b:b+1]
            UX = SP.dot(U.T, X)
            UX = SP.concatenate((UX, C), axis=1)
            for phen in SP.arange(Ny):
                UY_ = UY[:, phen]
                nllnull, ldeltanull = optdelta(UY_, C, S, ldeltanull=None, numintervals=10, ldeltamin=-5.0, ldeltamax=5.0)
                nllalt, ldeltaalt = optdelta(UY_, UX, S, ldeltanull=ldeltanull, numintervals=100, ldeltamin=-5.0, ldeltamax=5.0)
                # LOD: difference between null and alternative negative log-likelihoods
                lods[a, b, phen] = nllnull - nllalt
    return lods
Author: PMBio, Project: limix, Lines of code: 33, Source file: lmm_fast.py
Example 13: generateNodesAdaptive
def generateNodesAdaptive(self):
    innerDomainSize = self.innerDomainSize
    innerMeshSize = self.innerMeshSize
    numberElementsInnerDomain = innerDomainSize / innerMeshSize
    assert(numberElementsInnerDomain < self.numberElements)
    domainCenter = (self.domainStart + self.domainEnd) / 2
    nodes0 = np.linspace(domainCenter, innerDomainSize/2.0, (numberElementsInnerDomain/2.0)+1.0)
    nodes0 = np.delete(nodes0, -1)
    numberOuterIntervalsFromDomainCenter = (self.numberElements - numberElementsInnerDomain) / 2.0
    const = np.log2(innerDomainSize/2.0) / 0.5
    exp = np.linspace(const, np.log2(self.domainEnd*self.domainEnd), numberOuterIntervalsFromDomainCenter+1)
    nodes1 = np.power(np.sqrt(2), exp)
    nodesp = np.concatenate((nodes0, nodes1))
    nodesn = -nodesp[::-1]
    nodesn = np.delete(nodesn, -1)
    linNodalCoordinates = np.concatenate((nodesn, nodesp))
    nodalCoordinates = 0

    # Introduce higher order nodes
    if self.elementType == "quadratic" or self.elementType == "cubic":
        if self.elementType == "quadratic":
            numberNodesPerElement = 3
        elif self.elementType == "cubic":
            numberNodesPerElement = 4

        for i in range(0, len(linNodalCoordinates)-1):
            newnodes = np.linspace(linNodalCoordinates[i], linNodalCoordinates[i+1], numberNodesPerElement)
            nodalCoordinates = np.delete(nodalCoordinates, -1)
            nodalCoordinates = np.concatenate((nodalCoordinates, newnodes))
    else:
        nodalCoordinates = linNodalCoordinates

    return nodalCoordinates
Author: mrinaliyer, Project: tuckerDFT, Lines of code: 34, Source file: FEM.py
Example 14: main
def main():
    points = generate_gaussian(1000, 2, 0, 2, center=(10, 0))
    pylab.plot(points[:, 0], points[:, 1], 'r+')
    #export("Classe A", points)
    points2 = generate_gaussian(1000, 2, 0, 2, center=(5, 5))
    pylab.plot(points2[:, 0], points2[:, 1], 'b+')
    #export("Classe C", points)
    points3 = generate_gaussian(1000, 2, 0, 2, center=(0, 10))
    pylab.plot(points3[:, 0], points3[:, 1], 'y+')
    points4 = generate_gaussian(1000, 2, 0, 2, center=(0, 0))
    pylab.plot(points4[:, 0], points4[:, 1], 'g+')
    pylab.axis([-10, 20, -10, 20])
    pylab.show()

    labels = []
    for i in xrange(len(points)):
        labels.append(0)
    for i in xrange(len(points2)):
        labels.append(1)
    for i in xrange(len(points3)):
        labels.append(2)
    for i in xrange(len(points4)):
        labels.append(3)

    points = scipy.concatenate((points, points2))
    points = scipy.concatenate((points, points3))
    points = scipy.concatenate((points, points4))

    data = dataset.Dataset(points, labels)
    data.random()
    dataset.save(data, "../datasets/4gaussians1k.data")
Author: PepGardiola, Project: kohonen, Lines of code: 32, Source file: dataset_generator.py
Example 15: __call__
def __call__(self, Xi, Xj, ni, nj, hyper_deriv=None, symmetric=False):
    """Evaluate the covariance between points `Xi` and `Xj` with derivative order `ni`, `nj`.

    Parameters
    ----------
    Xi : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` inputs with dimension `N`.
    Xj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` inputs with dimension `N`.
    ni : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` derivative orders for set `i`.
    nj : :py:class:`Matrix` or other Array-like, (`M`, `N`)
        `M` derivative orders for set `j`.
    hyper_deriv : Non-negative int or None, optional
        The index of the hyperparameter to compute the first derivative
        with respect to. If None, no derivatives are taken. Hyperparameter
        derivatives are not supported at this point. Default is None.
    symmetric : bool, optional
        Whether or not the input `Xi`, `Xj` are from a symmetric matrix.
        Default is False.

    Returns
    -------
    Kij : :py:class:`Array`, (`M`,)
        Covariances for each of the `M` `Xi`, `Xj` pairs.

    Raises
    ------
    NotImplementedError
        If the `hyper_deriv` keyword is not None.
    """
    if hyper_deriv is not None:
        raise NotImplementedError("Hyperparameter derivatives have not been implemented!")
    n_cat = scipy.asarray(scipy.concatenate((ni, nj), axis=1), dtype=int)
    X_cat = scipy.asarray(scipy.concatenate((Xi, Xj), axis=1), dtype=float)
    n_cat_unique = unique_rows(n_cat)
    k = scipy.zeros(Xi.shape[0], dtype=float)
    # Loop over unique derivative patterns:
    if self.num_proc > 1:
        pool = multiprocessing.Pool(processes=self.num_proc)
    for n_cat_state in n_cat_unique:
        idxs = scipy.where(scipy.asarray((n_cat == n_cat_state).all(axis=1)).squeeze())[0]
        if (n_cat_state == 0).all():
            k[idxs] = self.cov_func(Xi[idxs, :], Xj[idxs, :], *self.params)
        else:
            if self.num_proc > 1 and len(idxs) > 1:
                k[idxs] = scipy.asarray(
                    pool.map(_ArbitraryKernelEval(self, n_cat_state), X_cat[idxs, :]),
                    dtype=float
                )
            else:
                for idx in idxs:
                    k[idx] = mpmath.chop(mpmath.diff(self._mask_cov_func,
                                                     X_cat[idx, :],
                                                     n=n_cat_state,
                                                     singular=True))
    if self.num_proc > 0:
        pool.close()
    return k
Author: pennajm, Project: gptools, Lines of code: 60, Source file: core.py
Example 16: _getScalesDiag
def _getScalesDiag(self, termx=0):
    """
    Internal function for parameter initialization
    Uses a 2-term single trait model to get covar params for initialization

    Args:
        termx: non-noise term that is used for initialization
    """
    assert self.P > 1, 'VarianceDecomposition:: diagonal init_method allowed only for multi trait models'
    assert self.noisPos is not None, 'VarianceDecomposition:: noise term has to be set'
    assert termx < self.n_randEffs - 1, 'VarianceDecomposition:: termx>=n_randEffs-1'
    assert self.trait_covar_type[self.noisPos] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
    assert self.trait_covar_type[termx] not in ['lowrank', 'block', 'fixed'], 'VarianceDecomposition:: diagonal initialization not possible for such a parametrization'
    scales = []
    res = self._getH2singleTrait(self.vd.getTerm(termx).getK())
    scaleg = sp.sqrt(res['varg'].mean())
    scalen = sp.sqrt(res['varn'].mean())
    for term_i in range(self.n_randEffs):
        if term_i == termx:
            _scales = scaleg * self.diag[term_i]
        elif term_i == self.noisPos:
            _scales = scalen * self.diag[term_i]
        else:
            _scales = 0. * self.diag[term_i]
        if self.jitter[term_i] > 0:
            _scales = sp.concatenate((_scales, sp.array([sp.sqrt(self.jitter[term_i])])))
        scales.append(_scales)
    return sp.concatenate(scales)
Author: letaylor, Project: limix, Lines of code: 28, Source file: varianceDecomposition.py
Example 17: KramersKronigFFT
def KramersKronigFFT(ImX_A):
    ''' Hilbert transform used to calculate the real part of a function from its imaginary part
    uses a piecewise-cubic interpolated integral kernel of the Hilbert transform
    use only if len(ImX_A) = 2**m-1, uses fft from scipy.fftpack '''
    X_A = sp.copy(ImX_A)
    N = int(len(X_A))
    ## be careful with the data type, otherwise it fails for large N
    if N > 3e6: A = sp.arange(3, N+1, dtype='float64')
    else:       A = sp.arange(3, N+1)
    X1 = 4.0*sp.log(1.5)
    X2 = 10.0*sp.log(4.0/3.0) - 6.0*sp.log(1.5)
    ## filling the kernel
    if N > 3e6: Kernel_A = sp.zeros(N-2, dtype='float64')
    else:       Kernel_A = sp.zeros(N-2)
    Kernel_A = (1-A**2)*((A-2)*sp.arctanh(1.0/(1-2*A))+(A+2)*sp.arctanh(1.0/(1+2*A)))\
             + ((A**3-6*A**2+11*A-6)*sp.arctanh(1.0/(3-2*A))+(A+3)*(A**2+3*A+2)*sp.arctanh(1.0/(2*A+3)))/3.0
    Kernel_A = sp.concatenate([-sp.flipud(Kernel_A), sp.array([-X2, -X1, 0.0, X1, X2]), Kernel_A])/sp.pi
    ## zero-padding the functions for fft
    ImXExt_A = sp.concatenate([X_A[int((N-1)/2):], sp.zeros(N+2), X_A[:int((N-1)/2)]])
    KernelExt_A = sp.concatenate([Kernel_A[N:], sp.zeros(1), Kernel_A[:N]])
    ## performing the fft
    ftReXExt_A = -fft(ImXExt_A)*fft(KernelExt_A)
    ReXExt_A = sp.real(ifft(ftReXExt_A))
    ReX_A = sp.concatenate([ReXExt_A[int((3*N+3)/2+1):], ReXExt_A[:int((N-1)/2+1)]])
    return ReX_A
Author: pokornyv, Project: SPEpy, Lines of code: 25, Source file: parlib.py
Example 18: philm
def philm(l, m, x, y, w0):
    normalize_factor = scipy.sqrt(2/2**(l+m)/scipy.misc.factorial(l)/scipy.misc.factorial(m)/pi)/w0
    hermite_x = numpy.polynomial.hermite.hermval(scipy.sqrt(2)*x/w0, scipy.concatenate((scipy.zeros(l), scipy.ones(1))))
    hermite_y = numpy.polynomial.hermite.hermval(scipy.sqrt(2)*y/w0, scipy.concatenate((scipy.zeros(m), scipy.ones(1))))
    phi_lm = normalize_factor*hermite_x*hermite_y*scipy.exp(-(x**2+y**2)/w0**2)
    return phi_lm
Author: mmcqed, Project: CavityAberration, Lines of code: 7, Source file: CavityDefinitions.py
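The concatenate call in philm is just a trick to build a coefficient vector that selects the l-th (or m-th) Hermite polynomial: zeros for all lower orders and a single one at the end. A standalone check of that trick with NumPy, independent of the cavity code:

import numpy as np
from numpy.polynomial import hermite

x = np.linspace(-1.0, 1.0, 5)

# coefficient vector (0, 0, 1) selects the physicists' Hermite polynomial H_2(x) = 4x^2 - 2
coeffs = np.concatenate((np.zeros(2), np.ones(1)))
h2 = hermite.hermval(x, coeffs)

print(np.allclose(h2, 4 * x**2 - 2))   # True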
Example 19: run
def run(self, T, dT=None, nT=100, times=None):
    """Run the Repressilator for the specified amount of time T, returning
    output either for fixed time step dT, or over a specified number
    of timesteps nT, or for a specified array of times. Store the
    trajectory returned by odeint in the instance variable self.traj,
    concatenating the result to the existing self.traj if a previous
    trajectory had been created."""
    if times is None:
        if dT is None:
            #times = scipy.linspace(self.t, self.t+T, nT)
            times = scipy.arange(0., 50., 0.2)
        else:
            times = scipy.arange(self.t, self.t+T, dT)
    traj = scipy.integrate.odeint(self.dydt, self.y, times,
                                  args=(self.alpha, self.n, self.alpha0, self.beta),
                                  mxstep=1000)
    if self.traj is None:
        self.traj = traj
        self.y = self.traj[-1]
        self.times = times
        self.t = self.times[-1]
    else:
        self.traj = scipy.concatenate((self.traj, traj))
        self.y = self.traj[-1]
        self.times = scipy.concatenate((self.times, times))
        self.t = self.times[-1]
    return traj[:, :3]  # assume only the mRNA concentrations are observed
Author: azardilis, Project: sysbio, Lines of code: 27, Source file: SimpleRepressilator.py
Example 20: __init__
def __init__(self, U, Y, statedim, reg=None):
    if size(shape(U)) == 1:
        U = reshape(U, (-1, 1))
    if size(shape(Y)) == 1:
        Y = reshape(Y, (-1, 1))
    if reg is None:
        reg = 0

    yDim = size(Y, 1)
    uDim = size(U, 1)

    self.output_size = size(Y, 1)  # placeholder

    # number of samples of past/future we'll mash together into a 'state'
    width = 1
    # total number of past/future pairings we get as a result
    K = size(U, 0) - 2 * width + 1

    # build hankel matrices containing pasts and futures
    U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
    U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
    Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
    Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

    # solve the eigenvalue problem
    YfUfT = dot(Y_f, U_f.T)
    YfUpT = dot(Y_f, U_p.T)
    YfYpT = dot(Y_f, Y_p.T)
    UfUpT = dot(U_f, U_p.T)
    UfYpT = dot(U_f, Y_p.T)
    UpYpT = dot(U_p, Y_p.T)
    F = bmat([[None, YfUfT, YfUpT, YfYpT],
              [YfUfT.T, None, UfUpT, UfYpT],
              [YfUpT.T, UfUpT.T, None, UpYpT],
              [YfYpT.T, UfYpT.T, UpYpT.T, None]])
    Ginv = bmat([[pinv(dot(Y_f, Y_f.T)), None, None, None],
                 [None, pinv(dot(U_f, U_f.T)), None, None],
                 [None, None, pinv(dot(U_p, U_p.T)), None],
                 [None, None, None, pinv(dot(Y_p, Y_p.T))]])
    F = F - eye(size(F, 0)) * reg

    # take the smallest eigenvalues
    _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

    # the state sequence is a weighted combination of the past
    W_U_p = W[width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
    W_Y_p = W[width * (yDim + uDim + uDim):, :]
    X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

    # regress; trim inputs to match the states we retrieved
    R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
    L = concatenate((X_hist[:, 1:], Y[width:-width].T), 0)
    RRi = pinv(dot(R, R.T))
    RL = dot(R, L.T)
    Sys = dot(RRi, RL).T
    self.A = Sys[:statedim, :statedim]
    self.B = Sys[:statedim, statedim:]
    self.C = Sys[statedim:, :statedim]
    self.D = Sys[statedim:, statedim:]
Author: riscy, Project: mllm, Lines of code: 59, Source file: system_identifier.py
Note: The scipy.concatenate examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please refer to each project's License before redistributing or reusing the code, and do not reproduce this page without permission.