This article collects and summarizes typical usage examples of the numpy.bmat function in Python. If you have been wondering what exactly numpy.bmat does and how to use it, the hand-picked code examples below may help.
The following lists 20 code examples of the bmat function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
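Before the examples, here is a minimal sketch of what numpy.bmat does: it assembles a matrix from blocks, either from nested lists of arrays or from a MATLAB-style string whose names are looked up in the caller's namespace. The values below are arbitrary illustration data.

import numpy as np

A = np.eye(2)
B = np.zeros((2, 2))
C = np.ones((2, 2))
D = 2 * np.eye(2)

# nested-list form: one inner list per block row
M1 = np.bmat([[A, B], [C, D]])       # shape (4, 4)

# string form: names are resolved from the caller's frame
M2 = np.bmat('A B; C D')

assert np.allclose(M1, M2)

Note that bmat returns an np.matrix; in modern NumPy, np.block performs the same block assembly but returns a plain ndarray.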
Example 1: getValuesFromPose
def getValuesFromPose(self, P):
'''return the virtual values of the pots corresponding to the pose P'''
vals = []
grads = []
for i, r, l, placement, attach_p in zip(range(3), self.rs, self.ls, self.placements, self.attach_ps):
#first pot axis
a = placement.rot * col([1, 0, 0])
#second pot axis
b = placement.rot * col([0, 1, 0])
#string axis
c = placement.rot * col([0, 0, 1])
#attach point on the joystick
p_joystick = P * attach_p
v = p_joystick - placement.trans
va = v - dot(v, a)*a
vb = v - dot(v, b)*b
#angles of the pots
alpha = math.atan2(dot(vb, a), dot(vb, c))
beta = math.atan2(dot(va, b), dot(va, c))
vals.append(alpha)
vals.append(beta)
#calculation of the derivatives
dv = np.bmat([-P.rot.mat() * quat.skew(attach_p), P.rot.mat()])
dva = (np.eye(3) - a*a.T) * dv
dvb = (np.eye(3) - b*b.T) * dv
dalpha = (1/dot(vb,vb)) * (dot(vb,c) * a.T - dot(vb,a) * c.T) * dvb
dbeta = (1/dot(va,va)) * (dot(va,c) * b.T - dot(va,b) * c.T) * dva
grads.append(dalpha)
grads.append(dbeta)
return (col(vals), np.bmat([[grads]]))
Developer ID: niberger, Project: joysix, Lines of code: 32, Source: joystick.py
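A reduced reading of the gradient bookkeeping in Example 1, as a toy sketch: each pot angle contributes one 1 x 6 gradient row (random stand-ins for dalpha/dbeta here), and bmat stacks them into a single Jacobian.

import numpy as np
rows = [np.matrix(np.random.randn(1, 6)) for _ in range(6)]   # stand-ins for dalpha/dbeta
J = np.bmat([[r] for r in rows])                              # vertical stack, shape (6, 6)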
Example 2: svdUpdate
def svdUpdate(U, S, V, a, b):
"""
Update SVD of an (m x n) matrix `X = U * S * V^T` so that
`[X + a * b^T] = U' * S' * V'^T`
and return `U'`, `S'`, `V'`.
`a` and `b` are (m, 1) and (n, 1) rank-1 matrices, so that svdUpdate can simulate
incremental addition of one new document and/or term to an already existing
decomposition.
"""
rank = U.shape[1]
m = U.T * a
p = a - U * m
Ra = numpy.sqrt(p.T * p)
assert float(Ra) > 1e-10
P = (1.0 / float(Ra)) * p
n = V.T * b
q = b - V * n
Rb = numpy.sqrt(q.T * q)
assert float(Rb) > 1e-10
Q = (1.0 / float(Rb)) * q
K = numpy.matrix(numpy.diag(list(numpy.diag(S)) + [0.0])) + numpy.bmat("m ; Ra") * numpy.bmat(" n; Rb").T
u, s, vt = numpy.linalg.svd(K, full_matrices=False)
tUp = numpy.matrix(u[:, :rank])
tVp = numpy.matrix(vt.T[:, :rank])
tSp = numpy.matrix(numpy.diag(s[:rank]))
Up = numpy.bmat("U P") * tUp
Vp = numpy.bmat("V Q") * tVp
Sp = tSp
return Up, Sp, Vp
Developer ID: beibeiyang, Project: Latent-Dirichlet-Allocation, Lines of code: 31, Source: lsimodel.py
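The two string-form calls above, numpy.bmat("m ; Ra") and numpy.bmat("U P"), work because bmat resolves the names from the caller's local variables. A minimal sketch of that mechanism with throwaway matrices of compatible shapes:

import numpy as np
m = np.matrix(np.random.randn(3, 1))
Ra = np.matrix([[2.0]])
stacked = np.bmat("m ; Ra")      # vertical stack, shape (4, 1)
U = np.matrix(np.random.randn(5, 3))
P = np.matrix(np.random.randn(5, 1))
widened = np.bmat("U P")         # horizontal stack, shape (5, 4)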
Example 3: generateInitData
def generateInitData(self,outputfile):
#initdata = {}
num_sectors = len(self._num_factor_sectors)
num_factors = self._num_factor_region + np.array(self._num_factor_sectors).sum()
num_rv = self._boundary[-1]
initBeta_all = (np.random.rand(num_rv,self._num_factor_region)-0.5) * 2
initBeta_sectors = [(np.random.rand(self._boundary[i+1]-self._boundary[i], self._num_factor_sectors[i]) - 0.5)*2 for i in np.arange(num_sectors)]
if len(self._num_factor_sectors)>0:
top = np.zeros((self._boundary[0], self._num_factor_sectors[0]))
bottom = np.zeros((self._boundary[-1]-self._boundary[1], self._num_factor_sectors[0]))
temp = np.bmat([[top], [initBeta_sectors[0]], [bottom]])
initBeta_all = np.bmat([initBeta_all, temp])
for i in np.arange(1, num_sectors):
top = np.zeros((self._boundary[i], self._num_factor_sectors[i]))
bottom = np.zeros((self._boundary[-1]-self._boundary[1+i], self._num_factor_sectors[i]))
temp = np.bmat([[top], [initBeta_sectors[i]], [bottom]])
initBeta_all = np.bmat([initBeta_all, temp])
norm = np.sqrt( np.diag(initBeta_all.dot(initBeta_all.T)))
norm = norm.reshape(num_rv,1)
initBeta_all = initBeta_all/norm
initBeta_all = np.array( initBeta_all) * np.sqrt(np.random.rand(num_rv,1))
#outputfile = 'C:\\rk\\SFM\\input\\output_N50_r2_s23222_initdata.xisx'
writer = pd.ExcelWriter(outputfile)
self._initdata[self._beta_region] = pd.DataFrame(data = initBeta_all[:,0:self._num_factor_region])
self._initdata[self._beta_region].to_excel(writer, sheet_name=self._beta_region)
for i in np.arange(len(self._num_factor_sectors)):
self._initdata[self._beta_s + str(i)] = pd.DataFrame(data = initBeta_all[self._boundary[i]:self._boundary[i+1], \
np.int(np.array(self._num_factor_sectors[0:i]).sum()) \
+ self._num_factor_region : np.int( np.array(self._num_factor_sectors[0: i+1]).sum()) + self._num_factor_region])
self._initdata[self._beta_s + str(i)].to_excel(writer, sheet_name =self._beta_s + str(i))
writer.save()
Developer ID: xiaorzou, Project: mylib-sfm, Lines of code: 31, Source: sfm.py
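The loading-matrix assembly above follows a simple pattern: pad each sector block with zeros above and below, then append it as a new block column. A toy version with made-up sizes (not the class attributes used above):

import numpy as np
region = np.random.rand(6, 2)                   # region loadings for 6 variables
sector = np.random.rand(3, 1)                   # sector loadings for variables 2..4
padded = np.bmat([[np.zeros((2, 1))], [sector], [np.zeros((1, 1))]])   # vertical padding, 6 x 1
beta_all = np.bmat([region, padded])            # horizontal append, 6 x 3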
Example 4: _dfunc
def _dfunc(self):
N = self.N
self._createMatrices()
Abar = self.A + self.A.T
Bbar = self.B + self.B.T
Dbar = self.D + self.D.T
Fbar = self.F + self.F.T
Ainv = linalg.inv(Abar)
A = array(bmat([[zeros((N,N)), eye(N)],
[-dot(Ainv, Bbar), -dot(Ainv, Fbar)]]))
B = array(bmat([[zeros((N,N)), zeros((N,N))],
[-dot(Ainv, diag(self.E)), zeros((N,N))]]))
C = array(bmat([[zeros((N,N)), zeros((N,N))],
[-dot(Ainv, Dbar), zeros((N,N))]]))
D = hstack((zeros(N), -dot(Ainv, self.C)))
def diffMat(y):
yy = vstack((y,)*N)
return yy.T - yy
return lambda y, t: dot(A, y) + dot(B, sin(y)) + D
Developer ID: YulinWu, Project: servers, Lines of code: 27, Source: circuitsim.py
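The A, B and C matrices above use the usual trick of rewriting a second-order system as a first-order one on the stacked state [y, y']. A minimal sketch of that block structure (K and C below are arbitrary stand-ins, not the class attributes used above):

import numpy as np
N = 3
K = np.diag([1.0, 2.0, 3.0])     # stiffness-like term
C = 0.1 * np.eye(N)              # damping-like term
A = np.asarray(np.bmat([[np.zeros((N, N)), np.eye(N)],
                        [-K, -C]]))   # z' = A z with z = [y, y']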
Example 5: _solve_KKT
def _solve_KKT(H, A, g, ress, C_f, C_nfx):
""" Putting code to solve KKT system in one place. """
# TODO fix it so the solve doesn't sometimes get errors?
o = A.shape[0]
K = mbmat([[ H, A.T],
[ A, zeros[:o, :o]]])
f = bmat([[ g],
[col(ress)]])
xx = col(solve(K, f))
p = - xx[:n]
p = C_f.T * C_f * p
mu = xx[n : n + m]
if abs(K * xx - f).max() > 1e5 * eps:
print('solve failed')
rows_to_keep = get_independent_rows(A, 1e3*eps)
ress = ress[rows_to_keep, 0]
A = extract(A, rows_to_keep)
o = A.shape[0]
K = mbmat([[ H, A.T],
[ A, zeros[:o, :o]]])
f = bmat([[ g],
[ col(ress)]])
xx = col(solve(K, f))
p = - xx[:n]
p = C_f.T * C_f * p
mu = xx[n : n + m]
if abs(K * xx - f).max() > 1e5 * eps:
print('solve failed')
#raise Exception('Solve Still Failed!!!')
return p, mu
Developer ID: gilbertgede, Project: PyIntropt, Lines of code: 33, Source: qp_solver.py
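For reference, a tiny self-contained equality-constrained QP solved through the same kind of KKT block matrix, using standard sign conventions (mbmat, col and the n/m globals from the snippet above are project-specific and not used here):

import numpy as np
# minimize 0.5 x^T H x + g^T x  subject to  A x = b
H = 2.0 * np.eye(3)
g = np.array([[1.0], [0.0], [-1.0]])
A = np.array([[1.0, 1.0, 1.0]])
b = np.array([[1.0]])
o = A.shape[0]
K = np.asarray(np.bmat([[H, A.T], [A, np.zeros((o, o))]]))
rhs = np.asarray(np.bmat([[-g], [b]]))
sol = np.linalg.solve(K, rhs)
x, mu = sol[:3], sol[3:]         # primal solution and Lagrange multiplier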
Example 6: crankNicolson
def crankNicolson(condInitialesPhi, condInitialesPsi, condSpatiales = None, tMax = 0.001, dt=10**-6, v = 1, dx = 1):
if np.size(condInitialesPhi) != np.size(condInitialesPsi) :
        raise Exception("condInitialesPhi must have the same size as condInitialesPsi")
    # Useful constants
n = np.size(condInitialesPhi)
k = -dt * v**2 / dx**2 / 2
N = int(tMax / dt)
    # Matrix storing the evolution of the system (one row per time step)
evolution = np.zeros((N+1,2*n))
evolution[0,:n] = condInitialesPhi
evolution[0,n:] = condInitialesPsi
    # Build the evolution (time-stepping) matrix
I = np.eye(n)
A = np.tri(n, k = 1).T * np.tri(n, k=-1)
A = (A + A.T - 2 * I) * k
M = np.array(np.bmat(((I, -dt*I/2),(A, I))))
K = np.array(np.bmat(((I, dt*I/2),(-A, I))))
invM = np.linalg.inv(M)
matriceEvolution = np.dot(invM,K)
    # Apply the spatial (boundary) conditions so that the selected points are held fixed over time.
if condSpatiales is not None :
matriceEvolution[condSpatiales] = np.zeros(2*n)
matriceEvolution[condSpatiales, condSpatiales] = 1
matriceEvolution[condSpatiales+n] = np.zeros(2*n)
for i in range(1,N+1):
evolution[i] = np.dot(matriceEvolution,evolution[i-1])
return evolution[:,:n], evolution[:,n:]
Developer ID: maxtremblay35, Project: CrankNicolson, Lines of code: 34, Source: resolution.py
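The two block matrices M and K above implement the implicit and explicit halves of a Crank-Nicolson step on the stacked state [phi, psi]. A small standalone shape check with a toy grid (arbitrary parameter values):

import numpy as np
n, dt, k = 4, 1e-6, -0.5
I = np.eye(n)
A = np.tri(n, k=1).T * np.tri(n, k=-1)
A = (A + A.T - 2 * I) * k                      # tridiagonal second-difference operator times k
M = np.array(np.bmat(((I, -dt * I / 2), (A, I))))
K = np.array(np.bmat(((I, dt * I / 2), (-A, I))))
step = np.linalg.inv(M) @ K                    # one update matrix
assert step.shape == (2 * n, 2 * n)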
Example 7: hmc_step_stiefel
def hmc_step_stiefel(X0, log_pi, args=(), epsilon=.3, T=500):
"""
Hamiltonian Monte Carlo for Stiefel manifolds.
"""
n, d = X0.shape
U = np.random.randn(*X0.shape)
tmp = np.dot(X0.T, U)
U = orth_stiefel_project(X0, U)
log_pi0, G0 = log_pi(X0, *args)
H0 = log_pi0 + .5 * np.einsum('ij,ij', U, U)
X1 = X0.copy()
G1 = G0
for tau in xrange(T):
U += 0.5 * epsilon * G1
U = orth_stiefel_project(X0, U)
A = np.dot(X1.T, U)
S = np.dot(U.T, U)
exptA = scipy.linalg.expm(-epsilon * A)
tmp0 = np.bmat([X0, U])
tmp1 = scipy.linalg.expm(epsilon * np.bmat([[A, -S],
[np.eye(d), A]]))
tmp2 = scipy.linalg.block_diag(exptA, exptA)
tmp3 = np.dot(tmp0, np.dot(tmp1, tmp2))
X1 = tmp3[:, :d]
U = tmp3[:, d:]
log_pi1, G1 = log_pi(X1, *args)
U += 0.5 * epsilon * G1
U = orth_stiefel_project(X0, U)
H1 = log_pi1 + .5 * np.einsum('ij,ij', U, U)
u = np.random.rand()
if u < math.exp(-H1 + H0):
return X1, 1, log_pi1
return X0, 0, log_pi0
Developer ID: PredictiveScienceLab, Project: py-aspgp, Lines of code: 33, Source: test_stiefel_mcmc.py
Example 8: update_mini_batch
def update_mini_batch(self, mini_batch, eta, lmbda, spatial_regularization, n):
"""Update the network's weights and biases by applying gradient
descent using backpropagation to a single mini batch. The
``mini_batch`` is a list of tuples ``(x, y)``, ``eta`` is the
        learning rate, ``lmbda`` is the regularization parameter,
        ``spatial_regularization`` toggles the edge-based smoothing of the
        first weight layer applied below, and ``n`` is the total size of
        the training data set.
"""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [(1-eta*(lmbda/n))*w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
if spatial_regularization:
w_imj = np.bmat([self.weights[0][:,1:],np.zeros((len(self.weights[0]),1))]).A
w_ipj = np.bmat([np.zeros((len(self.weights[0]),1)),self.weights[0][:,:-1]]).A
for i in range(27,783,28):
w_imj[:,i] = np.zeros((1,len(self.weights[0])))
w_ipj[:,i+1] = np.zeros((1,len(self.weights[0])))
w_ijm = np.bmat([self.weights[0][:,28:],np.zeros((len(self.weights[0]),28))]).A
w_ijp = np.bmat([np.zeros((len(self.weights[0]),28)),self.weights[0][:,:-28]]).A
delta_e = 0.25*(w_imj+w_ipj+w_ijm+w_ijp)
self.weights[0] = self.weights[0]+eta*(lmbda/n)*delta_e # regularization by edges
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
Developer ID: math-reader-nn, Project: neural-networks-and-deep-learning, Lines of code: 28, Source: network2.py
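The four w_* matrices above are the first weight layer shifted by one pixel in each direction, with bmat used to zero-pad the vacated columns. A reduced sketch of the single-column shift on a 3 x 4 toy weight matrix instead of the 28 x 28 image layout:

import numpy as np
W = np.arange(12.0).reshape(3, 4)
shift_left = np.bmat([W[:, 1:], np.zeros((3, 1))]).A    # drop first column, zero-pad on the right
shift_right = np.bmat([np.zeros((3, 1)), W[:, :-1]]).A  # zero-pad on the left, drop last column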
Example 9: admira
def admira(r, b, m, n, iter, A, A_star):
if 2*r > min(m,n):
r_prime = min(m,n)
else:
r_prime = 2*r
# initialization
X_hat = np.random.randn(m,n) # step 1
Psi_hatU = np.matrix([])
Psi_hatV = np.matrix([])
for i in range(iter):
Y = A_star(b - A(X_hat))
(U, s, Vt) = svd(Y)
Psi_primeU = U[:, 0:r_prime]
Psi_primeV = Vt.T[:, 0:r_prime]
if i > 0:
Psi_tildeU = np.bmat([Psi_primeU, Psi_hatU])
Psi_tildeV = np.bmat([Psi_primeV, Psi_hatV])
else:
Psi_tildeU = Psi_primeU
Psi_tildeV = Psi_primeV
AP = lambda b: APsiUV(b, A, Psi_tildeU, Psi_tildeV)
APt = lambda s: APsitUV(s, A_star, Psi_tildeU, Psi_tildeV)
ALS = lambda b: APt(AP(b))
(s, res, iter) = cgsolve(ALS, APt(b), 1e-6, 100, False)
X_tilde = Psi_tildeU*np.matrix(np.diag(np.array(s).reshape(-1)))*Psi_tildeV.T
(U, s, Vt) = svd(X_tilde)
Psi_hatU = U[:, 0:r]
Psi_hatV = Vt.T[:, 0:r]
X_hat = Psi_hatU*np.diag(s[0:r])*Psi_hatV.T
return X_hat
Developer ID: ab39826, Project: IndexCoding, Lines of code: 32, Source: greedy_alignment.py
Example 10: tf2ss
def tf2ss(tf):
#assert isinstance(tf, TF), "tf2ss() requires a transfer function"
Ts = tf.Ts
# Use observable canonical form
n = len(tf.denominator) - 1
a0 = tf.denominator[0]
b0 = tf.numerator[0]
num = [numerator/a0 for numerator in tf.numerator][1:] # chop off b0
den = [denominator/a0 for denominator in tf.denominator][1:] # chop off a0
aCol = transpose(mat([-a for a in den]))
bCol = []
for i in range(0, n):
bCol.append(num[i] - den[i]*b0)
if n == 1:
A = aCol
C = 1
else:
        # observable canonical form: A = [aCol | [I; 0]] (assuming numpy-style shape tuples for zeros)
        A = bmat([[aCol, bmat([[eye(n - 1)], [zeros((1, n - 1))]])]])
        C = bmat([[mat([[1.0]]), zeros((1, n - 1))]])
B = transpose(mat(bCol))
D = b0
return StateSpace(A, B, C, D, Ts)
Developer ID: tderensis, Project: marof, Lines of code: 25, Source: tf2ss.py
Example 11: doPhysics
def doPhysics(rbd, force, torque, dtime):
globalcom = rbd.rotmat.dot(rbd.com)+rbd.pos
globalinertiatensor = rbd.rotmat.dot(rbd.inertiatensor).dot(rbd.rotmat.transpose())
globalcom_hat = rm.hat(globalcom)
# si = spatial inertia
Isi00 = rbd.mass * np.eye(3)
Isi01 = rbd.mass * globalcom_hat.transpose()
Isi10 = rbd.mass * globalcom_hat
Isi11 = rbd.mass * globalcom_hat.dot(globalcom_hat.transpose()) + globalinertiatensor
Isi = np.bmat([[Isi00, Isi01], [Isi10, Isi11]])
vw = np.bmat([rbd.linearv, rbd.angularw]).T
pl = Isi*vw
# print np.ravel(pl[0:3])
# print np.ravel(pl[3:6])
ft = np.bmat([force, torque]).T
angularw_hat = rm.hat(rbd.angularw)
linearv_hat = rm.hat(rbd.linearv)
vwhat_mat = np.bmat([[angularw_hat, np.zeros((3,3))], [linearv_hat, angularw_hat]])
dvw = Isi.I*(ft-vwhat_mat*Isi*vw)
# print dvw
rbd.dlinearv = np.ravel(dvw[0:3])
rbd.dangularw = np.ravel(dvw[3:6])
rbd.linearv = rbd.linearv + rbd.dlinearv * dtime
rbd.angularw = rbd.angularw + rbd.dangularw * dtime
return [np.ravel(pl[0:3]), np.ravel(pl[3:6])]
Developer ID: wanweiwei07, Project: pyhiro, Lines of code: 27, Source: rigidbody.py
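The spatial-inertia construction above is a typical use of bmat for 6 x 6 rigid-body matrices built from 3 x 3 blocks. A stripped-down sketch with made-up mass properties; rm.hat is replaced by a local skew-symmetric helper:

import numpy as np

def hat(v):
    # skew-symmetric matrix such that hat(v) @ w == np.cross(v, w)
    return np.array([[0.0, -v[2], v[1]],
                     [v[2], 0.0, -v[0]],
                     [-v[1], v[0], 0.0]])

mass = 2.0
com = np.array([0.1, 0.0, 0.05])
inertia = np.diag([0.01, 0.02, 0.015])
ch = hat(com)
Isi = np.bmat([[mass * np.eye(3), mass * ch.T],
               [mass * ch, mass * ch @ ch.T + inertia]])   # 6 x 6 spatial inertia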
Example 12: add_new_data_point
def add_new_data_point(self, x, y):
"""
Add a new function observation to the GP.
Parameters
----------
x: 2d-array
y: 2d-array
"""
x = np.atleast_2d(x)
y = np.atleast_2d(y)
if self.gp is None:
# Initialize GP
# inference_method = GPy.inference.latent_function_inference.\
# exact_gaussian_inference.ExactGaussianInference()
self.gp = GPy.core.GP(X=x, Y=y, kernel=self.kernel,
# inference_method=inference_method,
likelihood=self.likelihood)
else:
# Add data to GP
# self.gp.set_XY(np.vstack([self.gp.X, x]),
# np.vstack([self.gp.Y, y]))
# Add data row/col to kernel (a, b)
# [ K a ]
# [ a.T b ]
#
# Now K = L.dot(L.T)
# The new Cholesky decomposition is then
# L_new = [ L 0 ]
# [ c.T d ]
a = self.gp.kern.K(self.gp.X, x)
b = self.gp.kern.K(x, x)
b += 1e-8 + self.gp.likelihood.gaussian_variance(
self.gp.Y_metadata)
L = self.gp.posterior.woodbury_chol
c = sp.linalg.solve_triangular(self.gp.posterior.woodbury_chol, a,
lower=True)
d = np.sqrt(b - c.T.dot(c))
L_new = np.asfortranarray(
np.bmat([[L, np.zeros_like(c)],
[c.T, d]]))
K_new = np.bmat([[self.gp.posterior._K, a],
[a.T, b]])
self.gp.X = np.vstack((self.gp.X, x))
self.gp.Y = np.vstack((self.gp.Y, y))
alpha, _ = dpotrs(L_new, self.gp.Y, lower=1)
self.gp.posterior = Posterior(woodbury_chol=L_new,
woodbury_vector=alpha,
K=K_new)
# Increment time step
self.t += 1
Developer ID: marlithjdm, Project: SafeOpt, Lines of code: 59, Source: gp_opt.py
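The L_new/K_new blocks above implement the standard extension of a Cholesky factor when one row and column are appended to the kernel matrix. A self-contained numerical check of that identity on toy data, with no GPy involved:

import numpy as np
rng = np.random.default_rng(0)
X = rng.normal(size=(5, 2))
K = X @ X.T + 1e-6 * np.eye(5)                 # SPD stand-in for the kernel matrix
L = np.linalg.cholesky(K)
x_new = rng.normal(size=(1, 2))
a = X @ x_new.T                                # new cross-covariances, shape (5, 1)
b = x_new @ x_new.T + 1e-6                     # new diagonal entry, shape (1, 1)
c = np.linalg.solve(L, a)
d = np.sqrt(b - c.T @ c)
L_new = np.asarray(np.bmat([[L, np.zeros_like(c)], [c.T, d]]))
K_new = np.asarray(np.bmat([[K, a], [a.T, b]]))
assert np.allclose(L_new @ L_new.T, K_new)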
Example 13: combine2
def combine2(networks):
"""Combines several networks, treating the output layers as independent.
"""
combined = Network([networks[0].sizes[0],sum([net.sizes[1] for net in networks]),sum([net.sizes[-1] for net in networks])],cost = CrossEntropyCost)
combined.weights = [np.bmat([[net.weights[0]] for net in networks]).A , sc.block_diag(*[net.weights[1] for net in networks])]
combined.biases = [np.bmat([[net.biases[0]] for net in networks]).A , np.bmat([[net.biases[1]] for net in networks]).A]
return combined
Developer ID: math-reader-nn, Project: neural-networks-and-deep-learning, Lines of code: 7, Source: network2.py
Example 14: DilateArray
def DilateArray(ar, scale):
if ar.shape[1]==1:
return np.bmat([[ar]]*scale)
else:
background = np.array([[np.zeros(ar.shape)]*scale]*scale, dtype=ar.dtype)
for i in xrange(scale):
background[i,i] = ar
return np.array(np.bmat(background.tolist()), dtype=np.int32)
Developer ID: telamonian, Project: stocuda, Lines of code: 8, Source: lac_operon_confined.py
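For the 2-D branch above, placing scale copies of the array on a block diagonal is equivalent to a Kronecker product with the identity. A quick check of that reading with a toy array and a scale of 3:

import numpy as np
ar = np.array([[1, 2], [3, 4]])
scale = 3
blocks = [[ar if i == j else np.zeros_like(ar) for j in range(scale)]
          for i in range(scale)]
dilated = np.array(np.bmat(blocks))
assert np.array_equal(dilated, np.kron(np.eye(scale, dtype=ar.dtype), ar))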
Example 15: FindEAndD
def FindEAndD(fundamental):
## print fundamental
U,S,Vh = numpy.linalg.svd(fundamental)
V = Vh.T
## print U
## print S
## print V
e0 = V[:,2] / V[2,2]
## print e0
a = -e0[1]
b = e0[0]
c = numpy.mat("0")
d0 = numpy.bmat('a; b; c')
## print d0
## print U
e1 = U[:3,2] / U[2,2]
## print e1
####''' Alternate method in matlab, not using'''
##
## D,V = numpy.linalg.eig(fundamental)
#### print D
#### print V
## e0 = V[:,0]
## a = -e0[1]
## b = e0[0]
## c = numpy.mat("0")
## d0 = numpy.bmat('a; b; c')
## print V
## print e0
## print d0
##
## D,V = numpy.linalg.eig(fundamental.T)
## e1 = V[:,0]
#### print V
## print e1
Fd0 = fundamental * d0
## print Fd0
Fd0[2] = Fd0[2]**2
Fd0 = Fd0 / math.sqrt(Fd0.sum())
## print Fd0
a = -Fd0[1]
b = Fd0[0]
c = numpy.mat("0")
d1 = numpy.bmat('a;b;c')
## print d1
return e0, d0, e1, d1
Developer ID: maxnovak, Project: viewmorphpy, Lines of code: 55, Source: H1H2Calc.py
Example 16: compute_sources_and_receivers
def compute_sources_and_receivers(distance_data, dim):
# number of sources and receivers
M,N = distance_data.shape
# construct D matrix
D = distance_data**2
# reconstruct S and R matrix up to a transformation
U,si,V_h = np.linalg.svd(D)
R_hat = np.mat(U[:,:dim].T)
S_hat = np.mat(np.eye(dim)*si[:dim]) * np.mat(V_h[:dim,:])
hr = np.ones((1,N)) * np.linalg.pinv(S_hat)
I = np.eye(4)
zeros = np.zeros((4,1))
Hr = np.bmat('hr; zeros I')
R_hatHr = (R_hat.T * np.linalg.inv(Hr)).H
hs = np.linalg.pinv(R_hatHr).H * np.ones((M,1))
zeros = np.zeros((1,4))
Hs = np.bmat('I; zeros')
Hs = np.linalg.inv(np.bmat('Hs hs'))
S_prime = Hs*Hr*S_hat
A = np.array(S_prime[4,:])
XYZ = np.array(S_prime[1:4,:])
X = np.array(S_prime[1,:])
Y = np.array(S_prime[2,:])
Z = np.array(S_prime[3,:])
qq = np.vstack( (np.ones((1,N)), 2*XYZ, XYZ**2, 2*X*Y,
2*X*Z, 2*Y*Z) ).T
q = np.linalg.pinv(qq).dot(A.T)
Q = np.vstack( (np.hstack( (np.squeeze(q[:4].T), -0.5) ),
np.hstack([q[1], q[4], q[7], q[8], 0]),
np.hstack([q[2], q[7], q[5], q[9], 0]),
np.hstack([q[3],q[8],q[9],q[6],0]),
np.array([-0.5,0,0,0,0]) ) )
if np.all(np.linalg.eigvals(Q[1:4,1:4]) > 0):
C = np.linalg.cholesky(Q[1:4,1:4]).T
else:
C = np.eye(3)
Hq = np.vstack(( np.array([1,0,0,0,0]),
np.hstack( (np.zeros((3,1)), C, np.zeros((3,1)))),
np.hstack( (-q[0], -2*np.squeeze(q[1:4].T), 1))
))
H = np.mat(Hq) * Hs * Hr
Se = (H*S_hat)[1:4,:]
Re = 0.5 * (np.linalg.inv(H).H*R_hat)[1:4,:]
return Re, Se
Developer ID: chenxiao60, Project: Thesis, Lines of code: 55, Source: distance.py
Example 17: train
def train(self):
if (self.status != 'init'):
print("Please load train data and init W first.")
return self.W
self.status = 'train'
original_X = self.train_X[:, 1:]
K = utility.Kernel.kernel_matrix(self, original_X)
# P = Q, q = p, G = -A, h = -c
P = cvxopt.matrix(np.bmat([[K, -K], [-K, K]]))
q = cvxopt.matrix(np.bmat([self.epsilon - self.train_Y, self.epsilon + self.train_Y]).reshape((-1, 1)))
G = cvxopt.matrix(np.bmat([[-np.eye(2 * self.data_num)], [np.eye(2 * self.data_num)]]))
h = cvxopt.matrix(np.bmat([[np.zeros((2 * self.data_num, 1))], [self.C * np.ones((2 * self.data_num, 1))]]))
# A = cvxopt.matrix(np.append(np.ones(self.data_num), -1 * np.ones(self.data_num)), (1, 2*self.data_num))
# b = cvxopt.matrix(0.0)
cvxopt.solvers.options['show_progress'] = False
solution = cvxopt.solvers.qp(P, q, G, h)
# Lagrange multipliers
alpha = np.array(solution['x']).reshape((2, -1))
self.alpha_upper = alpha[0]
self.alpha_lower = alpha[1]
self.beta = self.alpha_upper - self.alpha_lower
sv = abs(self.beta) > 1e-5
self.sv_index = np.arange(len(self.beta))[sv]
self.sv_beta = self.beta[sv]
self.sv_X = original_X[sv]
self.sv_Y = self.train_Y[sv]
free_sv_upper = np.logical_and(self.alpha_upper > 1e-5, self.alpha_upper < self.C)
self.free_sv_index_upper = np.arange(len(self.alpha_upper))[free_sv_upper]
self.free_sv_alpha_upper = self.alpha_upper[free_sv_upper]
self.free_sv_X_upper = original_X[free_sv_upper]
self.free_sv_Y_upper = self.train_Y[free_sv_upper]
free_sv_lower = np.logical_and(self.alpha_lower > 1e-5, self.alpha_lower < self.C)
self.free_sv_index_lower = np.arange(len(self.alpha_lower))[free_sv_lower]
self.free_sv_alpha_lower = self.alpha_lower[free_sv_lower]
self.free_sv_X_lower = original_X[free_sv_lower]
self.free_sv_Y_lower = self.train_Y[free_sv_lower]
short_b_upper = self.free_sv_Y_upper[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_upper[0], self.sv_X)) - self.epsilon
short_b_lower = self.free_sv_Y_lower[0] - np.sum(self.sv_beta * utility.Kernel.kernel_matrix_xX(self, self.free_sv_X_lower[0], self.sv_X)) + self.epsilon
self.sv_avg_b = (short_b_upper + short_b_lower) / 2
return self.W
Developer ID: fukuball, Project: fuku-ml, Lines of code: 53, Source: SupportVectorRegression.py
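The P matrix above is the 2n x 2n Hessian of the epsilon-SVR dual, assembled from the kernel matrix K, and is symmetric positive semi-definite by construction. A small standalone check with a toy linear kernel and no cvxopt:

import numpy as np
X = np.random.randn(4, 2)
K = X @ X.T                                     # toy linear kernel matrix
P = np.asarray(np.bmat([[K, -K], [-K, K]]))     # (2n x 2n) dual Hessian
assert np.allclose(P, P.T)
assert np.all(np.linalg.eigvalsh(P) >= -1e-9)   # positive semi-definite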
Example 18: construct_A_matrix
def construct_A_matrix(n_gates, filt):
"""
Construct a row-augmented A matrix. Equation 5 in Giangrande et al, 2012.
A is a block matrix given by:
.. math::
\\bf{A} = \\begin{bmatrix} \\bf{I} & \\bf{-I} \\\\\\\\
\\bf{-I} & \\bf{I} \\\\\\\\ \\bf{Z}
& \\bf{M} \\end{bmatrix}
where
:math:`\\bf{I}` is the identity matrix
:math:`\\bf{Z}` is a matrix of zeros
:math:`\\bf{M}` contains our differential constraints.
Each block is of shape n_gates by n_gates making
shape(:math:`\\bf{A}`) = (3 * n, 2 * n).
Note that :math:`\\bf{M}` contains some side padding to deal with edge
issues
Parameters
----------
n_gates : int
Number of gates, determines size of identity matrix
filt : array
Input filter.
Returns
-------
a : matrix
Row-augmented A matrix.
"""
Identity = np.eye(n_gates)
filter_length = len(filt)
M_matrix_middle = np.diag(np.ones(n_gates - filter_length + 1), k=0) * 0.0
posn = np.linspace(-1.0 * (filter_length - 1) / 2, (filter_length - 1)/2,
filter_length)
for diag in range(filter_length):
M_matrix_middle = M_matrix_middle + np.diag(np.ones(
int(n_gates - filter_length + 1 - np.abs(posn[diag]))),
k=int(posn[diag])) * filt[diag]
side_pad = (filter_length - 1) // 2
M_matrix = np.bmat(
[np.zeros([n_gates-filter_length + 1, side_pad], dtype=float),
M_matrix_middle, np.zeros(
[n_gates-filter_length+1, side_pad], dtype=float)])
Z_matrix = np.zeros([n_gates - filter_length + 1, n_gates])
return np.bmat([[Identity, -1.0 * Identity], [Identity, Identity],
[Z_matrix, M_matrix]])
Developer ID: deeplycloudy, Project: pyart, Lines of code: 53, Source: phase_proc.py
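A quick way to sanity-check the block layout returned above is to build the matrix for a small gate count and filter and inspect its shape. This assumes construct_A_matrix as defined above is in scope; the sizes are arbitrary:

import numpy as np
n_gates = 10
filt = np.array([0.25, 0.5, 0.25])
a = construct_A_matrix(n_gates, filt)
# two n x n identity block rows plus (n - len(filt) + 1) constraint rows
assert a.shape == (2 * n_gates + n_gates - len(filt) + 1, 2 * n_gates)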
Example 19: test_falker
def test_falker(self):
"""Test matrices giving some Nan generalized eigen values."""
M = diag(array(([1,0,3])))
K = array(([2,-1,-1],[-1,2,-1],[-1,-1,2]))
D = array(([1,-1,0],[-1,1,0],[0,0,0]))
Z = zeros((3,3))
I = identity(3)
A = bmat([[I,Z],[Z,-K]])
B = bmat([[Z,I],[M,D]])
olderr = np.seterr(all='ignore')
try:
self._check_gen_eig(A, B)
finally:
np.seterr(**olderr)
Developer ID: BeeRad-Johnson, Project: scipy-refactor, Lines of code: 15, Source: test_decomp.py
Example 20: ratNormScroll
def ratNormScroll(degList,dataType):
# return the matrix for the RNS w/ degrees in degList
blocks = []
numBlocks = len(degList)
maxVal = max(degList[:-1]+[degList[-1]-1]) + 1
for i in xrange(len(degList)):
thisBlock = np.zeros((numBlocks,degList[i]+1),dtype=dataType)
# ith row = ones
for j in xrange(degList[i]+1):
thisBlock[i][j]=1
thisBlock[numBlocks-1][j] = j
blocks.append(thisBlock)
toReturn = np.bmat(blocks)
lastRow = maxVal - np.sum(toReturn,axis=0)
return np.bmat([[toReturn],[lastRow]])
Developer ID: nbliss, Project: Likelihood-MRC, Lines of code: 15, Source: iterativeProportionalScaling.py
Note: the numpy.bmat examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other source-code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult the corresponding project's License before redistributing or reusing the code; do not reproduce without permission.