This article collects typical usage examples of the scipy.linalg.norm function in Python. If you are wondering exactly how norm is used, how to call it, or what real-world calls look like, the hand-picked examples below should help.
A total of 20 code examples of the norm function are shown below, sorted by popularity by default.
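Before the project snippets, here is a minimal, self-contained sketch (hypothetical inputs) of what scipy.linalg.norm itself computes: the vector 2-norm by default, other vector norms via the ord argument, and the Frobenius norm for matrices.

import numpy as np
from scipy import linalg

v = np.array([3.0, 4.0])
print(linalg.norm(v))               # 5.0  (Euclidean / 2-norm, the default)
print(linalg.norm(v, ord=1))        # 7.0  (sum of absolute values)
print(linalg.norm(v, ord=np.inf))   # 4.0  (largest absolute value)

A = np.array([[1.0, 2.0],
              [3.0, 4.0]])
print(linalg.norm(A, ord='fro'))    # Frobenius norm, sqrt(1+4+9+16) ~= 5.477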
Example 1: poseLinearCalibration
def poseLinearCalibration(objectPoints, imagePoints, cameraMatrix, distCoeffs, model, retMatrix=False):
    '''
    takes calibration points and estimates the camera pose linearly
    '''
    # map coordinates with z=0
    xm, ym = objectPoints.T[:2]
    # undistort ccd points, x, y homogeneous undistorted
    xp, yp = cl.ccd2homUndistorted(imagePoints, cameraMatrix, distCoeffs, model)
    A = dataMatrixPoseCalib(xm, ym, xp, yp)
    _, s, v = ln.svd(A)
    m = v[-1]  # select the right singular vector of the smallest singular value
    # normalize and ensure that points are in front of the camera
    m /= np.sqrt(ln.norm(m[:3]) * ln.norm(m[3:6])) * np.sign(m[-1])
    # rearrange as rVec, tVec
    R = np.array([m[:3], m[3:6], np.cross(m[:3], m[3:6])]).T
    rVec = cv2.Rodrigues(R)[0]
    tVec = m[6:]
    if retMatrix:
        return rVec, tVec, A
    return rVec, tVec
Developer ID: sebalander, project: sebaPhD, lines of code: 26, source file: calibExternNov2016.py
Example 2: pcosine
def pcosine(u, v):
    """Computes the Cosine distance (positive space) between 1-D arrays.

    The Cosine distance (positive space) between `u` and `v` is defined as

    .. math::

        d(u, v) = 1 - abs\\left(\\frac{u \\cdot v}{||u||_2 ||v||_2}\\right)

    where :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    Parameters
    ----------
    u : array
        Input array.
    v : array
        Input array.

    Returns
    -------
    cosine : float
        Cosine distance between `u` and `v`.

    """
    # validate vectors like scipy does
    u = ssd._validate_vector(u)
    v = ssd._validate_vector(v)

    dist = 1. - np.abs(np.dot(u, v) / (linalg.norm(u) * linalg.norm(v)))

    return dist
Developer ID: HugoDLopes, project: BioSPPy, lines of code: 32, source file: metrics.py
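A self-contained sketch of the same formula with hypothetical inputs (calling scipy.linalg.norm directly rather than the helper above), illustrating why this is a "positive space" distance: anti-parallel vectors get distance 0 instead of 2.

import numpy as np
from scipy import linalg

u = np.array([1.0, 2.0, 3.0])
v = np.array([-1.0, -2.0, -3.0])   # exactly anti-parallel to u
dist = 1. - np.abs(np.dot(u, v) / (linalg.norm(u) * linalg.norm(v)))
print(dist)  # ~0.0, because the absolute value folds the sign away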
Example 3: fgmres
def fgmres(self, rhs, tol=1e-6, restrt=None, maxiter=None, callback=None):
    if maxiter == None:
        maxiter = len(rhs)
    if restrt == None:
        restrt = 2*maxiter
    # implemented as in [Saad, 1993]
    # start
    x = zeros(len(rhs))
    H = zeros((restrt+1, restrt))
    V = zeros((len(rhs), restrt))
    Z = zeros((len(rhs), restrt))
    # Arnoldi process (with modified Gram-Schmidt)
    res = 1.
    j = 0
    r = rhs - self.point.matvec(x)
    beta = norm(r)
    V[:, 0] = r/beta
    while j < maxiter and res > tol:
        Z[:, j] = self.point.psolve(V[:, j])
        w = self.point.matvec(Z[:, j])
        for i in range(j+1):
            H[i, j] = dot(w, V[:, i])
            w = w - H[i, j]*V[:, i]
        H[j+1, j] = norm(w)
        V[:, j+1] = w/H[j+1, j]
        e = zeros(j+2)
        e[0] = 1.
        y, res, rank, sing_val = lstsq(H[:j+2, :j+1], beta*e)
        j += 1
        print "# GMRES| iteration :", j, "res: ", res/beta
        self.resid = r_[self.resid, res/beta]
    Zy = dot(Z[:, :j], y)
    x = x + Zy
    info = 1
    return (x, info)
Developer ID: pvnuffel, project: fokkerplanck, lines of code: 35, source file: GMRESLinearSolver.py
Example 4: test_lasso_lars_vs_lasso_cd_early_stopping
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results when early stopping is used.
    # (test : before, in the middle, and in the last part of the path)
    alphas_min = [10, 0.9, 1e-4]
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)

    # same test, with normalization
    for alpha_min in alphas_min:
        alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
                                                       alpha_min=alpha_min)
        lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
                                      tol=1e-8)
        lasso_cd.alpha = alphas[-1]
        lasso_cd.fit(X, y)
        error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
        assert_less(error, 0.01)
Developer ID: MartinThoma, project: scikit-learn, lines of code: 25, source file: test_least_angle.py
Example 5: getVelocity
def getVelocity(self, p, V, E, last=False):
    """
    This function calculates the velocity for the robot with RRT.
    The inputs are (given in order):
    p    = the current x-y position of the robot
    V    = points of the tree (2 x No. of vertices)
    E    = edges of the tree (2 x No. of nodes on the tree)
    last = True,  if the current region is the last region
         = False, if the current region is NOT the last region
    """
    pose = mat(p).T
    # dis_cur = distance between current position and the next point
    dis_cur = vstack((V[1, E[1, self.E_current_column]], V[2, E[1, self.E_current_column]])) - pose
    heading = E[1, self.E_current_column]  # index of the current heading point on the tree
    if norm(dis_cur) < 1.5*self.radius:    # go to next point
        if not heading == shape(V)[1]-1:
            self.E_current_column = self.E_current_column + 1
            dis_cur = vstack((V[1, E[1, self.E_current_column]], V[2, E[1, self.E_current_column]])) - pose
        #else:
        #    dis_cur = vstack((V[1, E[1, self.E_current_column]], V[2, E[1, self.E_current_column]])) - vstack((V[1, E[0, self.E_current_column]], V[2, E[0, self.E_current_column]]))
    Vel = zeros([2, 1])
    Vel[0:2, 0] = dis_cur/norm(dis_cur)*0.5  # TUNE THE SPEED LATER
    return Vel
Developer ID: RobertVillalba, project: LTLMoP, lines of code: 27, source file: RRTController.py
Example 6: get_neuromag_transform
def get_neuromag_transform(lpa, rpa, nasion):
    """Creates a transformation matrix from RAS to Neuromag-like space

    Resets the origin to mid-distance of peri-auricular points with nasion
    passing through y-axis.
    (mne manual, pg. 97)

    Parameters
    ----------
    lpa : numpy.array, shape = (1, 3)
        Left peri-auricular point coordinate.
    rpa : numpy.array, shape = (1, 3)
        Right peri-auricular point coordinate.
    nasion : numpy.array, shape = (1, 3)
        Nasion point coordinate.

    Returns
    -------
    trans : numpy.array, shape = (3, 3)
        Transformation matrix to Neuromag-like space.
    """
    origin = (lpa + rpa) / 2
    nasion = nasion - origin
    lpa = lpa - origin
    rpa = rpa - origin
    axes = np.empty((3, 3))
    axes[1] = nasion / linalg.norm(nasion)
    axes[2] = np.cross(axes[1], lpa - rpa)
    axes[2] /= linalg.norm(axes[2])
    axes[0] = np.cross(axes[1], axes[2])
    trans = linalg.inv(axes)
    return trans
Developer ID: ashwinashok9111993, project: mne-python, lines of code: 33, source file: coreg.py
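A hedged usage sketch with hypothetical fiducial coordinates (it assumes the get_neuromag_transform function above is already in scope): the linalg.norm calls make each constructed axis unit length, so the resulting transform has orthonormal rows.

import numpy as np
from scipy import linalg

lpa = np.array([-0.08, 0.0, 0.0])     # hypothetical left peri-auricular point
rpa = np.array([0.08, 0.0, 0.0])      # hypothetical right peri-auricular point
nasion = np.array([0.0, 0.10, 0.0])   # hypothetical nasion
trans = get_neuromag_transform(lpa, rpa, nasion)
print(trans)
# every row has unit length because each axis was divided by its norm
print([float(linalg.norm(row)) for row in trans])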
Example 7: test_rank_deficient_design
def test_rank_deficient_design():
    # consistency test that checks that LARS Lasso is handling rank
    # deficient input data (with n_features < rank) in the same way
    # as coordinate descent Lasso
    y = [5, 0, 5]
    for X in ([[5, 0],
               [0, 5],
               [10, 10]],
              [[10, 10, 0],
               [1e-32, 0, 0],
               [0, 0, 1]],
              ):
        # To be able to use the coefs to compute the objective function,
        # we need to turn off normalization
        lars = linear_model.LassoLars(.1, normalize=False)
        coef_lars_ = lars.fit(X, y).coef_
        obj_lars = (1. / (2. * 3.)
                    * linalg.norm(y - np.dot(X, coef_lars_)) ** 2
                    + .1 * linalg.norm(coef_lars_, 1))
        coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
        coef_cd_ = coord_descent.fit(X, y).coef_
        obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
                  + .1 * linalg.norm(coef_cd_, 1))
        assert_less(obj_lars, obj_cd * (1. + 1e-8))
Developer ID: MartinThoma, project: scikit-learn, lines of code: 25, source file: test_least_angle.py
Example 8: compute_innovationFactor
def compute_innovationFactor(y, H, m_minus):
    innov = y - np.dot(H, m_minus)
    if la.norm(innov) > (1.0/3.0):
        innov_s = sigmaUnitSwitch(y) - np.dot(H, m_minus)
        if la.norm(innov_s) < la.norm(innov):
            innov = innov_s
    return innov
Developer ID: atharris, project: DINO_CREx, lines of code: 7, source file: attKalmanAlgs.py
Example 9: batched_decode
def batched_decode(df):
    s = time.time()
    data = []
    r_data = []
    df = df.reindex(numpy.random.permutation(df.index))
    for start_i in range(0, len(df), batch_size):
        if verbose:
            print(start_i)
        batched_df = df[start_i:start_i+batch_size]
        text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks, labels = \
            prepare(batched_df, model['utable'], worddict, model['uoptions'], use_eos)
        uff = model['f_w2v'](text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks)
        r_uff = model['f_w2v'](hypothesis_embeddings, hypothesis_masks, text_embeddings, text_masks)
        text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks, labels = \
            prepare(batched_df, model['btable'], worddict, model['boptions'], use_eos)
        bff = model['f_w2v2'](text_embeddings, text_masks, hypothesis_embeddings, hypothesis_masks)
        r_bff = model['f_w2v2'](hypothesis_embeddings, hypothesis_masks, text_embeddings, text_masks)
        if use_norm:
            for j in range(len(uff)):
                uff[j] /= norm(uff[j])
                bff[j] /= norm(bff[j])
                r_uff[j] /= norm(r_uff[j])
                r_bff[j] /= norm(r_bff[j])
        ff = numpy.concatenate([uff, bff], axis=1)
        r_ff = numpy.concatenate([r_uff, r_bff], axis=1)
        data.append(ff)
        r_data.append(r_ff)
    data = numpy.concatenate(data)
    r_data = numpy.concatenate(r_data)
    print('used {0} seconds'.format(time.time() - s))
    return data, r_data, df.label.values
Developer ID: junfenglx, project: skip-thoughts, lines of code: 32, source file: snli_on_skipthoughts.py
Example 10: _admm_ips
def _admm_ips(S, support, rho=1., tau_inc=2., tau_decr=2., mu=None, tol=1e-6,
              max_iter=100, Xinit=None):
    """
    returns:
    -------
    Z : numpy.ndarray
        the split variable with correct support
    r_ : list of floats
        normalised norm of difference between split variables
    s_ : list of floats
        convergence of the variable Z in normalised norm
        r_.append(linalg.norm(X - Z))
        s_.append(np.inf)

    normalisation is based on division by the number of elements
    """
    p = S.shape[0]
    dof = np.count_nonzero(support)
    Z = (1 + rho) * np.identity(p)
    U = np.zeros((p, p))
    if Xinit is None:
        X = np.identity(p)
    else:
        X = Xinit
    r_ = list()
    s_ = list()
    f_vals_ = list()
    rho_ = [rho]
    r_.append(linalg.norm(X - Z) / dof)
    s_.append(np.inf)
    f_vals_.append(_pen_neg_log_likelihood(X, S))
    iter_count = 0
    while True:
        try:
            Z_old = Z.copy()
            # closed form optimization for X
            eigvals, eigvecs = linalg.eigh(rho * (Z - U) - S)
            eigvals = (eigvals + (eigvals ** 2 + rho) ** (1. / 2)) / rho
            X = eigvecs.dot(np.diag(eigvals).dot(eigvecs.T))
            # proximal operator for Z: projection on support
            Z = support * (X + U)
            # update scaled dual variable
            U = U + X - Z
            r_.append(linalg.norm(X - Z) / (p ** 2))
            s_.append(linalg.norm(Z - Z_old) / dof)
            func_val = -np.linalg.slogdet(support * X)[1] + \
                np.sum(S * X * support)
            f_vals_.append(func_val)
            if mu is not None:
                rho = _update_rho(U, rho, r_[-1], s_[-1],
                                  mu, tau_inc, tau_decr)
            rho_.append(rho)
            iter_count += 1
            if (_check_convergence(X, Z, Z_old, U, rho, tol_abs=tol) or
                    iter_count > max_iter):
                raise StopIteration
        except StopIteration:
            return X, Z, r_, s_, f_vals_, rho_
Developer ID: rphlypo, project: connectivity, lines of code: 60, source file: covariance_learn.py
Example 11: _check_dipoles
def _check_dipoles(dipoles, fwd, stc, evoked, residual=None):
    src = fwd['src']
    pos1 = fwd['source_rr'][np.where(src[0]['vertno'] ==
                                     stc.vertices[0])]
    pos2 = fwd['source_rr'][np.where(src[1]['vertno'] ==
                                     stc.vertices[1])[0] +
                            len(src[0]['vertno'])]

    # Check the position of the two dipoles
    assert_true(dipoles[0].pos[0] in np.array([pos1, pos2]))
    assert_true(dipoles[1].pos[0] in np.array([pos1, pos2]))

    ori1 = fwd['source_nn'][np.where(src[0]['vertno'] ==
                                     stc.vertices[0])[0]][0]
    ori2 = fwd['source_nn'][np.where(src[1]['vertno'] ==
                                     stc.vertices[1])[0] +
                            len(src[0]['vertno'])][0]

    # Check the orientation of the dipoles
    assert_true(np.max(np.abs(np.dot(dipoles[0].ori[0],
                                     np.array([ori1, ori2]).T))) > 0.99)
    assert_true(np.max(np.abs(np.dot(dipoles[1].ori[0],
                                     np.array([ori1, ori2]).T))) > 0.99)

    if residual is not None:
        picks_grad = mne.pick_types(residual.info, meg='grad')
        picks_mag = mne.pick_types(residual.info, meg='mag')
        rel_tol = 0.02
        for picks in [picks_grad, picks_mag]:
            assert_true(linalg.norm(residual.data[picks], ord='fro') <
                        rel_tol *
                        linalg.norm(evoked.data[picks], ord='fro'))
Developer ID: olafhauk, project: mne-python, lines of code: 33, source file: test_rap_music.py
Example 12: PicardTolerance
def PicardTolerance(x, u_k, b_k, FSpaces, dim, NormType, iter):
    X = IO.vecToArray(x)
    uu = X[0:dim[0]]
    bb = X[dim[0]+dim[1]:dim[0]+dim[1]+dim[2]]

    u = Function(FSpaces[0])
    u.vector()[:] = u.vector()[:] + uu
    diffu = u.vector().array() - u_k.vector().array()

    b = Function(FSpaces[2])
    b.vector()[:] = b.vector()[:] + bb
    diffb = b.vector().array() - b_k.vector().array()

    if (NormType == '2'):
        epsu = splin.norm(diffu)/sqrt(dim[0])
        epsb = splin.norm(diffb)/sqrt(dim[0])
    elif (NormType == 'inf'):
        epsu = splin.norm(diffu, ord=np.Inf)
        epsb = splin.norm(diffb, ord=np.Inf)
    else:
        print "NormType must be 2 or inf"
        quit()

    print 'iter=%d: u-norm=%g b-norm=%g ' % (iter, epsu, epsb)
    u_k.assign(u)
    b_k.assign(b)
    return u_k, b_k, epsu, epsb
Developer ID: wathen, project: PhD, lines of code: 28, source file: IterOperations.py
Example 13: hess
def hess(A):
    """Computes the upper Hessenberg form of A using Householder reflectors.
    input:  A, mxn array
    output: Q, orthogonal mxm array
            H, upper Hessenberg
            s.t. Q.dot(H).dot(Q.T) = A
    """
    # similar approach as the householder function.
    # again, not perfectly optimized, but good enough.
    Q = np.eye(A.shape[0]).T
    H = np.array(A, order="C")
    # initialize m and n for convenience
    m, n = H.shape
    # avoid reallocating v in the for loop
    v = np.empty(A.shape[1]-1)
    for k in xrange(n-2):
        # get a slice of the temporary array
        vk = v[k:]
        # fill it with corresponding values from R
        vk[:] = H[k+1:, k]
        # add in the term that makes the reflection work
        vk[0] += copysign(la.norm(vk), vk[0])
        # normalize it so it's an orthogonal transform
        vk /= la.norm(vk)
        # apply projection to H on the left
        H[k+1:, k:] -= 2 * np.outer(vk, vk.dot(H[k+1:, k:]))
        # apply projection to H on the right
        H[:, k+1:] -= 2 * np.outer(H[:, k+1:].dot(vk), vk)
        # Apply it to Q
        Q[k+1:] -= 2 * np.outer(vk, vk.dot(Q[k+1:]))
    return Q, H
Developer ID: byuimpactrevisions, project: numerical_computing, lines of code: 31, source file: ct.py
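A self-contained sketch (hypothetical vector) of the single Householder step inside the loop above, showing how the two la.norm calls build a unit reflector that zeros everything below the first entry while preserving the vector's length.

import numpy as np
from math import copysign
from scipy import linalg as la

x = np.array([3.0, 4.0, 0.0, 5.0])
v = x.copy()
v[0] += copysign(la.norm(v), v[0])   # shift so the reflection maps x onto +/-||x|| * e1
v /= la.norm(v)                      # normalize: the reflector must be a unit vector
reflected = x - 2 * v * v.dot(x)
print(reflected)                                  # ~[-7.07, 0, 0, 0]
print(np.isclose(abs(reflected[0]), la.norm(x)))  # True: the 2-norm is preserved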
Example 14: test_randomized_svd_power_iteration_normalizer
def test_randomized_svd_power_iteration_normalizer():
    # randomized_svd with power_iteration_normalized='none' diverges for
    # large number of power iterations on this dataset
    rng = np.random.RandomState(42)
    X = make_low_rank_matrix(100, 500, effective_rank=50, random_state=rng)
    X += 3 * rng.randint(0, 2, size=X.shape)
    n_components = 50

    # Check that it diverges with many (non-normalized) power iterations
    U, s, V = randomized_svd(X, n_components, n_iter=2,
                             power_iteration_normalizer='none')
    A = X - U.dot(np.diag(s).dot(V))
    error_2 = linalg.norm(A, ord='fro')
    U, s, V = randomized_svd(X, n_components, n_iter=20,
                             power_iteration_normalizer='none')
    A = X - U.dot(np.diag(s).dot(V))
    error_20 = linalg.norm(A, ord='fro')
    assert_greater(np.abs(error_2 - error_20), 100)

    for normalizer in ['LU', 'QR', 'auto']:
        U, s, V = randomized_svd(X, n_components, n_iter=2,
                                 power_iteration_normalizer=normalizer,
                                 random_state=0)
        A = X - U.dot(np.diag(s).dot(V))
        error_2 = linalg.norm(A, ord='fro')

        for i in [5, 10, 50]:
            U, s, V = randomized_svd(X, n_components, n_iter=i,
                                     power_iteration_normalizer=normalizer,
                                     random_state=0)
            A = X - U.dot(np.diag(s).dot(V))
            error = linalg.norm(A, ord='fro')
            assert_greater(15, np.abs(error_2 - error))
Developer ID: BasilBeirouti, project: scikit-learn, lines of code: 33, source file: test_extmath.py
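A self-contained sketch (random matrix, plain scipy.linalg.svd rather than scikit-learn's randomized_svd) of the Frobenius-norm reconstruction error used above; for a rank-k truncation it equals the root-sum-square of the discarded singular values.

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(60, 40)
U, s, Vt = linalg.svd(X, full_matrices=False)
k = 10
X_k = U[:, :k].dot(np.diag(s[:k])).dot(Vt[:k])   # rank-k approximation
print(linalg.norm(X - X_k, ord='fro'))           # reconstruction error
print(np.sqrt(np.sum(s[k:] ** 2)))               # same value (Eckart-Young)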
Example 15: calculate_examples
def calculate_examples(mean, sigma, weights, c=2):
    from scipy.linalg import norm
    mean_p = mean + c * (weights/norm(weights)) * norm(sigma)
    mean_m = mean - c * (weights/norm(weights)) * norm(sigma)
    return np.array([mean_p, mean_m])
Developer ID: robbisg, project: mvpa_itab_wu, lines of code: 7, source file: cross_decoding.py
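A hedged usage sketch with hypothetical values (it assumes the calculate_examples function above is in scope): norm(weights) scales the weight vector to unit length, so the two synthetic examples sit symmetrically around the mean, 2*c*norm(sigma) apart along the weight direction.

import numpy as np

mean = np.array([0.0, 0.0])
sigma = np.array([1.0, 1.0])
weights = np.array([3.0, 4.0])           # hypothetical weight vector
pair = calculate_examples(mean, sigma, weights, c=2)
print(pair)                              # approx [[ 1.697  2.263] [-1.697 -2.263]]
print(np.allclose(pair[0], -pair[1]))    # True: symmetric around the zero mean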
Example 16: test_cov_estimation_on_raw_segment
def test_cov_estimation_on_raw_segment():
    """Test estimation from raw on continuous recordings (typically empty room)
    """
    tempdir = _TempDir()
    raw = Raw(raw_fname, preload=False)
    cov = compute_raw_data_covariance(raw)
    cov_mne = read_cov(erm_cov_fname)
    assert_true(cov_mne.ch_names == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data, ord='fro')
                / linalg.norm(cov.data, ord='fro') < 1e-4)

    # test IO when computation done in Python
    cov.save(op.join(tempdir, 'test-cov.fif'))  # test saving
    cov_read = read_cov(op.join(tempdir, 'test-cov.fif'))
    assert_true(cov_read.ch_names == cov.ch_names)
    assert_true(cov_read.nfree == cov.nfree)
    assert_array_almost_equal(cov.data, cov_read.data)

    # test with a subset of channels
    picks = pick_channels(raw.ch_names, include=raw.ch_names[:5])
    cov = compute_raw_data_covariance(raw, picks=picks)
    assert_true(cov_mne.ch_names[:5] == cov.ch_names)
    assert_true(linalg.norm(cov.data - cov_mne.data[picks][:, picks],
                            ord='fro') / linalg.norm(cov.data, ord='fro') < 1e-4)

    # make sure we get a warning with too short a segment
    raw_2 = raw.crop(0, 1)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        cov = compute_raw_data_covariance(raw_2)
        assert_true(len(w) == 1)
Developer ID: LizetteH, project: mne-python, lines of code: 30, source file: test_cov.py
Example 17: _assert_cov
def _assert_cov(cov, cov_desired, tol=0.005, nfree=True):
    assert_equal(cov.ch_names, cov_desired.ch_names)
    err = (linalg.norm(cov.data - cov_desired.data, ord='fro') /
           linalg.norm(cov.data, ord='fro'))
    assert_true(err < tol, msg='%s >= %s' % (err, tol))
    if nfree:
        assert_equal(cov.nfree, cov_desired.nfree)
Developer ID: jdammers, project: mne-python, lines of code: 7, source file: test_cov.py
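A self-contained sketch with random matrices of the relative Frobenius-norm error used in the two covariance tests above: the norm of the difference divided by the norm of the reference gives a scale-free measure of how close two matrices are.

import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
A = rng.randn(10, 10)
B = A + 1e-3 * rng.randn(10, 10)    # a slightly perturbed copy of A
err = linalg.norm(A - B, ord='fro') / linalg.norm(A, ord='fro')
print(err)           # ~1e-3
print(err < 0.005)   # True, so B would pass the tolerance used above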
Example 18: getRank
def getRank(M, beta=0.8, eps=1e-6):
    ''' Iterate until the rank vector converges.
    Args: M: rank matrix; beta: non-teleport weight; eps: epsilon
    Returns: Each node's Rank
    '''
    # Preparation
    n1, n2 = M.shape
    r1 = 1.0 / n2 * np.ones((n2, 1))
    r0 = np.ones((n2, 1))
    n = 0
    print '|Loop| epsilon|time(s)|'
    print '|----|---------|-------|'
    # Loop
    while norm(r1 - r0, 1) > eps:
        t0 = time.clock()
        n += 1
        r0 = r1
        r1 = beta * M.dot(r0)
        r1 = r1 + (1 - beta) / n2
        sum_r1 = r1.sum()
        r1 = r1 + (1 - sum_r1) / n2
        t1 = time.clock() - t0
        print '|%4d|%6.3e|%7.3f|' % (n, norm(r1 - r0, 1), t1)
    return r1
Developer ID: CenturySee, project: Git4Century, lines of code: 26, source file: pageRank1.py
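A self-contained paraphrase of the same power-iteration loop on a hypothetical 3-node, column-stochastic link matrix, using scipy.linalg.norm with ord=1 as the convergence test (the Python 2 print statements and timing are dropped).

import numpy as np
from scipy.linalg import norm

M = np.array([[0.0, 0.5, 1.0],
              [0.5, 0.0, 0.0],
              [0.5, 0.5, 0.0]])      # columns sum to 1
beta, eps, n = 0.8, 1e-6, M.shape[0]
r = np.ones((n, 1)) / n
while True:
    r_new = beta * M.dot(r) + (1 - beta) / n
    if norm(r_new - r, 1) <= eps:    # L1 norm of the change between iterations
        break
    r = r_new
print(r_new.ravel())                 # PageRank-style scores, summing to ~1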
Example 19: test_lasso_lars_vs_lasso_cd
def test_lasso_lars_vs_lasso_cd(verbose=False):
    # Test that LassoLars and Lasso using coordinate descent give the
    # same results.
    X = 3 * diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)

    # similar test, with the classifiers
    for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
        clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
        clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
                                  normalize=False).fit(X, y)
        err = linalg.norm(clf1.coef_ - clf2.coef_)
        assert_less(err, 1e-3)

    # same test, with normalized data
    X = diabetes.data
    alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
    lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
                                  tol=1e-8)
    for c, a in zip(lasso_path.T, alphas):
        if a == 0:
            continue
        lasso_cd.alpha = a
        lasso_cd.fit(X, y)
        error = linalg.norm(c - lasso_cd.coef_)
        assert_less(error, 0.01)
Developer ID: MartinThoma, project: scikit-learn, lines of code: 35, source file: test_least_angle.py
Example 20: initial_cond
def initial_cond(coords, mass, dipole, temp, F):
    cm_coords = coords - tile(center_of_mass(coords, mass), (coords.shape[0], 1))

    print "computing inertia tensor and principal axes of inertia"
    mol_I, mol_Ix = eig(inertia_tensor(cm_coords, mass))
    mol_I.sort()
    print "principal moments of inertia are: ", mol_I

    # compute the ratio of the dipole energy to the
    # rotational energy
    print "x = (mu*F / kB*T_R) = ", norm(dipole) * F / kB_au / temp

    # random initial angular velocity vector
    # magnitude set so that 0.5 * I * w**2.0 = kT
    w_mag = sqrt(2.0 * kB_au * temp / mol_I.mean())
    w0 = 2.0 * rand(3) - 1.0
    w0 = w0 / norm(w0) * w_mag

    # random initial orientation / random unit quaternion
    q0 = 2.0 * rand(4) - 1.0
    q0 = q0 / norm(q0)

    return q0, w0
Developer ID: jbowlan, project: asym_rotor, lines of code: 26, source file: render.py
Note: The scipy.linalg.norm examples in this article were collected by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before redistributing or reusing the code; do not reproduce without permission.