This article collects and summarizes typical usage examples of the Python function sklearn.utils.extmath.safe_sparse_dot. If you have been wondering what exactly safe_sparse_dot does, how to use it, or want to see it in context, the curated examples below should help.
Twenty code examples of safe_sparse_dot are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
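Before the examples, a minimal sketch of what safe_sparse_dot does may help: it performs a dot product that works uniformly for dense NumPy arrays and SciPy sparse matrices, and its dense_output flag requests a dense ndarray result even when both operands are sparse. The shapes and data below are illustrative only.

import numpy as np
from scipy import sparse
from sklearn.utils.extmath import safe_sparse_dot

rng = np.random.RandomState(0)
A_dense = rng.rand(5, 3)
A_sparse = sparse.csr_matrix(A_dense)  # same data, sparse format
w = rng.rand(3)

# Works the same whether the left operand is dense or sparse
print(safe_sparse_dot(A_dense, w))   # plain ndarray dot product
print(safe_sparse_dot(A_sparse, w))  # sparse @ dense -> dense ndarray

# For sparse @ sparse, dense_output=True densifies the result
B = sparse.csr_matrix(rng.rand(3, 4))
out = safe_sparse_dot(A_sparse, B, dense_output=True)
print(out.shape)  # (5, 4)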
Example 1: compute
def compute(function, x, A, b, args, coordinate=None):
    L2 = args["L2"]
    if function == "loss":
        # Compute the squared error.
        reg = 0.5 * L2 * np.sum(x ** 2)
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        return ((b - b_pred) ** 2).sum() / 2 + reg
    elif function == "gradient":
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        residual = b_pred - b
        if coordinate is None:
            grad = safe_sparse_dot(residual, A)
            grad += L2 * x
        else:
            grad = safe_sparse_dot(residual, A[:, coordinate])
            grad += L2 * x[coordinate]
        return grad
    elif function == "lipschitz":
        lipschitz_values = np.sum(A ** 2, axis=0) + L2
        return lipschitz_values
Author: IssamLaradji | Project: ICML2015_GaussSouthwellCoordinateDescent | Lines: 35 | Source: least_squares.py
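As a quick, hypothetical usage sketch (the random data and the driver loop below are not from the original project), the helper above supports a simple coordinate-descent loop for L2-regularized least squares:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(20, 5)
b = rng.randn(20)
x = np.zeros(5)
args = {"L2": 0.1}

L = compute("lipschitz", x, A, b, args)  # per-coordinate Lipschitz constants
for _ in range(100):
    j = rng.randint(5)  # pick a coordinate at random
    g = compute("gradient", x, A, b, args, coordinate=j)
    x[j] -= g / L[j]    # coordinate-wise gradient step
print(compute("loss", x, A, b, args))  # decreases toward the ridge optimum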
Example 2: _decision_scores
def _decision_scores(self, X):
    """Predict using the ELM model.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.

    Returns
    -------
    y_pred : array-like, shape (n_samples,) or (n_samples, n_outputs)
        The predicted values.
    """
    X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
    if self.batch_size is None:
        hidden_activations = self._compute_hidden_activations(X)
        y_pred = safe_sparse_dot(hidden_activations, self.coef_output_)
    else:
        n_samples = X.shape[0]
        batches = gen_batches(n_samples, self.batch_size)
        y_pred = np.zeros((n_samples, self.n_outputs_))
        for batch in batches:
            h_batch = self._compute_hidden_activations(X[batch])
            y_pred[batch] = safe_sparse_dot(h_batch, self.coef_output_)
    return y_pred
Author: IssamLaradji | Project: extreme-learning-machines | Lines: 28 | Source: extreme_learning_machines.py
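The batched branch relies on sklearn.utils.gen_batches, which yields contiguous slice objects covering the n_samples rows; a minimal sketch of its behavior:

from sklearn.utils import gen_batches

print(list(gen_batches(7, 3)))
# [slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]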
Example 3: _bilinear_cd
def _bilinear_cd(U, V, X_left, X_right, y, alpha):
    n_samples, n_features_left = X_left.shape
    n_components = V.shape[1]
    XrV = safe_sparse_dot(X_right, V)
    viol = 0
    for j in range(n_features_left):
        for s in range(n_components):
            XlU = safe_sparse_dot(X_left, U)
            y_pred = np.sum(XlU * XrV, axis=1)
            # grad_loss = loss.dloss(y_pred, y)
            grad_loss = y_pred - y
            grad = np.dot(grad_loss * X_left[:, j], XrV[:, s])
            # grad /= n_samples
            grad += alpha * U[j, s]
            inv_step_size = np.dot(X_left[:, j] ** 2, XrV[:, s] ** 2)
            # inv_step_size /= np.sqrt(n_samples)
            inv_step_size += alpha
            update = grad / inv_step_size
            viol += np.abs(update)
            U[j, s] -= update
    XlU = safe_sparse_dot(X_left, U)
    y_pred = np.sum(XlU * XrV, axis=1)
    lv = 0.5 * np.sum((y_pred - y) ** 2)
    lv += 0.5 * alpha * (np.sum(U ** 2) + np.sum(V ** 2))
    return viol, lv
Author: vene | Project: bilearn | Lines: 35 | Source: test_cd.py
Example 4: _svd
def _svd(self, array, n_components, n_discard):
    """Returns the first `n_components` left and right singular
    vectors u and v, discarding the first `n_discard`.
    """
    if self.svd_method == "randomized":
        kwargs = {}
        if self.n_svd_vecs is not None:
            kwargs["n_oversamples"] = self.n_svd_vecs
        u, _, vt = randomized_svd(array, n_components,
                                  random_state=self.random_state, **kwargs)
    elif self.svd_method == "arpack":
        u, _, vt = svds(array, k=n_components, ncv=self.n_svd_vecs)
        if np.any(np.isnan(vt)):
            # some eigenvalues of A * A.T are negative, causing
            # sqrt() to be np.nan. This causes some vectors in vt
            # to be np.nan.
            _, v = eigsh(safe_sparse_dot(array.T, array), ncv=self.n_svd_vecs)
            vt = v.T
        if np.any(np.isnan(u)):
            _, u = eigsh(safe_sparse_dot(array, array.T), ncv=self.n_svd_vecs)
    assert_all_finite(u)
    assert_all_finite(vt)
    u = u[:, n_discard:]
    vt = vt[n_discard:]
    return u, vt.T
Author: VirgileFritsch | Project: scikit-learn | Lines: 27 | Source: spectral.py
Example 5: _backprop
def _backprop(self, X, y, n_samples, a_hidden, a_output, delta_o):
    """Computes the MLP cost function and its corresponding derivatives
    with respect to the different parameters given in the initialization.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    y : numpy array of shape (n_samples,)
        Subset of the target values.
    n_samples : int
        Number of samples.

    Returns
    -------
    cost : float
    W1grad, W2grad : array-like
        Regularized gradients of the hidden and output weights.
    b1grad, b2grad : array-like
        Gradients of the hidden and output intercepts.
    """
    # Forward propagate
    a_hidden[:] = self.activation_func(safe_sparse_dot(X, self.coef_hidden_) +
                                       self.intercept_hidden_)
    a_output[:] = self.output_func(safe_sparse_dot(a_hidden, self.coef_output_) +
                                   self.intercept_output_)
    # Get cost
    cost = self.loss_functions[self.loss](y, a_output)
    # Add regularization term to cost
    cost += (0.5 * self.alpha) * (np.sum(self.coef_hidden_ ** 2) +
                                  np.sum(self.coef_output_ ** 2)) / n_samples
    # Backward propagate
    diff = y - a_output
    delta_o[:] = -diff
    delta_h = np.dot(delta_o, self.coef_output_.T) * \
        self.derivative_func(a_hidden)
    # Get regularized gradients
    W1grad = (safe_sparse_dot(X.T, delta_h) +
              (self.alpha * self.coef_hidden_)) / n_samples
    W2grad = (safe_sparse_dot(a_hidden.T, delta_o) +
              (self.alpha * self.coef_output_)) / n_samples
    b1grad = np.mean(delta_h, 0)
    b2grad = np.mean(delta_o, 0)
    return cost, W1grad, W2grad, b1grad, b2grad
Author: ddofer | Project: NeuralNetworks | Lines: 59 | Source: multilayer_perceptron.py
Example 6: complement_joint_log_likelihood
def complement_joint_log_likelihood(self, X, i):
    """Calculate the posterior log probability of the samples X
    1 - (|c| - 1) * ((P(¬c)ΠP(w_i|¬c)) / (ΣP(¬c)ΠP(w_i|¬c)))"""
    check_is_fitted(self, "classes_")
    X = check_array(X, accept_sparse='csr')
    return (1 - (len(self.classes_) - 1)) * np.array(
        safe_sparse_dot(X, self.complement_feature_log_prob_.T) -
        np.sum(self.class_log_prior_[i] +
               safe_sparse_dot(X, self.complement_feature_log_prob_.T)))
Author: ikegami-yukino | Project: misc | Lines: 8 | Source: selective_nb.py
Example 7: _neg_free_energy
def _neg_free_energy(self, V):
    '''Compute -1 * free energy, i.e. log(p(V) * Z), where Z is the normalizer'''
    # sum_{j=1..M} b_j * V_j
    fe = safe_sparse_dot(V, self.bias_visible_, dense_output=True)
    # sum_{j=1..M} log(1 + exp(sum_{i=1..N} W_ij * V_j))
    fe += np.log(1 + np.exp(self.bias_hidden_ +
                            safe_sparse_dot(V, self.weights_.T))).sum(1)
    return fe
Author: AmazaspShumik | Project: sklearn-bayes | Lines: 8 | Source: rbm.py
Example 8: transform
def transform(self, X):
    # compute hidden layer activation
    if hasattr(self, 'weights_u_') and hasattr(self, 'weights_v_'):
        projected = safe_sparse_dot(X, self.weights_u_, dense_output=True)
        projected = safe_sparse_dot(projected, self.weights_v_)
    else:
        projected = safe_sparse_dot(X, self.weights_, dense_output=True)
    return self._activate(projected + self.biases_)
Author: ddofer | Project: Kaggle-HUJI-ML | Lines: 8 | Source: ELM.py
Example 9: compute
def compute(function, x, A, b, args, coordinate=None):
    L1 = args["L1"]
    if function == "loss":
        reg = L1 * np.sum(np.abs(x))
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        loss = np.sum((b - b_pred) ** 2) / 2 + reg
        return loss
    elif function == "gradient":
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        residual = b_pred - b
        if coordinate is None:
            grad = safe_sparse_dot(A.T, residual)
        else:
            grad = safe_sparse_dot(A[:, coordinate], residual)
        return grad
    elif function == "proximal_step":
        L = args["prox_lipschitz"]
        g_func = args["g_func"]
        L1 = args["L1"]
        g = g_func(x, A, b, args, coordinate)
        if coordinate is None:
            x_half = x - g / L
            # soft thresholding
            x = np.sign(x_half) * np.maximum(0, np.abs(x_half) - L1 / L)
        else:
            L = args["prox_lipschitz"][coordinate]
            x_half = x[coordinate] - g / L
            # soft thresholding
            x[coordinate] = np.sign(x_half) * np.maximum(0, np.abs(x_half) - L1 / L)
        return x
    elif function == "lipschitz":
        lipschitz_values = np.sum(A ** 2, axis=0)
        return lipschitz_values
Author: IssamLaradji | Project: ICML2015_GaussSouthwellCoordinateDescent | Lines: 57 | Source: lasso.py
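The proximal_step branch applies the soft-thresholding operator S_t(z) = sign(z) * max(0, |z| - t), which is the proximal map of the L1 penalty; a standalone sketch of its shrink-or-zero behavior:

import numpy as np

def soft_threshold(z, t):
    # Shrink each entry toward zero by t; entries with |z| <= t become exactly 0
    return np.sign(z) * np.maximum(0, np.abs(z) - t)

print(soft_threshold(np.array([-2.0, -0.3, 0.0, 0.5, 3.0]), 1.0))
# [-1. -0.  0.  0.  2.]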
Example 10: _joint_log_likelihood
def _joint_log_likelihood(self, X, i):
    """Calculate the posterior log probability of the samples X
    P(c) * Π P(w_i|c) / Σ P(c) * Π P(w_i|c)"""
    check_is_fitted(self, "classes_")
    X = check_array(X, accept_sparse='csr')
    numerator = self.class_log_prior_[i] + safe_sparse_dot(X, self.feature_log_prob_.T)
    denominator = np.sum(self.class_log_prior_[i] + safe_sparse_dot(X, self.feature_log_prob_.T))
    return np.array(numerator - denominator)
Author: ikegami-yukino | Project: misc | Lines: 9 | Source: selective_nb.py
Example 11: compute
def compute(function, x, A, b, args, coordinate=None):
    np.testing.assert_equal(np.unique(b), np.array([-1, 1]))
    L2 = args["L2"]
    if function == "loss":
        reg = 0.5 * L2 * np.sum(x ** 2)
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        loss = np.sum(np.log(1 + np.exp(-b * b_pred))) + reg
        return loss
    elif function == "gradient":
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        residual = -b / (1. + np.exp(b * b_pred))
        if coordinate is None:
            grad = safe_sparse_dot(A.T, residual)
            grad += L2 * x
        else:
            grad = safe_sparse_dot(A[:, coordinate].T, residual)
            grad += L2 * x[coordinate]
        return grad
    elif function == "hessian":
        if "b_pred" not in args:
            b_pred = safe_sparse_dot(A, x)
        else:
            b_pred = args["b_pred"]
        sig = 1. / (1. + np.exp(-b * b_pred))
        if coordinate is None:
            hessian = A.T.dot(np.diag(sig * (1 - sig)).dot(A))
            # the L2 term contributes L2 * I to the full Hessian
            hessian += L2 * np.eye(A.shape[1])
        else:
            hessian = A[:, coordinate].T.dot(np.diag(sig * (1 - sig))
                                             .dot(A[:, coordinate]))
            hessian += L2
        return hessian
    elif function == "lipschitz":
        lipschitz_values = 0.25 * np.sum(A ** 2, axis=0) + L2
        return lipschitz_values
Author: IssamLaradji | Project: ICML2015_GaussSouthwellCoordinateDescent | Lines: 56 | Source: logistic.py
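A common sanity check for a hand-derived gradient like the one above is to compare it against central finite differences of the loss; a sketch, assuming compute from this example is in scope and using made-up data:

import numpy as np

rng = np.random.RandomState(0)
A = rng.randn(30, 4)
b = rng.choice([-1, 1], size=30)  # labels must be in {-1, +1}
x = rng.randn(4)
args = {"L2": 0.5}

grad = compute("gradient", x, A, b, args)
eps = 1e-6
for j in range(4):
    e = np.zeros(4)
    e[j] = eps
    fd = (compute("loss", x + e, A, b, args) -
          compute("loss", x - e, A, b, args)) / (2 * eps)
    assert np.isclose(grad[j], fd, atol=1e-4)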
Example 12: _emission_log_probs_params
def _emission_log_probs_params(self, emission_params, X):
    '''
    Computes log of emission probabilities
    '''
    success = emission_params['success_prob']
    fail = emission_params['fail_prob']
    log_total = psi(success + fail)
    log_success = psi(success) - log_total
    log_fail = psi(fail) - log_total
    return safe_sparse_dot(X, log_success.T) + \
           safe_sparse_dot(np.ones(X.shape) - X, log_fail.T)
Author: Ferrine | Project: sklearn-bayes | Lines: 10 | Source: hmm.py
Example 13: fit
def fit(self, X, y):
    """
    Learn the idf vector (global term weights)
    :param X: sparse matrix, [n_samples, n_features]
        X must be a matrix of term counts
    :param y: class_label, [n_samples]
    :return: [n_class, n_features]
    """
    if self.use_idf:
        labelbin = LabelBinarizer()
        # which class each sample belongs to: [n_samples, n_classes]
        Y = labelbin.fit_transform(y)
        self.classes_ = labelbin.classes_
        # number of documents per class: [n_classes]
        class_count_ = np.sum(Y, axis=0)
        class_size = class_count_.shape[0]
        # number of samples containing each feature word, per class: [n_classes, n_features]
        class_df_ = vectorize.class_df(X, Y)
        # number of word tokens per class: [n_classes]
        self.class_freq_ = np.sum(safe_sparse_dot(Y.T, X), axis=1)
        # number of classes in which each feature word occurs: [n_features]
        feature_count_ = np.sum(vectorize.tobool(class_df_), axis=0)
        # when a feature word's class is uncertain or unknown, fall back to
        # the total count of samples in which that word occurs
        unknow_class_count_ = np.array([np.sum(class_count_, axis=0)])
        class_count_ = np.concatenate((class_count_, unknow_class_count_))
        unknow_class_df_ = np.sum(class_df_, axis=0).reshape(1, -1)
        class_df_ = np.concatenate((class_df_, unknow_class_df_), axis=0)
        unknow_class_freq_ = np.array([np.sum(self.class_freq_, axis=0)])
        self.class_freq_ = np.concatenate((self.class_freq_, unknow_class_freq_))
        self.classes_ = np.concatenate((self.classes_, np.array(["unknow"])), axis=0)
        # smooth class_count_, class_df_, feature_count_
        class_count_ += int(self.smooth_idf)
        class_df_ += int(self.smooth_idf)
        feature_count_ += int(self.smooth_idf)
        _, n_features = X.shape
        # [n_classes, n_features]
        first_part = np.log(np.divide(class_count_.reshape(-1, 1), class_df_)) + 1.0
        # [n_features]
        second_part = np.log(class_size / feature_count_) + 1.0
        second_part_diag = sp.spdiags(second_part, diags=0, m=n_features, n=n_features)
        self._idf = safe_sparse_dot(first_part, second_part_diag)
    return self
Author: zqlhuanying | Project: Image_Emotion | Lines: 55 | Source: another_improve_tf_idf.py
Example 14: predict
def predict(self, X_left, X_right):
    y_pred = _bilinear_forward(self.U_, self.V_, X_left, X_right)
    if self.fit_linear:
        y_pred += safe_sparse_dot(X_left, self.w_left_)
        y_pred += safe_sparse_dot(X_right, self.w_right_)
    if self.fit_diag:
        y_pred += safe_sparse_dot(safe_sparse_mul(X_left, X_right),
                                  self.diag_)
    return y_pred
Author: vene | Project: bilearn | Lines: 12 | Source: sg_theano.py
Example 15: add_fit
def add_fit(self, X):
    n_samples = X.shape[0]
    # old
    first = safe_sparse_dot(self.hidden_activations_.T, self.hidden_activations_)
    M = pinv2(first + 1 * np.identity(first.shape[0]))
    beta = self.coef_output_
    # new
    H = self._get_hidden_activations(X)
    # update
    first = pinv2(1 * np.identity(n_samples) +
                  safe_sparse_dot(safe_sparse_dot(H, M), H.T))
    second = safe_sparse_dot(safe_sparse_dot(safe_sparse_dot(safe_sparse_dot(M, H.T), first), H), M)
    M = M - second
    self.coef_output_ = beta + safe_sparse_dot(safe_sparse_dot(M, H.T),
                                               X - safe_sparse_dot(H, beta))
Author: YISION | Project: yision.github.io | Lines: 14 | Source: elm_autoencoder.py
Example 16: _free_energy
def _free_energy(self, v):
    """Computes the free energy F(v) = - log sum_h exp(-E(v,h)).

    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    free_energy : array-like, shape (n_samples,)
        The value of the free energy.
    """
    return (- safe_sparse_dot(v, self.intercept_visible_)
            - np.logaddexp(0, safe_sparse_dot(v, self.components_.T)
                           + self.intercept_hidden_).sum(axis=1))
Author: CalculatedContent | Project: char-rbm | Lines: 14 | Source: RBM.py
Example 17: instance_proba
def instance_proba(self, X):
    """Calculates the probability of each instance in X.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = [n_samples, n_features]

    Returns
    -------
    array-like, shape = [n_samples]
    """
    feat_prob = safe_sparse_dot(np.exp(self.class_log_prior_),
                                np.exp(self.feature_log_prob_)).T
    instance_log_prob = safe_sparse_dot(X, np.log(feat_prob))
    return np.exp(instance_log_prob)
Author: lucianosilvi | Project: mit0110_tesis | Lines: 15 | Source: featmultinomial.py
Example 18: least_square_gradient
def least_square_gradient(X, y, theta, alpha=0, y_pred=None, coordinate=None):
    """Compute the gradient for each feature."""
    if y_pred is None:
        y_pred = safe_sparse_dot(X, theta)
    residual = y_pred - y
    if coordinate is None:
        grad = safe_sparse_dot(X.T, residual)
        grad += alpha * theta
    else:
        grad = safe_sparse_dot(X[:, coordinate], residual)
        grad += alpha * theta[coordinate]
    return grad
Author: Lolluminati | Project: ICML2015_GaussSouthwellCoordinateDescent | Lines: 15 | Source: coordinate_descent.py
Example 19: test_epoch
def test_epoch():
    U = rng.randn(*true_U.shape)
    U2 = U.copy()
    viol, lv = _bilinear_cd(U, true_V, X_left, X_right, y, 1.0)
    dataset = get_dataset(X_left, 'fortran')
    # precomputing for cython
    y_pred = _bilinear_forward(U2, true_V, X_left, X_right)
    XrV = safe_sparse_dot(X_right, true_V)
    VtGsq = safe_sparse_dot(XrV.T ** 2, X_left ** 2)
    v2 = _cd_bilinear_epoch(U2, dataset, XrV, y, y_pred, VtGsq, 1.0)
    assert_almost_equal(viol, v2)
    assert_array_almost_equal(U, U2)
Author: vene | Project: bilearn | Lines: 16 | Source: test_cd.py
Example 20: _joint_log_likelihood
def _joint_log_likelihood(self, X):
    """Calculate the posterior log probability of the samples X"""
    X = atleast2d_or_csr(X)
    neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
    jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
    jll += self.class_log_prior_ + neg_prob.sum(axis=1)
    return jll
Author: katyasosa | Project: TSA | Lines: 7 | Source: bayesian_naive_bayes.py
Note: The sklearn.utils.extmath.safe_sparse_dot examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and redistribution or reuse should follow each project's license. Please do not republish without permission.