This article collects typical usage examples of the sklearn.utils.array2d function in Python. If you are wondering what array2d does, how to call it, or what real-world code that uses it looks like, the curated examples below may help.
The following sections show 20 code examples of the array2d function, sorted by popularity by default.
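Before the examples, a minimal sketch of the typical validation pattern may be useful. Note that array2d belongs to old scikit-learn releases (roughly 0.14 and earlier) and is no longer present in current versions, where sklearn.utils.check_array plays the same role; the shim below is an illustrative assumption, not part of either API.

# Minimal sketch (illustrative only): use array2d where it exists, otherwise
# fall back to a thin shim around the modern check_array helper.
import numpy as np

try:
    from sklearn.utils import array2d          # old scikit-learn (<= 0.14)
except ImportError:
    from sklearn.utils import check_array      # modern scikit-learn

    def array2d(X, dtype=None, copy=False, force_all_finite=True):
        # check_array with ensure_2d=True mirrors the old helper's behavior
        # for 2-D inputs (the old array2d also promoted 1-D inputs).
        return check_array(X, dtype=dtype, copy=copy,
                           force_all_finite=force_all_finite, ensure_2d=True)

X = array2d([[1.0, 2.0], [3.0, 4.0]])   # coerced to a 2-D float ndarray
print(X.shape, X.dtype)                 # (2, 2) float64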
Example 1: _joint_log_likelihood

def _joint_log_likelihood(self, X, mask=None):
    X = array2d(X)
    if mask is not None:
        mask = array2d(mask)
        X = X.copy()
        X[mask] = np.nan
    joint_log_likelihood = np.zeros((len(self.classes_), X.shape[0]))
    for i in range(np.size(self.classes_)):
        joint_log_likelihood[i, :] = self._jll(X, i)
    return joint_log_likelihood.T

Developer: 2dpodcast | Project: anytime_recognition | Lines: 10 | Source: gaussian_nb.py
Example 2: predict

def predict(self, X):
    """Predict regression target for X.

    The predicted regression target of an input sample is computed as the
    mean predicted regression target of the trees in the forest.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples] or [n_samples, n_outputs]
        The predicted values.
    """
    # Check data
    if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
        X = array2d(X, dtype=DTYPE)

    # Assign chunks of trees to jobs
    n_jobs, n_trees, starts = _partition_estimators(self)

    # Parallel loop
    all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                         backend="threading")(
        delayed(_parallel_predict_regression)(
            self.estimators_[starts[i]:starts[i + 1]], X)
        for i in range(n_jobs))

    # Reduce
    y_hat = sum(all_y_hat) / len(self.estimators_)

    return y_hat

Developer: djajetic | Project: AutoML3 | Lines: 34 | Source: forest16.py
Example 3: predict

def predict(self, X):
    """
    Predict regression target for X.

    The predicted regression target of an input sample is computed as the
    mean predicted regression target of the trees in the forest.

    Parameters
    ----------
    X : array, shape = (n_samples, n_features)
        The input samples. Internally, it will be converted to
        `dtype=np.float32`.

    Returns
    -------
    y : array, shape = (n_samples,)
        The predicted values.
    """
    # A call to predict(...) preceding a call to fit(...).
    if not self.estimators_:
        return self.bias

    X = array2d(X, dtype=DTYPE, copy=False, force_all_finite=False)

    all_y_hat = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, backend="threading")(
        delayed(_parallel_helper)(tree, "predict", X) for tree in self.estimators_
    )

    return sum(all_y_hat) / len(self.estimators_)

Developer: dmitru | Project: rankpy | Lines: 29 | Source: tree.py
Example 4: transform

def transform(self, X):
    """
    Transform new points into the embedding space.

    Parameters
    ----------
    X : array-like, shape = [n_samples, n_features]

    Returns
    -------
    X_new : array, shape = [n_samples, n_components]

    Notes
    -----
    Because of the scaling performed by this method, it is discouraged to use
    it together with methods that are not scale-invariant (like SVMs).
    """
    X = array2d(X)
    ind = self.nbrs_.kneighbors(X, n_neighbors=self.n_neighbors,
                                return_distance=False)
    weights = barycenter_weights(X, self.nbrs_._fit_X[ind],
                                 reg=self.reg)
    X_new = np.empty((X.shape[0], self.n_components))
    for i in range(X.shape[0]):
        X_new[i] = np.dot(self.embedding_[ind[i]].T, weights[i])
    return X_new

Developer: bernease | Project: Mmani | Lines: 26 | Source: locally_linear.py
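Example 4 is essentially the out-of-sample transform of locally linear embedding. Scikit-learn exposes the same operation through sklearn.manifold.LocallyLinearEmbedding.transform; the short sketch below is illustrative only (data and parameter values are made up).

# Illustrative: project new points into a fitted LLE embedding space.
import numpy as np
from sklearn.manifold import LocallyLinearEmbedding

rng = np.random.RandomState(0)
X_train = rng.rand(100, 5)
X_new = rng.rand(10, 5)

lle = LocallyLinearEmbedding(n_neighbors=10, n_components=2).fit(X_train)
print(lle.transform(X_new).shape)   # (10, 2)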
Example 5: f_test

def f_test(self, contrast, pval=False):
    from sklearn.utils import array2d
    # Ypred = self.predict(self.X)
    # betas = self.coef
    # ss_errors = np.sum((self.Y - self.y_hat) ** 2, axis=0)
    C1 = array2d(contrast).T
    n, p = self.X.shape
    # Xpinv = scipy.linalg.pinv(X)
    rank_x = np.linalg.matrix_rank(self.pinv)
    C0 = np.eye(p) - np.dot(C1, scipy.linalg.pinv2(C1))  # contrasts orthogonal to C1
    X0 = np.dot(self.X, C0)  # design matrix of the reduced model
    X0pinv = scipy.linalg.pinv2(X0)
    rank_x0 = np.linalg.matrix_rank(X0pinv)
    # Find the subspace (X1) of Xc1 which is orthogonal to X0.
    # The projection matrix M due to X1 can be derived from the residual-
    # forming matrix of the reduced model X0.
    # R0 is the residual-forming matrix of the reduced model
    R0 = np.eye(n) - np.dot(X0, X0pinv)
    # R is the residual-forming matrix of the full model
    R = np.eye(n) - np.dot(self.X, self.pinv)
    # compute the projection matrix
    M = R0 - R
    # Ypred = np.dot(self.X, betas)
    y_hat = self.predict(self.X)
    SS = np.sum(y_hat * np.dot(M, y_hat), axis=0)
    df_c1 = rank_x - rank_x0
    df_res = n - rank_x
    # broadcast over self.err_ss of Y
    f_stats = (SS * df_res) / (self.err_ss * df_c1)
    if not pval:
        return (f_stats, None)
    else:
        p_vals = stats.f.sf(f_stats, df_c1, df_res)
        return f_stats, p_vals

Developer: neurospin | Project: pylearn-mulm | Lines: 34 | Source: models.py
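The method above computes a full- versus reduced-model F test for a linear contrast. The standalone sketch below reproduces the same statistic with plain NumPy/SciPy on synthetic data; it is not part of the pylearn-mulm API, all names and values are illustrative, and it uses scipy.linalg.pinv (scipy.linalg.pinv2 in the excerpt is the older SciPy name).

# Illustrative full- vs reduced-model F test for a single contrast.
import numpy as np
import scipy.linalg
from scipy import stats

rng = np.random.RandomState(0)
n, p = 100, 3
X = np.hstack([rng.randn(n, p - 1), np.ones((n, 1))])   # design with intercept
Y = X.dot(np.array([2.0, 0.0, 1.0])) + rng.randn(n)

pinv = scipy.linalg.pinv(X)
betas = pinv.dot(Y)
err_ss = np.sum((Y - X.dot(betas)) ** 2)                 # residual sum of squares

contrast = np.array([1.0, 0.0, 0.0])                     # test the first coefficient
C1 = np.atleast_2d(contrast).T
C0 = np.eye(p) - C1.dot(scipy.linalg.pinv(C1))           # contrasts orthogonal to C1
X0 = X.dot(C0)                                           # reduced-model design
X0pinv = scipy.linalg.pinv(X0)

R0 = np.eye(n) - X0.dot(X0pinv)                          # residual-forming, reduced model
R = np.eye(n) - X.dot(pinv)                              # residual-forming, full model
M = R0 - R                                               # projection due to the contrast

y_hat = X.dot(betas)
SS = np.sum(y_hat * M.dot(y_hat))                        # SS explained by the contrast
df_c1 = np.linalg.matrix_rank(X) - np.linalg.matrix_rank(X0)
df_res = n - np.linalg.matrix_rank(X)

f_stat = (SS / df_c1) / (err_ss / df_res)
p_val = stats.f.sf(f_stat, df_c1, df_res)
print(f_stat, p_val)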
Example 6: fit

def fit(self, X, y=None, headers=None, verbose=False):
    X = array2d(X)
    if X.ndim != 2:
        raise ValueError('X must have dimension 2, ndim=' + str(X.ndim))
    # n_samples, self.n_features_ = X.shape
    y = np.atleast_1d(y)
    # y = y.astype(DOUBLE)
    if self.target is not None:
        if y is None:
            y = [None] * len(X)
        if len(y) != len(X):
            raise ValueError('y must be same shape as X, len(X)=' + str(len(X)) + ', len(y)=' + str(len(y)))
    if headers is not None:
        if len(headers) != len(X):
            raise ValueError('headers must be same shape as X, len(X)=' + str(len(X)) + ', len(headers)=' + str(len(headers)))
    for x, t in zip(X, y):
        if verbose:
            print(x, t)
        event = array2json(x, headers)
        if self.target is not None:
            event[self.target] = t
        self.stream.train(event)

Developer: bjarkih | Project: featurestream-client | Lines: 28 | Source: sklearn.py
Example 7: l1_multiply

def l1_multiply(X):
    """
    Computes the nonzero componentwise L1 cross-distances between the vectors
    in X.

    Parameters
    ----------
    X : array_like
        An array with shape (n_samples, n_features).

    Returns
    -------
    D : array with shape (n_samples * (n_samples - 1) / 2, n_features)
        The array of componentwise L1 cross-distances.
    ij : array with shape (n_samples * (n_samples - 1) / 2, 2)
        The indices i and j of the vectors in X associated to the cross-
        distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
    """
    X = array2d(X)
    n_samples, n_features = X.shape
    n_nonzero_cross_dist = n_samples * (n_samples - 1) / 2
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=np.int)
    D = np.zeros((n_nonzero_cross_dist, n_features))
    ll_1 = 0
    for k in range(n_samples - 1):
        ll_0 = ll_1
        ll_1 = ll_0 + n_samples - k - 1
        ij[ll_0:ll_1, 0] = k
        ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
        D[ll_0:ll_1] = np.abs(X[k] * X[(k + 1):n_samples])
    return D, ij.astype(np.int)

Developer: pietersavenberg | Project: thesis | Lines: 35 | Source: nonstat.py
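l1_multiply enumerates every unordered pair i < j of rows, but stores the componentwise product |X[i] * X[j]| rather than a difference. The short sketch below is an illustrative check of that pair bookkeeping using itertools.combinations; it is not part of the project above.

# Illustrative: reproduce the (i, j) enumeration and the componentwise products.
from itertools import combinations
import numpy as np

X = np.array([[1.0, -2.0],
              [3.0,  0.5],
              [-1.0, 4.0]])

pairs = list(combinations(range(X.shape[0]), 2))   # [(0, 1), (0, 2), (1, 2)]
ij = np.array(pairs, dtype=int)
D = np.abs(X[ij[:, 0]] * X[ij[:, 1]])              # shape (n_pairs, n_features)

print(ij)
print(D)   # row 0 is |X[0] * X[1]| = [3.0, 1.0]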
Example 8: transform

def transform(self, sequences):
    """Apply the dimensionality reduction to each sequence.

    Parameters
    ----------
    sequences : list of array-like, each of shape (n_samples_i, n_features)
        Training data, where n_samples_i is the number of samples
        in sequence i and n_features is the number of features.

    Returns
    -------
    sequences_new : list of array-like, each of shape (n_samples_i, n_components)
    """
    check_iter_of_sequences(sequences, max_iter=3)  # we might be lazy-loading
    sequences_new = []
    for X in sequences:
        X = array2d(X)
        if self.means_ is not None:
            X = X - self.means_
        X_transformed = np.dot(X, self.components_.T)
        if self.weighted_transform:
            X_transformed *= self.timescales_
        sequences_new.append(X_transformed)
    return sequences_new

Developer: schwancr | Project: msmbuilder | Lines: 29 | Source: tica.py
Example 9: fit

def fit(self, X, y=None, **params):
    """Fit the model with X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    self : object
        Returns the instance itself.

    Notes
    -----
    Calling fit multiple times will update the components.
    """
    X = array2d(X)
    n_samples, n_features = X.shape
    X = as_float_array(X, copy=self.copy)
    if self.iteration != 0 and n_features != self.components_.shape[1]:
        raise ValueError("The dimensionality of the new data and the existing components_ does not match")
    # incrementally fit the model
    for i in range(0, X.shape[0]):
        self.partial_fit(X[i, :])
    return self

Developer: gaoyuankidult | Project: pyIPCA | Lines: 31 | Source: hall_ipca.py
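Examples 9 and 18 come from pyIPCA, which predates scikit-learn's own incremental PCA. In current scikit-learn the same sample-by-sample or batch-by-batch pattern is available through sklearn.decomposition.IncrementalPCA; the sketch below is illustrative (data and parameter values are made up).

# Illustrative incremental-PCA workflow with modern scikit-learn.
import numpy as np
from sklearn.decomposition import IncrementalPCA

rng = np.random.RandomState(0)
X = rng.randn(1000, 20)

ipca = IncrementalPCA(n_components=5, batch_size=100)
for batch in np.array_split(X, 10):
    ipca.partial_fit(batch)          # update the components one batch at a time

X_reduced = ipca.transform(X)
print(X_reduced.shape)               # (1000, 5)
print(ipca.explained_variance_ratio_)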
Example 10: predict

def predict(self, X):
    """Predict regression target for X.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples]
        The predicted values.
    """
    if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
        X = array2d(X, dtype=DTYPE)

    # TODO - validate n_features is correct?
    n_samples, n_features = X.shape
    if self._n_features != n_features:
        raise ValueError("Number of features of the model must "
                         "match the input. Model n_features is {} and "
                         "input n_features is {}".format(
                             self._n_features, n_features))

    result = np.empty(n_samples, dtype=DTYPE)
    return self._evaluator.predict(X, result)

Developer: arnabkd | Project: sklearn-compiledtrees | Lines: 26 | Source: compiled.py
Example 11: predict_proba

def predict_proba(self, X):
    """Predict class probabilities for X.

    The predicted class probabilities of an input sample are computed as
    the mean predicted class probabilities of the trees in the forest.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    p : array of shape = [n_samples, n_classes], or a list of n_outputs
        such arrays if n_outputs > 1.
        The class probabilities of the input samples. The order of the
        classes corresponds to that in the attribute `classes_`.
    """
    # Check data
    if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
        X = array2d(X, dtype=DTYPE)

    # Assign chunks of trees to jobs
    n_jobs, n_trees, starts = _partition_estimators(self)

    # Bugfix for _parallel_predict_proba, which expects a list for
    # multi-label problems and an integer for single-label problems
    if not isinstance(self.n_classes_, int) and len(self.n_classes_) == 1:
        n_classes_ = self.n_classes_[0]
    else:
        n_classes_ = self.n_classes_

    # Parallel loop
    all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose,
                         backend="threading")(
        delayed(_parallel_predict_proba)(
            self.estimators_[starts[i]:starts[i + 1]],
            X,
            n_classes_,
            self.n_outputs_)
        for i in range(n_jobs))

    # Reduce
    proba = all_proba[0]

    if self.n_outputs_ == 1:
        for j in xrange(1, len(all_proba)):
            proba += all_proba[j]
        proba /= len(self.estimators_)
    else:
        for j in xrange(1, len(all_proba)):
            for k in xrange(self.n_outputs_):
                proba[k] += all_proba[j][k]
        for k in xrange(self.n_outputs_):
            proba[k] /= self.n_estimators

    return proba

Developer: djajetic | Project: AutoML3 | Lines: 58 | Source: forest16.py
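Examples 2 and 11 both implement the forest-level reduction: predictions are the average of the per-tree outputs. The sketch below illustrates the same averaging with current scikit-learn's RandomForestClassifier on a toy dataset; it is a check of the documented behavior, not code from the project above.

# Illustrative: forest probabilities are the mean of the per-tree probabilities.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, y)

proba = clf.predict_proba(X[:5])
manual = np.mean([t.predict_proba(X[:5]) for t in clf.estimators_], axis=0)
print(np.allclose(proba, manual))   # expected: True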
Example 12: fit

def fit(self, X, y=None):
    X = array2d(X)
    X = as_float_array(X, copy=self.copy)
    print(X.shape)
    sigma = np.dot(X.T, X) / X.shape[1]
    U, S, V = linalg.svd(sigma)
    tmp = np.dot(U, np.diag(1 / np.sqrt(S + self.regularization)))
    self.components_ = np.dot(tmp, U.T)
    return self

Developer: alan-y-w | Project: ml_expression | Lines: 9 | Source: ZCA.py
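Examples 12 and 20 are hand-rolled ZCA-whitening transformers. The standalone sketch below shows the usual recipe (it centers the data and normalizes the covariance by n_samples - 1, which differs slightly from both excerpts) and checks that the whitened covariance is close to the identity; all names and values are illustrative.

# Illustrative ZCA-whitening recipe on synthetic correlated data.
import numpy as np
from scipy import linalg

rng = np.random.RandomState(0)
X = rng.randn(500, 4) @ rng.randn(4, 4)        # correlated features

mean = X.mean(axis=0)
Xc = X - mean
sigma = Xc.T @ Xc / (Xc.shape[0] - 1)          # feature covariance
U, S, _ = linalg.svd(sigma)
eps = 1e-5                                     # regularization, like self.regularization
W_zca = U @ np.diag(1.0 / np.sqrt(S + eps)) @ U.T

X_white = Xc @ W_zca
print(np.round(np.cov(X_white, rowvar=False), 2))   # approximately the identity matrix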
Example 13: predict

def predict(self, X):
    """Predict class or regression value for X.

    For a classification model, the predicted class for each sample in X is
    returned. For a regression model, the predicted value based on X is
    returned.

    Parameters
    ----------
    X : array-like of shape = [n_samples, n_features]
        The input samples.

    Returns
    -------
    y : array of shape = [n_samples] or [n_samples, n_outputs]
        The predicted classes, or the predicted values.
    """
    if getattr(X, "dtype", None) != DTYPE or X.ndim != 2:
        X = array2d(X, dtype=DTYPE)

    n_samples, n_features = X.shape

    if self.tree_ is None:
        raise Exception("Tree not initialized. Perform a fit first.")

    if self.n_features_ != n_features:
        raise ValueError(
            "Number of features of the model must "
            "match the input. Model n_features is %s and "
            "input n_features is %s" % (self.n_features_, n_features)
        )

    proba = self.tree_.predict(X)

    # Classification
    if isinstance(self, ClassifierMixin):
        if self.n_outputs_ == 1:
            return self.classes_.take(np.argmax(proba, axis=1), axis=0)
        else:
            predictions = np.zeros((n_samples, self.n_outputs_))
            for k in xrange(self.n_outputs_):
                predictions[:, k] = self.classes_[k].take(np.argmax(proba[:, k], axis=1), axis=0)
            return predictions

    # Regression
    else:
        if self.n_outputs_ == 1:
            return proba[:, 0]
        else:
            return proba[:, :, 0]

Developer: rexshihaoren | Project: MSPrediction-Python | Lines: 54 | Source: tree.py
Example 14: _joint_log_likelihood

def _joint_log_likelihood(self, X):
    X = array2d(X)
    joint_log_likelihood = []
    for i in xrange(np.size(self.classes_)):
        jointi = np.log(self.class_prior_[i])
        n_ij = -0.5 * np.sum(np.log(np.pi * self.sigma_[i, :]))
        n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) / (self.sigma_[i, :]), 1)
        joint_log_likelihood.append(jointi + n_ij)
    joint_log_likelihood = np.array(joint_log_likelihood).T
    return joint_log_likelihood

Developer: JOSMANC | Project: nyan | Lines: 11 | Source: naive_bayes.py
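This is the Gaussian naive Bayes joint log-likelihood: the log class prior plus the per-feature Gaussian log-densities. In current scikit-learn the same quantity, normalized across classes, is exposed by GaussianNB.predict_log_proba; a minimal illustrative sketch:

# Illustrative: the built-in GaussianNB computes the same class-conditional
# log-likelihoods internally and returns the normalized version here.
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
clf = GaussianNB().fit(X, y)
print(clf.predict_log_proba(X[:3]))   # log P(class | sample), shape (3, n_classes)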
Example 15: fit_transform

def fit_transform(self, X, y=None):
    """
    Fit the model to the data X and transform it.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.
    """
    X = array2d(X)
    self.fit(X, y)
    return self.transform(X)

Developer: LiaoPan | Project: amazon_challenge | Lines: 13 | Source: rbm.py
Example 16: fit

def fit(self, X, y=None):
    """
    Fit the model to the data X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    self
    """
    X = array2d(X)
    dtype = np.float32 if X.dtype.itemsize == 4 else np.float64
    rng = check_random_state(self.random_state)

    self.components_ = np.asarray(
        rng.normal(0, 0.01, (self.n_components, X.shape[1])),
        dtype=dtype,
        order='fortran')
    self.intercept_hidden_ = np.zeros(self.n_components, dtype=dtype)
    self.intercept_visible_ = np.zeros(X.shape[1], dtype=dtype)
    self.h_samples_ = np.zeros((self.batch_size, self.n_components),
                               dtype=dtype)

    inds = np.arange(X.shape[0])
    rng.shuffle(inds)
    n_batches = int(np.ceil(len(inds) / float(self.batch_size)))
    verbose = self.verbose
    for iteration in xrange(self.n_iter):
        pl = 0.
        if verbose:
            begin = time.time()

        for minibatch in xrange(n_batches):
            pl_batch = self._fit(X[inds[minibatch::n_batches]], rng)
            if verbose:
                pl += pl_batch.sum()

        if verbose:
            pl /= X.shape[0]
            end = time.time()
            print("Iteration %d, pseudo-likelihood = %.2f, time = %.2fs"
                  % (iteration, pl, end - begin))
    return self

Developer: LiaoPan | Project: amazon_challenge | Lines: 50 | Source: rbm.py
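Examples 15 through 17 come from an RBM implementation written before scikit-learn shipped its own. In current scikit-learn the same fit/transform workflow is available as sklearn.neural_network.BernoulliRBM; the sketch below is illustrative (data and hyperparameter values are made up).

# Illustrative: the built-in Bernoulli RBM exposes the same fit/transform surface.
import numpy as np
from sklearn.neural_network import BernoulliRBM

rng = np.random.RandomState(0)
X = (rng.rand(200, 64) > 0.5).astype(np.float64)   # binary input in [0, 1]

rbm = BernoulliRBM(n_components=16, learning_rate=0.05,
                   batch_size=10, n_iter=20, random_state=0)
H = rbm.fit_transform(X)          # hidden-unit activation probabilities
print(H.shape)                    # (200, 16)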
Example 17: transform

def transform(self, X):
    """
    Computes the probabilities ``P({\bf h}_j=1|{\bf v}={\bf X})``.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)

    Returns
    -------
    h : array-like, shape (n_samples, n_components)
    """
    X = array2d(X)
    return self._mean_hiddens(X)

Developer: LiaoPan | Project: amazon_challenge | Lines: 14 | Source: rbm.py
Example 18: fit

def fit(self, X, y=None, **params):
    """Fit the model with X.

    Parameters
    ----------
    X : array-like, shape (n_samples, n_features)
        Training data, where n_samples is the number of samples
        and n_features is the number of features.

    Returns
    -------
    self : object
        Returns the instance itself.

    Notes
    -----
    Calling fit multiple times will update the components.
    """
    X = array2d(X)
    n_samples, n_features = X.shape
    X = as_float_array(X, copy=self.copy)

    # init
    if self.iteration == 0:
        self.mean_ = np.zeros([n_features], np.float)
        self.components_ = np.zeros([self.n_components, n_features], np.float)
    else:
        if n_features != self.components_.shape[1]:
            raise ValueError('The dimensionality of the new data and the existing components_ does not match')

    # incrementally fit the model
    for i in range(0, X.shape[0]):
        self.partial_fit(X[i, :])

    # update explained_variance_ratio_
    self.explained_variance_ratio_ = np.sqrt(np.sum(self.components_ ** 2, axis=1))

    # sort by explained_variance_ratio_
    idx = np.argsort(-self.explained_variance_ratio_)
    self.explained_variance_ratio_ = self.explained_variance_ratio_[idx]
    self.components_ = self.components_[idx, :]

    # re-normalize
    self.explained_variance_ratio_ = (self.explained_variance_ratio_ /
                                      self.explained_variance_ratio_.sum())
    for r in range(0, self.components_.shape[0]):
        self.components_[r, :] /= np.sqrt(np.dot(self.components_[r, :], self.components_[r, :]))
    return self

Developer: gaoyuankidult | Project: pyIPCA | Lines: 50 | Source: ccipca.py
Example 19: detect

def detect(self, X):
    X = array2d(X)
    n_samples, n_features = X.shape

    N_obs = self.N_obs if self.N_obs is not None else n_features
    if N_obs > self.N_ref:
        raise ValueError

    i_pred = []
    for X_i in X:
        detection = detect_stream(X_i, N_obs,
                                  self.R_pos_, self.R_neg_,
                                  self.gamma, self.theta, self.D_req)
        i_pred.append(detection)
    return i_pred

Developer: norbert | Project: hearsay | Lines: 15 | Source: nikolov.py
Example 20: fit

def fit(self, X, y=None):
    X = array2d(X)
    X = as_float_array(X, copy=self.copy)
    self.mean_ = np.mean(X, axis=0)
    X -= self.mean_
    X = X.T
    examples = np.shape(X)[1]
    sigma = np.dot(X, X.T) / (examples - 1)
    U, S, V = linalg.svd(sigma)
    d = np.sqrt(1 / S[0:100])
    dd = np.append(d, np.zeros((np.shape(X)[0] - 100)))
    # tmp = np.dot(U, np.diag(1 / np.sqrt(S + self.regularization)))
    tmp = np.dot(U, np.diag(dd))
    self.components_ = np.dot(tmp, U.T)
    return self

Developer: asez73 | Project: dl-playground | Lines: 15 | Source: ZCA.py
Note: the sklearn.utils.array2d examples in this article were compiled by 纯净天空 from source-code and documentation hosting platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.