
Python linalg.pinv Function Code Examples


This article collects typical usage examples of Python's scipy.linalg.pinv function. If you are wondering what exactly pinv does, how to call it, or what it looks like in practice, the hand-picked code examples below should help.



The following 20 code examples of the pinv function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help surface better Python code samples.
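Before diving into the collected examples, here is a minimal sketch (illustrative only, not drawn from any of the projects below) of what scipy.linalg.pinv provides: the Moore-Penrose pseudo-inverse, which generalizes the matrix inverse to rectangular and singular matrices and yields the least-squares solution of an overdetermined system.

import numpy as np
from scipy import linalg

A = np.random.standard_normal((6, 3))  # tall matrix with full column rank
A_pinv = linalg.pinv(A)                # Moore-Penrose pseudo-inverse, shape (3, 6)

# Two of the Penrose conditions, checked throughout the examples below:
assert np.allclose(A @ A_pinv @ A, A)
assert np.allclose(A_pinv @ A @ A_pinv, A_pinv)

# For an overdetermined system A x = b, pinv gives the least-squares solution:
b = np.random.standard_normal(6)
assert np.allclose(A_pinv @ b, linalg.lstsq(A, b)[0])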

Example 1: test_pinv_Z_alt

def test_pinv_Z_alt():
    ## Test pseudo-inverse computation of a complex double precision distributed matrix
    m, n = 7, 4

    gA = np.random.standard_normal((m, n)).astype(np.float64)
    gA = gA + 1.0J * np.random.standard_normal((m, n)).astype(np.float64)
    gA = np.dot(gA, gA.T.conj())
    assert np.linalg.matrix_rank(gA) < gA.shape[0]  # not full rank
    gA = np.asfortranarray(gA)

    m, n = gA.shape

    dA = core.DistributedMatrix.from_global_array(gA, rank=0)

    pinvA = rt.pinv(dA)
    gpinvA = pinvA.to_global_array()
    gpinvA = gpinvA[:n, :]

    if rank == 0:
        assert not allclose(gA, np.dot(gA, np.dot(gpinvA, gA)))
        assert not allclose(gpinvA, np.dot(gpinvA, np.dot(gA, gpinvA)))
        spinvA = la.pinv(gA)
        assert allclose(gA, np.dot(gA, np.dot(spinvA, gA)))
        assert allclose(spinvA, np.dot(spinvA, np.dot(gA, spinvA)))
        assert not allclose(gpinvA, la.pinv(gA)) # compare with scipy result
Author: jrs65 | Project: scalapy | Lines: 25 | Source: test_pinv.py
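Example 1 exercises a distributed pinv (scalapy's rt.pinv) against scipy's on a deliberately rank-deficient Hermitian matrix. The scipy reference behavior it compares against can be checked in isolation; a small sketch, using the same matrix construction as the test but requiring no scalapy:

import numpy as np
from scipy import linalg as la

m, n = 7, 4
A = np.random.standard_normal((m, n)) + 1.0J * np.random.standard_normal((m, n))
A = np.dot(A, A.T.conj())              # 7x7 Hermitian with rank at most 4
assert np.linalg.matrix_rank(A) < A.shape[0]

# scipy's SVD-based pseudo-inverse satisfies the Penrose conditions
# even though A is singular:
A_pinv = la.pinv(A)
assert np.allclose(A, np.dot(A, np.dot(A_pinv, A)))
assert np.allclose(A_pinv, np.dot(A_pinv, np.dot(A, A_pinv)))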


Example 2: __m__

    def __m__(self, C1_ts, C2_ts, P_ts, P_t_tm1s, P_tm1s, G_ts):

        # As in the E-step, the initial observation is used to compute the prior.
        n = self.n_observations() + 1

        C1_sum = np.sum(C1_ts, axis=2)
        P_t_sum = np.sum(P_ts, axis=2)
        P_t_tm1_sum_1tT = np.sum(P_t_tm1s[:, :, :], axis=2)
        P_tm1_sum_1tT = np.sum(P_tm1s[:, :, :], axis=2)

        # Output matrix fit.
        C = C1_sum @ pinv(P_t_sum)

        # Observation covariance fit.
        R = np.zeros((self.observations_size, self.observations_size))
        for t in range(n):
            R += C2_ts[:, :, t] - C @ G_ts[:, :, t]
        R *= 1.0/n

        # State dynamics
        A = P_t_tm1_sum_1tT @ pinv(P_tm1_sum_1tT)

        # Hidden Noise
        Q = 1.0/(n-1) * (np.sum(P_ts[:, :, 1:], axis=2) - A @ P_t_tm1_sum_1tT)

        # Control signal
        B = self.Bs[:, :, 0]
        D = self.Ds[:, :, 0]

        # Initial state
        init_mu = self.mus[:, :, 0]
        init_V = P_ts[:, :, 0] - init_mu @ init_mu.T

        return (A, B, C, D, Q, R), init_mu, init_V
Author: Karlos7692 | Project: Thesis | Lines: 34 | Source: kalman.py


Example 3: _fit

    def _fit(self, cov_a, cov_b):
        """Aux Function (modifies cov_a and cov_b in-place)."""
        cov_a /= np.trace(cov_a)
        cov_b /= np.trace(cov_b)
        # compute the eigenvalues
        lambda_, u = linalg.eigh(cov_a + cov_b)
        # sort them
        ind = np.argsort(lambda_)[::-1]
        lambda2_ = lambda_[ind]

        u = u[:, ind]
        p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)

        # Compute the generalized eigenvalue problem
        w_a = np.dot(np.dot(p, cov_a), p.T)
        w_b = np.dot(np.dot(p, cov_b), p.T)
        # and solve it
        vals, vecs = linalg.eigh(w_a, w_b)
        # sort vectors by discriminative power using eigenvalues
        ind = np.argsort(np.maximum(vals, 1.0 / vals))[::-1]
        vecs = vecs[:, ind]
        # and project
        w = np.dot(vecs.T, p)

        self.filters_ = w
        self.patterns_ = linalg.pinv(w).T
Author: rajul | Project: mne-python | Lines: 26 | Source: csp.py


Example 4: __init__

    def __init__(self, U, Y, statedim, reg=None):
        if size(shape(U)) == 1:
            U = reshape(U, (-1,1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1,1))
        if reg is None:
            reg = 0

        yDim = size(Y,1)
        uDim = size(U,1)

        self.output_size = size(Y,1) # placeholder

        # number of samples of past/future we'll mash together into a 'state'
        width = 1
        # total number of past/future pairings we get as a result
        K = size(U,0) - 2 * width + 1

        # build hankel matrices containing pasts and futures
        U_p = array([ravel(U[t : t + width]) for t in range(K)]).T
        U_f = array([ravel(U[t + width : t + 2 * width]) for t in range(K)]).T
        Y_p = array([ravel(Y[t : t + width]) for t in range(K)]).T
        Y_f = array([ravel(Y[t + width : t + 2 * width]) for t in range(K)]).T

        # solve the eigenvalue problem
        YfUfT = dot(Y_f, U_f.T)
        YfUpT = dot(Y_f, U_p.T)
        YfYpT = dot(Y_f, Y_p.T)
        UfUpT = dot(U_f, U_p.T)
        UfYpT = dot(U_f, Y_p.T)
        UpYpT = dot(U_p, Y_p.T)
        F = bmat([[None, YfUfT, YfUpT, YfYpT],
                  [YfUfT.T, None, UfUpT, UfYpT],
                  [YfUpT.T, UfUpT.T, None, UpYpT],
                  [YfYpT.T, UfYpT.T, UpYpT.T, None]])
        Ginv = bmat([[pinv(dot(Y_f,Y_f.T)), None, None, None],
                     [None, pinv(dot(U_f,U_f.T)), None, None],
                     [None, None, pinv(dot(U_p,U_p.T)), None],
                     [None, None, None, pinv(dot(Y_p,Y_p.T))]])
        F = F - eye(size(F, 0)) * reg

        # Take smallest eigenvalues
        _, W = eigs(Ginv.dot(F), k=statedim, which='SR')

        # State sequence is a weighted combination of the past
        W_U_p = W[ width * (yDim + uDim) : width * (yDim + uDim + uDim), :]
        W_Y_p = W[ width * (yDim + uDim + uDim):, :]
        X_hist = dot(W_U_p.T, U_p) + dot(W_Y_p.T, Y_p)

        # Regress; trim inputs to match the states we retrieved
        R = concatenate((X_hist[:, :-1], U[width:-width].T), 0)
        L = concatenate((X_hist[:, 1: ], Y[width:-width].T), 0)
        RRi = pinv(dot(R, R.T))
        RL  = dot(R, L.T)
        Sys = dot(RRi, RL).T
        self.A = Sys[:statedim, :statedim]
        self.B = Sys[:statedim, statedim:]
        self.C = Sys[statedim:, :statedim]
        self.D = Sys[statedim:, statedim:]
Author: riscy | Project: mllm | Lines: 59 | Source: system_identifier.py


Example 5: _update_precisions

    def _update_precisions(self, X, z):
        """Update the variational distributions for the precisions"""
        n_features = X.shape[1]
        if self.covariance_type == 'spherical':
            self.dof_ = 0.5 * n_features * np.sum(z, axis=0)
            for k in xrange(self.n_components):
                # could be more memory efficient ?
                sq_diff = np.sum((X - self.means_[k]) ** 2, axis=1)
                self.scale_[k] = 1.
                self.scale_[k] += 0.5 * np.sum(z.T[k] * (sq_diff + n_features))
                self.bound_prec_[k] = (
                    0.5 * n_features * (
                        digamma(self.dof_[k]) - np.log(self.scale_[k])))
            self.precs_ = np.tile(self.dof_ / self.scale_, [n_features, 1]).T

        elif self.covariance_type == 'diag':
            for k in xrange(self.n_components):
                self.dof_[k].fill(1. + 0.5 * np.sum(z.T[k], axis=0))
                sq_diff = (X - self.means_[k]) ** 2  # see comment above
                self.scale_[k] = np.ones(n_features) + 0.5 * np.dot(
                    z.T[k], (sq_diff + 1))
                self.precs_[k] = self.dof_[k] / self.scale_[k]
                self.bound_prec_[k] = 0.5 * np.sum(digamma(self.dof_[k])
                                                    - np.log(self.scale_[k]))
                self.bound_prec_[k] -= 0.5 * np.sum(self.precs_[k])

        elif self.covariance_type == 'tied':
            self.dof_ = 2 + X.shape[0] + n_features
            self.scale_ = (X.shape[0] + 1) * np.identity(n_features)
            for k in xrange(self.n_components):
                    diff = X - self.means_[k]
                    self.scale_ += np.dot(diff.T, z[:, k:k + 1] * diff)
            self.scale_ = linalg.pinv(self.scale_)
            self.precs_ = self.dof_ * self.scale_
            self.det_scale_ = linalg.det(self.scale_)
            self.bound_prec_ = 0.5 * wishart_log_det(
                self.dof_, self.scale_, self.det_scale_, n_features)
            self.bound_prec_ -= 0.5 * self.dof_ * np.trace(self.scale_)

        elif self.covariance_type == 'full':
            for k in xrange(self.n_components):
                sum_resp = np.sum(z.T[k])
                self.dof_[k] = 2 + sum_resp + n_features
                self.scale_[k] = (sum_resp + 1) * np.identity(n_features)
                diff = X - self.means_[k]
                self.scale_[k] += np.dot(diff.T, z[:, k:k + 1] * diff)
                self.scale_[k] = linalg.pinv(self.scale_[k])
                self.precs_[k] = self.dof_[k] * self.scale_[k]
                self.det_scale_[k] = linalg.det(self.scale_[k])
                self.bound_prec_[k] = 0.5 * wishart_log_det(self.dof_[k],
                                                            self.scale_[k],
                                                            self.det_scale_[k],
                                                            n_features)
                self.bound_prec_[k] -= 0.5 * self.dof_[k] * np.trace(
                    self.scale_[k])
Author: WeatherGod | Project: scikit-learn | Lines: 55 | Source: dpgmm.py


Example 6: ICC_rep_anova

def ICC_rep_anova(Y):

    # The data Y are entered as a 'table', i.e. subjects are in rows and
    # repeated measures in columns
    # flag = 1 returns the design figure


    # ------------------------------------------------------------------------------------------
    #                   One Sample Repeated measure ANOVA
    #                   Y = XB + E with X = [Factor / Subject]
    # ------------------------------------------------------------------------------------------

    [nb_subjects, nb_conditions] = Y.shape
    # print(nb_subjects, nb_conditions)
    df = nb_conditions - 1
    dfe = nb_subjects*nb_conditions - nb_subjects - df
    dfmodel = nb_subjects - df

    # create the design matrix for the different levels
    # ------------------------------------------------

    x = kron(eye(nb_conditions), ones((nb_subjects, 1)))  # effect
    x0 = tile(eye(nb_subjects), (nb_conditions, 1))  # subject
    X = hstack([x, x0])

    # Compute the repeated measure effect
    # ------------------------------------
    Y = Y.flatten(order='F')

    # Sum Square Total
    SST = dot((Y.reshape(-1,1) - tile(mean(Y), (Y.shape[0], 1))).T, (Y.reshape(-1,1) - tile(mean(Y), (Y.shape[0], 1))))

    # Sum Square SubjeT (error in the ANOVA model)
    M = dot(dot(X, pinv(dot(X.T,X))), X.T)
    R = eye(Y.shape[0]) - M
    SSS = dot(dot(Y.T,R),Y)
    MSS = SSS / dfe

    # Sum square effect (repeated measure)
    Betas = dot(pinv(x),Y)# compute without cst/subjects
    yhat = dot(x,Betas)
    SSE = diag(dot((yhat.reshape(-1,1) - tile(mean(yhat), (yhat.shape[0], 1))).T, (yhat.reshape(-1,1) - tile(mean(yhat), (yhat.shape[0], 1)))))
    # MSE    = SSE / df;

    # Sum Square error
    SSError = SST - SSS - SSE
    MSError = SSError / dfmodel

    # ICC(3,1) = (mean square subject - mean square error) /
    #            (mean square subject + (k-1) * mean square error)
    return (MSS - MSError) / (MSS + df * MSError)

# Example usage:
# Y = array([[1, 2], [2, 3.5]])
# print(ICC_rep_anova(Y))
Author: JohnGriffiths | Project: neuroutils | Lines: 54 | Source: icc.py
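The quantity returned by Example 6 is the consistency ICC from the one-sample repeated-measures ANOVA. Restating the formula from the code comment in LaTeX, with k the number of conditions (so k - 1 equals df in the code):

\mathrm{ICC}(3,1) = \frac{MS_{\mathrm{subject}} - MS_{\mathrm{error}}}{MS_{\mathrm{subject}} + (k-1)\,MS_{\mathrm{error}}}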


Example 7: bayesian_regression

def bayesian_regression(X, Y, K):
    d = K.shape[0]
    alpha = model_evidence(X, Y, K)
    V1_inv = np.eye(d)
    V2_inv = K
    # beta = (V1inv+alpha*V2inv)\(V1inv*Y'*X)/(X'*X)
    YTX = np.dot(Y.T, X)    
    XTX = np.dot(X.T, X)
    beta = np.dot(np.dot(linalg.pinv(V1_inv + alpha * V2_inv), 
                         np.dot(V1_inv, YTX)), linalg.pinv(XTX))
    return beta
Author: bernardng | Project: codeSync | Lines: 11 | Source: bayesian_regression.py


Example 8: _cp3

def _cp3(X, n_components, tol, max_iter, init_type, random_state=None):
    """
    3 dimensional CANDECOMP/PARAFAC decomposition.

    This code is meant to be a tutorial/testing example... in general _cpN
    should be more compact and equivalent mathematically.
    """

    if len(X.shape) != 3:
        raise ValueError("CP3 decomposition only supports 3 dimensions!")

    if init_type == "random":
        A, B, C = _random_init(X, n_components, random_state)
    elif init_type == "hosvd":
        A, B, C = _hosvd_init(X, n_components)
    grams = [np.dot(arr.T, arr) for arr in (A, B, C)]
    err = 1E10

    for itr in range(max_iter):
        err_old = err
        A = matricize(X, 0).dot(kr(C, B)).dot(linalg.pinv(grams[1] * grams[2]))
        if itr == 0:
            normalization = np.sqrt((A ** 2).sum(axis=0))
        else:
            normalization = A.max(axis=0)
            normalization[normalization < 1] = 1
        A /= normalization
        grams[0] = np.dot(A.T, A)

        B = matricize(X, 1).dot(kr(C, A)).dot(linalg.pinv(grams[0] * grams[2]))
        if itr == 0:
            normalization = np.sqrt((B ** 2).sum(axis=0))
        else:
            normalization = B.max(axis=0)
            normalization[normalization < 1] = 1
        B /= normalization
        grams[1] = np.dot(B.T, B)

        C = matricize(X, 2).dot(kr(B, A)).dot(linalg.pinv(grams[0] * grams[1]))
        if itr == 0:
            normalization = np.sqrt((C ** 2).sum(axis=0))
        else:
            normalization = C.max(axis=0)
            normalization[normalization < 1] = 1
        C /= normalization
        grams[2] = np.dot(C.T, C)

        err = linalg.norm(matricize(X, 0) - np.dot(A, kr(C, B).T)) ** 2
        thresh = np.abs(err - err_old) / err_old
        if (thresh < tol) or (itr > max_iter):
            break

    return A, B, C
Author: fabianp | Project: tensorlib | Lines: 53 | Source: decomposition.py


Example 9: _nipals_twoblocks_inner_loop

def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06,
                                 norm_y_weights=False):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y); returns the first left and right
    singular vectors of X'Y.  See PLS for the meaning of the parameters.  It is
    similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
    """
    y_score = Y[:, [0]]
    x_weights_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    eps = np.finfo(X.dtype).eps
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode == "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)   # compute once pinv(X)
            x_weights = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            x_weights = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, x_weights)
        # 2.1 Update y_weights
        if mode == "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)    # compute once pinv(Y)
            y_weights = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            y_weights = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        ## 2.2 Normalize y_weights
        if norm_y_weights:
            y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, y_weights) / (np.dot(y_weights.T, y_weights) + eps)
        ## y_score = np.dot(Y, y_weights) / np.dot(y_score.T, y_score) ## BUG
        x_weights_diff = x_weights - x_weights_old
        if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        x_weights_old = x_weights
        ite += 1
    return x_weights, y_weights, ite
Author: belltailjp | Project: scikit-learn | Lines: 51 | Source: pls_.py


Example 10: _update_precisions

    def _update_precisions(self):
        """Update the variational distributions for the precisions"""
        if self.cvtype == "spherical":
            self._a = 0.5 * self.n_features * np.sum(self._z, axis=0)
            for k in xrange(self.n_components):
                # XXX: how to avoid this huge temporary matrix in memory
                dif = self._X - self._means[k]
                self._b[k] = 1.0
                d = np.sum(dif * dif, axis=1)
                self._b[k] += 0.5 * np.sum(self._z.T[k] * (d + self.n_features))
                self._bound_prec[k] = 0.5 * self.n_features * (digamma(self._a[k]) - np.log(self._b[k]))
            self._precs = self._a / self._b

        elif self.cvtype == "diag":
            for k in xrange(self.n_components):
                self._a[k].fill(1.0 + 0.5 * np.sum(self._z.T[k], axis=0))
                ddif = self._X - self._means[k]  # see comment above
                for d in xrange(self.n_features):
                    self._b[k, d] = 1.0
                    dd = ddif.T[d] * ddif.T[d]
                    self._b[k, d] += 0.5 * np.sum(self._z.T[k] * (dd + 1))
                self._precs[k] = self._a[k] / self._b[k]
                self._bound_prec[k] = 0.5 * np.sum(digamma(self._a[k]) - np.log(self._b[k]))
                self._bound_prec[k] -= 0.5 * np.sum(self._precs[k])

        elif self.cvtype == "tied":
            self._a = 2 + self._X.shape[0] + self.n_features
            self._B = (self._X.shape[0] + 1) * np.identity(self.n_features)
            for i in xrange(self._X.shape[0]):
                for k in xrange(self.n_components):
                    dif = self._X[i] - self._means[k]
                    self._B += self._z[i, k] * np.dot(dif.reshape((-1, 1)), dif.reshape((1, -1)))
            self._B = linalg.pinv(self._B)
            self._precs = self._a * self._B
            self._detB = linalg.det(self._B)
            self._bound_prec = 0.5 * detlog_wishart(self._a, self._B, self._detB, self.n_features)
            self._bound_prec -= 0.5 * self._a * np.trace(self._B)

        elif self.cvtype == "full":
            for k in xrange(self.n_components):
                T = np.sum(self._z.T[k])
                self._a[k] = 2 + T + self.n_features
                self._B[k] = (T + 1) * np.identity(self.n_features)
                for i in xrange(self._X.shape[0]):
                    dif = self._X[i] - self._means[k]
                    self._B[k] += self._z[i, k] * np.dot(dif.reshape((-1, 1)), dif.reshape((1, -1)))
                self._B[k] = linalg.pinv(self._B[k])
                self._precs[k] = self._a[k] * self._B[k]
                self._detB[k] = linalg.det(self._B[k])
                self._bound_prec[k] = 0.5 * detlog_wishart(self._a[k], self._B[k], self._detB[k], self.n_features)
                self._bound_prec[k] -= 0.5 * self._a[k] * np.trace(self._B[k])
Author: yikuizhai | Project: scikit-learn | Lines: 51 | Source: dpgmm.py


Example 11: __init__

    def __init__(self, X, Y, rank, reg=None):
        if size(shape(X)) == 1:
            X = reshape(X, (-1, 1))
        if size(shape(Y)) == 1:
            Y = reshape(Y, (-1, 1))
        if reg is None:
            reg = 0
        self.rank = rank

        CXX = dot(X.T, X) + reg * eye(size(X, 1))
        CXY = dot(X.T, Y)
        _U, _S, V = svd(dot(CXY.T, dot(pinv(CXX), CXY)))
        self.W = V[0:rank, :].T
        self.A = dot(pinv(CXX), dot(CXY, self.W)).T
Author: riscy | Project: mllm | Lines: 14 | Source: reduced_rank_regressor.py
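Example 11 is the standard reduced-rank regression estimator. Writing C_XX = X'X + reg*I and C_XY = X'Y as in the constructor, the SVD extracts the top-r right singular vectors V_r of C_XY' C_XX^+ C_XY (with pinv standing in for the inverse), and the coefficient matrix is the regularized least-squares solution projected onto that subspace. In the notation of the code (this is the textbook derivation, stated here for orientation; the prediction step is not part of the excerpt):

\hat{B} \;=\; \underbrace{C_{XX}^{+} C_{XY} V_r}_{\texttt{self.A}^{T}} \;\underbrace{V_r^{T}}_{\texttt{self.W}^{T}}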


Example 12: _nipals_twoblocks_inner_loop

def _nipals_twoblocks_inner_loop(X, Y, mode="A", max_iter=500, tol=1e-06):
    """Inner loop of the iterative NIPALS algorithm.

    Provides an alternative to the svd(X'Y), i.e. returns the first left and
    right singular vectors of X'Y. See PLS for the meaning of the parameters.
    It is similar to the Power method for determining the eigenvectors and
    eigenvalues of X'Y.
    """
    y_score = Y[:, [0]]
    u_old = 0
    ite = 1
    X_pinv = Y_pinv = None
    # Inner loop of the Wold algo.
    while True:
        # 1.1 Update u: the X weights
        if mode is "B":
            if X_pinv is None:
                X_pinv = linalg.pinv(X)   # compute once pinv(X)
            u = np.dot(X_pinv, y_score)
        else:  # mode A
            # Mode A regress each X column on y_score
            u = np.dot(X.T, y_score) / np.dot(y_score.T, y_score)
        # 1.2 Normalize u
        u /= np.sqrt(np.dot(u.T, u))
        # 1.3 Update x_score: the X latent scores
        x_score = np.dot(X, u)

        # 2.1 Update v: the Y weights
        if mode is "B":
            if Y_pinv is None:
                Y_pinv = linalg.pinv(Y)    # compute once pinv(Y)
            v = np.dot(Y_pinv, x_score)
        else:
            # Mode A regress each Y column on x_score
            v = np.dot(Y.T, x_score) / np.dot(x_score.T, x_score)
        # 2.2 Normalize v
        v /= np.sqrt(np.dot(v.T, v))
        # 2.3 Update y_score: the Y latent scores
        y_score = np.dot(Y, v)

        u_diff = u - u_old
        if np.dot(u_diff.T, u_diff) < tol or Y.shape[1] == 1:
            break
        if ite == max_iter:
            warnings.warn('Maximum number of iterations reached')
            break
        u_old = u
        ite += 1
    return u, v
Author: dattanchu | Project: scikit-learn | Lines: 48 | Source: pls.py


Example 13: preProcess

def preProcess(u,y,NumDict):
    
    NumInputs = u.shape[0]
    NumOutputs = y.shape[0]
    NumRows = NumDict['Rows']
    NumCols = NumDict['Columns']
    NSig = NumDict['Dimension']
    UPast,UFuture = getHankelMatrices(u,NumRows,NumCols)
    YPast,YFuture = getHankelMatrices(y,NumRows,NumCols)
    Data = np.vstack((UPast,UFuture,YPast))
    L = la.lstsq(Data.T,YFuture.T)[0].T
    Z = np.dot(L,Data)
    DataShift = np.vstack((UPast,UFuture[NumInputs:],YPast))
    LShift = la.lstsq(DataShift.T,YFuture[NumOutputs:].T)[0].T
    ZShift = np.dot(LShift,DataShift)

    L1 = L[:,:NumInputs*NumRows]
    L3 = L[:,2*NumInputs*NumRows:]

    LPast = np.hstack((L1,L3))
    DataPast = np.vstack((UPast,YPast))

    U, S, Vt = la.svd(np.dot(LPast,DataPast))
    
    Sig = np.diag(S[:NSig])
    SigRt = np.diag(np.sqrt(S[:NSig]))
    Gamma = np.dot(U[:,:NSig],SigRt)
    GammaLess = Gamma[:-NumOutputs]

    GammaPinv = la.pinv(Gamma)
    GammaLessPinv = la.pinv(GammaLess)

    GamShiftSolve = la.lstsq(GammaLess,ZShift)[0]


    GamSolve = la.lstsq(Gamma,Z)[0]
    GamData = np.vstack((GamSolve,UFuture))

    GamYData = np.vstack((GamShiftSolve,YFuture[:NumOutputs]))
    # Should probably move to a better output structure,
    # one that doesn't depend so heavily on ordering

    GammaDict = {'Data':GamData,
                 'DataLess':GammaLess,
                 'DataY':GamYData,
                 'Pinv': GammaPinv,
                 'LessPinv': GammaLessPinv}
    return GammaDict,S
Author: AndyLamperski | Project: pyN4SID | Lines: 48 | Source: ssid.py


Example 14: calculateGradient

    def calculateGradient(self):
        # normalize rewards
        # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))

        # initialize variables
        R = ones((self.dataset.getNumSequences(), 1), float)
        X = ones((self.dataset.getNumSequences(), self.loglh.getDimension('loglh') + 1), float)

        # collect sufficient statistics
        print(self.dataset.getNumSequences())
        for n in range(self.dataset.getNumSequences()):
            _state, _action, reward = self.dataset.getSequence(n)
            seqidx = ravel(self.dataset['sequence_index'])
            if n == self.dataset.getNumSequences() - 1:
                # last sequence until end of dataset
                loglh = self.loglh['loglh'][seqidx[n]:, :]
            else:
                loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]

            X[n, :-1] = sum(loglh, 0)
            R[n, 0] = sum(reward, 0)

        # linear regression
        beta = dot(pinv(X), R)
        return beta[:-1]
Author: DanSGraham | Project: School-Projects | Lines: 25 | Source: enac.py


Example 15: test_simple_complex

    def test_simple_complex(self):
        a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
             + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
        a_pinv = pinv(a)
        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
        a_pinv = pinv2(a)
        assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
Author: 7924102 | Project: scipy | Lines: 7 | Source: test_basic.py


Example 16: testPI

    def testPI(self, level=1):
        """Test TRAIN_PI with zero input and feedback."""

        # init network
        self.net.setSimAlgorithm(SIM_STD)
        self.net.setTrainAlgorithm(TRAIN_PI)
        self.net.init()

        # train network
        washout = 2
        # test with zero input:
        indata = N.zeros((self.ins, self.train_size), self.dtype)
        outdata = N.random.rand(self.outs, self.train_size) * 2 - 1
        indata = N.asfarray(indata, self.dtype)
        outdata = N.asfarray(outdata, self.dtype)
        self.net.train(indata, outdata, washout)
        wout_target = self.net.getWout().copy()

        # teacher forcing, collect states
        X = self._teacherForcing(indata, outdata)

        # restructure data
        M = N.r_[X, indata]
        M = M[:, washout:self.train_size].T
        T = outdata[:, washout:self.train_size].T

        # calc pseudo-inverse: wout = pinv(M) * T
        wout = N.dot(pinv(M), T).T

        # normalize result for comparison
        wout = wout / abs(wout).max()
        wout_target = wout_target / abs(wout_target).max()
        assert_array_almost_equal(wout_target, wout, 2)
Author: HerrPeterPaul | Project: anna | Lines: 33 | Source: test_train.py


Example 17: invmodes_m

    def invmodes_m(self, mi, threshold=None):
        """Get the inverse modes.

        If the true inverse has been cached, return the modes for the current
        `threshold`. Otherwise generate the Moore-Penrose pseudo-inverse.

        Parameters
        ----------
        mi : integer
            m-mode to generate for.
        threshold : scalar
            S/N threshold to use.

        Returns
        -------
        invmodes : np.ndarray
        """

        evals = self.evals_m(mi, threshold)

        with h5py.File(self._evfile % mi, 'r') as f:
            if 'evinv' in f:
                inv = f['evinv'][:]

                if threshold is not None:
                    nevals = evals.size
                    inv = inv[(-nevals):]

                return inv.T

            else:
                print "Inverse not cached, generating pseudo-inverse."
                return la.pinv(self.modes_m(mi, threshold)[1])
Author: SaulAryehKohn | Project: driftscan | Lines: 33 | Source: kltransform.py


Example 18: correct_covariance

    def correct_covariance(self, data):
        """Apply a correction to raw Minimum Covariance Determinant estimates.

        Correction using the empirical correction factor suggested
        by Rousseeuw and Van Driessen in [Rouseeuw1984]_.

        Parameters
        ----------
        data: array-like, shape (n_samples, n_features)
          The data matrix, with p features and n samples.
          The data set must be the one which was used to compute
          the raw estimates.

        Returns
        -------
        covariance_corrected: array-like, shape (n_features, n_features)
          Corrected robust covariance estimate.

        """
        X_centered = data - self.raw_location_
        dist = np.sum(
            np.dot(X_centered, linalg.pinv(self.raw_covariance_)) * X_centered,
            1)
        correction = np.median(dist) / chi2(data.shape[1]).isf(0.5)
        covariance_corrected = self.raw_covariance_ * correction
        self._set_estimates(covariance_corrected)
        return covariance_corrected
Author: AlexLerman | Project: scikit-learn | Lines: 27 | Source: robust_covariance.py


Example 19: score

    def score(self, X_test, assume_centered=False):
        """Computes the log-likelihood of a gaussian data set with
        `self.covariance_` as an estimator of its covariance matrix.

        Parameters
        ----------
        X_test : array-like, shape = [n_samples, n_features]
          Test data of which we compute the likelihood,
          where n_samples is the number of samples and n_features is
          the number of features.

        Returns
        -------
        res: float
          The likelihood of the data set with self.covariance_ as an estimator
          of its covariance matrix.

        """
        # compute empirical covariance of the test set
        test_cov = empirical_covariance(X_test, assume_centered=assume_centered)
        # compute log likelihood
        if self.store_precision:
            res = log_likelihood(test_cov, self.precision_)
        else:
            res = log_likelihood(test_cov, linalg.pinv(self.covariance_))

        return res
Author: aayushsaxena15 | Project: projects | Lines: 27 | Source: empirical_covariance_.py


Example 20: precompute_D_step

def precompute_D_step(z_hat, size_z, rho):
    """Computes to cache the values of Z^.T and (Z^.T*Z^ + rho*I)^-1 as in algorithm"""
    
    n = size_z[0]
    k = size_z[1]
    
    zhat_mat = np.transpose(z_hat.transpose(0,1,3,2).reshape(n, k, -1), [2, 0, 1])    
    zhat_inv_mat = np.zeros((zhat_mat.shape[0], k, k), dtype=imaginary_type)
    inv_rho_z_hat_z_hat_t = np.zeros((zhat_mat.shape[0], n, n), dtype=imaginary_type)
    z_hat_mat_t = np.transpose(np.ma.conjugate(zhat_mat), [0, 2, 1])
    
    # Compute Z_hat * Z_hat^H (conjugate transpose) for each pixel
    z_hat_z_hat_t = np.einsum('knm,kmj->knj',zhat_mat, z_hat_mat_t)
    
    for i in range(zhat_mat.shape[0]):
        z_hat_z_hat_t_plus_rho = z_hat_z_hat_t[i]
        z_hat_z_hat_t_plus_rho.flat[::n + 1] += rho
        inv_rho_z_hat_z_hat_t[i] = linalg.pinv(z_hat_z_hat_t_plus_rho)
                         
    zhat_inv_mat = 1.0/rho * (np.eye(k) - 
                              np.einsum('knm,kmj->knj',                              
                                        np.einsum('knm,kmj->knj',
                                                  z_hat_mat_t,
                                                  inv_rho_z_hat_z_hat_t),
                                        zhat_mat))
    
    print('Done precomputing for D')
    return [zhat_mat, zhat_inv_mat]
Author: jasmainak | Project: ConvolutionalSparseCoding | Lines: 28 | Source: CSC_image.py
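The closed-form expression for zhat_inv_mat at the end of Example 20 is an application of the Woodbury matrix identity: with Z_hat the n-by-k per-pixel matrix, the k-by-k inverse the algorithm needs is recovered from the n-by-n inverse that the loop actually computes, which is cheaper whenever n < k:

(\hat{Z}^{H}\hat{Z} + \rho I_k)^{-1} \;=\; \frac{1}{\rho}\left(I_k - \hat{Z}^{H}\,(\hat{Z}\hat{Z}^{H} + \rho I_n)^{-1}\,\hat{Z}\right)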



Note: the scipy.linalg.pinv examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not repost without permission.

