
Python numpy.ix_ Function Code Examples


This article collects typical usage examples of Python's numpy.ix_ function. If you have been wondering what exactly numpy.ix_ does, how to use it, or what real-world ix_ code looks like, the curated examples below should help.



Twenty code examples of the ix_ function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help our system recommend better Python code examples.
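Before diving into the examples, here is a minimal, self-contained sketch of what numpy.ix_ does (the array and index lists are purely illustrative): it converts index vectors into an open mesh, so that A[np.ix_(rows, cols)] selects the full cross-product submatrix instead of element-wise index pairs.

import numpy as np

A = np.arange(16).reshape(4, 4)
rows, cols = [0, 2], [1, 3]
A[np.ix_(rows, cols)]   # cross product: array([[ 1,  3],
                        #                       [ 9, 11]])
A[rows, cols]           # element-wise pairing: array([ 1, 11])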

Example 1: Barabasi_Albert

def Barabasi_Albert(m0, m, N):
    if m > m0:
        raise ValueError('m must be smaller than or equal to m0')
    # Adjacency matrix; the first m0 nodes form a fully connected seed.
    K = np.eye(N, dtype=bool)
    K[np.ix_(np.arange(m0), np.arange(m0))] = True
    for ix in np.arange(m0, N):
        selected = np.zeros((ix,), dtype=bool)
        for conn in np.arange(m):
            # Preferential attachment: draw a not-yet-linked node with
            # probability proportional to its current degree.
            free = np.flatnonzero(np.logical_not(selected))
            p = np.sum(K[np.ix_(np.arange(ix), free)], axis=0).astype(float)
            cdf = np.cumsum(p) / np.sum(p)
            r = np.random.uniform()
            link = free[np.searchsorted(cdf, r)]
            K[ix, link] = True
            K[link, ix] = True
            selected[link] = True
    # Shuffle node labels with a random permutation.
    rp = np.random.permutation(N)
    return K[np.ix_(rp, rp)]
Author: rphlypo, Project: connectivity, Lines: 25, Source: graph_models.py


Example 2: test_chol_add_remove

def test_chol_add_remove():
    N = 5
    X = np.random.randn(10,N)
    A = X.T.dot(X)
    L = np.linalg.cholesky(A)

    Am = A[:-1,:-1]
    bm = A[:-1,-1]
    cm = A[-1,-1]
    Lm = np.linalg.cholesky(Am)

    # Get chol by adding row
    assert np.allclose(L, chol_add_row(Lm, bm, cm))

    # Now get chol by removing a row
    def to_range(start, stop):
        return np.setdiff1d(np.arange(N), np.arange(start,stop))
    assert np.allclose(
        np.linalg.cholesky(A[np.ix_(to_range(4, 5), to_range(4, 5))]),
        chol_remove_row(L, 4, 5))

    assert np.allclose(
        np.linalg.cholesky(A[np.ix_(to_range(1, 3), to_range(1, 3))]),
        chol_remove_row(L, 1, 3))
Author: sheqi, Project: pyglm, Lines: 26, Source: test_linalg.py


Example 3: subset

    def subset(self, variables=None, samples=None):
        """Returns a subset of the dataset (and metadata).
        
        Specify the variables and samples to include in the subset.
        variables and samples should be lists of ids; if omitted, all
        variables or samples are included.

        Some examples:
        
            - d.subset([3], [4])
            - d.subset([3,1,2])
            - d.subset(samples=[5,2,7,1])
        
        Note: order matters! d.subset([3,1,2]) != d.subset([1,2,3])

        """

        variables = variables if variables is not None else range(self.variables.size)
        samples = samples if samples is not None else range(self.samples.size)
        skip_stats = not (self.has_interventions or self.has_missing)
        d = Dataset(
            self.observations[N.ix_(samples,variables)],
            self.missing[N.ix_(samples,variables)],
            self.interventions[N.ix_(samples,variables)],
            self.variables[variables],
            self.samples[samples],
            skip_stats = skip_stats
        )
        
        # if self does not have interventions or missing, the subset can't.
        if skip_stats:
            d._has_interventions = False
            d._has_missing = False

        return d
Author: Alwnikrotikz, Project: pebl-project, Lines: 35, Source: data.py


Example 4: _safe_split

def _safe_split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset and properly handle kernels."""
    from ..gaussian_process.kernels import Kernel as GPKernel

    if (hasattr(estimator, 'kernel') and callable(estimator.kernel) and
            not isinstance(estimator.kernel, GPKernel)):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[index] for index in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = safe_indexing(X, indices)

    if y is not None:
        y_subset = safe_indexing(y, indices)
    else:
        y_subset = None

    return X_subset, y_subset
Author: IsaacHaze, Project: scikit-learn, Lines: 33, Source: metaestimators.py


Example 5: _split

def _split(estimator, X, y, indices, train_indices=None):
    """Create subset of dataset."""
    if hasattr(estimator, 'kernel') and callable(estimator.kernel):
        # cannot compute the kernel values with custom function
        raise ValueError("Cannot use a custom kernel function. "
                         "Precompute the kernel matrix instead.")

    if not hasattr(X, "shape"):
        if getattr(estimator, "_pairwise", False):
            raise ValueError("Precomputed kernels or affinity matrices have "
                             "to be passed as arrays or sparse matrices.")
        X_subset = [X[idx] for idx in indices]
    else:
        if getattr(estimator, "_pairwise", False):
            # X is a precomputed square kernel matrix
            if X.shape[0] != X.shape[1]:
                raise ValueError("X should be a square kernel matrix")
            if train_indices is None:
                X_subset = X[np.ix_(indices, indices)]
            else:
                X_subset = X[np.ix_(indices, train_indices)]
        else:
            X_subset = X[safe_mask(X, indices)]

    if y is not None:
        y_subset = y[safe_mask(y, indices)]
    else:
        y_subset = None

    return X_subset, y_subset
Author: Idan-M, Project: scikit-learn, Lines: 30, Source: grid_search.py


Example 6: runTest

    def runTest(self):
        F=lambda x,y: 100.0*((x>=0.4)&(x<=0.6)&(y>=0.4)&(y<=0.6))
        G=lambda x,y: (y==0)*1.0+(y==1)*(-1.0)

        a=fasm.AssemblerElement(self.mesh,felem.ElementTriP1())

        dudv=lambda du,dv: du[0]*dv[0]+du[1]*dv[1]
        K=a.iasm(dudv)

        uv=lambda u,v: u*v
        B=a.fasm(uv)
        
        fv=lambda v,x: F(x[0],x[1])*v
        f=a.iasm(fv)

        gv=lambda v,x: G(x[0],x[1])*v
        g=a.fasm(gv)

        D=np.nonzero(self.mesh.p[0,:]==0)[0]
        I=np.setdiff1d(np.arange(0,self.mesh.p.shape[1]),D)

        x=np.zeros(K.shape[0])
        x[I]=scipy.sparse.linalg.spsolve(K[np.ix_(I,I)]+B[np.ix_(I,I)],
                                         f[I]+g[I])

        self.assertAlmostEqual(np.max(x),1.89635971369,places=2)
Author: kinnala, Project: sp.fem, Lines: 26, Source: test_asm.py


Example 7: penalty_function

def penalty_function(vocab_indices, summary_indices, sentence_similarity, config):
    """
    The penalty function described in the paper
    "Graph-Based Submodular Selection for Extractive Summarization".
    It penalises redundancy within the summary.
    Args:
        vocab_indices: list
        summary_indices: list
        sentence_similarity: ndarray
        config: dictionary
            Some of the methods require hyperparameters to be set.
    Returns: The value of the graph cut function
    """
    penalty_lambda = config["penalty_lambda"]
    sentence_similarity_ = np.copy(sentence_similarity)
    np.fill_diagonal(sentence_similarity_, 0.0)

    if len(summary_indices) == 0:
        fn_value = 0.0
    else:
        v_not_in_s = list(set(vocab_indices) - set(summary_indices))
        rows = v_not_in_s
        cols = summary_indices
        # Advanced (cross-product) indexing of the numpy arrays
        fn_value = np.sum(sentence_similarity[np.ix_(rows, cols)]) - \
                   penalty_lambda * np.sum(sentence_similarity_[np.ix_(summary_indices, summary_indices)])

    return fn_value
Author: abhinavkashyap92, Project: extractive_summarisation, Lines: 30, Source: sub_modular_functions.py
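A minimal usage sketch of the function above (the similarity matrix, indices, and lambda value are made up for illustration):

import numpy as np

similarity = np.array([[1.0, 0.8, 0.1],
                       [0.8, 1.0, 0.2],
                       [0.1, 0.2, 1.0]])
config = {"penalty_lambda": 0.5}
# Coverage of the non-summary sentences by sentence 1, minus the
# (here zero) redundancy within the one-sentence summary: 0.8 + 0.2 = 1.0
value = penalty_function([0, 1, 2], [1], similarity, config)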


Example 8: load_weight_files

def load_weight_files(weights_files, genes, patients, typeToGeneIndex, typeToPatientIndex, masterGeneToIndex, masterPatientToIndex):
    # Master matrix of all weights
    P = np.zeros((len(genes), len(patients)))
    for i, weights_file in enumerate(weights_files):
        # Load the weights matrix for this cancer type and update the entries appropriately.
        # Note that since genes/patients can be measured in multiple types, we need to map
        # each patient to the "master" index.
        type_P                 = np.load(weights_file)

        ty_genes               = set(typeToGeneIndex[i].keys()) & genes
        ty_gene_indices        = [ typeToGeneIndex[i][g] for g in ty_genes ]
        master_gene_indices    = [ masterGeneToIndex[g] for g in ty_genes ]

        ty_patients            = set(typeToPatientIndex[i].keys()) & patients
        ty_patient_indices     = [ typeToPatientIndex[i][p] for p in ty_patients ]
        master_patient_indices = [ masterPatientToIndex[p] for p in ty_patients ]

        master_mesh            = np.ix_(master_gene_indices, master_patient_indices)
        ty_mesh                = np.ix_(ty_gene_indices, ty_patient_indices)

        if np.any( P[master_mesh] > 0 ):
            raise ValueError("Different weights for same gene-patient pair")
        else:
            P[ master_mesh ] = type_P[ ty_mesh  ]

    # Set any zero entries to the minimum (pseudocount). The only reason for zeros is if
    #  a gene wasn't mutated at all in a particular dataset.
    P[P == 0] = np.min(P[P > 0])

    return dict( (g, P[masterGeneToIndex[g]]) for g in genes )
Author: raphael-group, Project: wext, Lines: 30, Source: find_sets.py
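The master_mesh/ty_mesh pattern above generalizes: np.ix_ also works on the left-hand side of an assignment, scattering a sub-block into selected rows and columns. A toy sketch (all names and values are illustrative):

import numpy as np

master = np.zeros((4, 4))
block = np.arange(9.0).reshape(3, 3)
dst = np.ix_([0, 2], [1, 3])    # target rows/cols in master
src = np.ix_([0, 1], [0, 2])    # source rows/cols in block
master[dst] = block[src]        # scatter the 2x2 sub-block into master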


Example 9: normal_eq_comb

def normal_eq_comb(AtA, AtB, PassSet = None):
    num_cholesky = 0
    num_eq = 0
    if AtB.size == 0:
        Z = np.zeros([])

    elif (PassSet is None) or np.all(PassSet):
        Z = nla.solve(AtA, AtB)
        num_cholesky = 1
        num_eq = AtB.shape[1]

    else:
        Z = np.zeros(AtB.shape) #(n, k)
        if PassSet.shape[1] == 1:
            if np.any(PassSet):
                cols = np.nonzero(PassSet)[0]
                Z[cols] = nla.solve(AtA[np.ix_(cols, cols)], AtB[cols])
                num_cholesky = 1
                num_eq = 1
        else:
            groups = column_group(PassSet)

            for g in groups:
                cols = np.nonzero(PassSet[:, g[0]])[0]

                if cols.size > 0:
                    ix1 = np.ix_(cols, g)
                    ix2 = np.ix_(cols, cols)

                    Z[ix1] = nla.solve(AtA[ix2], AtB[ix1])
                    num_cholesky += 1
                    num_eq += len(g)
    return Z, num_cholesky, num_eq
Author: crcrpar, Project: DataAnalysis, Lines: 34, Source: function.py


Example 10: classify_binomial

def classify_binomial(x, data, counts, y):
    classes, y = np.unique(y, return_inverse=True)
    max_label = None
    max = None
    for class_label in np.nditer(classes):
        class_examples = data[np.ix_(y == class_label)]
        class_counts = counts[np.ix_(y == class_label)]
        total_class_counts = sum(class_counts)
        alfas = (class_examples.sum(axis=0) + 0.01)/(total_class_counts + 0.01)

        prior = len(class_examples) / len(data)
        membership = getMembershipBinomial(x,alfas, prior, class_counts, total_class_counts)
        if(max_label is None):
            max_label = class_label
            max = membership
        else:
            if(class_label == 0):
                if membership>max:
                    max = membership
                    max_label = class_label
            else:
                if membership>(max+8.5):
                    max = membership
                    max_label = class_label
    return max_label
Author: marrosenfeld, Project: Machine-Learning, Lines: 25, Source: naive_bayes.py


Example 11: conditional

    def conditional(self, in_dims, out_dims):
        conditionals = []

        for k, (weight_k, mean_k, covar_k) in enumerate(self):
            conditionals.append(conditional(mean_k, covar_k,
                                            in_dims, out_dims,
                                            self.covariance_type))

        cond_weights = lambda v: [(weight_k * Gaussian(mean_k[in_dims].reshape(-1,),
                                  covar_k[ix_(in_dims, in_dims)]).normal(v.reshape(-1,)))
                                  for k, (weight_k, mean_k, covar_k) in enumerate(self)]

        def res(v):
            gmm = GMM(n_components=self.n_components,
                      covariance_type=self.covariance_type,
                      random_state=self.random_state, thresh=self.thresh,
                      min_covar=self.min_covar, n_iter=self.n_iter, n_init=self.n_init,
                      params=self.params, init_params=self.init_params)
            gmm.weights_ = cond_weights(v)
            means_covars = [f(v) for f in conditionals]
            gmm.means_ = array([mc[0] for mc in means_covars]).reshape(self.n_components,
                                                                       -1)
            gmm._set_covars(array([mc[1] for mc in means_covars]))
            return gmm

        return res
Author: flowersteam, Project: explauto, Lines: 60, Source: gmminf.py


Example 12: dctt1

def dctt1(a):
    """ dct  Discrete cosine transform.
    y = dct(a) returns the discrete cosine transform of a.
    The vector y is the same size as `a` and contains the
    discrete cosine transform coefficients.
    """
    if len(a.shape)==1:
        a = a.reshape(a.size,1)
    n,m = a.shape
    aa = a[:,:]
    #Compute weights to multiply DFT coefficients
    ww = arrayexp(n)
    if n%2 == 1:
        y = np.zeros([2*n,m])
        y[:n,:] = aa
        y[n:2*n,:] = np.flipud(aa)
        # Compute the FFT and keep the appropriate portion:
        yy = np.fft.fft(y,axis=0)
        yy = yy[:n,:]
    else:
        # Re-order the elements of the columns of x
        y = np.concatenate((aa[np.ix_(range(0,n,2))],\
                            aa[np.ix_(range(1,n,2)[::-1])]), axis=0)
        yy = np.fft.fft(y,axis=0)
        ww = 2*ww  # Double the weights for even-length case 

    wy = np.empty([n,m], complex)
    for j in range(m):
        wy[:,j]  = ww
    # Multiply FFT by weights:
    b = np.multiply(wy,yy)
    
    return b[:n,:m].real
Author: modeha, Project: lsq_solver, Lines: 33, Source: dctt.py


Example 13: comp

        def comp(self,mean,var,covar,resp):

            # Store the indices to the missing and observed responses.
            miss=numpy.isnan(resp)
            obs=numpy.logical_not(miss)

            if miss.all():
                return mean,var,covar

            # Store the size of the model.
            numresp,numpred=numpy.shape(self.gain)

            kalmgain=numpy.eye(numresp)
            josgain=numpy.eye(numresp)

            # Fill in the Kalman and Joseph gain matrices.
            ind=numpy.ix_(miss,obs)
            kalmgain[ind]=linalg.solve(self.noise[numpy.ix_(obs,obs)],
                self.noise[ind].transpose()).transpose()
            josgain[:,obs]=josgain[:,obs]-kalmgain[:,obs]

            # Compute the predictor/response co-variance.
            covar=covar.dot(josgain.transpose())

            # Condition the response mean/variance on the observations.
            mean=josgain.dot(mean)+numpy.dot(kalmgain[:,obs],resp[obs])
            var=numpy.dot(josgain,numpy.dot(var,josgain.transpose()))

            return mean,var,covar
Author: gabrieag, Project: glds, Lines: 29, Source: glds.py


Example 14: consgpattern

        def consgpattern():
            """
            binary pattern of the sparse nonlinear constraint gradient

            """

            vfdxpat = ( self.vfielddxpattern
                        if self.vfielddxpattern is not None
                        else np.ones( (self.Nstates,self.Nstates) ) )
            vfdupat = ( self.vfielddupattern
                        if self.vfielddupattern is not None
                        else np.ones( (self.Nstates,self.Ninputs) ) )
            if( self.Ncons > 0 ):
                consdxpat = ( self.consdxpattern
                              if self.consdxpattern is not None
                              else np.ones( (self.Ncons,self.Ninputs) ) )

            out = np.zeros( ( feuler.Ncons, feuler.N ), dtype=int )

            for k in range( Nsamples ):
                out[ np.ix_( dconsidx[:,k+1], stidx[:,k] ) ] = vfdxpat
                out[ np.ix_( dconsidx[:,k+1], uidx[:,k] ) ] = vfdupat

                if( self.Ncons > 0 ):
                    out[ np.ix_( iconsidx[:,k], stidx[:,k+1] ) ] = consdxpat

            return out
Author: hgonzale, Project: optwrapper, Lines: 27, Source: ocp.py


Example 15: get_corr_pred

    def get_corr_pred( self, sctx, u, du, tn, tn1 ):

        n_ip_arr, ip_coords_arr, ip_weights_arr = self.ip_scheme

        self.F_int[:] = 0.0
        self.k_arr[...] = 0.0

        B_mtx_grid = None
        J_det_grid = None

        ip_offset = 0
        k_list = []
        for e_id, ( elem, n_ip ) in enumerate( zip( self.sdomain.elements, n_ip_arr ) ):
            ip_coords = ip_coords_arr[ ip_offset : ip_offset + n_ip ]
            ip_weights = ip_weights_arr[ ip_offset : ip_offset + n_ip ]
            ix = elem.get_dof_map()
            sctx.elem = elem
            sctx.elem_state_array = self.state_array[ ip_offset : ip_offset + n_ip ].flatten()
            sctx.X = elem.get_X_mtx()
            if self.cache_geo_matrices:
                B_mtx_grid = self.B_mtx_grid[ e_id, ... ]
                J_det_grid = self.J_det_grid[ e_id, ... ]
            f, k = self.fets_eval.get_corr_pred( sctx, u[ix_( ix )], du[ix_( ix )],
                                                 tn, tn1,
                                                 B_mtx_grid = B_mtx_grid,
                                                 J_det_grid = J_det_grid,
                                                 ip_coords = ip_coords,
                                                 ip_weights = ip_weights )

            self.k_arr[ e_id ] = k
            self.F_int[ ix_( ix ) ] += f
            ip_offset += n_ip

        return self.F_int, SysMtxArray( mtx_arr = self.k_arr, dof_map_arr = self.sdomain.elem_dof_map )
Author: simvisage, Project: simvisage, Lines: 34, Source: dots_unstructured_eval.py


Example 16: multiClassSVM

def multiClassSVM(distances, trainingIndice, testingIndice, semanticLabels, kernelType):

    distances = distances ** 2
    trainDistance = distances[np.ix_(trainingIndice, trainingIndice)]

    gamma = 1.0 / np.mean(trainDistance)
    kernelParam = []
    kernelParam.append(gamma)


    tempList = []
    tempList.append(kernelType)
    baseKernel = constructBaseKernels(tempList, kernelParam, distances)

    trainGramMatrix = baseKernel[0][np.ix_(trainingIndice, trainingIndice)]
    testGramMatrix = baseKernel[0][np.ix_(testingIndice, trainingIndice)]

    trainLabels = [semanticLabels[i] for i in trainingIndice]
    testLabels = [semanticLabels[i] for i in testingIndice]

    clf = SVC(kernel = "precomputed")
    clf.fit(trainGramMatrix, trainLabels)
    SVMResults = clf.predict(testGramMatrix)

    correct = sum(1.0 * (SVMResults == testLabels))
    accuracy = correct / len(testLabels)

    return accuracy
Author: esokullu, Project: Image-Recognition, Lines: 28, Source: classification.py


Example 17: nd_bootstrap

def nd_bootstrap(data, iterations, axis=None, strip_tuple_if_one=True):
    """
    Bootstrap iterator for several n-dimensional data arrays.

    :param data: Iterable containing the data arrays
    :param iterations: Number of bootstrap iterations.
    :param axis: Bootstrapping is performed along this axis.
    """
    shape0 = data[0].shape
    if axis is None:
        axis = 0
        data = [d.ravel() for d in data]

    n = len(data[0].shape)
    K = len(data)
    data0 = []

    if axis is not None:
        m = data[0].shape[axis]
        to = tuple([axis]) + tuple(range(axis)) + tuple(range(axis + 1, n))
        fro = tuple(range(1, axis + 1)) + (0,) + tuple(range(axis + 1, n))
        for i in range(K):
            data0.append(data[i].transpose(to))

        for i in range(iterations):
            idx = np.random.randint(m, size=(m,))
            if len(data) == 1 and strip_tuple_if_one:
                yield (data0[0][np.ix_(idx), ...].squeeze().
                       transpose(fro).reshape(shape0))
            else:
                yield tuple(a[np.ix_(idx), ...].squeeze().
                            transpose(fro).reshape(shape0) for a in data0)
Author: philippberens, Project: PyCircStat, Lines: 32, Source: iterators.py
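A minimal usage sketch of the iterator above, assuming a single 2-D array resampled along axis 0 (the data and iteration count are illustrative):

import numpy as np

x = np.random.randn(50, 3)              # 50 observations, 3 features
means = np.array([b.mean(axis=0)        # each b is one resample of x
                  for b in nd_bootstrap((x,), iterations=200, axis=0)])
ci = np.percentile(means, [2.5, 97.5], axis=0)   # bootstrap 95% interval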


Example 18: _steadystate_direct_sparse

def _steadystate_direct_sparse(L, use_rcm=True, use_umfpack=False):
    """
    Direct solver that uses scipy sparse matrices
    """
    if settings.debug:
        print('Starting direct solver...')
    dims=L.dims[0]
    weight=np.abs(L.data.max())
    n = prod(L.dims[0][0])
    b = np.zeros((n ** 2, 1), dtype=complex)
    b[0,0] = weight
    L = L.data + sp.csr_matrix((weight*np.ones(n), (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
                               shape=(n ** 2, n ** 2))
    L.sort_indices()
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    if use_rcm:
        perm = symrcm(L)
        L = sparse_permute(L,perm,perm)
        b = b[np.ix_(perm,)]
    
    v = spsolve(L, b)
    if use_rcm:
        rev_perm = np.argsort(perm)
        v = v[np.ix_(rev_perm,)]
    
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)
    return Qobj(data, dims=dims, isherm=True)
Author: i2000s, Project: qutip, Lines: 28, Source: steadystate.py
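Note the single-argument form used twice above: b[np.ix_(perm,)] permutes a 1-D vector, and np.argsort(perm) yields the inverse permutation that undoes the reordering. A tiny sketch (values illustrative):

import numpy as np

perm = np.array([2, 0, 1])
v = np.array([10., 20., 30.])
vp = v[np.ix_(perm,)]             # permuted: array([30., 10., 20.])
vp[np.ix_(np.argsort(perm),)]     # restored: array([10., 20., 30.])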


Example 19: patch

def patch(data, rows, cols=None):
    """
    data: data matrix, 1D or 2D array, or a scipy sparse matrix
    rows: iterable of row indices to select; None selects all rows
    cols: iterable of column indices to select; None selects all columns
    Returns an np.array with the patch shape and the same number of
    dimensions as data (1D or 2D). If data is a sparse matrix, the
    returned patch is a dense np.array.
    """
    if not sparse.issparse(data):
        data = np.asarray(data)
    dim = get_dim(data)
    if dim == 1:
        # cols is ignored for 1D data
        return data[rows] if rows is not None else data
    elif dim == 2:
        nrows, ncols = data.shape
        rows = rows if rows is not None else range(nrows)
        cols = cols if cols is not None else range(ncols)
        if sparse.issparse(data):
            return data.toarray()[np.ix_(rows, cols)]
        else:
            return data[np.ix_(rows, cols)]
    else:
        raise RuntimeError('only supports 1D or 2D array')
Author: EricChanBD, Project: tutorials, Lines: 25, Source: features.py
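A quick sketch of how the function above behaves (values are illustrative):

import numpy as np

m = np.arange(12).reshape(3, 4)
patch(m, [0, 2], [1, 3])    # -> array([[ 1,  3],
                            #           [ 9, 11]])
patch(m, None, [0, 1])      # all rows, first two columns
patch(m.ravel(), [2, 5])    # 1D case: cols is ignored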


Example 20: _steadystate_lu

def _steadystate_lu(L, use_rcm=True, use_umfpack=False):
    """
    Find the steady state(s) of an open quantum system by computing the
    LU decomposition of the underlying matrix.
    """
    if settings.debug:
        print('Starting LU solver...')
    dims=L.dims[0]
    weight=np.abs(L.data.max())
    n = prod(L.dims[0][0])
    b = np.zeros(n ** 2, dtype=complex)
    b[0] = weight
    L = L.data.tocsc() + sp.csc_matrix((weight*np.ones(n),
                    (np.zeros(n), [nn * (n + 1) for nn in range(n)])),
        shape=(n ** 2, n ** 2))
    
    L.sort_indices()
    use_solver(assumeSortedIndices=True, useUmfpack=use_umfpack)
    if use_rcm:
        perm = symrcm(L)
        L = sparse_permute(L,perm,perm)
        b = b[np.ix_(perm,)]
    
    solve = factorized(L)
    v = solve(b)
    if use_rcm:
        rev_perm = np.argsort(perm)
        v = v[np.ix_(rev_perm,)]
    data = vec2mat(v)
    data = 0.5 * (data + data.conj().T)

    return Qobj(data, dims=dims, isherm=True)
Author: i2000s, Project: qutip, Lines: 32, Source: steadystate.py



Note: The numpy.ix_ examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not repost without permission.

