
Python scipy.ones Function Code Examples


This article collects typical usage examples of the Python function scipy.ones. If you are wondering how scipy.ones is used in practice, how to call it, or what working code looks like, the curated examples below should help.



A total of 20 code examples of the ones function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
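Before the project examples, a minimal sketch of the function itself may be useful (this sketch is not taken from any of the projects below; it assumes that in older SciPy releases scipy.ones is simply a re-export of numpy.ones, while recent SciPy versions have removed the alias, so it falls back to NumPy when needed):

import scipy as sp              # on older SciPy, sp.ones is an alias of numpy.ones
import numpy as np

ones_fn = getattr(sp, "ones", np.ones)   # fall back to NumPy if the alias is gone

v = ones_fn(5)                        # 1-D array of five 1.0 values (float64 by default)
m = ones_fn((4, 3))                   # 4 x 3 array of ones, as in sp.ones([num_pores, 3]) below
mask = ones_fn((2, 2), dtype=bool)    # boolean mask, as in the range-query example below

print(v.shape, m.shape, mask.dtype)   # (5,) (4, 3) bool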

Example 1: test_pore2centroid

 def test_pore2centroid(self):
     temp_coords = self.net['pore.coords']
     self.geo['pore.centroid'] = sp.ones([self.geo.num_pores(), 3])
     vo.pore2centroid(self.net)
     assert sp.sum(self.net['pore.coords'] -
                   sp.ones([self.geo.num_pores(), 3])) == 0.0
     self.net['pore.coords'] = temp_coords
Developer ID: amirdezashibi, Project: OpenPNM, Lines of code: 7, Source: VertexOpsTest.py


Example 2: kalman_filter

def kalman_filter(b,
                  V,
                  Phi,
                  y,
                  X,
                  sigma,
                  Sigma,
                  switch = 0,
                  D = None,
                  d = None,
                  G = None,
                  a = None,
                  c = None):
    r"""
    
    .. math::
       :nowrap:

       \begin{eqnarray*}
       \beta_{t|t-1} = \Phi \: \beta_{t-1|t-1}\\
       V_{t|t-1} = \Phi  V_{t-1|t-1} \Phi ^T + \Sigma \\
       e_t = y_t -  X_t \beta_{t|t-1}\\
       K_t =  V_{t|t-1} X_t^T (\sigma + X_t V_{t|t-1} X_t )^{-1}\\
       \beta_{t|t} = \beta_{t|t-1} + K_t e_t\\
       V_{t|t} = (I - K_t X_t^T) V_{t|t-1}\\
       \end{eqnarray*}

    """

    n = scipy.shape(X)[1]
    beta = scipy.empty(scipy.shape(X))
    n = len(b)
    if D is None:
        D = scipy.ones((1, n))
    if d is None:
        d = scipy.matrix(1.)
    if G is None:
        G = scipy.identity(n)
    if a is None:
        a = scipy.zeros((n, 1))
    if c is None:
        c = scipy.ones((n, 1))
#        import code; code.interact(local=locals())
    (b, V) = kalman_predict(b, V, Phi, Sigma)
    for i in xrange(len(X)):
        beta[i] = scipy.array(b).T
        (b, V, e, K) = kalman_upd(b,
                                V,
                                y[i],
                                X[i],
                                sigma,
                                Sigma,
                                switch,
                                D,
                                d,
                                G,
                                a,
                                c)
        (b, V) = kalman_predict(b, V, Phi, Sigma)
    return beta
Developer ID: idaohang, Project: KF, Lines of code: 60, Source: libregression.py


Example 3: calculateGradient

    def calculateGradient(self):
        # normalize rewards
        # self.dataset.data['reward'] /= max(ravel(abs(self.dataset.data['reward'])))

        # initialize variables
        R = ones((self.dataset.getNumSequences(), 1), float)
        X = ones((self.dataset.getNumSequences(), self.loglh.getDimension('loglh') + 1), float)

        # collect sufficient statistics
        print self.dataset.getNumSequences()
        for n in range(self.dataset.getNumSequences()):
            _state, _action, reward = self.dataset.getSequence(n)
            seqidx = ravel(self.dataset['sequence_index'])
            if n == self.dataset.getNumSequences() - 1:
                # last sequence until end of dataset
                loglh = self.loglh['loglh'][seqidx[n]:, :]
            else:
                loglh = self.loglh['loglh'][seqidx[n]:seqidx[n + 1], :]

            X[n, :-1] = sum(loglh, 0)
            R[n, 0] = sum(reward, 0)

        # linear regression
        beta = dot(pinv(X), R)
        return beta[:-1]
Developer ID: DanSGraham, Project: School-Projects, Lines of code: 25, Source: enac.py


Example 4: createLargeSubMatrix

def createLargeSubMatrix():

    # Create a large matrix, but with same amount of 'ones' as the small submatrix

    t1 = time.time()

    m=40000
    n=1000000

    M=sparse.lil_matrix((m,n))

    m=500
    n=20000

    # Populate some of the matrix
    M[0,:]=ones(n)
    M[:,0]=1
    M[(m/2),:]=ones(n)
    M[:,(n/2)]=1
    M[(m-1),:]=ones(n)
    M[:,(n-1)]=1

    t2 = time.time()
    print 'Time used: ',(t2-t1)

    return M
Developer ID: hmbachelor, Project: bachelor, Lines of code: 26, Source: testModule.py


Example 5: phenSpecificEffects

def phenSpecificEffects(snps,pheno1,pheno2,K=None,covs=None,test='lrt'):
    """
    Univariate fixed effects interaction test for phenotype specific SNP effects

    Args:
        snps:   [N x S] SP.array of S SNPs for N individuals (test SNPs)
        pheno1: [N x 1] SP.array of 1 phenotype for N individuals
        pheno2: [N x 1] SP.array of 1 phenotype for N individuals
        K:      [N x N] SP.array of LMM-covariance/kinship koefficients (optional)
                        If not provided, then linear regression analysis is performed
        covs:   [N x D] SP.array of D covariates for N individuals
        test:    'lrt' for likelihood ratio test (default) or 'f' for F-test

    Returns:
        limix LMM object
    """
    N=snps.shape[0]
    if K is None:
        K=SP.eye(N)
    assert (pheno1.shape[1]==pheno2.shape[1]), "Only consider equal number of phenotype dimensions"
    if covs is None:
        covs = SP.ones((N,1))
    assert (pheno1.shape[1]==1 and pheno2.shape[1]==1 and pheno1.shape[0]==N and pheno2.shape[0]==N and K.shape[0]==N and K.shape[1]==N and covs.shape[0]==N), "shapes missmatch"
    Inter = SP.zeros((N*2,1))
    Inter[0:N,0]=1
    Inter0 = SP.ones((N*2,1))
    Yinter=SP.concatenate((pheno1,pheno2),0)
    Xinter = SP.tile(snps,(2,1))
    Covinter = SP.tile(covs,(2,1))
    lm = simple_interaction(snps=Xinter,pheno=Yinter,covs=Covinter,Inter=Inter,Inter0=Inter0,test=test)
    return lm
Developer ID: jlmaccal, Project: limix, Lines of code: 31, Source: qtl_old.py


Example 6: estimateBeta

def estimateBeta(X,Y,K,C=None,addBiasTerm=False,numintervals0=100,ldeltamin0=-5.0,ldeltamax0=5.0):
    """ compute all pvalues
    If numintervalsAlt==0 use EMMA-X trick (keep delta fixed over alternative models)
    """
    n,s=X.shape;
    n_pheno=Y.shape[1];
    S,U=LA.eigh(K);
    UY=SP.dot(U.T,Y);
    UX=SP.dot(U.T,X);
    if (C==None):
        Ucovariate=SP.dot(U.T,SP.ones([n,1]));
    else:
        if (addBiasTerm):
            C_=SP.concatenate((C,SP.ones([n,1])),axis=1)
            Ucovariate=SP.dot(U.T,C_);
        else:
            Ucovariate=SP.dot(U.T,C);
    n_covar=Ucovariate.shape[1];
    beta = SP.empty((n_pheno,s,n_covar+1));
    LL=SP.ones((n_pheno,s))*(-SP.inf);
    ldelta=SP.empty((n_pheno,s));
    sigg2=SP.empty((n_pheno,s));
    pval=SP.ones((n_pheno,s))*(-SP.inf);
    for phen in SP.arange(n_pheno):
        UY_=UY[:,phen];
        ldelta[phen]=optdelta(UY_,Ucovariate,S,ldeltanull=None,numintervals=numintervals0,ldeltamin=ldeltamin0,ldeltamax=ldeltamax0);
        for snp in SP.arange(s):
            UX_=SP.hstack((UX[:,snp:snp+1],Ucovariate));
            nLL_, beta_, sigg2_=nLLeval(ldelta[phen,snp],UY_,UX_,S,MLparams=True);
            beta[phen,snp,:]=beta_;
            sigg2[phen,snp]=sigg2_;
            LL[phen,snp]=-nLL_;
    return beta, ldelta
Developer ID: PMBio, Project: limix, Lines of code: 33, Source: lmm_fast.py


Example 7: _additionalInit

    def _additionalInit(self):
        assert self.numberOfCenters == 1, 'Mixtures of Gaussians not supported yet.'

        xdim = self.numParameters
        self.alphas = ones(self.numberOfCenters) / float(self.numberOfCenters)
        self.mus = []
        self.sigmas = []

        if self.rangemins == None:
            self.rangemins = -ones(xdim)
        if self.rangemaxs == None:
            self.rangemaxs = ones(xdim)
        if self.initCovariances == None:
            if self.diagonalOnly:
                self.initCovariances = ones(xdim)
            else:
                self.initCovariances = eye(xdim)

        for _ in range(self.numberOfCenters):
            self.mus.append(rand(xdim) * (self.rangemaxs - self.rangemins) + self.rangemins)
            self.sigmas.append(dot(eye(xdim), self.initCovariances))

        self.samples = list(range(self.windowSize))
        self.fitnesses = zeros(self.windowSize)
        self.generation = 0
        self.allsamples = []
        self.muevals = []
        self.allmus = []
        self.allsigmas = []
        self.allalphas = []
        self.allUpdateSizes = []
        self.allfitnesses = []
        self.meanShifts = [zeros((self.numParameters)) for _ in range(self.numberOfCenters)]

        self._oneEvaluation(self._initEvaluable)
Developer ID: Angeliqe, Project: pybrain, Lines of code: 35, Source: fem.py


Example 8: make_data_twoclass

def make_data_twoclass(N=50):
    # generates some toy data
    mu = sp.array([[0,2],[0,-2]]).T
    C = sp.array([[5.,4.],[4.,5.]])
    X = sp.hstack((mvn(mu[:,0],C,N/2).T, mvn(mu[:,1],C,N/2).T))
    Y = sp.hstack((sp.ones((1,N/2.)),-sp.ones((1,N/2.))))
    return X,Y
Developer ID: nikste, Project: doubly_random_svm, Lines of code: 7, Source: training_comparison_functions.py


Example 9: plot_median_errors

def plot_median_errors(RefinementLevels):
        for i in RefinementLevels[0].cases:
            x =[];
            y =[];
            print "Analyzing median error on: ", i ;
            for r in RefinementLevels:                
                x.append(r.LUT.D_dim*r.LUT.P_dim)
                r.get_REL_ERR_SU2(i)
                y.append(r.SU2[i].median_ERR*100)
            
            x = sp.array(x)
            y = sp.array(y)            
            y = y[sp.argsort(x)]
            x = x[sp.argsort(x)]
                                    
            LHM = sp.ones((len(x),2))
            RHS = sp.ones((len(x),1))            
            LHM[:,1] = sp.log10(x)
            RHS[:,0] = sp.log10(y)

            sols = sp.linalg.lstsq(LHM,RHS)
            b = -sols[0][1]
            plt.loglog(x,y, label='%s, %s'%(i,r'$O(\frac{1}{N})^{%s}$'%str(sp.around(b,2))), basex=10, basey=10, \
                       subsy=sp.linspace(10**(-5), 10**(-2),20),\
                       subsx=sp.linspace(10**(2), 10**(5),50))
            
            #for r in RefinementLevels:                
               # x.append(r.LUT.D_dim*r.LUT.P_dim)
              #  r.get_REL_ERR_SciPy(i)
             #   y.append(r.SciPy[i].median_ERR*100)
            #plt.plot(x,y, label='SciPy: %s'%i)
        plt.grid(which='both')
        plt.xlabel('Grid Nodes (N)')
        plt.ylabel('Median relative error [%]')
        return;
Developer ID: MatejKosec, Project: LUTStandAlone, Lines of code: 35, Source: ConvergenceLibrary.py


Example 10: addFixedEffect

    def addFixedEffect(self, F=None, A=None, Ftest=None):
        """
        add fixed effect term to the model

        Args:
            F:     sample design matrix for the fixed effect [N,K]
            A:     trait design matrix for the fixed effect (e.g. sp.ones((1,P)) common effect; sp.eye(P) any effect) [L,P]
            Ftest: sample design matrix for test samples [Ntest,K]
        """
        if A is None:
            A = sp.eye(self.P)
        if F is None:
            F = sp.ones((self.N,1))
            if self.Ntest is not None:
                Ftest = sp.ones((self.Ntest,1))

        assert A.shape[1]==self.P, 'VarianceDecomposition:: A has incompatible shape'
        assert F.shape[0]==self.N, 'VarianceDecimposition:: F has incompatible shape'

        if Ftest is not None:
            assert self.Ntest is not None, 'VarianceDecomposition:: specify Ntest for predictions (method VarianceDecomposition::setTestSampleSize)'
            assert Ftest.shape[0]==self.Ntest, 'VarianceDecimposition:: Ftest has incompatible shape'
            assert Ftest.shape[1]==F.shape[1], 'VarianceDecimposition:: Ftest has incompatible shape'

        # add fixed effect
        self.sample_designs.append(F)
        self.sample_test_designs.append(Ftest)
        self.trait_designs.append(A)
 
        self._desync()
Developer ID: PMBio, Project: limix, Lines of code: 30, Source: varianceDecomposition.py


Example 11: __init__

    def __init__(self, evaluator, evaluable, **parameters):
        BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
        self.alphas = ones(self.numberOfCenters)/self.numberOfCenters
        self.mus = []
        self.sigmas = []

        self.tau = 1.
        if self.rangemins == None:
            self.rangemins = -ones(self.xdim)
        if self.rangemaxs == None:
            self.rangemaxs = ones(self.xdim)
        if self.initCovariances == None:
            self.initCovariances = eye(self.xdim)
            
        if self.elitist and self.numberOfCenters == 1 and not self.noisyEvaluator:
            # in the elitist case seperate evaluations are not necessary. 
            # CHECKME: maybe in the noisy case?
            self.evalMus = False
            
        assert not(self.useCauchy and self.numberOfCenters > 1)
            
        for dummy in range(self.numberOfCenters):
            self.mus.append(rand(self.xdim) * (self.rangemaxs-self.rangemins) + self.rangemins)
            self.sigmas.append(dot(eye(self.xdim), self.initCovariances))
        self.reset()
Developer ID: HKou, Project: pybrain, Lines of code: 25, Source: fem.py


Example 12: gensquexpIPdraw

def gensquexpIPdraw(d,lb,ub,sl,su,sfn,sls,cfn):
    #axis = 0 value = sl
    #d dimensional objective +1 for s
    nt=25
    #print sp.hstack([sp.array([[sl]]),lb])
    #print sp.hstack([sp.array([[su]]),ub])
    [X,Y,S,D] = ESutils.gen_dataset(nt,d+1,sp.hstack([sp.array([[sl]]),lb]).flatten(),sp.hstack([sp.array([[su]]),ub]).flatten(),GPdc.SQUEXP,sp.array([1.5]+[sls]+[0.30]*d))
    G = GPdc.GPcore(X,Y,S,D,GPdc.kernel(GPdc.SQUEXP,d+1,sp.array([1.5]+[sls]+[0.30]*d)))
    def obj(x,s,d,override=False):
        x = x.flatten()
        if sfn(x)==0. or override:
            noise = 0.
        else:
            noise = sp.random.normal(scale=sp.sqrt(sfn(x)))
        
        return [G.infer_m(x,[d])[0,0]+noise,cfn(x)]
    def dirwrap(x,y):
        z = obj(sp.array([[sl]+[i for i in x]]),sl,[sp.NaN],override=True)
        return (z,0)
    [xmin0,ymin0,ierror] = DIRECT.solve(dirwrap,lb,ub,user_data=[], algmethod=1, maxf=89000, logfilename='/dev/null')
    lb2 = xmin0-sp.ones(d)*1e-4
    ub2 = xmin0+sp.ones(d)*1e-4
    [xmin,ymin,ierror] = DIRECT.solve(dirwrap,lb2,ub2,user_data=[], algmethod=1, maxf=89000, logfilename='/dev/null')
    #print "RRRRR"+str([xmin0,xmin,ymin0,ymin,xmin0-xmin,ymin0-ymin])
    return [obj,xmin,ymin]
Developer ID: markm541374, Project: GPc, Lines of code: 25, Source: OPTutils.py


Example 13: lossTraces

def lossTraces(fwrap, aclass, dim, maxsteps, storesteps=None, x0=None,
               initNoise=0., minLoss=1e-10, algoparams={}):
    """ Compute a number of loss curves, for the provided settings,
    stored at specific storestep points. """
    if not storesteps:
        storesteps = range(maxsteps + 1)
    
    # initial points, potentially noisy
    if x0 is None:
        x0 = ones(dim) + randn(dim) * initNoise
    
    # tracking progress by callback
    paramtraces = {'index':-1}
    def storer(a):
        lastseen = paramtraces['index']
        for ts in [x for x in storesteps if x > lastseen and x <= a._num_updates]:
            paramtraces[ts] = a.bestParameters.copy()
        paramtraces['index'] = a._num_updates
        
    # initialization    
    algo = aclass(fwrap, x0, callback=storer, **algoparams)
    print algo, fwrap, dim, maxsteps,
    
    # store initial step   
    algo.callback(algo)
    algo.run(maxsteps)

    # process learning curve
    del paramtraces['index']
    paramtraces = array([x for _, x in sorted(paramtraces.items())])
    oloss = mean(fwrap.stochfun.expectedLoss(ones(100) * fwrap.stochfun.optimum))
    ls = abs(fwrap.stochfun.expectedLoss(ravel(paramtraces)) - oloss) + minLoss
    ls = reshape(ls, paramtraces.shape)
    print median(ls[-1])
    return ls
Developer ID: bitfort, Project: py-optim, Lines of code: 35, Source: experiments.py


Example 14: do_compare_wedges

def do_compare_wedges(file1="stars-82.txt", file2="Stripe82_coadd.csv", stripe=82,
                      mag=0, size=1.0):
    """ Modify if size is not 1.0 """
    one_run = fi.read_data(file1)
    or_l = len(one_run[:,0])
    or_hist = sv.plot_wedge_density(one_run, stripe, q=0.458, r0=19.4,
                                    name="_rho1", mag=mag, plot=0, size=size)
    coadd = fi.read_data(file2)
    ca_l = len(coadd[:,0])
    ca_hist = sv.plot_wedge_density(coadd, stripe, q=0.458, r0=19.4,
                       name="_rho2", mag=mag, plot=0, size=size)
    # Separate into heights
    or_h = or_hist[:,1]
    ca_h = ca_hist[:,1]
    # Divide the first data set by the second
    if len(or_h) < len(ca_h):
        l = len(or_h)
        extra_h = -0.1*sc.ones((len(ca_h)-l))
    else:
        l = len(ca_h)
        extra_h = 0.1*sc.ones((len(or_h)-l))
    diff_h = sc.zeros(l)
    for i in range(l):
        diff_h[i] = ( or_h[i] / ca_h[i] )
    out = sc.zeros((l,3))
    for i in range(l):
        out[i,0], out[i,1] = ca_hist[i,0], diff_h[i]
        out[i,2] = 1.0 #ma.sqrt(or_hist[i,2]*or_hist[i,2] + ca_hist[i,2]*ca_hist[i,2])
    return out
Developer ID: MNewby, Project: Newby-tools, Lines of code: 29, Source: compare_wedges.py


Example 15: __init__

    def __init__(self, evaluator, evaluable, **parameters):
        BlackBoxOptimizer.__init__(self, evaluator, evaluable, **parameters)
        
        self.numParams = self.xdim + self.xdim * (self.xdim+1) / 2
                
        if self.momentum != None:
            self.momentumVector = zeros(self.numParams)
        if self.learningRateSigma == None:
            self.learningRateSigma = self.learningRate
        
        if self.rangemins == None:
            self.rangemins = -ones(self.xdim)
        if self.rangemaxs == None:
            self.rangemaxs = ones(self.xdim)
        if self.initCovariances == None:
            if self.diagonalOnly:
                self.initCovariances = ones(self.xdim)
            else:
                self.initCovariances = eye(self.xdim)

        self.x = rand(self.xdim) * (self.rangemaxs-self.rangemins) + self.rangemins
        self.sigma = dot(eye(self.xdim), self.initCovariances)
        self.factorSigma = cholesky(self.sigma)
        
        self.reset()
Developer ID: HKou, Project: pybrain, Lines of code: 25, Source: ves.py


Example 16: svm_gradient_batch_fast

def svm_gradient_batch_fast(X_pred, X_exp, y, X_pred_ids, X_exp_ids, w, C=.0001, sigma=1.):
    # sample Kernel
    rnpred = X_pred_ids#sp.random.randint(low=0,high=len(y),size=n_pred_samples)
    rnexpand = X_exp_ids#sp.random.randint(low=0,high=len(y),size=n_expand_samples)
    #K = GaussKernMini_fast(X_pred.T,X_exp.T,sigma)
    X1 = X_pred.T
    X2 = X_exp.T
    if sp.sparse.issparse(X1):
        G = sp.outer(X1.multiply(X1).sum(axis=0), sp.ones(X2.shape[1]))
    else:
        G = sp.outer((X1 * X1).sum(axis=0), sp.ones(X2.shape[1]))
    if sp.sparse.issparse(X2):
        H = sp.outer(X2.multiply(X2).sum(axis=0), sp.ones(X1.shape[1]))
    else:
        H = sp.outer((X2 * X2).sum(axis=0), sp.ones(X1.shape[1]))
    K = sp.exp(-(G + H.T - 2. * fast_dot(X1.T, X2)) / (2. * sigma ** 2))
    # K = sp.exp(-(G + H.T - 2.*(X1.T.dot(X2)))/(2.*sigma**2))
    if sp.sparse.issparse(X1) | sp.sparse.issparse(X2): K = sp.array(K)

    # compute predictions
    yhat = fast_dot(K,w[rnexpand])
    # compute whether or not prediction is in margin
    inmargin = (yhat * y[rnpred]) <= 1
    # compute gradient
    G = C * w[rnexpand] - fast_dot((y[rnpred] * inmargin), K)
    return G,rnexpand
Developer ID: nikste, Project: doubly_random_svm, Lines of code: 26, Source: dsekl.py


Example 17: range_query_geno_local

 def range_query_geno_local(self, idx_start=None, idx_end=None, chrom=None,pos_start=None, pos_end=None,windowsize=0):
     """
     return an index for a range query on the genotypes
     """
     if idx_start==None and idx_end==None and pos_start==None and pos_end==None and chrom==None:
         return  sp.arange(0,self.num_snps)
     elif idx_start is not None or idx_end is not None:
         if idx_start is None:
             idx_start = 0
         if idx_end is None:
             idx_end = self.num_snps
         res =  sp.arange(idx_start,idx_end)
         return res
     elif chrom is not None:
         res = self.geno_pos["chrom"]==chrom
     elif pos_start is not None or pos_end is not None:
         if pos_start is not None and pos_end is not None:
             assert pos_start[0] == pos_end[0], "chromosomes have to match"
         
         if pos_start is None:
             idx_larger =  sp.ones(self.num_snps,dtype=bool)
         else:
             idx_larger = (self.geno_pos["pos"]>=(pos_start[1]-windowsize)) & (self.geno_pos["chrom"]==pos_start[0])
         if pos_end is None:
             idx_smaller =  sp.ones(self.num_snps,dtype=bool)
         else:
             idx_smaller = (self.geno_pos["pos"]<=(pos_end[1]+windowsize)) & (self.geno_pos["chrom"]==pos_end[0])
         res = idx_smaller & idx_larger
     else:
         raise Exception("This should not be triggered")#res =  sp.ones(self.geno_pos.shape,dtype=bool)
     return  sp.where(res)[0]
Developer ID: MMesbahU, Project: limix, Lines of code: 31, Source: data.py


Example 18: __init__

 def __init__(self, render=True, realtime=True, ip="127.0.0.1", port="21560"):
     # initialize base class
     self.render = render
     if self.render:
         self.updateDone = True
         self.updateLock = threading.Lock()
         self.server = UDPServer(ip, port)
     self.actLen = 12
     self.mySensors = sensors.Sensors(["EdgesReal"])
     self.dists = array([20.0, sqrt(2.0) * 20, sqrt(3.0) * 20])
     self.gravVect = array([0.0, -100.0, 0.0])
     self.centerOfGrav = zeros((1, 3), float)
     self.pos = ones((8, 3), float)
     self.vel = zeros((8, 3), float)
     self.SpringM = ones((8, 8), float)
     self.d = 60.0
     self.dt = 0.02
     self.startHight = 10.0
     self.dumping = 0.4
     self.fraktMin = 0.7
     self.fraktMax = 1.3
     self.minAkt = self.dists[0] * self.fraktMin
     self.maxAkt = self.dists[0] * self.fraktMax
     self.reset()
     self.count = 0
     self.setEdges()
     self.act(array([20.0] * 12))
     self.euler()
     self.realtime = realtime
     self.step = 0
Developer ID: DanSGraham, Project: code, Lines of code: 30, Source: environment.py


Example 19: alloc_numpy_arrays

def alloc_numpy_arrays(number_cells, space_direction, initval=0,
		dtype='f'):
	"""
	"""
	space = [sc.ones((1,1,1), dtype),\
			sc.ones((1,1,1), dtype),\
			sc.ones((1,1,1), dtype)]
	number_cells = tuple(number_cells)
	
	if 'x' in space_direction:
		space[x_axis] = sc.zeros(number_cells, dtype)
	if 'y' in space_direction:
		space[y_axis] = sc.zeros(number_cells, dtype)
	if 'z' in space_direction:
		space[z_axis] = sc.zeros(number_cells, dtype)
		
	if initval != 0:
		if len(number_cells) == 3:
			space[x_axis][:,:,:] = initval
			space[y_axis][:,:,:] = initval
			space[z_axis][:,:,:] = initval
		elif len(number_cells) == 2:
			space[x_axis][:,:] = initval
			space[y_axis][:,:] = initval
			space[z_axis][:,:] = initval
			
	return space
Developer ID: wbkifun, Project: fdtd_accelerate, Lines of code: 27, Source: kufdtd_base.py


Example 20: sqcover

def sqcover(A,n):
    edge = sp.sqrt(A) # the length of an edge
    d = edge/n # the distance between two adjacent points
    r = d/2 # the "radius of "
    end = edge - r # end point
    base = sp.linspace(r, end, n)
    first_line = sp.transpose(sp.vstack((base, r*sp.ones(n))))
    increment = sp.transpose(sp.vstack((sp.zeros(n), d*sp.ones(n))))
    pts = first_line
    y_diff = increment
    for i in range(n-1):
        pts = sp.vstack((pts, first_line + y_diff))
        y_diff = y_diff + increment
    
    # Color matter
    colors = []
    for p in pts:
        cval = n*p[0] + p[1] # the x-coord has a higher weight
        cval = colormap.Spectral(cval/((n+1)*end)) # normalize by the max value that cval can take.
        colors.append(cval)

    colors = sp.array(colors)

    cover = (pts, r, colors)
    return cover
Developer ID: atkm, Project: reed-modeling, Lines of code: 25, Source: ga_shapes.py



Note: The scipy.ones function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce this article without permission.

