
Python scipy.cov Function Code Examples


This article collects typical usage examples of the scipy.cov function in Python. If you have been wondering what exactly scipy.cov does, how to call it, or what real-world usage looks like, the curated code examples below may help.



Twenty code examples of the cov function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
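Before the project examples, here is a minimal, self-contained sketch of the call patterns they all share. This is an editor's illustration rather than code from any of the projects below, and it assumes that scipy.cov behaves like numpy.cov (which SciPy's old top-level namespace re-exported); the toy data and variable names are made up for demonstration.

# Minimal sketch of the cov() call patterns that recur in the examples below.
# numpy.cov is used directly, on the assumption that scipy.cov / sp.cov / SP.cov
# in the snippets is the same function exposed through SciPy's legacy namespace.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 3))        # toy data: 100 samples (rows) of 3 variables (columns)

# rowvar=0 / rowvar=False: treat columns as variables (Examples 3, 10, 11, 13).
C = np.cov(X, rowvar=False)

# bias=1 or ddof=0: normalize by N instead of N-1 (Examples 4, 10, 11, 15).
C_biased = np.cov(X, rowvar=False, bias=True)

# Passing the transpose instead of rowvar gives the same result (Examples 1, 2, 12, 18).
C_t = np.cov(X.T)

print(C.shape, np.allclose(C, C_t))  # (3, 3) True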

Example 1: __init__

 def __init__(self, Y=None, Xr=None, Rg=None, Ug=None, Sg=None, factr=1e7, debug=False):
     """
     Args:
         Y:          [N, P] phenotype matrix
         Xr:         [N, S] genotype data of the set component
         Rg:         [N, N] covariance matrix of the background term
                     (Sg, Ug are its eigenvalues/eigenvectors, computed
                     via la.eigh(Rg) if not provided)
         factr:      parameter that determines the accuracy of the solution
                     (see scipy.optimize.fmin_l_bfgs_b for more details)
     """
     # assert Xr
     Xr-= Xr.mean(0)
     Xr/= Xr.std(0)
     Xr/= sp.sqrt(Xr.shape[1])
     self.Y = Y
     self.Xr = Xr
     if Sg is None or Ug is None:
         Sg, Ug = la.eigh(Rg)
     self.Rg = Rg
     self.Ug = Ug
     self.Sg = Sg
     self.covY = sp.cov(Y.T)
     self.factr = factr 
     self.debug = debug
     self.gp = {}
     self.info = {}
     #_trRr = sp.diagonal(sp.dot(self.Ug, sp.dot(sp.diag(self.Sg), self.Ug.T))).sum()
     self.trRg = ((self.Ug*self.Sg**0.5)**2).sum()
Developer: PMBio, Project: limix, Lines: 27, Source: mvSetFull.py


Example 2: __init__

 def __init__(self, Y=None, Xr=None, F=None, Rr=None, factr=1e7, debug=False):
     """
     Args:
         Y:          [N, P] phenotype matrix
         Xr:         [N, S] genotype data of the set component
         Rr:         [N, N] covariance matrix of the set component
                     (computed as dot(Xr, Xr.T) if not provided)
         factr:      parameter that determines the accuracy of the solution
                     (see scipy.optimize.fmin_l_bfgs_b for more details)
     """
     # avoid SVD failure by adding some jitter 
     Xr+= 2e-6*(sp.rand(*Xr.shape)-0.5)
     # make sure it is normalised 
     Xr-= Xr.mean(0)
     Xr/= Xr.std(0)
     Xr/= sp.sqrt(Xr.shape[1])
     self.Y = Y
     self.F = F
     self.Xr = Xr
     self.covY = sp.cov(Y.T)
     self.factr = factr 
     self.debug = debug
     self.gp = {}
     self.info = {}
     self.lowrank = Xr.shape[1]<Xr.shape[0]
     if Rr is not None:
         self.Rr = Rr
     else:
         if self.lowrank:        self.Rr = None
         else:                   self.Rr = sp.dot(Xr, Xr.T)
Developer: PMBio, Project: limix, Lines: 29, Source: mvSet.py


Example 3: _maximum_likelihood

    def _maximum_likelihood(self, X):
        n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
        n_components = self.n_components

        # Predict mean
        mu = X.mean(axis=0)

        # Predict covariance
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)
        sigma2 = ((sp.sum(cov.diagonal()) - sp.sum(eigvals.sum())) /
                  (n_features - n_components))  # FIXME: M < D?

        weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
        M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
        inv_M = spla.inv(M)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2    # FIXME!
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
Developer: Yevgnen, Project: prml, Lines: 28, Source: pca.py


Example 4: learn_gmm

 def learn_gmm(self,x,y,tau=None):
     '''
     Function that learns the GMM from training samples
         It is possible to add a regularizer term Sigma = Sigma + tau*I 
     Input:
         x : the training samples
         y :  the labels
         tau : the value of the regularizer, if tau = None (default) no regularization
     Output:
         the mean, covariance and proportion of each class
     '''
     ## Get information from the data
     C = int(y.max(0))   # Number of classes
     n = x.shape[0]  # Number of samples
     d = x.shape[1]  # Number of variables
     
     ## Initialization
     self.ni = sp.empty((C,1))    # Vector of number of samples for each class
     self.prop = sp.empty((C,1))  # Vector of proportion
     self.mean = sp.empty((C,d))  # Vector of means
     self.cov = sp.empty((C,d,d)) # Matrix of covariance
     
     ## Learn the parameter of the model for each class
     for i in range(C):
         j = sp.where(y==(i+1))[0]
         self.ni[i] = float(j.size)    
         self.prop[i] = self.ni[i]/n
         self.mean[i,:] = sp.mean(x[j,:],axis=0)
         self.cov[i,:,:] = sp.cov(x[j,:],bias=1,rowvar=0)  # Normalize by ni to be consistent with the update formulae
     if tau is not None:
         self.tau = tau*sp.eye(d)
Developer: Sandy4321, Project: FFFS, Lines: 31, Source: npfs.py


Example 5: plot_covariance

def plot_covariance(history, dist_X):
   
    for dist_name in list(history.keys()):
        nTypes = len(history[dist_name].keys())
        errors = sp.zeros((2,nTypes))
        fig = plt.figure()
        fig.set_size_inches(6*nTypes,5)       
        plt.subplot(1,nTypes+1,1)
        plt.imshow(dist_X.corr_matrix,cmap=plt.cm.gray,interpolation='none')

        counter = 0
        for samp_name in list(history[dist_name].keys()):
            counter += 1
            hist_single = history[dist_name][samp_name]
            nsteps = len(hist_single)
            nbatch = hist_single[-1]['X'].shape[1]
            N = hist_single[0]['X'].shape[0]

            X = sp.zeros((N,nbatch,nsteps))
            P = sp.zeros((N,nbatch,nsteps))
            for tt in range(nsteps):
                X[:,:,tt] = hist_single[tt]['X']
                P[:,:,tt] = hist_single[tt]['P']
                
            ax = plt.subplot(1,nTypes+1,counter+1)
            inv_var_diags = sp.diag(10.**sp.linspace(-dist_X.log_conditioning, 0, N))**.5
            corr_matrix_calc = sp.dot(sp.dot(inv_var_diags**.5,sp.cov(X.reshape(N,nbatch*nsteps),rowvar = 1)),inv_var_diags**.5)
            plt.imshow(corr_matrix_calc,cmap=plt.cm.gray,interpolation='none')
           
            print (corr_matrix_calc)
        plt.show()
Developer: niragkadakia, Project: Chaotic-Monte-Carlo, Lines: 31, Source: plot.py


Example 6: pca

def pca(data, dim):
    """ Return the first dim principal components as colums of a matrix.

    Every row of the matrix represents a point in the data space.
    """

    assert dim <= data.shape[1], \
        "dim must be less or equal than the original dimension"

    # We have to make a copy of the original data and subtract the mean
    # of every entry
    data = makeCentered(data)
    cm = cov(data.T)

    # OPT only calculate the dim first eigenvectors here
    # The following calculation may seem a bit "weird" but also correct to me.
    # The eigenvectors with the dim highest eigenvalues have to be selected
    # We keep track of the indexes via enumerate to restore the right ordering
    # later.
    eigval, eigvec = eig(cm)
    eigval = [(val, ind) for ind, val  in enumerate(eigval)]
    eigval.sort()
    eigval[:-dim] = []  # remove all but the highest dim elements

    # now we have to bring them back in the right order
    eig_indexes = [(ind, val) for val, ind in eigval]
    eig_indexes.sort(reverse=True)
    eig_indexes = [ind for ind, val in eig_indexes]

    return eigvec.take(eig_indexes, 1).T
Developer: Angeliqe, Project: pybrain, Lines: 30, Source: pca.py


Example 7: fit

    def fit(self, data):
        """Fit VAR model to data.
        
        Parameters
        ----------
        data : array, shape (trials, channels, samples) or (channels, samples)
            Epoched or continuous data set.
            
        Returns
        -------
        self : :class:`VAR`
            The :class:`VAR` object to facilitate method chaining (see usage
            example).
        """
        data = atleast_3d(data)

        if self.delta == 0 or self.delta is None:
            # ordinary least squares
            x, y = self._construct_eqns(data)
        else:
            # regularized least squares (ridge regression)
            x, y = self._construct_eqns_rls(data)

        b, res, rank, s = sp.linalg.lstsq(x, y)

        self.coef = b.transpose()

        self.residuals = data - self.predict(data)
        self.rescov = sp.cov(cat_trials(self.residuals[:, :, self.p:]))

        return self
Developer: cbrnr, Project: scot, Lines: 31, Source: var.py


Example 8: _initParams_fast

	def _initParams_fast(self):
		""" 
		initialize the gp parameters
			1) project Y on the known factor X0 -> Y0
				average variance of Y0 is used to initialize the variance explained by X0
			2) considers the residual Y1 = Y-Y0 (this is equivalent to regressing out X0)
			3) performs PCA on cov(Y1) and uses the first k PCs to initialize X
			4) the variance of all other PCs is used to initialize the noise
			5) the variance explained by interaction is set to a small random number 
		"""
		Xd = LA.pinv(self.X0)
		Y0 = self.X0.dot(Xd.dot(self.Y))
		Y1 = self.Y-Y0
		YY = SP.cov(Y1)
		S,U = LA.eigh(YY)
		X = U[:,-self.k:]*SP.sqrt(S[-self.k:])
		a = SP.array([SP.sqrt(Y0.var(0).mean())])
		b = 1e-3*SP.randn(1)
		c = SP.array([SP.sqrt((YY-SP.dot(X,X.T)).diagonal().mean())])
		# gp hyper params
		params = limix.CGPHyperParams()
		if self.interaction:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F'),SP.ones(1),b])
		else:
			params['covar'] = SP.concatenate([a,X.reshape(self.N*self.k,order='F')])
		params['lik'] = c
		return params
Developer: Shicheng-Guo, Project: scLVM, Lines: 27, Source: gp_clvm.py
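
The docstring of Example 8 describes a five-step initialization recipe. As a rough illustration, the same recipe is sketched below with plain numpy calls, outside the limix/scLVM context; the function name init_params_sketch and the assumed shapes (Y of size N x G, X0 of size N x Q, k hidden factors) are the editor's assumptions, not part of the original code.

import numpy as np

def init_params_sketch(Y, X0, k):
    # 1) project Y onto the known factors X0 -> Y0; the average variance of Y0
    #    initializes the variance explained by X0
    Y0 = X0 @ np.linalg.pinv(X0) @ Y
    a = np.sqrt(Y0.var(axis=0).mean())

    # 2) residual after (effectively) regressing out X0
    Y1 = Y - Y0

    # 3) PCA on cov(Y1): keep the top k eigenvectors, scaled by sqrt(eigenvalue)
    #    (rows of Y1 are treated as variables, as in the example above)
    YY = np.cov(Y1)
    S, U = np.linalg.eigh(YY)             # eigenvalues in ascending order
    X = U[:, -k:] * np.sqrt(S[-k:])

    # 4) variance not captured by the top k PCs initializes the noise level
    c = np.sqrt((YY - X @ X.T).diagonal().mean())

    # 5) the interaction variance starts as a small random number
    b = 1e-3 * np.random.randn(1)

    return a, X, b, c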


Example 9: fit

    def fit(self, data):
        """ Fit VAR model to data.
        
        Parameters
        ----------
        data : array-like, shape = [n_samples, n_channels, n_trials] or [n_samples, n_channels]
            Continuous or segmented data set.
            
        Returns
        -------
        self : :class:`VAR`
            The :class:`VAR` object to facilitate method chaining (see usage example)
        """
        data = sp.atleast_3d(data)

        if self.delta == 0 or self.delta is None:
            # ordinary least squares
            (x, y) = self._construct_eqns(data)
        else:
            # regularized least squares (ridge regression)
            (x, y) = self._construct_eqns_rls(data)

        (b, res, rank, s) = sp.linalg.lstsq(x, y)

        self.coef = b.transpose()

        self.residuals = data - self.predict(data)
        self.rescov = sp.cov(cat_trials(self.residuals), rowvar=False)

        return self
Developer: BioinformaticsArchive, Project: SCoT, Lines: 30, Source: var.py


Example 10: _init_params

    def _init_params(self, X):
        init = self.init
        n_samples, n_features = X.shape
        n_components = self.n_components

        if (init == 'kmeans'):
            km = Kmeans(n_components)
            clusters, mean, cov = km.cluster(X)
            coef = sp.array([c.shape[0] / n_samples for c in clusters])
            comps = [multivariate_normal(mean[i], cov[i], allow_singular=True)
                     for i in range(n_components)]
        elif (init == 'rand'):
            coef = sp.absolute(sprand.randn(n_components))
            coef = coef / coef.sum()
            means = X[sprand.permutation(n_samples)[0: n_components]]
            clusters = [[] for i in range(n_components)]
            for x in X:
                idx = sp.argmin([spla.norm(x - mean) for mean in means])
                clusters[idx].append(x)

            comps = []
            for k in range(n_components):
                mean = means[k]
                cov = sp.cov(clusters[k], rowvar=0, ddof=0)
                comps.append(multivariate_normal(mean, cov, allow_singular=True))

        self.coef = coef
        self.comps = comps
Developer: Yevgnen, Project: prml, Lines: 28, Source: mixture_model.py


Example 11: cluster

    def cluster(self, X):
        self.fit(X)

        cluster = [X[sp.argmax(self.responsibility, axis=1) == k] for k in range(self.n_classes)]
        mean = self.center
        cov = [sp.cov(c, rowvar=0, ddof=0) for c in cluster]

        return cluster, mean, cov
Developer: Yevgnen, Project: prml, Lines: 8, Source: mixture_model.py


Example 12: getEmpTraitCovar

 def getEmpTraitCovar(self):
     """
     Returns the empirical trait covariance matrix
     """
     if self.P==1:
         out=self.Y[self.Iok].var()
     else:
         out=SP.cov(self.Y[self.Iok].T)
     return out
Developer: PMBio, Project: limix, Lines: 9, Source: varianceDecompositionOld.py


Example 13: fit

    def fit(self, X):
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.mean = X.mean(axis=0)

        return sp.dot(X, eigvecs)
Developer: Yevgnen, Project: prml, Lines: 9, Source: pca.py


Example 14: _initParams

 def _initParams(self,init_method=None):
     """ this function initializes the paramenter and Ifilter """
     if self.P==1:
         if self.bgRE:
             params0 = {'Cg':SP.sqrt(0.5)*SP.ones(1),'Cn':SP.sqrt(0.5)*SP.ones(1)}
             Ifilter = None
         else:
             params0 = {'Cr':1e-9*SP.ones(1),'Cn':SP.ones(1)}
             Ifilter = {'Cr':SP.zeros(1,dtype=bool),'Cn':SP.ones(1,dtype=bool)}
     else:
         if self.bgRE:
             if self.colCovarType=='freeform':
                 if init_method=='pairwise':
                     _RV = fitPairwiseModel(self.Y,XX=self.XX,S_XX=self.S_XX,U_XX=self.U_XX,verbose=False)
                     params0 = {'Cg':_RV['params0_Cg'],'Cn':_RV['params0_Cn']}
                 elif init_method=='random':
                     params0 = {'Cg':SP.randn(self.Cg.getNumberParams()),'Cn':SP.randn(self.Cn.getNumberParams())}
                 else:
                     cov = 0.5*SP.cov(self.Y.T)+1e-4*SP.eye(self.P)
                     chol = LA.cholesky(cov,lower=True)
                     params = chol[SP.tril_indices(self.P)]
                     params0 = {'Cg':params.copy(),'Cn':params.copy()}
             Ifilter = None
         else:
             if self.colCovarType=='freeform':
                 cov = SP.cov(self.Y.T)+1e-4*SP.eye(self.P)
                 chol = LA.cholesky(cov,lower=True)
                 params = chol[SP.tril_indices(self.P)]
             #else:
             #    S,U=LA.eigh(cov)
             #    a = SP.sqrt(S[-self.rank_r:])[:,SP.newaxis]*U[:,-self.rank_r:]
             #    if self.colCovarType=='lowrank_id':
             #        c = SP.sqrt(S[:-self.rank_r].mean())*SP.ones(1)
             #    else:
             #        c = SP.sqrt(S[:-self.rank_r].mean())*SP.ones(self.P)
             #    params0_Cn = SP.concatenate([a.T.ravel(),c])
             params0 = {'Cr':1e-9*SP.ones(self.P),'Cn':params}
             Ifilter = {'Cr':SP.zeros(self.P,dtype=bool),
                         'Cn':SP.ones(params.shape[0],dtype=bool)}
     if self.mean.F is not None and self.bgRE:
         params0['mean'] = 1e-6*SP.randn(self.mean.getParams().shape[0])
         if Ifilter is not None:
             Ifilter['mean'] = SP.ones(self.mean.getParams().shape[0],dtype=bool)
     return params0,Ifilter
Developer: PMBio, Project: mtSet, Lines: 44, Source: multiTraitSetTest.py


Example 15: infer_full_post

 def infer_full_post(self,X_i,D_i):
     class MJMError(Exception):
         pass
     [m,V] = self.infer_full(X_i,D_i)
     ns=X_i.shape[0]
     cv = sp.zeros([ns,ns])
     for i in xrange(self.size):
         cv+=V[ns*i:ns*(i+1),:]
     cv= cv/self.size + sp.cov(m,rowvar=0,bias=1)
     return [sp.mean(m,axis=0).reshape([1,ns]),cv]
Developer: markm541374, Project: GPc, Lines: 10, Source: GPdc.py


Example 16: randomized

  def randomized(cls, degree, dim, scale):
    mixcoeffs = scipy.random.random(degree)
    mixcoeffs /= mixcoeffs.sum()

    means = scipy.random.standard_normal((degree, dim)) * scale

    # Generate random covariances by generating random data.
    randomdata = (scipy.random.standard_normal((dim, 10)) * scale
                     for _ in xrange(degree))
    covs = [scipy.cov(i) for i in randomdata]
    return cls(mixcoeffs, means, covs)
Developer: bayerj, Project: theano-mog, Lines: 11, Source: gaussianmixture.py


Example 17: setUp

    def setUp(self):
        np.random.seed(1)

        # define phenotype
        N = 200
        P = 2
        Y = sp.randn(N,P)
        # define row covariance
        f = 10
        G = 1.*(sp.rand(N, f)<0.2)
        X = 1.*(sp.rand(N, f)<0.2)
        R = covar_rescale(sp.dot(X,X.T))
        R+= 1e-4 * sp.eye(N)
        # define col covariances
        Cg = FreeFormCov(P)
        self._Cg = Cg
        Cn = FreeFormCov(P)
        Cg.setCovariance(0.5 * sp.cov(Y.T))
        Cn.setCovariance(0.5 * sp.cov(Y.T))
        # define gp
        self.gp = GP3KronSumLR(Y = Y, Cg = Cg, Cn = Cn, R = R, G = G, rank = 1)
Developer: PMBio, Project: limix, Lines: 21, Source: test_gp3kronSumLR.py


Example 18: ex15

def ex15(exclude=sc.array([1,2,3,4]),plotfilename='ex15.png',
		 bovyprintargs={}):
    """ex15: solve exercise 15
    Input:
       exclude        - ID numbers to exclude from the analysis
       plotfilename   - filename for the output plot
    Output:
       plot
    History:
       2010-05-07 - Written - Bovy (NYU)
    """
    #Read the data
    data= read_data('data_allerr.dat',allerr=True)
    ndata= len(data)
    nsample= ndata- len(exclude)
    #Put the data in the appropriate arrays and matrices
    Y= sc.zeros(nsample)
    X= sc.zeros(nsample)
    Z= sc.zeros((nsample,2))
    jj= 0
    for ii in range(ndata):
        if sc.any(exclude == data[ii][0]):
            pass
        else:
            Y[jj]= data[ii][1][1]
            X[jj]= data[ii][1][0]
            Z[jj,0]= X[jj]
            Z[jj,1]= Y[jj]
            jj= jj+1
    #Now compute the PCA solution
    Zm= sc.mean(Z,axis=0)
    Q= sc.cov(Z.T)
    eigs= linalg.eig(Q)
    maxindx= sc.argmax(eigs[0])
    V= eigs[1][maxindx]
    V= V/linalg.norm(V)

    m= sc.sqrt(1/V[0]**2.-1)
    bestfit= sc.array([-m*Zm[0]+Zm[1],m])

    #Plot result
    plot.bovy_print(**bovyprintargs)
    xrange=[0,300]
    yrange=[0,700]
    plot.bovy_plot(sc.array(xrange),bestfit[1]*sc.array(xrange)+bestfit[0],
                   'k--',xrange=xrange,yrange=yrange,
                   xlabel=r'$x$',ylabel=r'$y$',zorder=2)
    plot.bovy_plot(X,Y,marker='o',color='k',linestyle='None',
                   zorder=0,overplot=True)
 
    plot.bovy_text(r'$y = %4.2f \,x %4.0f' % (bestfit[1], bestfit[0])+r'$',
                   bottom_right=True)
    plot.bovy_end_print(plotfilename)
Developer: alessandro-gentilini, Project: DataAnalysisRecipes, Lines: 53, Source: ex15.py


Example 19: simulate

    def simulate(self, l, noisefunc=None, random_state=None):
        """Simulate vector autoregressive (VAR) model.

        This function generates data from the VAR model.

        Parameters
        ----------
        l : int or [int, int]
            Number of samples to generate. Can be a tuple or list, where l[0]
            is the number of samples and l[1] is the number of trials.
        noisefunc : func, optional
            This function is used to create the generating noise process. If
            set to None, Gaussian white noise with zero mean and unit variance
            is used.

        Returns
        -------
        data : array, shape (n_trials, n_channels, n_samples)
            Generated data.
        """
        m, n = np.shape(self.coef)
        p = n // m

        try:
            l, t = l
        except TypeError:
            t = 1

        if noisefunc is None:
            rng = check_random_state(random_state)
            noisefunc = lambda: rng.normal(size=(1, m))

        n = l + 10 * p

        y = np.zeros((n, m, t))
        res = np.zeros((n, m, t))

        for s in range(t):
            for i in range(p):
                e = noisefunc()
                res[i, :, s] = e
                y[i, :, s] = e
            for i in range(p, n):
                e = noisefunc()
                res[i, :, s] = e
                y[i, :, s] = e
                for k in range(1, p + 1):
                    y[i, :, s] += self.coef[:, (k - 1)::p].dot(y[i - k, :, s])

        self.residuals = res[10 * p:, :, :].T
        self.rescov = sp.cov(cat_trials(self.residuals).T, rowvar=False)

        return y[10 * p:, :, :].transpose([2, 1, 0])
Developer: cbrnr, Project: scot, Lines: 53, Source: varbase.py


Example 20: stats

            self.mapping[indexes[i]] = finalbeta[i]
        return self.mapping

    def stats(self, startdate, enddate, mktbasket, output = False):
        """
        Calculates statistics for a fund over a period.
        
        Parameters
        ----------
        startdate : datetime
            beginning of statistic period
        enddate : datetime
            end of statistic period
        mktbasket : dict
            dictionary of market streams
        output : bool
            if True, output results to db
        
        Returns
        -------
        stats : dict
            dictionary of statistics
        """
        inputmatrix, fundreturns, indexes, daterange = self.align(startdate, enddate, mktbasket)
        if self.mapping and not(inputmatrix is None):
            weights = scipy.array([self.mapping[mykey] if mykey in self.mapping else 0.0 for mykey in mktbasket.keys()])
            projected = scipy.dot(inputmatrix,weights.reshape(len(indexes),1)).flatten()
            actual = fundreturns.flatten()
            diff = actual-projected
            outdata = {
                     'TE'     : scipy.std(diff)*100.0*100.0,
                     'BETA'   : scipy.cov(projected,actual)[1,0]/scipy.var(projected),
                     'ALPHA'  : (scipy.product(diff+1.0))**(1.0/diff.size)-1.0,
                     'VOL'    : scipy.std(actual)*scipy.sqrt(252.0),
                     'PROJ'   : scipy.product(1.0+projected)-1.0,
                     'ACT'    : scipy.product(1.0+actual)-1.0,
                     'R2'     : 0.0 if scipy.all(actual==0.0) else scipy.corrcoef(projected,actual)[1,0]**2.0,
                     'AV'     : self.av(startdate),
                     'DELTA'  : self.deltaestimate(startdate)
                    }
            outdata['DIFF'] = outdata['ACT']-outdata['PROJ']
            outdata['PL'] = outdata['DELTA']*outdata['DIFF']*100.0 
            if output:
                cnxn = pyodbc.connect(ORACLESTRING)
                cursor = cnxn.cursor()
                sql = 'INSERT INTO FUNDOUTPUT VALUES ({0!s},{1!s},{2!s},{3!s},{4!s},{5!s},{6},{7},{8!s},{9!s},{10!s},{11!s},{12!s},{13!s});'
                sql = sql.format(self.fundcode,outdata['PROJ'],outdata['ACT'],outdata['DIFF'],
                           outdata['DELTA'],outdata['PL'],oracledatebuilder(startdate),
                           oracledatebuilder(enddate),outdata['TE'],outdata['R2'],outdata['BETA'],
                           outdata['ALPHA'],outdata['VOL'],outdata['AV'])
                cursor.execute(sql)
                cnxn.commit()            
                cnxn.close()
Developer: deppyboy, Project: Regression, Lines: 53, Source: fund.py



Note: The scipy.cov function examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.

