Python numpy.var Function Code Examples


This article collects typical usage examples of the numpy.var function in Python. If you have been wondering what numpy.var does, how to call it, or what it looks like in real code, the curated examples below should help.



The following shows 20 code examples of the var function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
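
Before diving into the examples, here is a minimal refresher sketch of np.var itself: by default it returns the population variance (ddof=0); pass ddof=1 for the unbiased sample variance, and axis to reduce along a chosen axis. The values in the comments are exact for these inputs.

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
print(np.var(a))           # 1.25 (population variance, ddof=0)
print(np.var(a, ddof=1))   # 1.666... (sample variance)

m = np.arange(6.0).reshape(2, 3)
print(np.var(m, axis=0))   # [2.25 2.25 2.25] (per-column variance)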

Example 1: _call

    def _call(self, dataset):
        """Computes featurewise scores."""

        attrdata = dataset.sa[self.__attr].value
        if np.issubdtype(attrdata.dtype, 'c'):
            raise ValueError("Correlation coefficent measure is not meaningful "
                             "for datasets with literal labels.")

        samples = dataset.samples
        pvalue_index = self.__pvalue
        result = np.empty((dataset.nfeatures,), dtype=float)

        for ifeature in range(dataset.nfeatures):
            samples_ = samples[:, ifeature]
            corr = pearsonr(samples_, attrdata)
            corrv = corr[pvalue_index]
            # Should be safe to assume 0 corr_coef (or 1 pvalue) if value
            # is actually NaN, although it might not be the case (covar of
            # 2 constants would be NaN although should be 1)
            if np.isnan(corrv):
                if np.var(samples_) == 0.0 and np.var(attrdata) == 0.0 \
                   and len(samples_):
                    # constant terms
                    corrv = 1.0 - pvalue_index
                else:
                    corrv = pvalue_index
            result[ifeature] = corrv

        return Dataset(result[np.newaxis])
Author: arnaudsj | Project: PyMVPA | Lines: 29 | Source: corrcoef.py


Example 2: r2_score

def r2_score(y_true, y_pred, round_to=2):
    R"""R-squared for Bayesian regression models. Only valid for linear models.
    http://www.stat.columbia.edu/%7Egelman/research/unpublished/bayes_R2.pdf

    Parameters
    ----------
    y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Ground truth (correct) target values.
    y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
        Estimated target values.
    round_to : int
        Number of decimals used to round results (default 2).

    Returns
    -------
    `namedtuple` with the following elements:
    R2_median: median of the Bayesian R2
    R2_mean: mean of the Bayesian R2
    R2_std: standard deviation of the Bayesian R2
    """
    dimension = None
    if y_true.ndim > 1:
        dimension = 1

    var_y_est = np.var(y_pred, axis=dimension)
    var_e = np.var(y_true - y_pred, axis=dimension)

    r2 = var_y_est / (var_y_est + var_e)
    r2_median = np.around(np.median(r2), round_to)
    r2_mean = np.around(np.mean(r2), round_to)
    r2_std = np.around(np.std(r2), round_to)
    r2_r = namedtuple('r2_r', 'r2_median, r2_mean, r2_std')
    return r2_r(r2_median, r2_mean, r2_std)
Author: zaxtax | Project: pymc3 | Lines: 33 | Source: stats.py
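
A quick usage sketch for r2_score as defined above (the synthetic data is invented for illustration; numpy and namedtuple are assumed to be imported as in the source module). Rows of the inputs play the role of posterior-predictive draws:

import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(size=100)                             # observed data
draws = y + rng.normal(scale=0.5, size=(500, 100))   # 500 posterior-predictive draws
print(r2_score(np.tile(y, (500, 1)), draws))         # r2_r namedtuple: median/mean/std of per-draw R2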


Example 3: average_data

def average_data(data):
    """
    Find mean and std. deviation of data returned by ``simulate``.
    """
    numnodes = data['nodes']
    its = data['its']
    its_mean = numpy.average(its)
    its_std = math.sqrt(numpy.var(its))
    dead = data['dead']
    dead_mean = 100.0*numpy.average(dead)/numnodes
    dead_std = 100.0*math.sqrt(numpy.var(dead))/numnodes
    immune = data['immune']
    immune_mean = 100.0*numpy.average(immune)/numnodes
    immune_std = 100.0*math.sqrt(numpy.var(immune))/numnodes
    max_contam = data['max_contam']
    max_contam_mean = 100.0*numpy.average(max_contam)/numnodes
    max_contam_std = 100.0*math.sqrt(numpy.var(max_contam))/numnodes
    normal = data['normal']
    normal_mean = 100.0*numpy.average(normal)/numnodes
    normal_std = 100.0*math.sqrt(numpy.var(normal))/numnodes
    return {'its': (its_mean, its_std),
            'nodes': numnodes,
            'dead': (dead_mean, dead_std),
            'immune': (immune_mean, immune_std),
            'max_contam': (max_contam_mean, max_contam_std),
            'normal': (normal_mean, normal_std)}
Author: 3lectrologos | Project: sna | Lines: 26 | Source: diffuse.py
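
Incidentally, math.sqrt(numpy.var(x)) as used above is identical to numpy.std(x) for the same array, so the helper could drop the math module entirely; a minimal check:

import math
import numpy

x = numpy.array([1.0, 2.0, 4.0])
assert math.isclose(math.sqrt(numpy.var(x)), numpy.std(x))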


Example 4: _get_likelihood

    def _get_likelihood(self, model):
        """Compute the marginal likelihood of the linear model with a g-prior on betas.

        Parameters
        ----------
        model : np.ndarray in R^ndim
            vector of variable inclusion indicators

        Returns
        -------
        float
            log marginal likelihood
        """

        X = self.X[:, model == 1]
        y = self.y
        nobs, ndim = X.shape
        design = np.hstack((np.ones((nobs, 1)), X))

        mle = np.linalg.solve(np.dot(design.T, design), np.dot(design.T, y))
        residuals = y - np.dot(design, mle)
        rsquared = 1 - np.var(residuals) / np.var(y)

        return (log_gamma((nobs - 1) / 2)
            - (nobs - 1) / 2 * np.log(np.pi)
            - 0.5 * np.log(nobs)
            - (nobs - 1) / 2 * np.log(np.dot(residuals, residuals))
            + (nobs - ndim - 1) / 2 * np.log(1 + self.par["penalty"])
            - (nobs - 1) / 2 * np.log(1 + self.par["penalty"] * (1 - rsquared)))
Author: martinaragoneses | Project: bma | Lines: 29 | Source: linear_averaging.py


Example 5: test_bernoulli_extract

    def test_bernoulli_extract(self):
        fit = self.fit
        extr = fit.extract(permuted=True)
        assert -7.4 < np.mean(extr['lp__']) < -7.0
        assert 0.1 < np.mean(extr['theta']) < 0.4
        assert 0.01 < np.var(extr['theta']) < 0.02

        # use __getitem__
        assert -7.4 < np.mean(fit['lp__']) < -7.0
        assert 0.1 < np.mean(fit['theta']) < 0.4
        assert 0.01 < np.var(fit['theta']) < 0.02

        # permuted=False
        extr = fit.extract(permuted=False)
        self.assertEqual(extr.shape, (1000, 4, 2))
        self.assertTrue(0.1 < np.mean(extr[:, 0, 0]) < 0.4)

        # permuted=True
        extr = fit.extract('lp__', permuted=True)
        assert -7.4 < np.mean(extr['lp__']) < -7.0
        extr = fit.extract('theta', permuted=True)
        assert 0.1 < np.mean(extr['theta']) < 0.4
        assert 0.01 < np.var(extr['theta']) < 0.02
        extr = fit.extract('theta', permuted=False)
        assert extr.shape == (1000, 4, 2)
        assert 0.1 < np.mean(extr[:, 0, 0]) < 0.4
Author: Aleyasen | Project: pystan | Lines: 26 | Source: test_basic.py


Example 6: bhattacharyya_dist

def bhattacharyya_dist (X, y):

    classes = np.unique(y)
    n_class = len(classes)
    n_feats = X.shape[1]

    b = np.zeros(n_feats)
    for i in np.arange(n_class):
        for j in np.arange(i+1, n_class):
            if j > i:
                # index by the actual class labels, which need not be 0..n_class-1
                xi = X[y == classes[i], :]
                xj = X[y == classes[j], :]

                mi = np.mean (xi, axis=0)
                mj = np.mean (xj, axis=0)

                vi = np.var  (xi, axis=0)
                vj = np.var  (xj, axis=0)

                si = np.std  (xi, axis=0)
                sj = np.std  (xj, axis=0)

                d  = 0.25 * (np.square(mi - mj) / (vi + vj)) + 0.5  * (np.log((vi + vj) / (2*si*sj)))
                d[np.isnan(d)] = 0
                d[np.isinf(d)] = 0

                b = np.maximum(b, d)

    return b
Author: borjaayerdi | Project: oasis_feets | Lines: 29 | Source: do_classification.py
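
A small usage sketch for bhattacharyya_dist above (the two-class synthetic data is invented for illustration):

import numpy as np

rng = np.random.default_rng(1)
X = np.vstack([rng.normal(0.0, 1.0, (20, 3)),    # class 0
               rng.normal(2.0, 1.0, (20, 3))])   # class 1, shifted by 2 in every feature
y = np.array([0] * 20 + [1] * 20)
print(bhattacharyya_dist(X, y))   # one pairwise-maximum distance per feature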


Example 7: curv_fit

def curv_fit(x=None, y=None, model=None):
    
    x = np.array(x)    
    y = np.array(y)    
    params = lmfit.Parameters()
    
    if model == 'gaussian':
        mod = lmfit.models.GaussianModel()
        params = mod.guess(y, x=x)
        out = mod.fit(y,params, x=x)
        r_sq = 1 - out.residual.var()/np.var(y)
        
    elif model == '4PL':
        mod = lmfit.Model(logistic_4p)
        params.add('la', value=1.0)
        params.add('gr', value=120.0, vary=False)
        params.add('ce', value=150.0)
        params.add('ua', value=3.0)
        out = mod.fit(y, params,x=x)
        r_sq = 1 - out.residual.var()/np.var(y)
        
    elif model == '5PL':
        mod = lmfit.Model(logistic_5p)
        params.add('la', value=1.0)
        params.add('gr', value=1.0)
        params.add('ce', value=1.0)
        params.add('ua', value=1.0)
        params.add('sy', value=1.0)
        out = mod.fit(y, params, x=x)
        r_sq = 1 - out.residual.var()/np.var(y)

    else:
        raise ValueError("unknown model: %r" % model)

    out.R_sq = r_sq
    return out
Author: rvalenzuelar | Project: tta_climatology | Lines: 33 | Source: curve_fitting.py
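
A usage sketch for the 'gaussian' branch of curv_fit (illustrative noisy data; lmfit must be installed, and the '4PL'/'5PL' branches additionally need the logistic_4p/logistic_5p helpers from the source module):

import numpy as np

x = np.linspace(-5.0, 5.0, 101)
y = np.exp(-x**2 / 2.0) + np.random.default_rng(0).normal(scale=0.02, size=x.size)
out = curv_fit(x=x, y=y, model='gaussian')
print(out.R_sq)   # close to 1.0 for a good fit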


Example 8: AsianCallSimPrice

def AsianCallSimPrice(S0, K, T, r, sigma, M, I, CV=False):
    dt = T / M
    S = np.zeros((M + 1, I))
    z = np.random.standard_normal((M + 1, I))  # pseudorandom numbers
    Savg = np.zeros(I)
    S[0] = S0
    S = S0 * np.exp(np.cumsum((r - 0.5 * sigma ** 2) * dt + sigma * np.sqrt(dt) * z, axis=0))
    Savg = np.average(S, axis=0)
    if not CV:
        price = np.exp(-r * T) * np.sum(np.maximum(Savg - K, 0)) / I
        error = math.sqrt(np.var(np.maximum(Savg - K, 0))) / math.sqrt(I)
        result = (price, error)
    else:
        Tvector = np.arange(dt, T + dt, dt)
        T_avg = Tvector.mean()
        i_vector = np.arange(1, 2 * M + 1, 2)
        sigma_avg = math.sqrt(sigma ** 2 / (M ** 2 * T_avg) * np.dot(i_vector, Tvector[::-1]))
        delta = 0.5 * (sigma ** 2 - sigma_avg ** 2)
        d = (math.log(S0 / K) + (r - delta + 0.5 * sigma_avg ** 2) * T_avg) / (sigma_avg * math.sqrt(T_avg))
        GeomAsianCall = np.exp(-delta * T_avg) * S0 * scipy.stats.norm.cdf(d) - np.exp(
            -r * T_avg
        ) * K * scipy.stats.norm.cdf(d - sigma_avg * math.sqrt(T_avg))
        S_CV = scipy.stats.mstats.gmean(S, axis=0)
        X = np.exp(-r * T) * np.maximum(S_CV - K, 0)
        Y = np.exp(-r * T) * np.maximum(Savg - K, 0)
        b = np.cov(X, Y)[0][1] / X.var()
        price = Y.mean() - b * (X.mean() - GeomAsianCall)
        error = math.sqrt(np.var(Y - b * X)) / math.sqrt(I)
        rho = np.corrcoef(X, Y)[0][1]
        result = (price, error, rho)
    return result
Author: ikromanov | Project: python_for_finance | Lines: 31 | Source: bsm_mcs_asian.py
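
A hedged usage sketch for AsianCallSimPrice (the parameter values are illustrative; numpy, math, and scipy are assumed to be imported as in the source module). With the control variate enabled, the reported standard error should drop sharply and rho should be close to 1:

price, error = AsianCallSimPrice(S0=100.0, K=100.0, T=1.0, r=0.05,
                                 sigma=0.2, M=50, I=100000)
price_cv, error_cv, rho = AsianCallSimPrice(100.0, 100.0, 1.0, 0.05,
                                            0.2, 50, 100000, CV=True)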


Example 9: calc_com

def calc_com(mask):
    pts = index_to_zyx( mask )

    z = pts[0,:].astype(float).mean()
    # Correct Center of Mass for reentrant domain
    y1 = pts[1,:].astype(float)
    x1 = pts[2,:].astype(float)
    y2 = (y1 < ny/2.)*y1 + (y1>= ny/2.)*(y1 - ny)
    x2 = (x1 < nx/2.)*x1 + (x1>= nx/2.)*(x1 - nx)
    y1m = y1.mean()
    y2m = y2.mean()
    x1m = x1.mean()
    x2m = x2.mean()
    
    if numpy.var(y2 - y2m) > numpy.var(y1 - y1m):
        y = y1m
    else:
        y = (y2m + .5)%ny - .5
        
    if numpy.var(x2 - x2m) > numpy.var(x1 - x1m):
        x = x1m
    else:
        x = (x2m + .5)%nx - .5
        
    return numpy.array((z, y, x))
Author: phaustin | Project: cloud_tracker | Lines: 25 | Source: model_param.py


Example 10: log_evidence

def log_evidence(X, y, g):
    """Compute the model's log evidence (a.k.a. marginal likelihood).

    Parameters
    ----------
    X : np.ndarray in R^(nobs x ndim)
        feature matrix
    y : np.ndarray in R^nobs
        target vector
    g : float (0, inf)
        dimensionality penalty

    Returns
    -------
    float
        log evidence
    """

    n, d = X.shape
    X_int = np.hstack((np.ones((n, 1)), X))

    mle = np.linalg.solve(np.dot(X_int.T, X_int), np.dot(X_int.T, y))
    resid = y - np.dot(X_int, mle)
    rsq = (d > 0 and 1 - np.var(resid) / np.var(y)) or 0

    return (log_gamma((n - 1) / 2)
        - (n - 1) / 2 * np.log(np.pi)
        - 0.5 * np.log(n)
        - (n - 1) / 2 * np.log(np.dot(resid, resid))
        + (n - d - 1) / 2 * np.log(1 + 1 / g)
        - (n - 1) / 2 * np.log(1 + 1 / g * (1 - rsq)))
Author: timsf | Project: bma | Lines: 31 | Source: linear_regression.py
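
Written out, the return value of log_evidence above is the closed-form log marginal likelihood under the g-prior, with $r = y - X_{\mathrm{int}}\hat{\beta}$ the residual vector and $R^2 = 1 - \operatorname{var}(r)/\operatorname{var}(y)$:

$$\log p(y \mid X, g) = \log\Gamma\Big(\tfrac{n-1}{2}\Big) - \tfrac{n-1}{2}\log\pi - \tfrac{1}{2}\log n - \tfrac{n-1}{2}\log(r^\top r) + \tfrac{n-d-1}{2}\log\Big(1 + \tfrac{1}{g}\Big) - \tfrac{n-1}{2}\log\Big(1 + \tfrac{1}{g}(1 - R^2)\Big)$$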


Example 11: main

def main():
    images, labels = load_labeled_training(flatten=True)
    images = standardize(images)
    unl = load_unlabeled_training(flatten=True)
    unl = standardize(unl)
    test = load_public_test(flatten=True)
    test = standardize(test)
    shuffle_in_unison(images, labels)
    #d = DictionaryLearning().fit(images)
    d = MiniBatchDictionaryLearning(n_components=500, n_iter=500, verbose=True).fit(images)
    s = SparseCoder(d.components_)
    proj_test = s.transform(images)
    pt = s.transform(test)
    #kpca = KernelPCA(kernel="rbf")
    #kpca.fit(unl)
    #test_proj = kpca.transform(images)
    #pt = kpca.transform(test)
    #spca = SparsePCA().fit(unl)
    #test_proj = spca.transform(images)
    #pt = spca.transform(test)
    svc = SVC()
    scores = cross_validation.cross_val_score(svc, proj_test, labels, cv=10)
    print(scores)
    print(np.mean(scores))
    print(np.var(scores))
    svc.fit(proj_test, labels)
    pred = svc.predict(pt)
    write_results(pred, '../svm_res.csv')
Author: deepxkn | Project: facial-expression-recognition-1 | Lines: 28 | Source: pca_sparse_svm.py


Example 12: classify_2d

def classify_2d(data_a, data_b, x):
    x1 = x[0]
    x2 = x[1]

    probability_a = data_a.shape[1] / (data_a.shape[1] + data_b.shape[1])
    probability_b = data_b.shape[1] / (data_a.shape[1] + data_b.shape[1])

    mean_x1_a = np.mean(data_a[0,:])
    mean_x2_a = np.mean(data_a[1,:])

    mean_x1_b = np.mean(data_b[0,:])
    mean_x2_b = np.mean(data_b[1,:])

    variance_x1_a = np.var(data_a[0,:])
    variance_x2_a = np.var(data_a[1,:])

    variance_x1_b = np.var(data_b[0,:])
    variance_x2_b = np.var(data_b[1,:])

    # mlab.normpdf expects the standard deviation, so take the square root of the variance
    pd_x1_given_a = mlab.normpdf(x1, mean_x1_a, np.sqrt(variance_x1_a))
    pd_x2_given_a = mlab.normpdf(x2, mean_x2_a, np.sqrt(variance_x2_a))
    pd_x1_given_b = mlab.normpdf(x1, mean_x1_b, np.sqrt(variance_x1_b))
    pd_x2_given_b = mlab.normpdf(x2, mean_x2_b, np.sqrt(variance_x2_b))

    posterior_numerator_a = probability_a * pd_x1_given_a * pd_x2_given_a
    posterior_numerator_b = probability_b * pd_x1_given_b * pd_x2_given_b

    posterior_numerators = { 'A': posterior_numerator_a, 'B': posterior_numerator_b }

    return max(posterior_numerators, key=posterior_numerators.get)
Author: thomasbrus | Project: machine-learning | Lines: 30 | Source: assignment-7_2a.py


Example 13: findvdisp3

	def findvdisp3(self,r,v,mags,r200,maxv):
		"use red sequence to find members"
		binedge = np.arange(0,r200+1,0.3)
		rin = r
		vin = v
		colin = mags.T[1] - mags.T[2]
		avg_c = np.average(colin)
		vfinal = np.array([])
		for i in range(binedge.size-1):
			i += 1
			x = rin[np.where((rin>binedge[i-1]) & (rin<binedge[i]))]
			y = vin[np.where((rin>binedge[i-1]) & (rin<binedge[i]))]
			c = colin[np.where((rin>binedge[i-1]) & (rin<binedge[i]))]
			for k in range(6):
				y2 = y
				x2 = x
				c2 = c
				stv = 3.5 * np.std(y2)
				y = y2[np.where((y2 > -stv) & (y2 < stv) | ((c2<avg_c+0.04) & (c2>avg_c-0.04)))]
				x = x2[np.where((y2 > -stv) & (y2 < stv) | ((c2<avg_c+0.04) & (c2>avg_c-0.04)))]
				c = c2[np.where((y2 > -stv) & (y2 < stv) | ((c2<avg_c+0.04) & (c2>avg_c-0.04)))]
			vstd2 = np.std(y)
			vvar2 = np.var(y)
			print('standard dev of zone %i = %f' % (i, vstd2))
			vfinal = np.append(y[np.where((y<vvar2) & (y>-vvar2))],vfinal)
		return np.var(vfinal)
Author: nkern | Project: Caustic | Lines: 26 | Source: flux_caustics_ideal.py


Example 14: _tTest

    def _tTest(x, y, exclude=95):
        """Compute a one-sided Welsh t-statistic."""
        with np.errstate(all="ignore"):
            def cappedSlog(v):
                q = np.percentile(v, exclude)
                v2 = v.copy()
                v2 = v2[~np.isnan(v2)]
                v2[v2 > q] = q
                v2[v2 <= 0] = 1. / (75 + 1)
                return np.log(v2)
            x1 = cappedSlog(x)
            x2 = cappedSlog(y)
            sx1 = np.var(x1) / len(x1)
            sx2 = np.var(x2) / len(x2)
            totalSE = np.sqrt(sx1 + sx2)
            if totalSE == 0:
                stat = 0
            else:
                stat = (np.mean(x1) - np.mean(x2)) / totalSE

            #df   = (sx1 + sx2)**2 / (sx1**2/(len(x1)-1) + sx2**2/(len(x2) - 1))
            #pval = 1 - scidist.t.cdf(stat, df)

            # Scipy's t distribution CDF implementation has inadequate
            # precision.  We have switched to the normal distribution for
            # better-behaved p-values.
            pval = 0.5 * erfc(stat / sqrt(2))

            return {'testStatistic': stat, 'pvalue': pval}
Author: PacificBiosciences | Project: kineticsTools | Lines: 29 | Source: KineticWorker.py


Example 15: calc_twosample_ts

def calc_twosample_ts(propGroup1, propGroup2):
    n1 = len(propGroup1[0])
    n2 = len(propGroup2[0])
    numFeatures = len(propGroup1)

    T_statistics = []
    effectSizes = []
    notes = []
    for r in range(numFeatures):
        meanG1 = float(sum(propGroup1[r])) / n1
        varG1 = var(propGroup1[r], ddof=1)
        stdErrG1 = varG1 / n1

        meanG2 = float(sum(propGroup2[r])) / n2
        varG2 = var(propGroup2[r], ddof=1)
        stdErrG2 = varG2 / n2

        dp = meanG1 - meanG2
        effectSizes.append(dp * 100)

        denom = math.sqrt(stdErrG1 + stdErrG2)

        if denom == 0:
            notes.append("degenerate case: zero variance for both groups; variance set to 1e-6.")
            T_statistics.append(dp / 1e-6)
        else:
            notes.append("")
            T_statistics.append(dp / denom)

    return T_statistics, effectSizes, notes
Author: jnesme | Project: STAMP | Lines: 30 | Source: White.py
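
A small usage sketch for calc_twosample_ts (made-up proportions; `var` is assumed to be numpy.var imported into the module namespace, so ddof=1 gives the unbiased sample variance):

# two features, three samples per group
propGroup1 = [[0.10, 0.12, 0.11], [0.30, 0.28, 0.33]]
propGroup2 = [[0.20, 0.22, 0.19], [0.31, 0.30, 0.29]]
t_stats, effect_sizes, notes = calc_twosample_ts(propGroup1, propGroup2)
print(t_stats)         # one Welch-style t statistic per feature
print(effect_sizes)    # difference of group means, in percentage points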


Example 16: XDapogee

def XDapogee(options,args):
    #First load the chains
    savefile= open(args[0],'rb')
    thesesamples= pickle.load(savefile)
    savefile.close()
    vcs= numpy.array([s[0] for s in thesesamples])*_APOGEEREFV0/_REFV0
    dvcdrs= numpy.array([s[6] for s in thesesamples])*30. #To be consistent with this project's dlnvcdlnr 
    print(numpy.mean(vcs))
    print(numpy.mean(dvcdrs))
    #Now fit XD to the 2D PDFs
    ydata= numpy.zeros((len(vcs),2))
    ycovar= numpy.zeros((len(vcs),2))
    ydata[:,0]= numpy.log(vcs)
    ydata[:,1]= dvcdrs
    vcxamp= numpy.ones(options.g)/options.g
    vcxmean= numpy.zeros((options.g,2))
    vcxcovar= numpy.zeros((options.g,2,2))
    for ii in range(options.g):
        vcxmean[ii,:]= numpy.mean(ydata,axis=0)+numpy.std(ydata,axis=0)*numpy.random.normal(size=(2))/4.
        vcxcovar[ii,0,0]= numpy.var(ydata[:,0])
        vcxcovar[ii,1,1]= numpy.var(ydata[:,1])
    extreme_deconvolution.extreme_deconvolution(ydata,ycovar,
                                                vcxamp,vcxmean,vcxcovar)
    save_pickles(options.plotfile,
                 vcxamp,vcxmean,vcxcovar)
    print(vcxamp)
    print(vcxmean[:,0])
    print(vcxmean[:,1])
    return None
Author: jobovy | Project: segue-maps | Lines: 29 | Source: XDapogee.py


Example 17: explained_variance_score

def explained_variance_score(y_true, y_pred):
    """Explained variance regression score function.

    The best possible score is 1.0; lower values are worse.

    Note: the explained variance is not a symmetric function.

    Parameters
    ----------
    y_true : array-like

    y_pred : array-like

    Returns
    -------
    float
        The explained variance.
    """
    y_true, y_pred = check_arrays(y_true, y_pred)
    numerator = np.var(y_true - y_pred)
    denominator = np.var(y_true)
    if denominator == 0.0:
        if numerator == 0.0:
            return 1.0
        else:
            # arbitrarily set to zero to avoid -inf scores; having a constant
            # y_true is not interesting for scoring a regression anyway
            return 0.0
    return 1 - numerator / denominator
Author: buhrmann | Project: scikit-learn | Lines: 27 | Source: metrics.py
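
A worked example for explained_variance_score (values chosen so the arithmetic is easy to verify by hand; the module's own imports, including the old check_arrays helper, are assumed):

import numpy as np

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
# var(y_true - y_pred) = 0.3125 and var(y_true) = 7.296875,
# so the score is 1 - 0.3125 / 7.296875 ≈ 0.9572
print(explained_variance_score(y_true, y_pred))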


Example 18: calc_error

def calc_error(data):
    """
    Error estimation for time series of simulation observables, taking into
    account that such series are correlated to some degree (which increases
    the estimated statistical error).
    """
    # calculate the normalized autocorrelation function of data
    acf = autocorrelation(data)
    # calculate the integrated correlation time tau_int
    # (Janke, Wolfhard. "Statistical analysis of simulations: Data correlations
    # and error estimation." Quantum Simulations of Complex Many-Body Systems:
    # From Theory to Algorithms 10 (2002): 423-445.)
    tau_int = 0.5
    for i in range(len(acf)):
        tau_int += acf[i]
        if ( i >= 6 * tau_int ):
            break
    # mean value of the time series
    data_mean = np.mean(data)
    # calculate the so called effective length of the time series N_eff
    if (tau_int > 0.5):
        N_eff = len(data) / (2.0 * tau_int)
        # finally the error is sqrt(var(data)/N_eff)
        stat_err = np.sqrt(np.var(data) / N_eff)
    else:
        stat_err = np.sqrt(np.var(data) / len(data))
    return data_mean, stat_err
Author: KaiSzuttor | Project: kaipy | Lines: 27 | Source: statistic.py
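
calc_error relies on an autocorrelation helper that is not shown in the snippet. A minimal sketch of what such a normalized autocorrelation function could look like (a plain numpy implementation, not necessarily the source project's):

import numpy as np

def autocorrelation(data):
    """Normalized autocorrelation rho(k) for lags k = 0 .. len(data)-1."""
    x = np.asarray(data, dtype=float) - np.mean(data)
    n = len(x)
    acf = np.correlate(x, x, mode='full')[n - 1:]   # keep lags k >= 0
    return acf / acf[0]                             # normalize so rho(0) = 1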


Example 19: welch_ttest

def welch_ttest (X, y):

    classes = np.unique(y)
    n_class = len(classes)
    n_feats = X.shape[1]

    b = np.zeros(n_feats)
    for i in np.arange(n_class):
        for j in np.arange(i+1, n_class):
            if j > i:
                # index by the actual class labels, which need not be 0..n_class-1
                xi = X[y == classes[i], :]
                xj = X[y == classes[j], :]
                yi = y[y == classes[i]]
                yj = y[y == classes[j]]

                mi = np.mean (xi, axis=0)
                mj = np.mean (xj, axis=0)

                vi = np.var  (xi, axis=0)
                vj = np.var  (xj, axis=0)

                n_subjsi = len(yi)
                n_subjsj = len(yj)

                # vi and vj are already variances; the Welch standard error uses v / n
                t = (mi - mj) / np.sqrt((vi / n_subjsi) + (vj / n_subjsj))
                t[np.isnan(t)] = 0
                t[np.isinf(t)] = 0

                b = np.maximum(b, t)

    return b
Author: borjaayerdi | Project: oasis_feets | Lines: 31 | Source: do_classification.py


Example 20: test_pairwise_distances_data_derived_params

def test_pairwise_distances_data_derived_params(n_jobs, metric, dist_function,
                                                y_is_x):
    # check that pairwise_distances give the same result in sequential and
    # parallel, when metric has data-derived parameters.
    with config_context(working_memory=1):  # to have more than 1 chunk
        rng = np.random.RandomState(0)
        X = rng.random_sample((1000, 10))

        if y_is_x:
            Y = X
            expected_dist_default_params = squareform(pdist(X, metric=metric))
            if metric == "seuclidean":
                params = {'V': np.var(X, axis=0, ddof=1)}
            else:
                params = {'VI': np.linalg.inv(np.cov(X.T)).T}
        else:
            Y = rng.random_sample((1000, 10))
            expected_dist_default_params = cdist(X, Y, metric=metric)
            if metric == "seuclidean":
                params = {'V': np.var(np.vstack([X, Y]), axis=0, ddof=1)}
            else:
                params = {'VI': np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T}

        expected_dist_explicit_params = cdist(X, Y, metric=metric, **params)
        dist = np.vstack(tuple(dist_function(X, Y,
                                             metric=metric, n_jobs=n_jobs)))

        assert_allclose(dist, expected_dist_explicit_params)
        assert_allclose(dist, expected_dist_default_params)
Author: scikit-learn | Project: scikit-learn | Lines: 29 | Source: test_pairwise.py



Note: the numpy.var examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are drawn from open-source projects contributed by their authors, and the copyright belongs to the original authors; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.

