• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python numpy.nansum函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中numpy.nansum函数的典型用法代码示例。如果您正苦于以下问题:Python nansum函数的具体用法?Python nansum怎么用?Python nansum使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了nansum函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: clean_weights

def clean_weights(weights,  must_haves=None, fraction=0.5):
    """Replace missing (NaN or zero) weights for required entries and renormalise.

    Entries flagged in `must_haves` that are NaN/0.0 receive an equal share of
    a replacement budget (scaled by `fraction`); NaN/0.0 entries that are not
    required stay at zero; the remaining weights are scaled down so the
    result sums to 1.0. Returns the input unchanged when nothing is missing.
    """
    if must_haves is None:
        must_haves = [True] * len(weights)
    if not any(must_haves):
        return [0.0] * len(weights)

    # An entry is "empty" when it is NaN or exactly zero.
    empty = [np.isnan(w) or w == 0.0 for w in weights]
    needs_replacing = [e and m for e, m in zip(empty, must_haves)]
    keep_empty = [e and not m for e, m in zip(empty, must_haves)]
    # Not empty at all == neither replaced nor kept empty.
    no_replacement_needed = [not e for e in empty]

    if not any(needs_replacing):
        return weights

    missing_weights = sum(needs_replacing)
    total_for_missing_weights = fraction * missing_weights / (
        float(np.nansum(no_replacement_needed) + np.nansum(missing_weights)))
    adjustment_on_rest = 1.0 - total_for_missing_weights
    each_missing_weight = total_for_missing_weights / missing_weights

    cleaned = []
    for idx, value in enumerate(weights):
        if needs_replacing[idx]:
            cleaned.append(each_missing_weight)
        elif keep_empty[idx]:
            cleaned.append(0.0)
        else:
            cleaned.append(value * adjustment_on_rest)

    total = sum(cleaned)
    return [w / total for w in cleaned]
开发者ID:caitouwh,项目名称:kod,代码行数:31,代码来源:tw2.py


示例2: logprob2dslope

def logprob2dslope(p, x, y, x_err, y_err):
    """Log-probability of a zero-intercept line with slope p[0] and intrinsic
    scatter p[1], given data (x, y) and per-point errors (x_err, y_err)."""
    slope, scatter = p[0], p[1]
    # Negative intrinsic scatter is unphysical: zero probability.
    if scatter < 0.0:
        return -np.inf
    # Total variance per point: scatter plus propagated x/y errors.
    variance = scatter + y_err ** 2 + slope ** 2 * x_err ** 2
    residuals = y - slope * x
    return (-0.5 * np.nansum(residuals ** 2 / variance)
            - 0.5 * np.nansum(np.log(variance)))
开发者ID:low-sky,项目名称:colira,代码行数:7,代码来源:lptest.py


示例3: test_nansum_with_boolean

 def test_nansum_with_boolean(self):
     # gh-2978
     a = np.zeros(2, dtype=bool)
     try:
         np.nansum(a)
     except Exception:
         raise AssertionError()
开发者ID:Horta,项目名称:numpy,代码行数:7,代码来源:test_regression.py


示例4: exp1computeLL

 def exp1computeLL(self,dat,f):
     """Log-likelihood of responses `dat` given feedback `f` for experiment 1.

     Runs the model forward over T=20 trials, updating a belief vector
     a[t] after each trial, and accumulates the log-likelihood of the
     observed data under the model's response probabilities.

     NOTE(review): relies on module-level names D, getProb and data2a and on
     self.q0, self.u0, self.d, self.m, self.g, self.h — all defined elsewhere.
     The a[0,:]=np.ones(10) line suggests D == 10; confirm at module level.
     """
     T=20
     #initialize
     q=np.zeros(T+1); q[0]=self.q0
     u=np.zeros(T+1); u[0]=self.u0
     a=np.zeros((T+1,D));self.f=[]
     p=np.zeros((T+1,D));
     # Initial belief: uniform 1/3 over entries, last entry NaN, then split
     # into three groups weighted by q0 and u0.
     a[0,:]=np.ones(10)/3.0
     a[0,-1]=np.nan
     a[0,:3]*=q[0]
     a[0,3:6]*=(1-q[0])*u[0]
     a[0,6:]*=(1-q[0])*(1-u[0])
     phase=0
     LL=0
     #print a[0,:]
     for t in range(T):
         # Experiment phase switches after trial 10.
         if t>10: phase=1
         else: phase=0
         p[t,:]=getProb(a[t,:],self.d)
         m=data2a[dat[t],:,phase]
         w=np.power(a[t,:],self.m)
         # Trial log-likelihood; probabilities floored at 0.001 to avoid log(0).
         loglik= np.nansum(np.log(np.maximum(0.001,p[t,m==f[t]])))
         # Belief update: mix renormalised evidence with previous belief,
         # using learning rate g on positive feedback and h on negative.
         if f[t]==1:
             s=m*w
             a[t+1,:]= self.g*s/np.nansum(s) + (1-self.g)*a[t,:]
         else:
             s=(1-m)*w
             a[t+1,:]= self.h*s/np.nansum(s) + (1-self.h)*a[t,:]
         #print t,dat[t],f[t],np.nansum(p[t,m==f[t]]),loglik
         #print 'm= ',m
         #print 'p= ',p
         LL+=loglik
     return LL
开发者ID:simkovic,项目名称:toolbox,代码行数:33,代码来源:Model.py


示例5: crunchy3

def crunchy3(offset, eta, sec, sigma=None):
    """Inverse-variance-weighted average of `sec`'s values over a sheared
    coordinate grid, for a given `offset` and curvature `eta`.

    Returns (offset, weighted_mean). NOTE(review): depends on the external
    weight_function3 and on `sec` exposing get_y_axis / get_x_axis / get —
    presumably a 2-D data section object; confirm against callers.
    """
    powers = []
    powers_norm = []

    y_axis = sec.get_y_axis()
    x_axis = sec.get_x_axis()
    # Pixel sizes along y and x, used as lower bounds for sigma.
    px_y = np.absolute(y_axis[1] - y_axis[0])
    px_x = np.absolute(x_axis[1] - x_axis[0])

    if sigma is None:
        sigma = [px_y, px_x]
    # Clamp sigma to at least one pixel in each direction.
    if sigma[0] < px_y:
        sigma = [px_y, sigma[1]]
    if sigma[1] < px_x:
        sigma = [sigma[0], px_x]

    for yi in range(len(y_axis)):
        y = y_axis[yi]
        for xi in range(len(x_axis)):
            x = x_axis[xi]
            # Sheared coordinates: parabolic shift in y, linear shift in x.
            y_eff = y + eta * offset ** 2
            x_eff = x - offset
            this_weight = weight_function3(eta, y, x, y_eff, x_eff, sigma)
            if this_weight is None:
                # Keep placeholders so both lists stay index-aligned.
                powers.append(None)
                powers_norm.append(None)
            else:
                variance = 1 / this_weight
                powers.append(sec.get([yi, xi]) / variance)
                powers_norm.append(1 / variance)
    # filter(None, ...) drops the None placeholders (it also drops exact
    # zeros, which does not change the sums).
    p = np.nansum(list(filter(None, powers)))
    pn = np.nansum(list(filter(None, powers_norm)))
    return offset, p / pn
开发者ID:haukejung,项目名称:pulsarpkg,代码行数:33,代码来源:multiprocessing_helper_functions.py


示例6: Q_factor

def Q_factor(A, B):
    """Compute the "overlap" between the images A and B.

    Each image is L2-normalised (NaN-aware) and the overlap is the NaN-aware
    sum of the pixel-wise product of the two normalised images.
    """
    norm_a = np.nansum(A ** 2) ** 0.5
    norm_b = np.nansum(B ** 2) ** 0.5
    overlap = (A / norm_a) * (B / norm_b)
    return np.nansum(overlap)
开发者ID:mwcraig,项目名称:gammapy,代码行数:7,代码来源:overlap.py


示例7: bic

def bic(em_fit_result_dict, LL_all):
    """Compute the Bayesian Information Criterion score.

    BIC = -2*LL + sum of K*log(N) terms: the two global parameters
    (alpha, beta) are counted against every finite datapoint, and the
    mixture proportions are counted per condition T (one extra parameter,
    the non-target mixture, when T > 1).
    """
    finite_mask = np.isfinite(LL_all)

    # -2 * total log-likelihood over finite entries only.
    score = -2. * np.nansum(LL_all[finite_mask])

    # alpha and beta: K = 2 over all finite datapoints.
    score += 2 * np.log(np.nansum(finite_mask))

    # Mixture proportions per condition.
    for T_i, T in enumerate(em_fit_result_dict['T_space']):
        n_params = 2 + int(T > 1)
        score += n_params * np.log(np.nansum(finite_mask[T_i]))

    return score
开发者ID:Azhag,项目名称:Bayesian-visual-working-memory,代码行数:25,代码来源:em_circularmixture_parametrickappa.py


示例8: nansum

 def nansum(self, axis=None, dtype=None, out=None):
     """NaN-ignoring sum of the magnitude; uncertainties add in quadrature."""
     summed_magnitude = np.nansum(self.magnitude, axis, dtype, out)
     summed_uncertainty = (np.nansum(self.uncertainty.magnitude ** 2, axis)) ** 0.5
     return UncertainQuantity(
         summed_magnitude,
         self.dimensionality,
         summed_uncertainty,
         copy=False,
     )
开发者ID:CatherineH,项目名称:python-quantities,代码行数:7,代码来源:uncertainquantity.py


示例9: likelihood

    def likelihood(self, a=None, b=None, s=None):
        r"""Weighted Rasch/2PL log-likelihood with an L2 penalty.

        \sum_{i,j} [w_{ij}[y_{i,j} s_j(a_i + b_j)
                    - log(1 + exp(s_j(a_i + b_j)))]
        minus alpha * (||a||^2 + ||b||^2).

        Falls back to the current estimates (self.a_est / self.b_est and,
        for the 2PL model, self.s_est) when a, b and s are all omitted.
        """
        if ((a is None) and (b is None) and (s is None)):
            # list(...) so this also works on Python 3, where dict.values()
            # is a view that np.array would wrap as a 0-d object array.
            a = np.array(list(self.a_est.values()))
            b = np.array(list(self.b_est.values()))

            # Fix: compare with '==', not 'is' — identity comparison against
            # a string literal is implementation-dependent.
            if self.model == '2PL':
                s = np.array(list(self.s_est.values()))

        # Linear predictor c_{ij} = a_i + b_j (scaled by s_j for 2PL).
        c = a[self.obser['index_user']] + b[self.obser['index_item']]

        if (self.model == '2PL') and (s is not None):
            c = s[self.obser['index_item']] * c

        pos = self.data[self.response].values > 0

        # account for weights
        w = 1.0

        if self.wts is not None:
            w = _fc(self.data[self.wts])
            first_term = np.nansum(w[pos] * c[pos])
        else:
            first_term = np.nansum(c[pos])

        second_term = np.nansum(w * np.log(1 + np.exp(c)))

        return (first_term - second_term -
                self.alpha * np.sum(a*a) - self.alpha * np.sum(b*b))


示例10: effrad

def effrad(CL,inst,bindist='lin'):
    """Return the effective radius of cloud CL for instrument `inst` over the
    entire cloud period, in the instrument's size units (usually micrometers).
    If the instrument's size bins are diameters the result is an effective
    diameter.
        example: CloudObj.effrad(inst='FSSP96',bindist='lin')
        bindist is 'lin' if the bin spacing is linear (FSSPs) and 'log' if it
        is logarithmic (PCASP)."""
    # according to the formula in https://en.wikipedia.org/wiki/Cloud_drop_effective_radius latest access on Oct 2013.
    # Pick the size distribution whose name matches `inst` (case-insensitive).
    [pos,sd]=[[i,sd] for i,sd in enumerate(CL.sd) if sd["Distname"].lower() == inst.lower()][0]
    # building dr (dradius) vector: b = lower bin edges, h = upper bin edges
    R=sd["bins"]; t=len(R)
    b=np.zeros([t]);
    h=np.zeros([t]);
    if bindist=='lin':
        # Edges are arithmetic midpoints between consecutive bin centres;
        # the outermost edges are extrapolated symmetrically.
        for i in range(1,t):
            b[i]=(R[i-1]+R[i])/2.;
        for i in range(0,t-1):
            h[i]=(R[i+1]+R[i])/2.;
        b[0]=R[0]-(R[1]-h[0]);
        h[t-1]=R[t-1]+(b[t-1]-R[t-2]);
        dR=h-b;
    elif bindist=='log':
        # Edges are geometric midpoints (midpoints in log10 space).
        for i in range(1,t):
            b[i]=10**((np.log10(R[i-1])+np.log10(R[i]))/2.);
        for i in range(0,t-1):
            h[i]=10**((np.log10(R[i+1])+np.log10(R[i]))/2.);
        # NOTE(review): bare log10 (not np.log10) below raises NameError
        # unless log10 is imported at module level — confirm the import.
        b[0]=10**(np.log10(R[0])+(log10(R[1])-log10(h[1])));
        h[t-1]=10**(np.log10(R[t-1])-(log10(b[t-2])-np.log10(R[t-2])));
        dR=h-b;
    else: print("[effrad] bindist option entry is neither 'lin' or 'log'.")
    # calculating the effective radius: sum(r^3 dr n(r)) / sum(r^2 dr n(r))
    ER=np.nansum((sd["bins"]**3 *dR) * sd["data"].transpose(),axis=1)/np.nansum((sd["bins"]**2 *dR) * sd["data"].transpose(),axis=1)
    return ER
开发者ID:StephGagne,项目名称:SAMAC,代码行数:30,代码来源:effrad.py


示例11: LinearSolveAll

def LinearSolveAll():
    """Estimate crustal radiogenic heat production from density, surface heat
    flow and mantle heat flow grids read from ./DataFormatted.

    Returns crustal heat flow qc in mW/m^2 (qc*1e3). NOTE(review): relies on
    module-level os, np, plt and Nearest2D; input file formats are assumed
    from the genfromtxt calls below — confirm against the data files.
    """
    Dir=os.getcwd();
    DataDir=Dir + '/DataFormatted/';
    Locations=np.genfromtxt(DataDir+'SeismicLocations.csv');
    # Convert longitudes from 0..360 to the -180..180 convention.
    Locations[:,0]=Locations[:,0]-360;
    Density=np.genfromtxt(DataDir+'DenseAll.csv');
    Qs=np.genfromtxt(DataDir+'LongLatSurfaceHeat.csv',skip_header=1,delimiter=',');
    Qm=np.genfromtxt(DataDir+'MantleHeat.txt',skip_header=1,delimiter=',');
    # Nearest-neighbour interpolators for surface and mantle heat flow.
    QsInterp=Nearest2D(Qs[:,0:2],Qs[:,2]);
    QmInterp=Nearest2D(Qm[:,0:2],Qm[:,2]);
    
    Avocado=6.022e23; # Avogadro's number: mols to atoms conversion

    # Surface and mantle heat flow at the seismic locations (mW/m^2 -> W/m^2).
    qs=QsInterp(Locations[:,0:2])/1000;
    qm=QmInterp(Locations[:,0:2])/1000;
    #Density[Density>3.1]=np.nan;
    
    # Felsic fraction: 1 below density 2.7, 0 above 3.0, linear in between.
    Fels=(3-Density)/(0.3);
    Fels[Density<2.7]=1;
    Fels[Density>3]=0;
    years=365.24*24*60*60;#years to seconds conversion

    Depth=np.genfromtxt(DataDir+'Depth.csv');
    # Cell thickness in metres (Depth is in km).
    dz=(Depth[1]-Depth[0])*1000;
    
    UContentU=2.8e-6/238; #upper crust uranium content, mol/g
    ThContentU=UContentU*3.8/232; #upper crust thorium content, mol/g
    K40ContentU=2*120e-6*3.4e-2/94; #upper crust K-40 content, mol/g
    
    UContentL=0.2e-6/238; #mol/g of each cell (lower crust values)
    ThContentL=1.2e-6/232;
    K40ContentL=2*120e-6*0.6e-2/94;
    
    alpha238=7.41e-12;#Joules/decay
    alpha235=7.24e-12;#Joules/decay
    alpha232=6.24e-12;#Joules/decay
    beta=1.14e-13; #Joules/decay
    
    LamU238 = np.log(2)/(4.468*1e9);#% decay rate of U in years
    LamTh232 = np.log(2)/(1.405e10); # decay rate of Th in years
    LamU235 = np.log(2)/(703800000); #decay rate of 235U in years
    LamK40=np.log(2)/1.248e9;#decay rate of K40 in years
    
    # Heat production per gram for the lower crust (238U + 235U, Th, K40).
    UraniumHeatL=alpha238*Avocado*UContentL*LamU238/years+alpha235*Avocado*UContentL*LamU235/years/137.88;
    ThoriumHeatL=alpha232*Avocado*ThContentL*LamTh232/years;
    KHeatL=beta*Avocado*K40ContentL*LamK40/years;
    TotalHeatL=UraniumHeatL+ThoriumHeatL+KHeatL; # W/gram
    
    # Same for upper-crust abundances.
    UraniumHeatU=alpha238*Avocado*UContentU*LamU238/years+alpha235*Avocado*UContentU*LamU235/years/137.88;
    ThoriumHeatU=alpha232*Avocado*ThContentU*LamTh232/years;
    KHeatU=beta*Avocado*K40ContentU*LamK40/years;
    
    # Crustal heat flow = surface minus mantle contribution.
    qc=qs-qm;
    # Heat flux from the mafic (non-felsic) fraction of each column; the
    # remainder constrains the felsic heat production.
    FluxL=np.nansum((1-Fels)*TotalHeatL*dz*Density*1e6,0);
    # NOTE(review): TotalHeatU is computed but not returned or printed.
    TotalHeatU=(qc-FluxL)/np.nansum(Fels*Density*1e6*dz,0);
    
    print(TotalHeatL)
    print(dz)
    plt.close('all')
    return qc*1e3 #return in W/g
开发者ID:ksathaye,项目名称:HeatFlow,代码行数:60,代码来源:ODE70km.py


示例12: calculate_avg

def calculate_avg():
	"""Compute the weighted average and std of data_ratios across xvg files.

	Writes the results into the module globals data_ratios_avg and
	data_ratios_std. NOTE(review): reads module globals nb_rows,
	data_ratios, weights and args (all defined elsewhere); data_ratios is
	assumed to be nb_rows x nb_files with per-file weights — confirm.
	Uses scipy.stats.nanmean, which was removed in SciPy >= 0.17.
	"""
	global data_ratios_avg
	global data_ratios_std
	
	#remove nan values of the weights: zero a file's weight in every row
	#where that file's ratio is NaN, and track how many files remain.
	weights_nan = np.zeros((nb_rows, 1))	
	weights_nan_sq = np.zeros((nb_rows, 1))	
	nb_files = np.ones((nb_rows, 1)) * len(args.xvgfilenames)
	tmp_weights_nan = np.zeros((nb_rows, len(args.xvgfilenames)))
	for r in range(0, nb_rows):
		tmp_weights_nan[r,:] = weights
		for f_index in range(0, len(args.xvgfilenames)):
			if np.isnan(data_ratios[r,f_index]):
				tmp_weights_nan[r,f_index] = 0
				nb_files[r,0] -= 1
	weights_nan[:,0] = np.nansum(tmp_weights_nan, axis = 1)
	weights_nan_sq[:,0] = np.nansum(tmp_weights_nan**2, axis = 1)	
	#avoid division by zero for rows where all weights vanished
	weights_nan[weights_nan == 0] = 1
	
	#avg: weighted mean, rescaled so missing files do not bias the result
	data_ratios_avg = np.zeros((nb_rows,1))
	data_ratios_avg[:,0] =  scipy.stats.nanmean(data_ratios * weights * nb_files / weights_nan, axis = 1)

	#std: unbiased weighted standard deviation
	tmp_std = np.zeros((nb_rows, 1))
	tmp_std[:,0] = np.nansum(weights * (data_ratios - data_ratios_avg[:,0:1])**2, axis = 1)			
	tmp_div = np.copy((weights_nan)**2 - weights_nan_sq)
	tmp_div[tmp_div == 0] = 1
	data_ratios_std = np.sqrt(weights_nan / tmp_div * tmp_std)

	return
开发者ID:jhelie,项目名称:xvg_average_residues,代码行数:32,代码来源:xvg_average_residues_ratios.py


示例13: orientation_numpy

def orientation_numpy(normals, weights):
    """Estimate the dominant in-plane orientation of a field of surface normals.

    Returns axes from expmap.rot2axis for a rotation matrix aligned with the
    weighted mean orientation, recovered modulo 90 degrees via the quadruple
    angle trick. NOTE(review): expmap is defined elsewhere in the module;
    normals assumed to be (H, W, 3) given the rollaxis below — confirm.
    """

    # Project the normals against the plane
    dx, dy, dz = np.rollaxis(normals, 2)

    # Use the quadruple angle formula to push everything around the
    # circle 4 times faster, like doing mod(x,pi/2)
    qz = 4 * dz * dx * dx * dx - 4 * dz * dz * dz * dx
    qx = dx * dx * dx * dx - 6 * dx * dx * dz * dz + dz * dz * dz * dz

    # Build the weights using a threshold, finding the normals lying on
    # the XZ plane
    d = 0.3
    # Globals kept for inspection/debugging elsewhere in the module.
    global cx, qqx, qqz
    # np.max over a 2-tuple with axis=0 acts as an elementwise maximum here.
    cx = np.max((1.0 - dy * dy / (d * d), 0 * dy), 0)
    w = weights * cx

    qqx = np.nansum(w * qx) / w.sum()
    qqz = np.nansum(w * qz) / w.sum()
    # Divide by 4 to undo the quadruple-angle mapping.
    angle = np.arctan2(qqz, qqx) / 4

    # Unit vector at the recovered angle and its in-plane perpendicular.
    q0 = np.array([np.cos(angle), 0, np.sin(angle)])
    q0 /= np.sqrt(np.dot(q0, q0))
    q2 = np.cross(q0, np.array([0, 1, 0]))

    # Build an output matrix out of the components
    mat = np.vstack((q0, np.array([0, 1, 0]), q2))
    axes = expmap.rot2axis(mat)

    return axes
开发者ID:theY4Kman,项目名称:blockplayer,代码行数:30,代码来源:lattice.py


示例14: integrate

 def integrate(self, frequencies=None, 
               radius=2.7, nooffset=False,
               azel='az'):
     """
     Given a radius calculate beam integral inside the radius and
     also the total integral

     Returns a pandas DataFrame with one row per requested frequency and
     columns 'frequency', 'inner' (integral within `radius`) and 'all'
     (total integral). NOTE(review): Python 2 code (print statement).
     self.data layout assumed: column 0 (and 1 for radial mode) hold
     coordinates, then interleaved az/el data columns per frequency.
     """
     if frequencies is None:
         frequencies = self.cfg['synth']['freq']
     lisdic = []
     for i, freq in enumerate(frequencies):
         if freq in self.cfg['synth']['freq']:
             dic = {}
             dic['frequency'] = freq
             # Data column index: az/el pairs are interleaved per frequency.
             if azel in ('az', 'el'):
                 find = self.cfg['synth']['freq'].index(freq)*2 + 1
             else:
                 find = self.cfg['synth']['freq'].index(freq)*2 + 2
             if not nooffset:
                 # Remove the measurement offset in quadrature.
                 ydata = numpy.sqrt(self.data[:, find]**2 - self.offset**2)
             else:
                 ydata = self.data[:, find]
             if azel in ('az', 'el'):
                 xdata = self.data[:, 0]
             else:
                 # Radial coordinate, signed by the x component.
                 xdata = numpy.sqrt(self.data[:, 0]**2 + self.data[:, 1]**2)
                 ind = numpy.where(self.data[:, 0] < 0)
                 xdata[ind] = -xdata[ind]
             idx = numpy.where(numpy.abs(xdata) <= radius)
             dic['inner'] = numpy.nansum(ydata[idx])
             dic['all'] = numpy.nansum(ydata)
             lisdic.append(dic)
             print freq, dic['inner'], dic['all']
     return pd.DataFrame(lisdic)
开发者ID:gopastro,项目名称:pybeampattern,代码行数:34,代码来源:integrals.py


示例15: reportCreate

def reportCreate(data, paramDict):
    """Build a report dict with the residual norm of each model/observation
    dataset pair, plus the first pair's residual norm as `bestRes`.

    NOTE(review): Python 2 code — dict.keys() is assumed to return a
    sortable list (keys() followed by .sort()) and `end / 2` relies on
    integer division. `nanmean` is a module-level import defined elsewhere.
    """
    report = copy.deepcopy(paramDict)
    setKeys = data["DataSets"].keys()
    # Order all Mod first, then all Org
    setKeys.sort()
    bestRes = ""
    start = 0
    end = len(setKeys)
    middle = end / 2
    i = start
    # Pair the i-th modelled set (first half) with the i-th observed set
    # (second half) and score each pair.
    while i < end / 2:
        # Calculate Score
        modBs = np.array(data["DataSets"][setKeys[i]])
        obsBs = np.array(data["DataSets"][setKeys[middle]])
        # NOTE(review): modBsmean and SStot are computed but never used.
        modBsmean = nanmean(modBs)
        obsBsmean = nanmean(obsBs)
        obsBsMinModBs = obsBs - modBs
        obsBsMinMean = obsBs - obsBsmean
        SSres = np.nansum(obsBsMinModBs ** 2)
        SStot = np.nansum(obsBsMinMean ** 2)
        ResNorm = SSres ** 0.5
        # Keep the first pair's residual norm as the reference value.
        if i == 0:
            bestRes = copy.copy(ResNorm)
        report[(setKeys[i] + "_RN")] = ResNorm  # Norm of residuals
        i = i + 1
        middle = middle + 1
    return report, bestRes
开发者ID:mercergeoinfo,项目名称:LSA,代码行数:27,代码来源:LSASO.py


示例16: fuzzyKmeans

def fuzzyKmeans(samples,fixCenter=None,iter=5,fuzzParam=1.5):
    """Fuzzy c-means with exactly three clusters (low / middle / high).

    Centres start at min, mean (or `fixCenter`) and max of `samples`;
    membership degrees follow the standard fuzzy-c-means update with
    fuzzifier `fuzzParam`. Returns the list of three centres.

    NOTE(review): Python 2 only — map(None, ...) (identity map) is invalid
    on Python 3, and `mean` is a module-level import defined elsewhere.
    """

    #Not actually k means yet just 3 means

    if fixCenter is not None:
       dMeans = [min(samples)+0.01 , fixCenter ,max(samples)-0.01]
    else:
       dMeans = [min(samples)+0.01 , mean(samples) ,max(samples)-0.01]
    # Membership degree lists, one entry per sample (initialised to 0.0).
    begDeg = map(None,numpy.zeros(len(samples)))
    midDeg = map(None,numpy.zeros(len(samples)))
    endDeg = map(None,numpy.zeros(len(samples)))

    for j in range(iter):
       for k in range(len(samples)):
          # Inverse-square-distance memberships, fuzzified then normalised.
          pBeg = (1.0/(samples[k] - dMeans[2])**2)**(1.0/(fuzzParam-1))
          pMid = (1.0/(samples[k] - dMeans[1])**2)**(1.0/(fuzzParam-1))
          pEnd = (1.0/(samples[k] - dMeans[0])**2)**(1.0/(fuzzParam-1))
          nmlz = pBeg + pMid + pEnd
          begDeg[k] = pBeg/nmlz; midDeg[k] = pMid/nmlz; endDeg[k] = pEnd/nmlz
       #Update means 0 and 2, the other should stay at zero! (Change this for general purpose k-means)
       dMeans[0] = numpy.nansum((numpy.array(endDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(endDeg)**fuzzParam)
       if fixCenter is None:
          dMeans[1] = numpy.nansum((numpy.array(midDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(midDeg)**fuzzParam)
       dMeans[2] = numpy.nansum((numpy.array(begDeg)**fuzzParam)*numpy.array(samples))/numpy.nansum(numpy.array(begDeg)**fuzzParam)

    return dMeans
开发者ID:08s011003,项目名称:nupic,代码行数:26,代码来源:stats.py


示例17: bic

def bic(em_fit_result_dict, LL_all):
    """Compute the Bayesian Information Criterion score.

    Split it, associating each parameter group with the number of
    datapoints it really takes care of: three global parameters (alpha,
    beta, gamma) count against all finite datapoints, and three mixture
    proportions count per valid (nitems, trecall) condition. Only the
    lower triangle of LL_all (trecall <= nitems) contributes likelihood.
    """
    finite = np.isfinite(LL_all)

    # -2 * log-likelihood over the valid (lower-triangular) conditions.
    score = -2. * np.nansum(LL_all[np.tril_indices(LL_all.shape[0])])

    # alpha, beta and gamma over all finite datapoints.
    score += 3 * np.log(np.nansum(finite))

    # Three mixture-proportion parameters per valid condition cell.
    T_space = em_fit_result_dict['T_space']
    for nitems_i, nitems in enumerate(T_space):
        for trecall_i, trecall in enumerate(T_space):
            if trecall <= nitems:
                score += 3 * np.log(np.nansum(finite[nitems_i, trecall_i]))

    return score
开发者ID:Azhag,项目名称:Bayesian-visual-working-memory,代码行数:31,代码来源:em_circularmixture_parametrickappa_doublepowerlaw.py


示例18: autocorr_test

def autocorr_test(_xdata, _ydata):
    """Print autocorrelation diagnostics for the series (_xdata, _ydata).

    Reports lag-1..3 autocorrelation (pandas, NaN-tolerant), a Durbin-Watson
    statistic and the Ljung-Box lag-1 p-value. NOTE(review): Python 2 only
    (print statements); requires statsmodels.
    """
    import numpy as np
    import pandas as pd
    from statsmodels.stats.diagnostic import acorr_ljungbox
    from statsmodels.tsa.stattools import acf
    #all statst need regularly spaced, continuous time series - just y variable
    #Durbin-Watson statistics:
    # calculated correctly with missing data
    # but no significance level. Apparently critical values for DW are not implemented in any python library
    #ACF:
    # crashes on missing data
    # Ljung-Box:
    # crashes on missing data too
    _ydata=np.ma.masked_invalid(_ydata)
    #autocorrelation in residuals
    #this is acf function that does not allow nans
#    print "\nautocorrelation for first three lags:", acf(_ydata)[1:4]
    #this is from pandas, is nan agnostic
    pdf=pd.Series(_ydata, index=_xdata, copy=True)
    print "autocorrelation for first three lags:", [pdf.autocorr(i) for i in range(1,4)]
    #durbin-watson: ratio of summed squared successive differences to the
    #summed squared values (close to 2 when there is no autocorrelation)
    a=_ydata[:-1].astype('float')
    b=_ydata[1:].astype('float')
    _stat=np.nansum((b-a)**2)/np.nansum(_ydata**2)
    print "Durbin-Watson statistic (close to 2 if no autocorrelation):", _stat
    _stat, _pvalue=acorr_ljungbox(_ydata, lags=1, boxpierce=False)    
    print "Ljung-Box p-value on lag 1 autocorrelation:", _pvalue
    print ""


示例19: SWEmeltplot

def SWEmeltplot(data,beginyear,endyear):
    """Plot yearly mean SWE, melt and station count for SNOTEL stations.

    For each year in [beginyear, endyear], averages the summed SWE and melt
    over all stations reporting that year, then draws three stacked
    subplots (SWE, melt, station count). Returns (SWE, melt).

    NOTE(review): `data` assumed to be a list of station objects exposing a
    `monsum` list of monthly summaries with .year, .SWE and .melt — confirm
    with the extract script. Uses module-level np and plt.
    """
    SWE = np.zeros(endyear+1-beginyear)   
    melt = np.zeros(endyear+1-beginyear)   
    years= np.arange(beginyear, endyear+1)
    stationcount = []
    for k in range(len(SWE)):    
        count = 0
        for i in range(len(data)):
            for j in range(len(data[i].monsum)):
                if(data[i].monsum[j].year==years[k]):
                    count +=1
                    SWE[k]=np.nansum(data[i].monsum[j].SWE)+SWE[k]
                    melt[k]=np.nansum(data[i].monsum[j].melt)+melt[k]
        # NOTE(review): count can be 0 for years with no reporting station,
        # which raises ZeroDivisionError here.
        SWE[k] =SWE[k]/count #returns the mean SWE for all stations
        melt[k] =melt[k]/count #returns the mean melt for all stations
        stationcount.append(count)
    plt.figure()
    plt.subplot(3,1,1)
    plt.plot(years, SWE)
    plt.title('SWE')
    plt.subplot(3,1,2)    
    plt.plot(years, melt)
    plt.title('melt')
    plt.subplot(3,1,3)
    plt.bar(years, stationcount)
    return(SWE,melt)
开发者ID:tonychangmsu,项目名称:Python_Scripts,代码行数:26,代码来源:SNOTEL_extract03252013.py


示例20: compute_genetic_distance

def compute_genetic_distance(X, **kwargs):
    """Given genotype matrix X, returns pairwise genetic distance between individuals
    using the estimator described in Theorem 1.

    Args:
        X: n * p matrix of 0/1/2/nan, n is #individuals, p is #SNPs
    """
    n, p = X.shape
    is_missing = np.isnan(X)
    observed = ~is_missing

    # Allele-frequency estimate per SNP from observed entries only.
    per_snp_counts = np.sum(observed, axis=0)
    mu_hat = np.nansum(X, axis=0) / 2. / per_snp_counts     # p dimensional

    # Per-SNP eta0 estimate from the estimator in Theorem 1.
    eta0_hat = np.nansum(X**2 - X, axis=0) / 2. / per_snp_counts - mu_hat**2

    # Zero-fill missing entries so dot products only count observed pairs.
    X_half = X / 2.
    X_half[is_missing] = 0
    obs_float = np.array(observed, dtype=float)

    centered = X_half - mu_hat
    pair_counts = np.dot(obs_float, obs_float.T)
    gdm_squared = 2. * np.mean(eta0_hat) - 2. * np.dot(centered, centered.T) / pair_counts
    gdm_squared[np.diag_indices(n)] = 0.

    negatives = gdm_squared < 0
    if negatives.any():
        # shift all entries by the smallest amount that makes them non-negative
        gdm_squared += -np.min(gdm_squared[negatives])
        gdm_squared[np.diag_indices(n)] = 0.

    return np.sqrt(np.maximum(gdm_squared, 0.))
开发者ID:anand-bhaskar,项目名称:gap,代码行数:33,代码来源:localization.py



注:本文中的numpy.nansum函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python numpy.nanvar函数代码示例发布时间:2022-05-27
下一篇:
Python numpy.nanstd函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2213 极客世界.|Sitemap