Python scipy.mean Function Code Examples


This article collects typical usage examples of the Python function scipy.mean. If you have been wondering exactly what scipy.mean does, how to call it, or how others use it in practice, the curated code samples below should help.



Twenty code examples of the mean function are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code samples.
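
Before the examples, a minimal sketch of what scipy.mean computes, on made-up data. Note that scipy historically re-exported NumPy routines such as mean at its top level; these aliases are deprecated in modern SciPy and slated for removal, so numpy.mean is the drop-in equivalent of the scipy.mean calls shown throughout this page.

import numpy as np

data = [1.0, 2.0, 3.0, 4.0]
print(np.mean(data))                # 2.5 -- mean over all elements

arr = np.arange(6.0).reshape(2, 3)
print(np.mean(arr, axis=0))         # [1.5 2.5 3.5] -- column means
print(np.mean(arr, axis=1))         # [1. 4.] -- row means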

Example 1: PrintValues

import scipy
import scipy.stats

def PrintValues(outfile, values, options, prefix="", titles=None):

    if options.flat or options.aggregate_column:

        if options.add_header:
            if prefix: outfile.write("prefix\t")

            if titles: outfile.write("column\t")

            print("\t".join(("nval", "min", "max", "mean", "median",
                             "stddev", "sum", "q1", "q3")))

        for x in range(len(values)):

            vals = values[x]

            if len(vals) == 0:

                if options.output_empty:
                    if titles: outfile.write(titles[x] + "\t")
                    if prefix: outfile.write(prefix + "\t")

                    outfile.write("0" + "\tna" * 8 + "\n")

                continue

            if titles: outfile.write(titles[x] + "\t")
            if prefix: outfile.write(prefix + "\t")

            vals.sort()
            if len(vals) > 4:
                q1 = options.value_format % vals[len(vals) // 4]
                q3 = options.value_format % vals[len(vals) * 3 // 4]
            else:
                q1 = options.value_format % vals[0]
                q3 = options.value_format % vals[-1]

            outfile.write("\t".join(("%i" % len(vals),
                                     options.value_format % float(min(vals)),
                                     options.value_format % float(max(vals)),
                                     options.value_format % scipy.mean(vals),
                                     options.value_format % scipy.median(vals),
                                     options.value_format % scipy.std(vals),
                                     options.value_format % sum(vals),
                                     q1, q3,
                                     )) + "\n")

    else:

        if titles:
            print("category\t%s" % "\t".join(titles))

        print("count\t%s"  % "\t".join("%i" % len(v) for v in values))
        print("min\t%s"    % "\t".join(options.value_format % min(v) for v in values))
        print("max\t%s"    % "\t".join(options.value_format % max(v) for v in values))
        print("mean\t%s"   % "\t".join(options.value_format % scipy.mean(v) for v in values))
        print("median\t%s" % "\t".join(options.value_format % scipy.median(v) for v in values))
        print("stddev\t%s" % "\t".join(options.value_format % scipy.std(v) for v in values))
        print("sum\t%s"    % "\t".join(options.value_format % sum(v) for v in values))
        print("q1\t%s"     % "\t".join(options.value_format % scipy.stats.scoreatpercentile(v, per=25) for v in values))
        print("q3\t%s"     % "\t".join(options.value_format % scipy.stats.scoreatpercentile(v, per=75) for v in values))
Developer: siping, Project: cgat, Lines: 60, Source: data2stats.py
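
For orientation, here is a self-contained sketch that reproduces one row of the summary table above with NumPy and scipy.stats; the input vector is made up and the column order follows the header written by PrintValues.

import numpy as np
from scipy import stats

vals = np.array([4.0, 1.0, 3.0, 2.0, 5.0])
row = [len(vals), vals.min(), vals.max(), np.mean(vals), np.median(vals),
       np.std(vals), vals.sum(),
       stats.scoreatpercentile(vals, 25),   # q1
       stats.scoreatpercentile(vals, 75)]   # q3
print("\t".join(str(v) for v in row))       # nval, min, max, ..., q1, q3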


Example 2: makeinputh5

import scipy as sp

def makeinputh5(Iono, basedir):
    """This will make an h5 file for the IonoContainer that can be used as a
    starting point for the fitter. The ionocontainer will be averaged over the
    x and y dimensions of space to get an average value of the parameters for
    each altitude.
    Inputs
    Iono - An instance of the IonoContainer class that will be averaged over so it can
    be used for fitter starting points.
    basedir - A pathlib.Path for the directory that the file will be saved to.
    """
    # Get the parameters from the original data
    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist,idx = sp.unique(dataloc[:,2],return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])

    datalocsave = sp.column_stack((sp.zeros_like(zlist),sp.zeros_like(zlist),zlist))
    outdata = sp.zeros([len(zlist)]+siz)
    outvel = sp.zeros([len(zlist)]+vsiz)
    #  Do the averaging across space
    for izn,iz in enumerate(zlist):
        arr = sp.argwhere(idx==izn)
        outdata[izn] = sp.mean(Param_List[arr],axis=0)
        outvel[izn] = sp.mean(velocity[arr],axis=0)

    Ionoout = IonoContainer(datalocsave,outdata,times,Iono.Sensor_loc,ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species,velocity=outvel)
    Ionoout.saveh5(basedir/'startdata.h5')
Developer: jswoboda, Project: RadarDataSim, Lines: 30, Source: testdishmode.py
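
The averaging idiom here (and again in Example 4) is worth isolating: unique(..., return_inverse=True) assigns every sample to an altitude bin, and mean(..., axis=0) collapses each bin. A toy version with made-up data:

import numpy as np

z = np.array([100., 100., 200., 200., 200.])        # altitude of each sample
params = np.array([[1.], [3.], [2.], [4.], [6.]])   # one parameter per sample
zlist, idx = np.unique(z, return_inverse=True)
out = np.zeros((len(zlist),) + params.shape[1:])
for izn, iz in enumerate(zlist):
    out[izn] = np.mean(params[idx == izn], axis=0)  # average the samples in bin izn
print(zlist)   # [100. 200.]
print(out)     # [[2.] [4.]] -- per-altitude parameter averages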


Example 3: compactDistance

 def compactDistance(self, target, candidates):
     # compare the candidates to the target according to some measure
     targetarr = target.reshape((self.totalSize, 3))
     candidatesarr = candidates.reshape((candidates.shape[0], self.totalSize, 3))
     target_avg = scipy.mean(targetarr, axis=0)
     candidates_avg = scipy.mean(candidatesarr, axis=1)
     return scipy.sum((target_avg - candidates_avg)**2, axis=1)
Developer: KeithWM, Project: mosaic, Lines: 7, Source: photo_match_tinyimg2.py


Example 4: makeinputh5

def makeinputh5(Iono,basedir):
    basedir = Path(basedir).expanduser()

    Param_List = Iono.Param_List
    dataloc = Iono.Cart_Coords
    times = Iono.Time_Vector
    velocity = Iono.Velocity
    zlist,idx = sp.unique(dataloc[:,2],return_inverse=True)
    siz = list(Param_List.shape[1:])
    vsiz = list(velocity.shape[1:])

    datalocsave = sp.column_stack((sp.zeros_like(zlist),sp.zeros_like(zlist),zlist))
    outdata = sp.zeros([len(zlist)]+siz)
    outvel = sp.zeros([len(zlist)]+vsiz)

    for izn,iz in enumerate(zlist):
        arr = sp.argwhere(idx==izn)
        outdata[izn]=sp.mean(Param_List[arr],axis=0)
        outvel[izn]=sp.mean(velocity[arr],axis=0)

    Ionoout = IonoContainer(datalocsave,outdata,times,Iono.Sensor_loc,ver=0,
                            paramnames=Iono.Param_Names, species=Iono.Species,velocity=outvel)


    ofn = basedir/'startdata.h5'
    print('writing {}'.format(ofn))
    Ionoout.saveh5(str(ofn))
Developer: jswoboda, Project: RadarDataSim, Lines: 27, Source: barkertest.py


Example 5: signalToNoiseRatio

 def signalToNoiseRatio(self, xs):
     """ What is the one-sample signal-to-noise ratio. """         
     rxs = repmat(xs, self.ESamples, 1).T
     gs = self._df(rxs)
     g2s = mean(gs **2, axis=1)
     gs = mean(gs, axis=1)
     return gs**2/g2s
Developer: bitfort, Project: py-optim, Lines: 7, Source: stoch_1d.py
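
A numeric check of the quantity this method returns, with toy gradient samples standing in for the repmat/_df machinery: for samples with mean mu and standard deviation sigma, mean(gs)**2 / mean(gs**2) estimates mu**2 / (mu**2 + sigma**2).

import numpy as np

rng = np.random.default_rng(0)
gs = rng.normal(2.0, 1.0, size=(5, 100000))  # 5 points, many noisy gradient draws
g2s = np.mean(gs ** 2, axis=1)
g_mean = np.mean(gs, axis=1)
print(g_mean ** 2 / g2s)  # each entry close to 4/5 = 2.0**2 / (2.0**2 + 1.0**2)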


Example 6: plotmap

    def plotmap(self,fig,ax):
        """ This function will plot the map of Alaska. The data will be plotted
            over it and will use the basemap class to position everything.
            Input
                fig - The figure handle for the plots.
                ax - The axes handle that the map will be plotted over.
            Output
                m - This is the handle for the basemap object.
        """
        latlim2 = self.params['latbounds']
        lonlim2 = self.params['lonbounds']
        m = Basemap(projection='merc',lon_0=sp.mean(lonlim2),lat_0=sp.mean(latlim2),\
        lat_ts=sp.mean(latlim2),llcrnrlat=latlim2[0],urcrnrlat=latlim2[1],\
        llcrnrlon=lonlim2[0],urcrnrlon=lonlim2[1],\
        rsphere=6371200.,resolution='i',ax=ax)
        # draw coastlines, state and country boundaries, edge of map.
        #m.drawcoastlines()
    #    m.drawstates()
    #    m.drawcountries()
        m.readshapefile('st99_d00','states',drawbounds=True)

        merstep = sp.round_((lonlim2[1]-lonlim2[0])/5.)
        parstep = sp.round_((latlim2[1]-latlim2[0])/5.)
        meridians=sp.arange(lonlim2[0],lonlim2[1],merstep)
        parallels = sp.arange(latlim2[0],latlim2[1],parstep)
        m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
        m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
        plt.hold(True)
        return m
Developer: jswoboda, Project: MahaliPlotting, Lines: 29, Source: PlottingClass.py


Example 7: plot_optimal_tau_for_mean_uncertainty_reduction

def plot_optimal_tau_for_mean_uncertainty_reduction(
        results_for_exp, results_for_exp_inftau):
    """ Plot the optimal tau for the mean of uncertainty reduction.

    :param results_for_exp: The results of one experiment as 4-D array of the
        shape (metrics, z-values, tau-values, experimental repetitions).
    :type results_for_exp: 4-D array
    :param results_for_exp_inftau: The results of one experiment for `tau = inf` as
        3-D array of the shape (metrics, z-values, experimental repetitions).
    :type results_for_exp_inftau: 3-D array.
    """
    values = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1]))
    err = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1], 2, 1))
    mark = sp.empty((results_for_exp.shape[0], results_for_exp.shape[1]))
    for m, metric in enumerate(cfg['metrics']):
        for z in range(len(cfg['zs'])):
            r = sp.mean(results_for_exp[m, z], axis=1)
            mark[m, z] = r.max()
            values[m, z] = sp.mean(cfg['time_scales'][r == r.max()]).magnitude
            r = cfg['time_scales'][r > 0.8 * r.max()]
            err[m, z, 0] = values[m, z] - min(r).magnitude
            err[m, z, 1] = max(r).magnitude + values[m, z]
    plot_param_per_metric_and_z(values, err)
    plot_bool_indicator_per_metric_and_z(
        sp.mean(results_for_exp_inftau, axis=2) >= mark)
Developer: jgosmann, Project: spyke-metrics-extra, Lines: 25, Source: section3.2.1.py
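
A small sketch of the selection rule used above, with made-up results in place of the experiment arrays: average the uncertainty reduction over the repetition axis, report the mean of the time scales where it peaks, and use the scales reaching 80% of the peak as an error band.

import numpy as np

time_scales = np.array([0.5, 1.0, 2.0, 4.0, 8.0])
results = np.array([[0.2, 0.3], [0.5, 0.7], [0.9, 1.1], [0.8, 0.9], [0.4, 0.5]])
r = np.mean(results, axis=1)                # average over repetitions
best = np.mean(time_scales[r == r.max()])   # 2.0, the optimal tau
band = time_scales[r > 0.8 * r.max()]       # [2. 4.], within 80% of the peak
print(best, band.min(), band.max())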


Example 8: estimate_performance_xgboost

def estimate_performance_xgboost(X,labels,param, num_round, folds):
    '''
    Cross validation for XGBoost performance
    '''
    f=open("summary_bst_scan.txt","a")
    start = np.random.random_integers(1000) #time.time()
    # Cross validate
    kf = cv.KFold(labels.size, n_folds=folds, random_state=start)
    # Dictionary to store all the AMSs
    all_rmse = []
    for train_indices, test_indices in kf:
        X_train, X_test = X.loc[train_indices], X.loc[test_indices]
        y_train, y_test = labels[train_indices], labels[test_indices]
        xgmat = xgb.DMatrix(X_train, label=y_train)
        plst = param.items()#+[('eval_metric', '[email protected]')]

        watchlist = []#[(xgmat, 'train')]
        bst = xgb.train(plst, xgmat, num_round, watchlist)

        xgmat_test = xgb.DMatrix(X_test)
        y_out = bst.predict(xgmat_test)
        num=y_test.shape[0]
        y_test=np.reshape(y_test,num)
        rmse_score=rmse(y_out,y_test)
        print('rmse={}'.format(rmse_score))
        f.write('rmse={}'.format(rmse_score))
        f.write('\n')
        all_rmse.append(rmse_score)
    print ("------------------------------------------------------")
    print ("mean rmse ={} with std={}".format(sp.mean(all_rmse),sp.std(all_rmse)))
    f.write("mean rmse ={} with std={}".format(sp.mean(all_rmse),sp.std(all_rmse)))
    f.write('\n')   
    f.close()
Developer: wanglfjp, Project: kaggle, Lines: 33, Source: mode_GBM_frankWang.py


Example 9: execute

 def execute(self):
     self.power_mat, self.thermal_expectation = self.full_calculation()
     n_chan = self.power_mat.shape[1]
     n_freq = self.power_mat.shape[0]
     # Calculate the mean channel correlations at low frequencies.
     low_f_mat = sp.mean(self.power_mat[1:4 * n_chan + 1,:,:], 0).real
     # Factor it into principal components.
     e, v = linalg.eigh(low_f_mat)
     self.low_f_mode_values = e
     # Make sure the eigenvalues are sorted.
     if sp.any(sp.diff(e) < 0):
         raise RuntimeError("Eigenvalues not sorted.")
     self.low_f_modes = v
     # Now subtract out the noisiest channel modes and see what is left.
     n_modes_subtract = 10
     mode_subtracted_power_mat = sp.copy(self.power_mat.real)
     mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
     for ii in range(n_modes_subtract):
         mode = v[:,-ii]
         amp = sp.sum(mode[:,None] * mode_subtracted_power_mat, 1)
         amp = sp.sum(amp * mode, 1)
         to_subtract = amp[:,None,None] * mode[:,None] * mode
         mode_subtracted_power_mat -= to_subtract
         auto_power = mode_subtracted_power_mat.view()
         auto_power.shape = (n_freq, n_chan**2)
         auto_power = auto_power[:,::n_chan + 1]
         mode_subtracted_auto_power[ii,:] = sp.mean(auto_power, -1)
     self.subtracted_auto_power = mode_subtracted_auto_power
Developer: OMGitsHongyu, Project: analysis_IM, Lines: 28, Source: noise_power.py


Example 10: plot_pairwise_velocities_r

def plot_pairwise_velocities_r(case,color,all_radial_distances,all_radial_velocities):
    dr = 0.3 # Mpc/h
    rmin, rmax = sp.amin(all_radial_distances), sp.amax(all_radial_distances) 
    rrange = rmax-rmin 
    N = int(sp.ceil(rrange/dr))
    rs = sp.linspace(rmin,rmax,N)
    v12_of_r = [[] for index in range(N)]
    
    for r, v12 in zip(all_radial_distances, all_radial_velocities):
    
        index = int(sp.floor((r-rmin)/dr))
        v12_of_r[index].append(v12)
            
    
    sigma_12s = sp.zeros(N)
    v12_means = sp.zeros(N)
    for index in range(len(sigma_12s)):
        v12_of_r_index = sp.array(v12_of_r[index])
        print "number of counts in the", index,"th bin:", len(v12_of_r_index)
        sigma_12 = sp.sqrt(sp.mean(v12_of_r_index**2))
        v12_mean = -sp.mean(v12_of_r_index)
        sigma_12s[index] = sigma_12
        v12_means[index] = v12_mean
    
    
    plt.plot(rs,sigma_12s,color=color,label='$\sigma_{12}$')
    plt.plot(rs,v12_means,color=color,label='$|v_{12}|$')
    plt.xlabel('r [Mpc/h]')
    plt.ylabel('[km/s]')
    plt.xscale('log')
    plt.axis([0.5,100,0,600])
Developer: ioodderskov, Project: VelocityField, Lines: 31, Source: moments_of_pairwise_velocities.py


Example 11: plot_temporal_average

    def plot_temporal_average(self,
                              color='g',
                              plot_std=True,
                              t_start=None,
                              label=None,
                              **kargs):
        if 'ax' in kargs:
            ax = kargs['ax']
        else:
            from matplotlib import pyplot
            fig = pyplot.figure()
            ax = fig.add_subplot(1, 1, 1)

        allpixel = self.selectAndPreprocess(**kargs)

        m = mean(allpixel, axis=1)

        if t_start is None:
            t = self.t()
        else:
            t = self.t() - self.t()[0] + t_start

        ax.plot(t, m, color=color, linewidth=2, label=label)

        if plot_std:
            # standard deviation across pixels, for the +/- s band below
            s = std(allpixel, axis=1)
            ax.fill_between(t, m + s, m - s, color=color, alpha=.3)
Developer: AntoineValera, Project: SynaptiQs, Lines: 30, Source: imageserie.py


Example 12: printy

def printy(s):
    if ((s._num_updates * s.batch_size < 100
         and s._num_updates % (20 // s.batch_size) == 0)
        or s._num_updates % (100 // s.batch_size) == 0):
        print(s._num_updates * s.batch_size, end=' ')  # s.bestParameters,
        s.provider.nextSamples(4)
        print(mean(s.provider.currentLosses(s.bestParameters)))
Developer: bitfort, Project: py-optim, Lines: 7, Source: test_xor.py


Example 13: _read_sky_logfile

 def _read_sky_logfile(self):
     #TODO : expand to read errors, msgs etc
     # read in the whole sky log file, shouldn't be big
     f = open(self.skylogfile)
     lines = f.readlines()
     f.close()
     dust = [line.split()[1:] for line in lines if line.startswith('dtau_dust')]
     line = [line.split()[1:] for line in lines if line.startswith('dtau_line')]
     dust = _sp.array(dust, dtype='float')
     line = _sp.array(line, dtype='float')
     transitions = _sp.unique(dust[:,0])
     shells = _sp.unique(dust[:,1])
     dtau_dust = dict()
     dtau_line = dict()
     dtau_tot = dict()
     for t in transitions:
         d = []
         l = []
         for s in shells:
             d.append( _sp.mean([i[2] for i in dust if ((i[0]==t) * (i[1]==s))]) )
             l.append( _sp.mean([i[2] for i in line if ((i[0]==t) * (i[1]==s))]) )
         dtau_dust[t] = _sp.copy(d)
         dtau_line[t] = _sp.copy(l)
         dtau_tot[t] = _sp.array(d) + _sp.array(l)
     # create object to store in main class
     class Tau(object):pass
     Tau.dtau_dust = dtau_dust
     Tau.dtau_line = dtau_line
     Tau.dtau_tot = dtau_tot
     Tau.transitions = transitions
     Tau.shells = shells
     self.Tau = Tau
Developer: vilhelmp, Project: ratran_python, Lines: 32, Source: ratout.py


Example 14: pForest_vs_flann_20Trials

import scipy

def pForest_vs_flann_20Trials(numTrees=10):
    print("Comparing FLANN to Proximity Forest on 500 Random 2D Points")
    flann_scores = []
    pf_scores = []
    discrepancies = []
    for i in range(20):
        print("==============================================")
        print("TRIAL: %d" % (i + 1))
        print("==============================================")
        (nd, sum_flann, sum_pf) = pForest_vs_flann(numTrees=numTrees, verbose=False)
        flann_scores.append(sum_flann)
        pf_scores.append(sum_pf)
        discrepancies.append(nd)
        print("==============================================")
        print("Discrepancies: %d, Cost per Discrepancy: %3.2f" % (nd, (sum_flann - sum_pf) * 1.0 / nd))
        print("==============================================")

    print("==============================================")
    print("20 TRIAL SUMMARY")
    print("Average Discrepancies: %3.2f" % (1.0 * sum(discrepancies) / len(discrepancies)))
    flann_scores = scipy.array(flann_scores)
    pf_scores = scipy.array(pf_scores)
    avg_delta_score = (sum(flann_scores) - sum(pf_scores)) * 1.0 / len(discrepancies)
    print("Average Cost Per Discrepancy: %3.2f" % avg_delta_score)
    print("Average FLANN Distance: %3.2f, StdDev: %3.2f" % (scipy.mean(flann_scores), scipy.std(flann_scores)))
    print("Average Proximity Forest Distance: %3.2f, StdDev: %3.2f" % (scipy.mean(pf_scores), scipy.std(pf_scores)))
    print("==============================================")
    return (discrepancies, flann_scores, pf_scores)
Developer: Sciumo, Project: ProximityForest, Lines: 28, Source: ProximityTree_Demo.py


Example 15: test_psd_normalization

def test_psd_normalization():
    ''' This function tests the normalization of function psd. Mock data is
        one second of normal, mean zero, std = 2 data sampled at
        1kHz.  Since this is white noise, the white noise level of the PSD times
        the root of the bandwidth should give the rms amplitude of the
        data (in this case rt(2)).

        The normalization for a hanning window is also tested.  Windowing
        the data removes power from the time stream.  The data must be
        recalibrated in order to recover the best estimate of the white
        noise level.  For a hanning window the time stream must be multipled by
        root(8/3) before the PSD is taken.
        '''

    # make fake data, window, window and rescale
    x = sp.random.normal(0, 2, 10000)
    wrx = window(x, 'hanning', 1)
    ms_x = sp.mean(x ** 2)
    ms_wrx = sp.mean(np.array(wrx) ** 2)
    ratio = ms_x / ms_wrx
    print ('MSA of timestream = %.4f\t\nMSA of windowed timestream = %.4f\nratio = %.4f' % (ms_x, ms_wrx, ratio))
    # take PSDs
    x_psd = psd(x, 381.47)
    wrx_psd = psd(wrx, 381.47)
    pylab.subplot(2, 1, 1)
    pylab.title('Test psd normalization')
    pylab.xlabel('Sample')
    pylab.ylabel('Cnts')
    pylab.plot(x, 'bo', wrx, 'ro')
    pylab.subplot(2, 1, 2)
    pylab.title('PSD')
    pylab.xlabel('Frequency [Hz]')
    pylab.ylabel('Cnts/rtHz')
    pylab.loglog(x_psd[0], x_psd[1], 'b-', wrx_psd[0], wrx_psd[1], 'r-')
    pylab.show()
Developer: ranajoy-cosmo, Project: core-plus, Lines: 35, Source: fourier_analysis.py
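
A quick, library-independent check of the two normalization claims in the docstring, using made-up white noise: the root mean square of the raw time stream recovers its amplitude, and a Hanning window rescaled by sqrt(8/3) approximately preserves the mean square.

import numpy as np

x = np.random.normal(0, 2, 100000)
print(np.sqrt(np.mean(x ** 2)))                  # close to 2.0, the rms amplitude
w = np.hanning(len(x)) * np.sqrt(8.0 / 3.0)      # rescaled hanning window
print(np.mean((w * x) ** 2) / np.mean(x ** 2))   # close to 1.0 after rescaling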


Example 16: remove_baseline

    def remove_baseline(self, anchorx, window, lead=0):
        """
        Remove baseline wander by subtracting a cubic spline.
        anchorx is a vector of isoelectric points (usually qrs onset -20ms)
        window is width of window to use (in ms) for averaging the amplitude at anchors
        """
        ecg = self.data[:, lead]                    
        windowwidth = _ms_to_samples(window, self.samplingrate) // 2
        #Do we have enough points before first anchor to use it
        if anchorx[0] < windowwidth:
            anchorx = anchorx[1:]
        # subtract dc
        ecg -= scipy.mean(ecg[anchorx[:]]) 
        # amplitudes for anchors
        # window is zero, no averaging
        if windowwidth == 0:
            anchory = scipy.array([ecg[x] for x in anchorx])
        # or average around the anchor
        else:
            anchory = scipy.array([scipy.mean(ecg[x-windowwidth:x+windowwidth])
                      for x in anchorx])
        # x values for spline that we are going to calculate
        splinex = scipy.array(range(len(ecg)))
        # calculate cubic spline fit
        tck = scipy.interpolate.splrep(anchorx, anchory)
        spliney = scipy.interpolate.splev(splinex, tck)
        # subtract the spline
        ecg -= spliney

        self.data[:, lead] = ecg

        return ecg
Developer: Basildcruz, Project: ecgtk, Lines: 32, Source: ecgtk.py
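
A self-contained sketch of the same baseline-removal idea on a made-up signal (the sampling rate, anchor positions, and window width are all invented here): average the signal in a small window around each anchor, fit a cubic spline through those anchors, and subtract it.

import numpy as np
import scipy.interpolate

fs = 500                                        # assumed sampling rate in Hz
t = np.arange(5 * fs) / fs
ecg = np.sin(2 * np.pi * t) + 0.5 * t           # toy signal plus baseline drift
anchorx = np.arange(fs // 2, len(ecg) - fs // 2 + 1, fs)  # one anchor per second
w = 10                                          # +/- 10 samples around each anchor
anchory = np.array([np.mean(ecg[x - w:x + w]) for x in anchorx])
tck = scipy.interpolate.splrep(anchorx, anchory)
baseline = scipy.interpolate.splev(np.arange(len(ecg)), tck)
detrended = ecg - baseline                      # drift removed, oscillation kept
print(np.ptp(detrended) < np.ptp(ecg))          # True: range shrinks after detrending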


Example 17: computeOpenMaxProbability

def computeOpenMaxProbability(openmax_fc8, openmax_score_u):
    """ Convert the scores in probability value using openmax
    
    Input:
    ---------------
    openmax_fc8 : modified FC8 layer from Weibull based computation
    openmax_score_u : degree

    Output:
    ---------------
    modified_scores : probability values modified using OpenMax framework,
    by incorporating the degree of uncertainty/openness for a given class
    
    """
    prob_scores, prob_unknowns = [], []
    for channel in range(NCHANNELS):
        channel_scores, channel_unknowns = [], []
        for category in range(NCLASSES):
            channel_scores += [sp.exp(openmax_fc8[channel, category])]
                    
        total_denominator = sp.sum(sp.exp(openmax_fc8[channel, :])) + sp.exp(sp.sum(openmax_score_u[channel, :]))
        prob_scores += [channel_scores/total_denominator ]
        prob_unknowns += [sp.exp(sp.sum(openmax_score_u[channel, :]))/total_denominator]
        
    prob_scores = sp.asarray(prob_scores)
    prob_unknowns = sp.asarray(prob_unknowns)

    scores = sp.mean(prob_scores, axis = 0)
    unknowns = sp.mean(prob_unknowns, axis=0)
    modified_scores =  scores.tolist() + [unknowns]
    assert len(modified_scores) == 1001
    return modified_scores
Developer: abhijitbendale, Project: OSDN, Lines: 32, Source: compute_openmax.py


Example 18: mean_and_std_from_binned_report

def mean_and_std_from_binned_report(br_json, antibody_type_idx):
    Ab_mean_results = [0] # needs extra zero since there are n_ages + 1 bins in demographics layer
    Ab_std_results  = [0]

    age_bins = br_json['Header']['Subchannel_Metadata']['NumBinsPerAxis'][0]
    for age_idx in range(0,age_bins):
        Ab        = br_json["Channels"]["Sum " + br_channel_titles[antibody_type_idx] + " Variant Fractions"]["Data"][age_idx][-365:]
        ss_Ab     = br_json["Channels"]["Sum of Squared " + br_channel_titles[antibody_type_idx] + " Variant Fractions"]["Data"][age_idx][-365:]
        statpop   = br_json["Channels"]["Population"]["Data"][age_idx][-365:]

        mean_Ab = []
        std_Ab  = []
        for val,ss,pop in zip(Ab,ss_Ab,statpop):
            if pop > 0:
                mean = val/pop
                variance = ss/pop - mean**2
            else:
                mean = 0
                variance = 0
            mean_Ab.append(mean)
            if variance < 0:
                std_Ab.append(0)
            else:
                std_Ab.append(variance**0.5)

        #print(scipy.mean(mean_Ab), scipy.mean(std_Ab))
        Ab_mean_results.append(scipy.mean(mean_Ab))
        Ab_std_results.append(scipy.mean(std_Ab))

    return (Ab_mean_results, Ab_std_results)
Developer: Bridenbecker, Project: EMOD, Lines: 30, Source: createimmunelayer.py
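
The inner loop above recovers mean and variance from per-bin accumulators via the identity Var[x] = E[x**2] - E[x]**2. A worked check with made-up numbers:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
pop, total, total_sq = len(x), x.sum(), (x ** 2).sum()
mean = total / pop                       # 2.5
variance = total_sq / pop - mean ** 2    # 30/4 - 6.25 = 1.25
print(mean, variance, variance ** 0.5)   # 2.5 1.25 1.118...
print(np.var(x), np.std(x))              # the same values computed directly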


Example 19: Corr

def Corr(GDP,I,C):
	m = sp.shape(GDP)[1]
	GDPIcorr = []
	GDPCcorr = []
	for i in range(0, m):
		gdp = GDP[:,i]
		inv = I[:,i]
		con = C[:,i]
		#Correlation between output and investment for each series
		gdpi = sp.corrcoef(gdp,inv)
		GDPIcorr.append(gdpi[0,1])
		#Correlation between output and consumption for each series
		gdpc = sp.corrcoef(gdp,con)
		GDPCcorr.append(gdpc[0,1])
	#Mean and standard deviation of correlation between GDP and
	#Investment and Consumption over total number of simulations
	GDPICORR = sp.array(GDPIcorr)
	gdpimean = sp.mean(GDPICORR)
	gdpistdev = sp.std(GDPICORR)
	GDPCCORR = sp.array(GDPCcorr)
	gdpcmean = sp.mean(GDPCCORR)
	gdpcstdev = sp.std(GDPCCORR)
	sp.savetxt('GDPICORR.csv',GDPICORR)
	sp.savetxt('GDPCCORR.csv',GDPCCORR)
	print "The mean and standard deviation between GDP and"
	print "Investment and GDP and Consumption followed by"
	print "The lists of each correlation coefficient for"
	print "each series are saved in csv files"
	return gdpimean, gdpistdev, gdpcmean, gdpcstdev
Developer: snowdj, Project: byu_macro_boot_camp, Lines: 29, Source: DSGE_simulation-Sara_Final.py


Example 20: _compute_cycle_equal_weighted_power

    def _compute_cycle_equal_weighted_power(self, T_F, T_C) :
        """ Computes cycle burnups and peaking factors assuming 
            equal batch powers.
        """
        N = len(T_F)
        rho_L = self.p['leakage_penalty']       
        
        # shorten function call by eliminating p and boron dependence
        rho = lambda b, t_f, t_c: self.rho(self.p, b, t_f, t_c, 0.0)   
        
        # equal power sharing implies equal temperatures--using the average
        T_Fa, T_Ca = sp.mean(T_F), sp.mean(T_C)
        
        # linearize the reactivity, i.e., rho ~ rho_0 + AB.  (this may fail
        # if poison is still dominant at 10 GWd/MTU)
        B_a, B_b = 10.0, 20.0
        rho_a, rho_b = rho(B_a, T_Fa, T_Ca), rho(B_b, T_Fa, T_Ca)
        A = (rho_b-rho_a)/(B_b-B_a)
        rho_0 = rho_a - A*B_a
        
        # then B_s and B_c are *approximately*
        B_s = (rho_L - rho_0)/A
        B_c = 2.0*B_s/(len(T_F)+1)
        
        # solve f(B_c) = mean(rho)-rho_L = 0 via scipy's root finder
        f = lambda B : sp.mean(rho(B*sp.arange(1, N+1), T_Fa, T_Ca)) - rho_L
        B_c = root(f, B_c).x[0]

        # compute batch-wise, EOC burnups and associated peaking factors
        B = B_c * sp.arange(1, N+1)
        ppf = sp.ones(N)
        return B, ppf
Developer: corps-g, Project: nrm, Lines: 32, Source: nrm.py
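
A minimal sketch of the linearize-then-root-solve step in this method, with a made-up quadratic reactivity curve standing in for self.rho and invented values for N and the leakage penalty:

import numpy as np
from scipy.optimize import root

N, rho_L = 3, -0.02
rho = lambda B: 0.25 - 0.012 * B - 1e-4 * B ** 2   # toy reactivity vs burnup

# linearize rho ~ rho_0 + A*B from two sample burnups
B_a, B_b = 10.0, 20.0
A = (rho(B_b) - rho(B_a)) / (B_b - B_a)
rho_0 = rho(B_a) - A * B_a
B_c0 = 2.0 * ((rho_L - rho_0) / A) / (N + 1)       # linearized initial guess

# refine: cycle burnup where the mean EOC reactivity hits the leakage penalty
f = lambda B_c: np.mean(rho(B_c * np.arange(1, N + 1))) - rho_L
B_c = root(f, B_c0).x[0]
print(B_c, B_c * np.arange(1, N + 1))              # cycle and batch EOC burnups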



Note: The scipy.mean function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult the corresponding project's License before redistributing or using the code. Please do not repost without permission.

