
Python numpy.std Function Code Examples


This article collects typical usage examples of the numpy.std function in Python. If you are wondering what numpy.std does, how to call it, or what it looks like in real code, the hand-picked examples below should help.



Twenty code examples of the std function are shown below, ordered by popularity.
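
Before the project excerpts, here is a minimal sketch of numpy.std itself (output values in the comments are approximate):

import numpy as np

a = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])

print(np.std(a))          # std over all elements: ~1.708
print(np.std(a, axis=0))  # per-column std: [1.5 1.5 1.5]
print(np.std(a, ddof=1))  # sample std (N-1 denominator): ~1.871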

Example 1: _get_mean_std_from_runs

def _get_mean_std_from_runs(results_for_runs, decider):
    '''
    For a collection of runs (usually from HPs), return the mean and std of the decider error and the test error.
    The decider error is usually the validation or train error (from which we then also get the average test error).

    results_for_runs = array with all results for runs (each run usually corresponds to a specific HP) for a specific model,
        e.g. [result1, ..., result200]
    decider = namespace holding the appropriate function handler/pointer named get_errors_from (e.g. get_errors_based_on_train_error),
        so decider must be able to call decider.get_errors_from(run)
    '''
    decider_errors_for_runs = []
    test_errors_for_runs = []
    for current_result in results_for_runs:
        decider_error, train_error, cv_error, test_error = decider.get_errors_from(current_result)
        print('decider_error ', decider_error)
        decider_errors_for_runs.append(decider_error)
        test_errors_for_runs.append(test_error)
    decider_mean, decider_std = np.mean(decider_errors_for_runs), np.std(decider_errors_for_runs)
    test_mean, test_std = np.mean(test_errors_for_runs), np.std(test_errors_for_runs)
    return decider_mean, decider_std, test_mean, test_std
Developer: brando90 | Project: hbf_tensorflow_code | Lines: 28 | Source: extract_results_lib2.py
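
A usage sketch for Example 1: the decider below is a stand-in built with types.SimpleNamespace, and the run tuples are made-up numbers, purely to illustrate the calling convention described in the docstring.

import numpy as np
from types import SimpleNamespace

# Hypothetical runs: each yields (decider_error, train_error, cv_error, test_error).
runs = [(0.12, 0.10, 0.12, 0.15),
        (0.09, 0.08, 0.09, 0.11),
        (0.20, 0.18, 0.20, 0.22)]
decider = SimpleNamespace(get_errors_from=lambda run: run)  # stand-in decider

decider_mean, decider_std, test_mean, test_std = _get_mean_std_from_runs(runs, decider)
print(decider_mean, decider_std, test_mean, test_std)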


Example 2: __init__

 def __init__(self, fndark, nblocksize):
     if os.path.isfile(fndark + '-dark.npz'):
         npzfile = np.load(fndark + '-dark.npz')
         self.dmean = npzfile['dmean']
         self.dstd = npzfile['dstd']
         self.dbpm = npzfile['dbpm']
     else:
         dark = Binary(fndark)
         nframes = dark.nframes; my = dark.my; mx = dark.mx
         nblocks = nframes // nblocksize

         bmed = np.zeros((nblocks, my, mx))
         bstd = np.zeros((nblocks, my, mx))
         for iblock in range(nblocks):
             t0 = time.perf_counter()  # time.clock() was removed in Python 3.8
             a = dark.data[iblock * nblocksize:(iblock + 1) * nblocksize]
             a, idx = dropbadframes(a)
             print('- read block, dropped bad, subtracted dark in ' + str(time.perf_counter() - t0) + 's')
             nfb = a.shape[0]
             bmed[iblock, :, :] = np.median(a, axis=0)
             bstd[iblock, :, :] = np.std(a, axis=0)
         self.dmean = np.mean(bmed, axis=0)
         self.dstd = np.sqrt(np.sum(bstd ** 2, axis=0))
         # flag good pixels: std within 5 sigma of the (then bad-pixel-masked) median std
         self.dbpm = self.dstd < (np.median(self.dstd) + 5 * np.std(self.dstd))
         self.dbpm = self.dstd < (np.median(self.dstd * self.dbpm) + 5 * np.std(self.dstd * self.dbpm))

         np.savez(fndark + '-dark', dmean=self.dmean, dstd=self.dstd, dbpm=self.dbpm)
         del dark
Developer: perhansson | Project: daq | Lines: 28 | Source: epix.py


Example 3: testNormalizeLike

    def testNormalizeLike(self):
        a = np.empty((10, 3))
        a[:, 0] = np.random.random(10)
        a[:, 1] = np.random.random(10)
        a[:, 2] = np.random.random(10)

        b = np.empty((10, 3))
        b[:, 0] = np.random.random(10)
        b[:, 1] = np.random.random(10)
        b[:, 2] = np.random.random(10)
        b = b * 2

        c = normalizeArrayLike(b, a)

        # Should be normalized like a
        mean = []
        std = []
        mean.append(np.mean(a[:, 0]))
        mean.append(np.mean(a[:, 1]))
        mean.append(np.mean(a[:, 2]))
        std.append(np.std(a[:, 0]))
        std.append(np.std(a[:, 1]))
        std.append(np.std(a[:, 2]))

        # Check all values
        for col in range(b.shape[1]):
            for bval, cval in zip(b[:, col].flat, c[:, col].flat):
                print(cval, (bval - mean[col]) / std[col])
                print(cval, bval)
                assert cval == (bval - mean[col]) / std[col]
        print("TestNormalizeLike success")
Developer: spacecowboy | Project: aNeuralN | Lines: 31 | Source: util_tests.py


Example 4: _ols

 def _ols(self, x, y):
     lr = LinearRegression()
     coef_xy = lr.fit(y=y.reshape(-1, 1), X=x.reshape(-1, 1)).coef_
     coef_yx = lr.fit(y=x.reshape(-1, 1), X=y.reshape(-1, 1)).coef_
     r_xy = y - coef_xy * x
     r_yx = x - coef_yx * y
     return r_xy / np.std(r_xy), r_yx / np.std(r_yx)
Developer: AGAR038 | Project: LiNGAM | Lines: 7 | Source: direct_lingam.py


Example 5: meanclip3

def meanclip3(xx, yy, slope, clipsig=3.0, maxiter=5, converge_num=0.1, verbose=0):
    from numpy import array, polyfit
    import numpy
    xx = array(xx)
    yy = array(yy)
    xx0 = array(xx[:])
    yy0 = array(yy[:])
    ct = len(yy)
    niter = 0
    c1 = 1.0
    c2 = 0.0
    while (c1 >= c2) and (niter < maxiter):
        lastct = ct
        pol = polyfit(xx0, yy0, 1, full=True)
        mean0 = pol[0][1]
        slope = pol[0][0]
        sig = numpy.std(yy0 - mean0 - slope * xx0)
        wsm = numpy.where(abs(yy0 - xx0 * slope) < mean0 + clipsig * sig)
        ct = len(wsm[0])
        if ct > 0:
            xx0 = xx0[wsm]
            yy0 = yy0[wsm]
        c1 = abs(ct - lastct)
        c2 = converge_num * lastct
        niter += 1
    # end of while loop
    pol = polyfit(xx0, yy0, 1, full=True)
    mean0 = pol[0][1]
    slope = pol[0][0]
    sig = numpy.std(yy0 - mean0 - slope * xx0)
    return mean0, sig, slope, yy0, xx0
Developer: rkirkpatrick | Project: lcogtsnpipe | Lines: 30 | Source: lscabsphotdef_old.py


Example 6: average_form_factors

def average_form_factors(qz_lists, F_lists):
    """Average multiple sets of form factors. Need at least two 
    input data sets.
    
    qz_lists : list of lists
    F_lists : list of lists
    
    Each list must be in an ascending order, which is the default format
    in NFIT frm.dat.
    """ 
    if len(qz_lists) < 2:
        raise TypeError('Need more than one form factor set for averaging')
    if len(qz_lists) != len(F_lists):
        raise TypeError('Number of qz and F data sets must agree')
    for qzvalues, Fvalues in zip(qz_lists, F_lists):
        if len(qzvalues) != len(Fvalues):
            raise TypeError('Length of each qz and F data set must agree') 
   
    qz_bin, F_bin = create_binned_data(qz_lists, F_lists)
    normalize_to_each_other(F_bin)
    qz_bin = np.array(qz_bin)
    F_bin = np.array(F_bin)
    avg_qz = np.mean(qz_bin, axis=1)
    err_qz = np.std(qz_bin, axis=1, ddof=1, dtype=np.float64)
    avg_F = np.mean(F_bin, axis=1)    
    err_F = np.std(F_bin, axis=1, ddof=1, dtype=np.float64)   
         
    return avg_qz, err_qz, avg_F, err_F
Developer: kakabori | Project: NFIT_to_SDP | Lines: 28 | Source: main.py
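
Example 6 passes ddof=1 to np.std, which gives the sample standard deviation (an N-1 denominator), the appropriate choice when estimating error bars from a small number of data sets. A minimal illustration of the difference:

import numpy as np

x = np.array([2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0])
print(np.std(x))          # population std (divides by N): 2.0
print(np.std(x, ddof=1))  # sample std (divides by N-1): ~2.138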


Example 7: test_Moster13SmHm_behavior

def test_Moster13SmHm_behavior():
	"""
	"""
	default_model = Moster13SmHm()
	mstar1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	ratio1 = mstar1/3.4275e10
	np.testing.assert_array_almost_equal(ratio1, 1.0, decimal=3)

	default_model.param_dict['n10'] *= 1.1
	mstar2 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	assert mstar2 > mstar1

	default_model.param_dict['n11'] *= 1.1
	mstar3 = default_model.mean_stellar_mass(prim_haloprop = 1.e12)
	assert mstar3 == mstar2

	mstar4_z1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12, redshift=1)
	default_model.param_dict['n11'] *= 1.1
	mstar5_z1 = default_model.mean_stellar_mass(prim_haloprop = 1.e12, redshift=1)
	assert mstar5_z1 != mstar4_z1

	mstar_realization1 = default_model.mc_stellar_mass(prim_haloprop = np.ones(int(1e4))*1e12, seed=43)  # np.ones requires an integer shape
	mstar_realization2 = default_model.mc_stellar_mass(prim_haloprop = np.ones(int(1e4))*1e12, seed=43)
	mstar_realization3 = default_model.mc_stellar_mass(prim_haloprop = np.ones(int(1e4))*1e12, seed=44)
	assert np.array_equal(mstar_realization1, mstar_realization2)
	assert not np.array_equal(mstar_realization1, mstar_realization3)

	measured_scatter1 = np.std(np.log10(mstar_realization1))
	model_scatter = default_model.param_dict['scatter_model_param1']
	np.testing.assert_allclose(measured_scatter1, model_scatter, rtol=1e-3)

	default_model.param_dict['scatter_model_param1'] = 0.3
	mstar_realization4 = default_model.mc_stellar_mass(prim_haloprop = np.ones(int(1e4))*1e12, seed=43)
	measured_scatter4 = np.std(np.log10(mstar_realization4))
	np.testing.assert_allclose(measured_scatter4, 0.3, rtol=1e-3)
Developer: bsipocz | Project: halotools | Lines: 35 | Source: test_smhm_components.py


Example 8: main

def main():
    train = pd.read_csv('train.csv', index_col=0)  # DataFrame.from_csv was removed from pandas
    places_index = train['place_id'].values

    places_loc_sqr_wei = []
    for i, place_id in enumerate(train['place_id'].unique()):
        if not i % 100:
            print(i)
        place_df = train.iloc[places_index == place_id]
        place_weights_acc_sqred = 1 / (place_df['accuracy'].values ** 2)

        places_loc_sqr_wei.append([place_id,
                                   np.average(place_df['x'].values, weights=place_weights_acc_sqred),
                                   np.std(place_df['x'].values),
                                   np.average(place_df['y'].values, weights=place_weights_acc_sqred),
                                   np.std(place_df['y'].values),
                                   np.average(np.log(place_df['accuracy'].values)),
                                   np.std(np.log(place_df['accuracy'].values)),
                                   place_df.shape[0]])

        # print(places_loc_sqr_wei[-1])
        # plt.hist2d(place_df['x'].values, place_df['y'].values, bins=100)
        # plt.show()
        plt.hist(np.log(place_df['accuracy'].values), bins=20)
        plt.show()
    places_loc_sqr_wei = np.array(places_loc_sqr_wei)
    column_names = ['x_mean', 'x_sd', 'y_mean', 'y_sd', 'accuracy_mean', 'accuracy_sd', 'n_persons']
    places_loc_sqr_wei = pd.DataFrame(data=places_loc_sqr_wei[:, 1:], index=places_loc_sqr_wei[:, 0],
                                      columns=column_names)

    now = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
    places_loc_sqr_wei.to_csv('places_loc_sqr_weights_%s.csv' % now)
Developer: yairbeer | Project: kaggle_fb5 | Lines: 32 | Source: places_calculation_v2.py


Example 9: mcnoise

def mcnoise(data, noise_std, n, noise_scaling=1.):
    """
    Parameters
    ----------
    data : ndarray
        Array of data.
    noise_std : float
        Standard deviation of the noise
    n : int
        Number of repetition
    noise_scaling: float
        Scaling factor for noise

    Returns
    -------
    variance, variance error, skewness, skewness error, kurtosis, kurtosis error

    """
    noise_arr = np.random.normal(0, noise_std, (n, data.size)) * noise_scaling
    var_sample = np.var(data + noise_arr, axis=1)
    skew_sample = skew(data + noise_arr, axis=1)
    kurt_sample = kurtosis(data + noise_arr, axis=1)
    var_val = np.mean(var_sample)
    skew_val = np.mean(skew_sample)
    kurt_val = np.mean(kurt_sample)
    var_err = np.std(var_sample)
    skew_err = np.std(skew_sample)
    kurt_err = np.std(kurt_sample)
    return var_val, var_err, skew_val, skew_err, kurt_val, kurt_err
Developer: piyanatk | Project: sim | Lines: 29 | Source: noise.py
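
A usage sketch for Example 9, assuming the mcnoise definition above is in scope; the input data here are synthetic:

import numpy as np
from scipy.stats import skew, kurtosis  # mcnoise relies on these

data = np.random.normal(0.0, 1.0, 1024)  # synthetic signal
var_val, var_err, skew_val, skew_err, kurt_val, kurt_err = mcnoise(data, noise_std=0.5, n=200)
print('variance: %.3f +- %.3f' % (var_val, var_err))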


Example 10: test_stats

def test_stats(x):
	coords_vals = year_sample_dict_data[x]
		
	x_nodes = []	
	y_nodes = []
	z_nodes = []
	values_list = []
	for item in coords_vals:
		x_nodes.append(item[0])
		y_nodes.append(item[1])
		z_nodes.append(item[2])
		values_list.append(coords_vals[item])
	xs = x_nodes
	ys = y_nodes
	zs = z_nodes
	values_list = np.array(values_list)
	all_data = year_stack[x, :time_len, :lat_end, :lon_end]
	### New and improved and faster!!!
	annual_mean = np.mean(all_data)
	sample_mean = np.mean(values_list)
	annual_stdev = np.std(all_data)
	sample_stdev = np.std(values_list)
	annual_max = np.max(all_data)
	annual_min = np.min(all_data)
	sample_max = np.max(values_list)  # was np.max(all_data), which made sample_range always equal annual_range
	sample_min = np.min(values_list)
	annual_range = np.abs(annual_max - annual_min)
	sample_range = np.abs(sample_max - sample_min)
	fitness = np.abs(annual_mean-sample_mean) + np.abs(annual_stdev - sample_stdev) + np.abs(annual_range - sample_range)
	return fitness, annual_mean, sample_mean, annual_stdev, sample_stdev 
Developer: nicholaschris | Project: masters_thesis | Lines: 30 | Source: sample_annual_means.py


Example 11: rectif

def rectif(z_in, contrast=contrast, method=method, verbose=False):
    """
    Transforms an image (can be 1, 2 or 3D) with normal histogram into
    a 0.5 centered image of determined contrast
    method is either 'Michelson' or 'Energy'

    Phase randomization takes any image and turns it into Gaussian-distributed
    noise of the same power (or, equivalently, variance).
    # See: Peter J. Bex J. Opt. Soc. Am. A/Vol. 19, No. 6/June 2002 Spatial
    frequency, phase, and the contrast of natural images
    """
    z = z_in.copy()
    # Final rectification
    if verbose:
        print('Before Rectification of the frames')
        print( 'Mean=', np.mean(z[:]), ', std=', np.std(z[:]), ', Min=', np.min(z[:]), ', Max=', np.max(z[:]), ' Abs(Max)=', np.max(np.abs(z[:])))

    z -= np.mean(z[:]) # this should be true *on average* in MotionClouds

    if (method == 'Michelson'):
        z = (.5* z/np.max(np.abs(z[:]))* contrast + .5)
    else:
        z = (.5* z/np.std(z[:])  * contrast + .5)

    if verbose:
        print('After Rectification of the frames')
        print('Mean=', np.mean(z[:]), ', std=', np.std(z[:]), ', Min=', np.min(z[:]), ', Max=', np.max(z[:]))
        print('percentage pixels clipped=', np.sum(np.abs(z[:])>1.)*100/z.size)
    return z
Developer: egorananyev | Project: mc | Lines: 29 | Source: MotionClouds.py
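
A usage sketch for Example 11, passing contrast and method explicitly instead of relying on the module-level defaults the signature refers to:

import numpy as np

z = np.random.randn(32, 32)                        # Gaussian-distributed input
z_m = rectif(z, contrast=1.0, method='Michelson')  # rescales so values stay within [0, 1]
z_e = rectif(z, contrast=0.3, method='Energy')     # rescales so std becomes 0.5 * contrast
print(z_m.min(), z_m.max(), np.std(z_e))           # std(z_e) is ~0.15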


Example 12: find_velocity

def find_velocity(times, frames, maxshift=10):
	frame_times = np.array(times)
	nframes = len(frames)
	last_idx = np.argmax(frame_times)
	velocities = []
	npairs = nframes*(nframes-1)/2
	denom = 0
	for i in range(nframes):
		for j in range(i+1, nframes):
			dt = frame_times[i] - frame_times[j]
			offset = findshift(frames[i], frames[j], maxshift)
			if abs(max(offset)) > maxshift:
				continue
			denom += 1
			print (i, j, offset, dt)
			velocity = -offset/dt
			velocities.append(velocity)

	denom = max(1, denom-1)  # min() would divide by zero when only one pair survives; max() is the likely intent
	velocities = np.array(velocities)
	(vx1, vy1) = velocities[:,0].mean(), velocities[:,1].mean()
	vx = np.mean(velocities[:,0])
	vy = np.mean(velocities[:,1])
	sx = np.std(velocities[:,0])/denom + 0.2*abs(vx) + 5e-4
	sy = np.std(velocities[:,1])/denom + 0.2*abs(vy) + 5e-4
	return (vx, vy, sx, sy)
Developer: ecgeil | Project: radar | Lines: 27 | Source: uniform.py


Example 13: normalize

    def normalize( self, verbose=False):
        #list_of_points, point_count, stroke_count = self.getListOfXYPoints( self )
        #coords = np.array( list_of_points ).reshape( point_count, 2 )
        coords = self.listCoordinates()
        point_count,ccrd = coords.shape
        stroke_count = len(self.strokes)
        mean = np.mean(coords, 0)
        sdev = np.std(coords, 0)
        coords = coords - mean
        
        if sdev[0] != 0 and sdev[1] != 0:
            coords = coords * (1 / sdev)  # reindented with spaces; the original mixed tabs and spaces (a TabError in Python 3)

        new_sketch = self.constructNormalizedSketch(coords, point_count, stroke_count )
        
        if verbose:
            print(mean, sdev)
            print(np.std(coords, 0))
            print(np.mean(coords, 0))
            for i in range(0, point_count):
                print( coords[i, 0], coords[i, 1] )
            
            plt.figure(1)            
            allpts = new_sketch.listCoordinates()
            plt.plot(allpts[:,0],allpts[:,1])
            plt.xlabel('x')
            plt.ylabel('y')
            plt.title('New Sketch with Normalized Points')
            new_sketch.printContents()
        
        return new_sketch
Developer: Kurmich | Project: sketchfe | Lines: 31 | Source: Sketch.py


Example 14: gaussian_kernel

    def gaussian_kernel(self,xvalues,yvalues,r200,normalization=100,scale=10,xres=200,yres=220,xmax=6.0,ymax=5000.0,adj=20):
        """
        Uses a 2D gaussian kernel to estimate the density of the phase space.
        As of now, the maximum radius extends to 6Mpc and the maximum velocity allowed is 5000km/s
        The "q" parameter is termed "scale" here which we have set to 10 as default, but can go as high as 50.
        "normalization" is simply H0
        "x/yres" can be any value, but are recommended to be above 150
        "adj" is a custom value and changes the size of uniform filters when used (not normally needed)
        """
        self.x_scale = xvalues/xmax*xres
        self.y_scale = ((yvalues+ymax)/(normalization*scale))/((ymax*2.0)/(normalization*scale))*yres

        img = np.zeros((xres+1,yres+1))
        self.x_range = np.linspace(0,xmax,xres+1)
        self.y_range = np.linspace(-ymax,ymax,yres+1) 

        for j in range(xvalues.size):
            img[int(self.x_scale[j]), int(self.y_scale[j])] += 1  # indices must be integers in modern NumPy
        
        #Estimate kernel sizes
        #Uniform
        #self.ksize = 3.12/(xvalues.size)**(1/6.0)*((np.var(self.x_scale[xvalues<r200])+np.var(self.y_scale[xvalues<r200]))/2.0)**0.5/adj
        #if self.ksize < 3.5:
        #    self.ksize = 3.5
        #Gaussian
        self.ksize_x = (4.0/(3.0*xvalues.size))**(1/5.0)*np.std(self.x_scale[xvalues<r200])
        self.ksize_y = (4.0/(3.0*yvalues.size))**(1/5.0)*np.std(self.y_scale[xvalues<r200])
        
        #smooth with estimated kernel sizes
        #img = ndi.uniform_filter(img, (self.ksize,self.ksize))#,mode='reflect')
        self.img = ndi.gaussian_filter(img, (self.ksize_y,self.ksize_x),mode='reflect')
        self.img_grad = ndi.gaussian_gradient_magnitude(img, (self.ksize_y,self.ksize_x))
        self.img_inf = ndi.gaussian_gradient_magnitude(ndi.gaussian_gradient_magnitude(img, (self.ksize_y,self.ksize_x)), (self.ksize_y,self.ksize_x))
Developer: nkern | Project: CausticMass | Lines: 33 | Source: CausticMass.py


Example 15: ccf

def ccf(x, y, unbiased=True):
    '''cross-correlation function for 1d

    Parameters
    ----------
    x, y : arrays
       time series data
    unbiased : boolean
       if True, then denominators for autocovariance is n-k, otherwise n

    Returns
    -------
    ccf : array
        cross-correlation function of x and y

    Notes
    -----
    This is based on np.correlate, which does full convolution. For very long
    time series it is recommended to use fft convolution instead.

    If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.

    '''
    cvf = ccovf(x, y, unbiased=unbiased, demean=True)
    return cvf / (np.std(x) * np.std(y))
Developer: philippmuller | Project: statsmodels | Lines: 26 | Source: stattools.py
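
ccovf is not shown in this excerpt. As an illustration of the same normalization (cross-covariance divided by the product of the standard deviations), a self-contained sketch built directly on np.correlate could look like this; simple_ccf is a made-up name, not the statsmodels implementation:

import numpy as np

def simple_ccf(x, y, unbiased=True):
    # Cross-correlation of two equal-length 1d series, non-negative lags only.
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    n = len(x)
    xd = x - x.mean()
    yd = y - y.mean()
    ccov = np.correlate(xd, yd, mode='full')[n - 1:]  # lags 0 .. n-1
    denom = (n - np.arange(n)) if unbiased else n     # n-k denominators if unbiased
    return (ccov / denom) / (np.std(x) * np.std(y))

x = np.random.normal(size=500)
y = np.roll(x, 3)                   # y is x delayed by 3 samples
print(np.argmax(simple_ccf(y, x)))  # peak at lag 3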


Example 16: prepare_results

    def prepare_results(self, initial_pops=[50,100]):
        """
        Analyzes data from a batch run, preparing it for plotting.

        """

        self.initial_pops = initial_pops
        self.result_dict = {}

        for pop in self.initial_pops:
            self.result_dict[pop] = {}

            print('Starting batch for %d.' % pop)

            batch = BatchDriver(self.num_sims)
            results = batch.drive(initial_pop=pop)

            stdevs = []

            for indx, result in enumerate(results):
                adults = result['adults']
                minus_120 = len(adults) - 120
                last_120 = adults[minus_120:]

                stdev = np.std(last_120)
                stdevs.append(stdev)

            stdev_of_stdev = np.std(stdevs)

            self.result_dict[pop]['mean_stdev'] = np.mean(stdevs)
            self.result_dict[pop]['ci'] = (1.96 * stdev_of_stdev) / math.sqrt(self.num_sims)

        print(self.result_dict)
Developer: gatech-cse6730 | Project: proj2-code | Lines: 33 | Source: analyzer.py


Example 17: cross_validate

    def cross_validate(self, seg_corpus, dep_corpus, out_folder=None):
        assert seg_corpus.keys() == dep_corpus.keys()
        texts = np.array(sorted(seg_corpus.keys()))
        folds = KFold(len(texts), number_of_folds)

        # extract features for all texts
        all_features = {}
        all_labels = {}
        for text in texts:
            features, labels = self.extract_features_from_text(
                dep_corpus[text], seg_forest=seg_corpus[text])
            all_features[text] = features
            all_labels[text] = labels

        # do the cross-validation
        macro_F1s = []
        micro_F1s = []
        tp = fp = fn = tp_i = fp_i = fn_i = 0
        for i, (train, test) in enumerate(folds):
            print "# FOLD", i
            # train
            train_texts = texts[train]
            train_features = chained([all_features[text] for text in
                                      train_texts])
            train_labels = chained([all_labels[text] for text in train_texts])
            print "  training on %d items..." % len(train_labels)
            self._train(train_features, train_labels)
            print "  extracted %d features using the dict vectorizer." % \
                len(self.pipeline.named_steps[
                    'vectorizer'].get_feature_names())
            # test (predicting textwise)
            test_labels = []
            pred_labels = []
            for text in texts[test]:
                features = all_features[text]
                labels = all_labels[text]
                predictions = self._predict(features)
                test_labels.extend(labels)
                pred_labels.extend(predictions)
                if out_folder is not None:
                    discourse_tree = self._segment_text(predictions,
                                                        dep_corpus[text])
                    with open(out_folder + '/' + text + '.tree', 'w') as fout:
                        fout.write(str(discourse_tree))
            macro_f1, micro_f1 = self._score(test_labels, pred_labels)
            macro_F1s.append(macro_f1)
            micro_F1s.append(micro_f1)
            tp_i, fp_i, fn_i = _cnt_stat(test_labels, pred_labels)
            tp += tp_i
            fp += fp_i
            fn += fn_i

        print "# Average Macro F1 = %3.1f +- %3.2f" % \
            (100 * np.mean(macro_F1s), 100 * np.std(macro_F1s))
        print "# Average Micro F1 = %3.1f +- %3.2f" % \
            (100 * np.mean(micro_F1s), 100 * np.std(micro_F1s))
        if tp or fp or fn:
            print "# F1_{tp,fp} %.2f" % (2. * tp / (2. * tp + fp + fn) * 100)
        else:
            print "# F1_{tp,fp} 0. %"
Developer: discourse-lab | Project: DiscourseSegmenter | Lines: 60 | Source: matesegmenter.py


Example 18: getDftBins

def getDftBins(data=None, sampleRate=None, low=100, high=8000, chunk=64):
    """Return DFT (discrete Fourier transform) of ``data``, doing so in
    time-domain bins, each of size ``chunk`` samples.

    e.g., for getting FFT magnitudes in a ms-by-ms manner.

    If given a sampleRate, the data are bandpass filtered (low, high).
    """
    # good to reshape & vectorize data rather than use a python loop
    if data is None:
        data = []
    bins = []
    i = chunk
    if sampleRate:
        # just to get freq vector
        _junk, freq = getDft(data[:chunk], sampleRate)
        band = (freq > low) & (freq < high)  # band (frequency range)
    while i <= len(data):
        magn = getDft(data[i - chunk:i])
        if sampleRate:
            bins.append(np.std(magn[band]))  # filtered by frequency
        else:
            bins.append(np.std(magn))  # unfiltered
        i += chunk
    return np.array(bins)
Developer: jonathanoroberts | Project: psychopy | Lines: 25 | Source: microphone.py


Example 19: condBias

 def condBias(H,O):  
     H_ensmean = np.mean(H, axis=1)
     r = np.corrcoef(H_ensmean, O)[0,1]
     std_H = np.std(H_ensmean)
     std_O = np.std(O)            
     cond_bias = r * std_O/std_H
     return cond_bias
Developer: illing2005 | Project: murcss | Lines: 7 | Source: crpss_test.py
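
A quick synthetic check of Example 19, assuming H is an (n_samples, n_members) ensemble hindcast array and O the matching observations (both invented here):

import numpy as np

O = np.random.normal(size=100)                       # observations
H = O[:, None] + np.random.normal(0, 0.5, (100, 8))  # 8-member ensemble scattered around O
print(condBias(H, O))  # close to 1 for this well-calibrated toy ensemble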


Example 20: plotForce

def plotForce():
    figure(size=3,aspect=0.5)
    subplot(1,2,1)
    from EvalTraj import plotFF
    plotFF(vp=351,t=28,f=900,cm=0.6,foffset=8)
    subplot_annotate()
    
    subplot(1,2,2)
    for i in [1,2,3,4]:
        R=np.squeeze(np.load('Rdpse%d.npy'%i))
        R=stats.nanmedian(R,axis=2)[:,1:,:]
        dps=np.linspace(-1,1,201)[1:]
        plt.plot(dps,R[:,:,2].mean(0));
    plt.legend([0,0.1,0.2,0.3],loc=3) 
    i=2
    R=np.squeeze(np.load('Rdpse%d.npy'%i))
    R=stats.nanmedian(R,axis=2)[:,1:,:]
    mn=np.argmin(R,axis=1)
    y=np.random.randn(mn.shape[0])*0.00002+0.0438
    plt.plot(np.sort(dps[mn[:,2]]),y,'+',mew=1,ms=6,mec=[ 0.39  ,  0.76,  0.64])
    plt.xlabel('Displacement of Force Origin')
    plt.ylabel('Average Net Force Magnitude')
    hh=dps[mn[:,2]]
    err=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.975,hh.shape[0])
    err2=np.std(hh)/np.sqrt(hh.shape[0])*stats.t.ppf(0.75,hh.shape[0])
    m=np.mean(hh)
    print(m, m - err, m + err)
    np.save('force',[m, m-err,m+err,m-err2,m+err2])
    plt.xlim([-0.5,0.5])
    plt.ylim([0.0435,0.046])
    plt.grid(b=True,axis='x')
    subplot_annotate()
Developer: simkovic | Project: wolfpackRevisited | Lines: 32 | Source: Evaluation.py



Note: the numpy.std examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the code remains with the original authors. Consult each project's License before redistributing or reusing the code, and do not repost without permission.

