
Python numpy.nanmean Function Code Examples


This article collects typical usage examples of Python's numpy.nanmean function. If you are wondering what nanmean does, how to call it, or what real code that uses it looks like, the curated examples below should help.



The 20 code examples of the nanmean function below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
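Before the examples, here is a minimal self-contained sketch of what numpy.nanmean does (the values are illustrative):

import numpy as np

a = np.array([1.0, 2.0, np.nan, 4.0])

# nanmean ignores NaN entries: (1 + 2 + 4) / 3
print(np.nanmean(a))          # 2.333...

# np.mean, by contrast, propagates NaN
print(np.mean(a))             # nan

# axis works as with np.mean; an all-NaN slice yields nan plus a RuntimeWarning
m = np.array([[1.0, np.nan],
              [3.0, np.nan]])
print(np.nanmean(m, axis=0))  # [2. nan]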

Example 1: plot_hist

def plot_hist(ind, binwidth=5, incl_daystr=True, ax=None, pos=(0.05, 0.7),
              kw={'alpha' : 0.3, 'color' : 'k'}):
    """Plot histogram of onset days.
    """
    if ax is None:
        ax = plt.gca()

    def daystr(day):
        day = round(day)
        mm, dd = atm.jday_to_mmdd(day)
        mon = atm.month_str(mm)
        return '%.0f (%s-%.0f)' % (day, mon, dd)

    if isinstance(ind, pd.Series) or isinstance(ind, xray.DataArray):
        ind = ind.values

    b1 = np.floor(np.nanmin(ind) / binwidth) * binwidth
    b2 = np.ceil(np.nanmax(ind) / binwidth) * binwidth
    bin_edges = np.arange(b1, b2 + 1, binwidth)
    n, bins, _ = ax.hist(ind, bin_edges, **kw)
    ax.set_xlabel('Day of Year')
    ax.set_ylabel('Num of Occurrences')
    if incl_daystr:
        dmean = daystr(np.nanmean(ind))
        dmin = daystr(np.nanmin(ind))
        dmax = daystr(np.nanmax(ind))
    else:
        dmean = '%.0f' % np.nanmean(ind)
        dmin = '%.0f' % np.nanmin(ind)
        dmax = '%.0f' % np.nanmax(ind)
    s = 'Mean %s\n' % dmean + 'Std %.0f\n' % np.nanstd(ind)
    s = s + 'Min %s\n' % dmin + 'Max %s' % dmax
    x0, y0 = pos
    atm.text(s, (x0, y0), ax=ax, horizontalalignment='left')
Developer ID: jenfly, Project: monsoon-onset, Lines of code: 34, Source: indices.py
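The atm helpers above are project-specific, so here is a self-contained sketch of the NaN-safe core of plot_hist, with invented onset days: bin edges snapped outward to multiples of binwidth via nanmin/nanmax, plus the summary statistics used for the annotation text.

import numpy as np

ind = np.array([152., 148., np.nan, 160., 155., 149., np.nan, 163.])
binwidth = 5

# snap the bin range outward to multiples of binwidth, ignoring NaNs
b1 = np.floor(np.nanmin(ind) / binwidth) * binwidth
b2 = np.ceil(np.nanmax(ind) / binwidth) * binwidth
bin_edges = np.arange(b1, b2 + 1, binwidth)
print(bin_edges)                       # [145. 150. 155. 160. 165.]

# NaN-safe summary statistics for the annotation
print('Mean %.1f' % np.nanmean(ind))   # Mean 154.5
print('Std %.1f' % np.nanstd(ind))     # Std 5.5
print('Min %.0f' % np.nanmin(ind))     # Min 148
print('Max %.0f' % np.nanmax(ind))     # Max 163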


Example 2: evaluate

  def evaluate(self):
    """Compute evaluation result.

    Returns:
      A named tuple with the following fields -
        average_precision: float numpy array of average precision for
            each class.
        mean_ap: mean average precision of all classes, float scalar
        precisions: List of precisions, each precision is a float numpy
            array
        recalls: List of recalls, each recall is a float numpy array
        corloc: numpy float array
        mean_corloc: Mean CorLoc score for each class, float scalar
    """
    if (self.num_gt_instances_per_class == 0).any():
      logging.warn(
          'The following classes have no ground truth examples: %s',
          np.squeeze(np.argwhere(self.num_gt_instances_per_class == 0)) +
          self.label_id_offset)

    if self.use_weighted_mean_ap:
      all_scores = np.array([], dtype=float)
      all_tp_fp_labels = np.array([], dtype=bool)
    for class_index in range(self.num_class):
      if self.num_gt_instances_per_class[class_index] == 0:
        continue
      if not self.scores_per_class[class_index]:
        scores = np.array([], dtype=float)
        tp_fp_labels = np.array([], dtype=float)
      else:
        scores = np.concatenate(self.scores_per_class[class_index])
        tp_fp_labels = np.concatenate(self.tp_fp_labels_per_class[class_index])
      if self.use_weighted_mean_ap:
        all_scores = np.append(all_scores, scores)
        all_tp_fp_labels = np.append(all_tp_fp_labels, tp_fp_labels)
      logging.info('Scores and tpfp per class label: %d', class_index)
      logging.info(tp_fp_labels)
      logging.info(scores)
      precision, recall = metrics.compute_precision_recall(
          scores, tp_fp_labels, self.num_gt_instances_per_class[class_index])
      self.precisions_per_class[class_index] = precision
      self.recalls_per_class[class_index] = recall
      average_precision = metrics.compute_average_precision(precision, recall)
      self.average_precision_per_class[class_index] = average_precision

    self.corloc_per_class = metrics.compute_cor_loc(
        self.num_gt_imgs_per_class,
        self.num_images_correctly_detected_per_class)

    if self.use_weighted_mean_ap:
      num_gt_instances = np.sum(self.num_gt_instances_per_class)
      precision, recall = metrics.compute_precision_recall(
          all_scores, all_tp_fp_labels, num_gt_instances)
      mean_ap = metrics.compute_average_precision(precision, recall)
    else:
      mean_ap = np.nanmean(self.average_precision_per_class)
    mean_corloc = np.nanmean(self.corloc_per_class)
    return ObjectDetectionEvalMetrics(
        self.average_precision_per_class, mean_ap, self.precisions_per_class,
        self.recalls_per_class, self.corloc_per_class, mean_corloc)
Developer ID: pcm17, Project: models, Lines of code: 60, Source: object_detection_evaluation.py
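The final reduction in evaluate() relies on a NaN-aware average: in this evaluator, classes that could not be evaluated keep a NaN average precision, and np.nanmean skips them when computing mean_ap. A minimal sketch of that step, with invented AP values:

import numpy as np

# per-class average precision; NaN marks a class with no ground truth
average_precision_per_class = np.array([0.62, np.nan, 0.80, 0.55])

mean_ap = np.nanmean(average_precision_per_class)
print(mean_ap)  # (0.62 + 0.80 + 0.55) / 3 = 0.657 (approx.)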


Example 3: calc_norm_summary_tables

def calc_norm_summary_tables(accuracy_tbl, time_tbl):
    """
    Calculate normalized performance/ranking summary, as numpy
    matrices as usual for convenience, and matrices of additional
    statistics (min, max, percentiles, etc.)

    Here normalized means relative to the best which gets a 1, all
    others get the ratio resulting from dividing by the performance of
    the best.
    """
    # Min across all minimizers, i.e. for each fit problem what is the lowest chi-squared and the lowest time
    min_sum_err_sq = np.nanmin(accuracy_tbl, 1)
    min_runtime = np.nanmin(time_tbl, 1)

    # create normalised tables
    norm_acc_rankings = accuracy_tbl / min_sum_err_sq[:, None]
    norm_runtimes = time_tbl / min_runtime[:, None]

    summary_cells_acc = np.array([np.nanmin(norm_acc_rankings, 0),
                                  np.nanmax(norm_acc_rankings, 0),
                                  nanmean(norm_acc_rankings, 0),
                                  nanmedian(norm_acc_rankings, 0)
                                  ])

    summary_cells_runtime = np.array([np.nanmin(norm_runtimes, 0),
                                      np.nanmax(norm_runtimes, 0),
                                      nanmean(norm_runtimes, 0),
                                      nanmedian(norm_runtimes, 0)
                                      ])

    return norm_acc_rankings, norm_runtimes, summary_cells_acc, summary_cells_runtime
Developer ID: DanNixon, Project: mantid, Lines of code: 31, Source: post_processing.py
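A compact runnable sketch of the normalization above, using np.nanmean/np.nanmedian in place of the bare nanmean/nanmedian names the original imports, with an invented accuracy table:

import numpy as np

# rows = fit problems, columns = minimizers; NaN marks a failed run
accuracy_tbl = np.array([[2.0, 1.0,    4.0],
                         [3.0, np.nan, 1.5],
                         [5.0, 10.0,   5.0]])

# best (lowest) chi-squared per problem, ignoring failures
min_sum_err_sq = np.nanmin(accuracy_tbl, 1)

# the best minimizer gets 1, the others the ratio to the best
norm_acc_rankings = accuracy_tbl / min_sum_err_sq[:, None]

summary_cells_acc = np.array([np.nanmin(norm_acc_rankings, 0),
                              np.nanmax(norm_acc_rankings, 0),
                              np.nanmean(norm_acc_rankings, 0),
                              np.nanmedian(norm_acc_rankings, 0)])
print(summary_cells_acc)  # rows: min, max, mean, median per minimizer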


Example 4: __entrofy

def __entrofy(X, k, w=None, q=None, pre_selects=None):
    '''See entrofy() for documentation'''

    n_participants, n_attributes = X.shape

    if w is None:
        w = np.ones(n_attributes)

    if q is None:
        q = 0.5 * np.ones(n_attributes)

    assert 0 < k <= n_participants
    assert not np.any(w < 0)
    assert np.all(q >= 0.0) and np.all(q <= 1.0)
    assert len(w) == n_attributes
    assert len(q) == n_attributes

    if k == n_participants:
        return np.arange(n_participants)

    # Initialization
    y = np.zeros(n_participants, dtype=bool)

    if pre_selects is None:
        # Select one at random
        pre_selects = np.random.choice(n_participants, size=1)

    y[pre_selects] = True

    # Where do we have missing data?
    Xn = np.isnan(X)

    while True:
        i = y.sum()
        if i >= k:
            break

        # Initialize the distribution vector
        p = np.nanmean(X[y], axis=0)
        p[np.isnan(p)] = 0.0

        # Compute the candidate distributions
        p_new = (p * i + X) / (i + 1.0)

        # Wherever X is nan, propagate the old p since we have no new information
        p_new[Xn] = (Xn * p)[Xn]

        # Compute marginal gain for each candidate
        delta = obj(p_new, w, q) - obj(p, w, q)

        # Knock out the points we've already taken
        delta[y] = -np.inf

        # Select the top score.  Break near-ties randomly.
        target_score = delta.max()
        target_score = target_score - 1e-3 * np.abs(target_score)
        new_idx = np.random.choice(np.flatnonzero(delta >= target_score))
        y[new_idx] = True

    return obj(np.nanmean(X[y], axis=0), w, q), np.flatnonzero(y)
Developer ID: anukat2015, Project: entrofy, Lines of code: 60, Source: entrofy.py
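The numerical heart of __entrofy is an incremental mean: given attribute averages p over the i rows selected so far, adding a candidate row gives (p * i + X) / (i + 1), with np.nanmean(..., axis=0) supplying p while tolerating missing entries. A toy sketch with invented data:

import numpy as np

X = np.array([[1., 0., np.nan],
              [0., 1., 1.],
              [1., 1., 0.]])
y = np.array([True, False, True])  # rows already selected

i = y.sum()
p = np.nanmean(X[y], axis=0)       # per-attribute mean over selected rows
p[np.isnan(p)] = 0.0               # all-NaN attributes default to 0

# candidate distribution if each row were added next
p_new = (p * i + X) / (i + 1.0)

# where X is NaN, keep the old p: no new information there
Xn = np.isnan(X)
p_new[Xn] = np.broadcast_to(p, X.shape)[Xn]
print(p_new)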


Example 5: go

    def go(x, y, x_denominators=1, y_denominators=1):

        # these next two lines are wrong, but they are bug-compatible with v0.6.13 !
        x = x / np.nanmean(x_denominators)
        y = y / np.nanmean(y_denominators)

        return group_sequential(x, y, spending_function, estimated_sample_size, alpha, cap)
Developer ID: zalando, Project: expan, Lines of code: 7, Source: early_stopping.py


Example 6: get_loss_pred

def get_loss_pred(params,gt,est):
    fest="/home/coskun/PycharmProjects/RNNPoseV2/pred/3.6m/estimation.txt"
    fgt="/home/coskun/PycharmProjects/RNNPoseV2/pred/3.6m/ground_truth.txt"
    loss=0
    loss_list=[]
    with open(fest,"a") as f_handle_est,  open(fgt,"a") as f_handle_gt:
        for b in range(len(gt)):
            diff_vec=np.abs(gt[b].reshape(params['n_output']//3,3) - est[b].reshape(params['n_output']//3,3)) #14,3
            for val in est[b]:
                f_handle_est.write("%f "%(val*1000))
            for val in gt[b]:
                f_handle_gt.write("%f "%(val*1000))
            # val=np.sqrt(np.sum(diff_vec**2,axis=1))
            #
            # for i in range(14):
            #     f=val[i]
            #     f_handle.write("%f"%(f))
            #     if(i<13):
            #         f_handle.write(";")
            f_handle_est.write('\n')
            f_handle_gt.write('\n')
            b_l=np.sqrt(np.sum(diff_vec**2,axis=1))
            loss_list.append(b_l)
            loss +=np.nanmean(np.sqrt(np.sum(diff_vec**2,axis=1)))
        loss=np.nanmean(loss) # nanmean of a Python scalar just returns it
    return (loss,loss_list)
Developer ID: Seleucia, Project: RNNPose, Lines of code: 26, Source: utils.py
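The metric in get_loss_pred is the per-joint Euclidean distance, averaged NaN-safely so untracked joints do not poison the batch. A self-contained sketch with two invented 3-joint poses:

import numpy as np

gt  = np.array([[0., 0., 0.], [1., 1., 1.], [np.nan, np.nan, np.nan]])
est = np.array([[0., 0., 1.], [1., 2., 1.], [5., 5., 5.]])

diff_vec = np.abs(gt - est)                       # per-coordinate offsets
per_joint = np.sqrt(np.sum(diff_vec**2, axis=1))  # Euclidean error per joint
print(per_joint)                                  # [1. 1. nan]

# nanmean ignores the untracked joint
print(np.nanmean(per_joint))                      # 1.0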


Example 7: run

	def run(self, x1, x2):
		if isinstance(x1, np.ndarray):
			x1 = np.nanmean(x1)
		if isinstance(x2, np.ndarray):
			x2 = np.nanmean(x2)

		return x1/(x1+x2)
Developer ID: gallantlab, Project: realtimefmri, Lines of code: 7, Source: preprocessing.py


Example 8: compute

    def compute(self, today, assets, out, close):

        # get returns dataset
        returns = ((close - np.roll(close, 1, axis=0)) / np.roll(close, 1, axis=0))[1:]

        # get index of benchmark (8554 is the SPY sid on Quantopian)
        benchmark_index = np.where(assets == 8554)[0][0]

        # get returns of benchmark
        benchmark_returns = returns[:, benchmark_index]

        # prepare X matrix (x_is - x_bar)
        X = benchmark_returns
        X_bar = np.nanmean(X)
        X_vector = X - X_bar
        X_matrix = np.tile(X_vector, (len(returns.T), 1)).T

        # prepare Y matrix (y_is - y_bar)
        Y_bar = np.nanmean(returns, axis=0)
        Y_bars = np.tile(Y_bar, (len(returns), 1))
        Y_matrix = returns - Y_bars

        # prepare variance of X
        X_var = np.nanvar(X)

        # multiply X matrix and Y matrix and sum (dot product),
        # then divide by the variance of X and the sample size;
        # this gives the MLE of beta
        out[:] = (np.sum((X_matrix * Y_matrix), axis=0) / X_var) / (len(returns))
Developer ID: quantopian, Project: algorithm-component-library, Lines of code: 29, Source: quanta_lib.py
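A standalone sketch of the same beta estimate for a single asset: demean with np.nanmean, then divide the covariance by the benchmark variance. The series are invented, with a true beta of 2:

import numpy as np

bench = np.array([0.01, -0.02, 0.03, np.nan, 0.00])
asset = 2.0 * bench + 0.001   # fake asset returns with beta = 2

x_dev = bench - np.nanmean(bench)
y_dev = asset - np.nanmean(asset)

n = np.sum(~np.isnan(bench))
beta = np.nansum(x_dev * y_dev) / (np.nanvar(bench) * n)
print(beta)  # 2.0 (approx.)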


Example 9: nanmean

def nanmean(array):
    """Return the mean of an array ignoring nans.

    Args:
        array: array of values

    Returns:
        result: np.nanmean(array)

    """
    try:
        i = 0
        unc = 0
        if np.isnan(array.v).all() or len(array.v) == 0:
            return Measurement(np.nan, np.nan)
        val = np.nanmean(array.v)
        for u in np.nditer(array.u):
            if np.isfinite(u):
                unc += u ** 2
                i += 1
        return Measurement(val, np.sqrt(unc) / i)
    except AttributeError:
        if np.isnan(array).all() or len(array) == 0:
            return np.nan
        return np.nanmean(array)
Developer ID: ZachWerginz, Project: Catalogue_cross_calibration, Lines of code: 25, Source: measurement.py


Example 10: autocorr

def autocorr(datain, endlag):
    '''
    autocorr(datain, endlag)

    Input:
         datain[0:N] is a data time series of size N
         endlag is the number of time steps to find autocorrelation
    Output:
         aut[0:endlag] is the autocorrelation of datain from lag 0 to time step endlag

    Steven Cavallo
    University of Oklahoma
    July 2016
    '''

    N = np.size(datain)
    aut = []
    for lag in range(0, endlag):
        data1 = datain[0:N-lag]
        data1m = data1 - np.nanmean(data1)
        data2 = datain[lag:]
        data2m = data2 - np.nanmean(data2)
        aut.append(np.sum(data1m*data2m)/np.sqrt(np.sum(data1m**2.0)*np.sum(data2m**2.0)))

    return aut
Developer ID: scavallo, Project: python_scripts, Lines of code: 25, Source: utilities_modules.py
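As a quick check of autocorr, an alternating series should have autocorrelation +1 at even lags and -1 at odd lags; the loop body is re-inlined here so the snippet runs on its own:

import numpy as np

datain = np.array([1., -1., 1., -1., 1., -1., 1., -1.])

aut = []
for lag in range(0, 4):
    data1 = datain[0:len(datain)-lag]
    data1m = data1 - np.nanmean(data1)
    data2 = datain[lag:]
    data2m = data2 - np.nanmean(data2)
    aut.append(np.sum(data1m*data2m)/np.sqrt(np.sum(data1m**2.0)*np.sum(data2m**2.0)))

print(np.round(aut, 3))  # [ 1. -1.  1. -1.]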


Example 11: process_chunk

    def process_chunk(self, data):

        moment_data = numpy.log(data)
        
        moments = numpy.zeros(self.mmax - self.mmin, dtype=numpy.float32)

        mean = numpy.nanmean(moment_data)

        moment_data = moment_data - mean

        if self.mmin == 1:
            temp = numpy.ones(len(moment_data), dtype=numpy.float32)
        elif self.mmin == 2:
            temp = moment_data
        else:
            temp = numpy.power(moment_data, self.mmin - 1)

        for i in range(0, self.mmax-self.mmin):
            temp = temp * moment_data
            moments[i] = numpy.nanmean(temp)

        if self.mmin == 1:
            moments[0] = mean

        return moments
Developer ID: kpatton1, Project: simpdf, Lines of code: 25, Source: core.py
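Stripped of the log transform and the incremental bookkeeping, process_chunk is computing central moments: subtract the nanmean once, then take the nanmean of higher powers. A direct sketch with invented data:

import numpy as np

data = np.array([1.0, 2.0, np.nan, 4.0, 8.0])
centered = data - np.nanmean(data)

# k-th central moment = nanmean(centered**k); NaNs stay excluded throughout
for k in (2, 3, 4):
    print(k, np.nanmean(centered**k))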


Example 12: imputedata

def imputedata(data, strategy='mean', missing=False):
	'''
	Two imputation strategies: '2sd' and 'mean'.
	'''
	with warnings.catch_warnings():
		warnings.simplefilter("ignore", category=RuntimeWarning)
		mean = np.nanmean(data, axis=0)
		sd = np.sqrt(np.nanmean((data - mean)**2, axis=0))
	sign = np.sign(data - mean)
	is_out = is_outliers(data, m=2.5)
	data[is_out] = np.nan
	
	if strategy == '2sd':
		# impute as +-2sd m
		# reduce the change in distribution. 
		for i in range(data.shape[1]):
			if missing:
				sign[np.isnan(sign)] = 0 #missing data will be imputed as mean
			ind_nan = np.where(np.isnan(data[:,i]))
			data[ind_nan,i] = mean[i] + (sd[i] * 2 * sign[ind_nan,i])

	if strategy == 'mean':
		#impute as mean
		for i in range(data.shape[1]):
			ind_nan = np.where(np.isnan(data[:,i]))
			if missing: #missing data will be imputed as mean
				data[ind_nan,i] = mean[i]
			else: #missing data will be left as nan
				data[ind_nan,i] = mean[i] * abs(sign[ind_nan,i])
	return data
Developer ID: htwangtw, Project: mindwanderinglabYork, Lines of code: 30, Source: leavel1session_MWQ.py
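A minimal runnable sketch of the 'mean' strategy above: flag bad cells as NaN, then fill every NaN with its column's np.nanmean (data invented):

import numpy as np

data = np.array([[1.0,   10.0],
                 [2.0,   np.nan],
                 [3.0,   30.0],
                 [100.0, 20.0]])

data[3, 0] = np.nan                  # treat the obvious outlier as missing

col_mean = np.nanmean(data, axis=0)  # column means over the trusted values

rows, cols = np.where(np.isnan(data))
data[rows, cols] = col_mean[cols]    # impute every NaN with its column mean
print(data)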


Example 13: trim_bad_edges

def trim_bad_edges(self, r, window_width=128, min_snr=5.):
    """
    Find edge regions that contain no information and trim them.

    Parameters
    ----------
    r : `int`
        order index
    window_width : `int`
        number of pixels to average over for local SNR
    min_snr : `float`
        SNR threshold below which we discard the data
    """
    for n in range(self.N):
        n_pix = len(self.xs[0][n])
        for window_start in range(n_pix - window_width):
            mean_snr = np.sqrt(np.nanmean(self.ivars[r][n, window_start:window_start+window_width]))
            if mean_snr > min_snr:
                self.ivars[r][n, :window_start] = 0.  # trim everything to left of window
                break
        for window_start in reversed(range(n_pix - window_width)):
            mean_snr = np.sqrt(np.nanmean(self.ivars[r][n, window_start:window_start+window_width]))
            if mean_snr > min_snr:
                self.ivars[r][n, window_start+window_width:] = 0.  # trim everything to right of window
                break
Developer ID: megbedell, Project: wobble, Lines of code: 25, Source: data.py


Example 14: main

def main():
    os.system('modprobe w1-gpio')
    os.system('modprobe w1-therm')
    print(len(sys.argv))
    if len(sys.argv) == 1:
        number_of_meas = 7
    else:
        print(sys.argv[1])
        number_of_meas = int(sys.argv[1])
    print("number_of_measurements = " + str(number_of_meas))
    
    print "getting device files and serials..."
    THEDICT = _get_w1_tree_and_serials()
    
    print "reading sensors " + str(number_of_meas) + " times ..."
    for step in range(int(number_of_meas)):
        for sensor_id in THEDICT:
            if sensor_id[0:2] == '28' or sensor_id[0:2] == '10':
                temp = read_sensor_ds18b20(sensor_id,THEDICT[sensor_id]["path"])
                volt = "n.a."
                THEDICT[sensor_id]["temp"].append(temp)
                THEDICT[sensor_id]["volt"].append(0.)
            if sensor_id[0:2] == '26':
                temp,volt = read_sensor_ds2438(sensor_id,THEDICT[sensor_id]["path"])
                THEDICT[sensor_id]["temp"].append(temp)
                THEDICT[sensor_id]["volt"].append(volt)
            print "step " + str(step) + " " + sensor_id + " " + str(temp) + " " + str(volt)
    
    print "calculating individual and total means:"
    MEAN_IND = {}
    for sensor_id in THEDICT:
        MEAN_IND[sensor_id] = [
                                np.nanmean(np.array(THEDICT[sensor_id]["temp"])), 
                                np.nanmean(np.array(THEDICT[sensor_id]["volt"]))
                              ]
    total_temp = []
    total_volt = []
    for sensor_id in MEAN_IND:
        if sensor_id[0:2] == '28' or sensor_id[0:2] == '10':
            total_temp.append(MEAN_IND[sensor_id][0])
        if sensor_id[0:2] == '26':
            total_volt.append(MEAN_IND[sensor_id][1])
    mean_temp = np.nanmean(np.array(total_temp))
    mean_volt = np.nanmean(np.array(total_volt))
    
    print "temp mean: " + str(mean_temp) + " +/- " + str(np.nanstd(np.array(total_temp)))
    print "volt mean: " + str(mean_volt) + " +/- " + str(np.nanstd(np.array(total_temp)))
        
    
    print "calculating offsets..."
    OFFSETS = {}
    for sensor_id in MEAN_IND:
        OFFSETS[sensor_id] = [
                               MEAN_IND[sensor_id][0] - mean_temp, 
                               MEAN_IND[sensor_id][1] - mean_volt
                             ]
    print(OFFSETS)
            
    print "writing offsets..."
    write_offset(OFFSETS)
Developer ID: ma-tri-x, Project: ESpy, Lines of code: 60, Source: CalibrateSensors.py


Example 15: orient_op

def orient_op(orientations, m=4, positions=None, margin=0,
              ret_complex=True, do_err=False, globl=False, locl=False):
    """orient_op(orientations, m=4, positions=None, margin=0,
                 ret_complex=True, do_err=False, globl=False, locl=False)

       calculate the global m-fold particle orientational order parameter

                1   N    i m theta
        Phi  = --- SUM e          j
           m    N  j=1
    """
    if not (globl or locl):
        globl = True
        locl = orientations.ndim == 2
    np.mod(orientations, tau/m, orientations)
    if margin:
        if margin < ss:
            margin *= ss
        center = 0.5*(positions.max(0) + positions.min(0))
        d = helpy.dist(positions, center)   # distances to center
        orientations = orientations[d < d.max() - margin]
    phis = np.exp(m*orientations*1j)
    if locl:
        phis = np.nanmean(phis, 1)
    if do_err:
        err = np.nanstd(phis, ddof=1)/sqrt(np.count_nonzero(~np.isnan(phis)))
    if not globl:
        return (np.abs(phis), err) if do_err else np.abs(phis)
    phi = np.nanmean(phis) if ret_complex else np.abs(np.nanmean(phis))
    if locl:
        return (np.abs(phis), phi, err) if do_err else (np.abs(phis), phi)
    return (phi, err) if do_err else phi
Developer ID: leewalsh, Project: square-tracking, Lines of code: 32, Source: correlation.py
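np.nanmean also accepts complex arrays, which is what orient_op leans on: each orientation becomes a phasor exp(i*m*theta), and the order parameter is their NaN-safe mean. A tiny sketch with invented angles:

import numpy as np

m = 4
theta = np.array([0.02, np.nan, 0.01, -0.03])  # one particle untracked

phis = np.exp(m * theta * 1j)  # per-particle phasors
phi = np.nanmean(phis)         # complex global order parameter

print(np.abs(phi))             # close to 1: nearly perfect 4-fold alignment
print(np.angle(phi) / m)       # mean orientation, modulo tau/m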


Example 16: Avg_WS_Picks

def Avg_WS_Picks(draft_db, plot=False):
    x=draft_db[['Pk','WS/48']]
    #Find the range of picks (#1-?)
    pick_list=sorted(set(x['Pk'].tolist())) #set function gets unique elements of picks
    pick_means=[]
    for pick in pick_list:
        x1=x['WS/48'][x['Pk']==pick].tolist() #Get win shares for each pick
        pick_mean=np.nanmean(x1)
        pick_means.append(pick_mean)

    #Graph
    if plot:
        plt.scatter(pick_list,pick_means)
        #Plot with average WS for an NBA Player
        avg_WS=np.nanmean(x['WS/48'].tolist())
        plt.plot(pick_list,np.ones(len(pick_list))*avg_WS)
        plt.xlabel('Pick')
        plt.ylabel('Average WS/48')
        plt.ylim([-.5,.5])
        plt.xlim([0,200])
        plt.xticks([0,10,25,40,50,100])
        plt.savefig('Avg_WS_vs_pick')
        plt.show()
    else:
        return pd.DataFrame({
            'Pk': pick_list,
            'Avg_WS/48' : pick_means
        })
Developer ID: Sandy4321, Project: NBA_Draft, Lines of code: 28, Source: NBA_Draft.py


Example 17: pair_angle_op

def pair_angle_op(angles, nmask=None, m=4, globl=False, locl=False):
    """calculate the pair-angle (bond angle) order parameter

    the parameter for particle i is defined as:
        psi_m_i = < exp(i m theta_ij) >
    averaged over neighbors j of particle i
    the global parameter is the mean over all particles i:
        Psi_m = < psi_m_i >

    Parameters
    angles: angles between neighboring pairs (from pair_angles)
    nmask:  neighbor mask if invalid angles are not np.nan (None)
    m:      symmetry; angles will be considered modulo tau/m

    Returns
    mag:    the absolute value |psi|
    ang:    the phase of psi mod tau/m
    psims:  the local values of psi for each particle
    """
    if not (globl or locl):
        globl = locl = True
    if nmask is not None:
        angles[nmask] = np.nan
    psims = np.nanmean(np.exp(m*angles*1j), 1)
    if not globl:
        return np.abs(psims)
    psim = np.nanmean(psims)
    mag = abs(psim)
    ang = phase(psim)/m
    if locl:
        return mag, ang, psims
    return mag, ang
Developer ID: leewalsh, Project: square-tracking, Lines of code: 32, Source: correlation.py


Example 18: average_values

def average_values(result):
	"""average the results of bootstrapping"""
	result_avg = result[0]
	
	if isinstance(result_avg,dict):
		for key in result_avg:
			val = result_avg[key]
			
			if isinstance(val,dict):
				result_avg[key] = average_values([r[key] for r in result])
			elif isinstance(val, np.float64):
				result_avg[key] = np.nanmean([r[key] for r in result], axis=0)
			elif key == 'twoway':
				for (x,y), res in np.ndenumerate(val):
					result_avg[key][x,y] = average_values([r[key][x,y] for r in result])
			elif isinstance(val, np.ndarray) and not val.dtype == np.object:
				result_avg[key] = np.nanmean([r[key] for r in result], axis=0)
			elif key != 'target':
				result_avg[key] = [r[key] for r in result]
				
				#make sure we get a cell-array back in MATLAB
				if isinstance(val, str):
					result_avg[key] = np.array(result_avg[key],dtype=np.object)
	
	return result_avg
Developer ID: bearsun, Project: matlab_lib, Lines of code: 25, Source: MVPAClassify_20140424.py


Example 19: get_loss_bb

def get_loss_bb(gt,est):
    sf="/home/coskun/PycharmProjects/RNNPose21/daya/blanket.txt"
    batch_size=gt.shape[0]
    seq_length=gt.shape[1]
    loss=0
    loss_list=[]
    seq_list=[]
    b_seq_list=[]
    with open(sf,"a") as f_handle:
        for b in range(batch_size):
            seq_los=[0]*seq_length
            for s in range(seq_length):
                diff_vec=np.abs(gt[b][s].reshape(14,3) - est[b][s].reshape(14,3))*2 #14,3
                val=np.sqrt(np.sum(diff_vec**2,axis=1))
                for i in range(14):
                    f=val[i]
                    f_handle.write("%f"%(f))
                    if(i<13):
                        f_handle.write(";")
                f_handle.write('\n')
                b_l=np.nanmean(np.sqrt(np.sum(diff_vec**2,axis=1)))
                loss_list.append(b_l)
                seq_los[s]=b_l
                loss +=np.nanmean(np.sqrt(np.sum(diff_vec**2,axis=1)))
            b_seq_list.append(seq_los)
        seq_list=np.mean(b_seq_list,axis=0)
        loss/=(seq_length*batch_size)
    return (loss,loss_list,seq_list)
Developer ID: Seleucia, Project: TFTutorials, Lines of code: 28, Source: utils.py


Example 20: _msd_iter

def _msd_iter(pos, lagtimes):
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=RuntimeWarning)
        for lt in lagtimes:
            diff = pos[lt:] - pos[:-lt]
            yield np.concatenate((np.nanmean(diff, axis=0),
                                  np.nanmean(diff**2, axis=0)))
Developer ID: caspervdw, Project: trackpy, Lines of code: 7, Source: motion.py
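A runnable sketch of the reduction inside _msd_iter: for each lag, difference the trajectory against itself and take NaN-safe means of the displacement and the squared displacement. The trajectory is invented, with one dropped frame:

import numpy as np

pos = np.array([[0., 0.],
                [1., 0.],
                [np.nan, np.nan],   # frame lost by the tracker
                [3., 0.],
                [4., 0.]])

for lt in (1, 2):
    diff = pos[lt:] - pos[:-lt]
    row = np.concatenate((np.nanmean(diff, axis=0),      # mean displacement
                          np.nanmean(diff**2, axis=0)))  # mean squared displacement
    print(lt, row)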



Note: the numpy.nanmean function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The code snippets are drawn from open-source projects by many contributors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Please do not repost without permission.

