
Python numpy.average Function Code Examples


This article collects typical usage examples of the numpy.average function in Python. If you are wondering how exactly to use numpy.average, how to call it, or what real-world usage looks like, the curated code examples below may help.



Shown below are 20 code examples of the average function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
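Before the collected examples, here is a minimal sketch (not taken from any of the projects below, with invented input values) of what numpy.average itself does. Unlike numpy.mean, it accepts a weights argument, and returned=True additionally yields the sum of the weights:

import numpy as np

data = np.array([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])

# Plain average over all elements (equivalent to np.mean here).
print(np.average(data))                            # 3.5

# Weighted average along axis 1; weights must match that axis.
weights = np.array([0.2, 0.3, 0.5])
print(np.average(data, axis=1, weights=weights))   # [2.3 5.3]

# returned=True also gives the sum of the weights, which is handy
# when merging partial averages later.
avg, wsum = np.average(data, axis=1, weights=weights, returned=True)
print(avg, wsum)                                   # [2.3 5.3] [1. 1.]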

Example 1: mkhist

    def mkhist(fname, xmin, xmax, delta, ihist):
        # nbin, nblock, hist and nb_data are module-level globals in the
        # source project (mywham.py).
        xdata = []
        if os.path.exists(fname + ".gz"):
            import gzip

            fp = gzip.open(fname + ".gz", "rt")  # text mode so lines decode to str
        else:
            fp = open(fname)
        for line in fp:
            time, x = map(float, line.strip().split()[:2])
            xdata.append(x)
        x = np.array(xdata)
        xbins = [xmin + i * delta for i in range(nbin + 1)]
        hist[ihist], edges = np.histogram(x, bins=xbins, range=(xmin, xmax))
        nb_data[ihist] = int(np.sum(hist[ihist, :]))

        print("statistics for timeseries # ", ihist)
        print("minx:", "%8.3f" % np.min(x), "maxx:", "%8.3f" % np.max(x))
        print("average x", "%8.3f" % np.average(x), "rms x", "%8.3f" % np.std(x))
        print("statistics for histogram # ", ihist)
        print(int(np.sum(hist[ihist, :])), "points in the histogram")
        print("average x", "%8.3f" % (
            np.sum([hist[ihist, i] * (edges[i] + edges[i + 1]) / 2 for i in range(nbin)]) / np.sum(hist[ihist])
        ))
        print()

        # Block averaging: split the series into nblock blocks and estimate
        # the variance of the mean. (The original slice dropped the k*block
        # offset, which made the blocks overlap.)
        block = len(x) // nblock
        var = (
            1.0
            / (nblock * (nblock - 1))
            * np.sum([np.average((x[k * block : (k + 1) * block] - np.average(x)) ** 2) for k in range(nblock)])
        )
        return var
Developer: sunhwan, Project: NAMD-replica, Lines: 32, Source file: mywham.py


Example 2: tabular_td_n_online

def tabular_td_n_online(states, actions, generator_class, generator_args, n, alpha):
	gamma = 1
	rms_error = np.zeros(100)
	for i in range(100):
		values = {state: 0 for state in states}
		policies = {state: {action: 1.0/len(actions) for action in actions} for state in states}
		errors = []
		for j in range(10):
			episode_states = []
			rewards = []
			generator = generator_class(*generator_args)
			current_state = generator.state
			while True:
				action, next_state, reward = generator.step(policies, current_state)
				episode_states.append(current_state)
				rewards.append(reward)
				if next_state is None:
					break
				current_state = next_state
			# online returns
			for t, state in enumerate(episode_states):
				returns = 0
				for t_s in range(n):
					if t+t_s < len(episode_states):
						returns += gamma**t_s*rewards[t+t_s]
				if t+n < len(episode_states):
					last_episode_value = values[episode_states[t+n]]
				else:
					last_episode_value = 0
				values[state] += alpha*(returns+last_episode_value-values[state])
			errors.append(np.average([(values[state]-(state+1)/10.0+1)**2 for state in states])**0.5)
		rms_error[i] = np.average(errors)
	return np.average(rms_error)
Developer: jaisanliang, Project: Machine-Learning, Lines: 33, Source file: chapter7.py


Example 3: testEncodeUnrelatedAreas

  def testEncodeUnrelatedAreas(self):
    """
    assert unrelated areas don't share bits
    (outside of chance collisions)
    """
    avgThreshold = 0.3

    maxThreshold = 0.12
    overlaps = overlapsForUnrelatedAreas(1499, 37, 5)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.12
    overlaps = overlapsForUnrelatedAreas(1499, 37, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.17
    overlaps = overlapsForUnrelatedAreas(999, 25, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)

    maxThreshold = 0.25
    overlaps = overlapsForUnrelatedAreas(499, 13, 10)
    self.assertLess(np.max(overlaps), maxThreshold)
    self.assertLess(np.average(overlaps), avgThreshold)
Developer: mewbak, Project: nupic, Lines: 26, Source file: coordinate_test.py


Example 4: average_data

def average_data(data):
    """
    Find mean and std. deviation of data returned by ``simulate``.
    """
    numnodes = data['nodes']
    its = data['its']
    its_mean = numpy.average(its)
    its_std = math.sqrt(numpy.var(its))
    dead = data['dead']
    dead_mean = 100.0*numpy.average(dead)/numnodes
    dead_std = 100.0*math.sqrt(numpy.var(dead))/numnodes
    immune = data['immune']
    immune_mean = 100.0*numpy.average(immune)/numnodes
    immune_std = 100.0*math.sqrt(numpy.var(immune))/numnodes
    max_contam = data['max_contam']
    max_contam_mean = 100.0*numpy.average(max_contam)/numnodes
    max_contam_std = 100.0*math.sqrt(numpy.var(max_contam))/numnodes
    normal = data['normal']
    normal_mean = 100.0*numpy.average(normal)/numnodes
    normal_std = 100.0*math.sqrt(numpy.var(normal))/numnodes
    return {'its': (its_mean, its_std),
            'nodes': numnodes,
            'dead': (dead_mean, dead_std),
            'immune': (immune_mean, immune_std),
            'max_contam': (max_contam_mean, max_contam_std),
            'normal': (normal_mean, normal_std)}
Developer: 3lectrologos, Project: sna, Lines: 26, Source file: diffuse.py


Example 5: get_reference_pt

 def get_reference_pt(self):
     # Reference point for a compound object is the average of all
     # its constituents' reference points
     points = numpy.array([ obj.get_reference_pt() for obj in self.objects ])
     t_ = points.T
     x, y = numpy.average(t_[0]), numpy.average(t_[1])
     return (x, y)
Developer: Cadair, Project: ginga, Lines: 7, Source file: CompoundMixin.py


Example 6: assign_nearest_nbh

    def assign_nearest_nbh(self, query_doc):

        block_id, query_words, doc_words = query_doc
        query_vector = self.vectorize(query_words)
        doc_vector = self.vectorize(doc_words)
        #distance = emd(query_vector, doc_vector, self.distance_matrix)
        #return block_id, distance

        doc_indices = np.nonzero(doc_vector)[0]
        query_indices = np.nonzero(query_vector)[0]

        query_weights = [self.word_level_idf.get(q_i, 0) for q_i in query_indices]
        doc_weights = [self.word_level_idf.get(d_i, 0) for d_i in doc_indices]

        doc_centroid = np.average([self.embedding.model[self.reverse_vocab[i]] for i in doc_indices], axis=0,
                                  weights=doc_weights)
        query_centroid = np.average([self.embedding.model[self.reverse_vocab[i]] for i in query_indices], axis=0,
                                    weights=query_weights)

        # sklearn euclidean distances may not be a symmetric matrix, so taking
        # average of the two entries
        dist_arr = np.array([[(self.distance_matrix[w_i, q_j] + self.distance_matrix[q_j, w_i]) / 2
                              for w_i in doc_indices] for q_j in query_indices])

        label_assignment = np.argmin(dist_arr, axis=1)
        label_assignment = [(index, l) for index, l in enumerate(label_assignment)]

        distances = [dist_arr[(i,e)] * self.word_level_idf.get(query_indices[i], 1) for i, e in label_assignment]

        distance = (1 - self.alpha) * np.sum(distances) + \
                   self.alpha * sp.spatial.distance.cosine(doc_centroid,query_centroid)
        return block_id, distance
Developer: subhadeepmaji, Project: ml_algorithms, Lines: 32, Source file: WordMover.py


Example 7: calc_precision_recall_fmeasure

 def calc_precision_recall_fmeasure(self):
     """ Computes Precision, Recall, F-measure and Support """
     
         #  precision, recall, F-measure and support for each class at the given thresholds
     for threshold in [10, 30, 50]:
         result = precision_recall_fscore_support(self.y_true, prediction_to_binary(self.y_pred, threshold))
         self.scores['Precision ' + str(threshold) + '%'] = result[0]
         self.scores['Recall ' + str(threshold) + '%'] = result[1]
         self.scores['F-score ' + str(threshold) + '%'] = result[2]
         self.scores['Support'] = result[3]
        
     # Computes precision-recall pairs for different probability thresholds
     self.precision, self.recall, self.thresholds = precision_recall_curve(self.y_true, self.y_pred)    
     #print "precision = " + str(precision)
     #print "recall = " + str(recall)
     #print "thresholds = " +  str(thresholds)
     
     # Compute the area under the precision-recall curve (average precision from prediction scores)
     self.scores['Precision-Recall AUC'] = average_precision_score(self.y_true, self.y_pred)    
     
     
     self.scores['Weighted Precision'] = average_precision_score(self.y_true, self.y_pred, average='weighted') # weighted average precision by support (the number of true instances for each label).
     self.scores['Average Recall'] = np.average(self.recall)
     self.scores['Average Threshold'] = np.average(self.thresholds)
     
     return
Developer: nancyya, Project: Predictors, Lines: 26, Source file: validation.py


Example 8: direction_var

def direction_var(values, weights):
  import numpy
  from scitbx import matrix
  weights = numpy.array(weights)
  valx = numpy.array([x for x, y, z in values])
  valy = numpy.array([y for x, y, z in values])
  valz = numpy.array([z for x, y, z in values])

  # Calculate average x, y, z
  avrx = numpy.average(valx, weights=weights)
  avry = numpy.average(valy, weights=weights)
  avrz = numpy.average(valz, weights=weights)

  # Calculate mean direction vector
  s1m = matrix.col((avrx, avry, avrz)).normalize()

  # Calculate angles between vectors
  angles = []
  for s in values:
    angles.append(s1m.angle(s))

  # Calculate variance of angles
  angles = numpy.array(angles)
  var = numpy.dot(weights, (angles)**2)/numpy.sum(weights)
  return var
Developer: dials, Project: dials_scratch, Lines: 25, Source file: calculate_divergence.py


Example 9: kMeans

def kMeans(k, centres, data, error, return_cost = False):
    # centres (kx2)
    # data (Nx2)
    # error: epsilon
    m = centres[:]
    
    while(True):
        sets = [[] for i in range(k)]
        
        for point in data:
            # Calculate distance
            dist_sq = np.sum((point - m) ** 2, axis = 1)
            # Choose the nearest centre and add point into corresponding set
            sets[np.argmin(dist_sq)].append(point)
            
        temp_m = m[:]
        for i in range(len(sets)):
            if sets[i] != []:
                temp_m[i] = (np.mean(sets[i], axis = 0)) # centroid
            
        temp_m = np.array(temp_m)
        changes = temp_m - m
        m = temp_m
        
        if((changes < error).all()):
            break
    
    if(return_cost):
        costs = []
        for i in range(len(sets)):
            costs.append(np.average(np.sqrt(np.sum((m[i] - sets[i]) ** 2, axis = 1))))
        cost = np.average(costs)
        return m, cost
    else:
        return m
Developer: LYZhelloworld, Project: Courses, Lines: 35, Source file: assignment3.py


Example 10: linearRegression

def linearRegression(segmentedValues):
	print("Linear regression")
	#regression = LinearRegression()
	linRegress = dict()
	for key in segmentedValues.keys():
		x = [x[0] for x in segmentedValues[key]]
		y = [x[1] for x in segmentedValues[key]]
		mean = [float(np.average(x)),float(np.average(y))]
		valuesDict = dict()
		valuesDict['x'] = x
		valuesDict['y'] = y
		valuesFrame = pd.DataFrame(valuesDict)
		try:
			rlmRes = sm.rlm(formula = 'y ~ x', data=valuesFrame).fit()
		except ZeroDivisionError:
			#I have no idea why this occurs. A problem with statsmodels
			#Return None
			print("divide by zero :( ")
			return None
		#Calculate r2_score (unfortunately, rlm does not give this to us)
		x = np.array(x)
		y = np.array(y)
		#Get the predicted values of Y
		y_pred = x*rlmRes.params.x+rlmRes.params.Intercept
		score = r2_score(y, y_pred)
		#These should both be positive -- put in abs anyway
		slopeConfInterval = abs(float(rlmRes.params.x) - float(rlmRes.conf_int(.005)[0].x))
		intConfInterval = abs(float(rlmRes.params.Intercept) - float(rlmRes.conf_int(.005)[0].Intercept))
		#Slope, Intercept, R^2, num of values, confidenceIntervals, mean of cluster
		linRegress[key] = [rlmRes.params.x, rlmRes.params.Intercept, score, len(x), [slopeConfInterval, intConfInterval], mean]
		print("Key: "+str(key)+" Slope: "+str(rlmRes.params.x)+" Intercept: "+str(rlmRes.params.Intercept)+"R2 Score: "+str(score)+" Num vals: "+str(len(x))+" confidence: "+str(slopeConfInterval)+", "+str(intConfInterval)+" mean: "+str(mean))
	return linRegress
Developer: ankitagarwal, Project: cscie-81_final, Lines: 32, Source file: truckAnalysis.py


Example 11: randomized_auto_const_bg

    def randomized_auto_const_bg(self, amount):
        """ Automatically determine background. Only consider a randomly
        chosen subset of the image.
        
        Parameters
        ----------
        amount : int
            Size of random sample that is considered for calculation of
            the background.
        """
        cols = [randint(0, self.shape[1] - 1) for _ in range(amount)]

        # pylint: disable=E1101,E1103
        data = self.astype(to_signed(self.dtype))
        # Subtract average value from every frequency channel.
        tmp = (data - np.average(self, 1).reshape(self.shape[0], 1))
        # Get standard deviation at every point of time.
        # Need to convert because otherwise this class's __getitem__
        # is used which assumes two-dimensionality.
        tmp = tmp[:, cols]
        sdevs = np.asarray(np.std(tmp, 0))

        # Get indices of values with lowest standard deviation.
        cand = sorted(range(amount), key=lambda y: sdevs[y])
        # Only consider the best 5 %.
        realcand = cand[:max(1, int(0.05 * len(cand)))]

        # Average the best 5 %
        bg = np.average(self[:, [cols[r] for r in realcand]], 1)

        return bg.reshape(self.shape[0], 1)
Developer: Waino, Project: sunpy, Lines: 31, Source file: spectrogram.py


Example 12: genstats

def genstats():
    # Returns a list of dictionaries, one per (reporter, util) pair, each
    # holding averages of the form:
    # {
    #     "reporter": "",
    #     "util": "",
    #     "time_stddev": "",
    #     "time_avg": "",
    #     "vertices_avg": "",
    #     "edges_avg": "",
    # }
    global db
    averages = []

    for (reporter, util), value in db.items():
        # Drop False/None entries from every measurement list.
        value = {k: [x for x in v if not (x is False or x is None)]
                 for k, v in value.items()}

        averages.append({
            "reporter": reporter,
            "util": util,
            "time_stddev": np.std(value["time"], dtype=np.float64),
            "time_avg": np.average(value["time"]),
            "vertices_avg": np.average(value["vertices"]) if reporter != "none" else 0,
            # The original put the conditional inside np.average(), which
            # averaged the scalar 0; the intent is clearly this:
            "edges_avg": np.average(value["edges"]) if reporter != "none" else 0,
            "timedout_count": sum(value["timedout"])
        })

    return averages
Developer: hasanatkazmi, Project: provenance-stats, Lines: 29, Source file: mkstats.py


Example 13: processLanes

def processLanes(lane_ids_as_string, row, junction, isIncomingLane):
    #append an empty row if there are no lanes in this junction    
    if (lane_ids_as_string==""):
        appendEmptyValuesToRow(row)
        return
    edge_prios=[]
    edge_types=[]
    lane_lengths=[]
    lane_speeds=[]    
    lane_id_list= lane_ids_as_string.split(" ")    
    for l_id in lane_id_list:
        try:            
            lane= lane_table[l_id]
            edge= lane.getparent()
            if isIncomingLane:    
                edge_types.append( edge.get("type"))
                edge_prios.append(float(edge.get("priority")))
            lane_lengths.append(float(lane.get("length")))
            lane_speeds.append(float(lane.get("speed")))
        except:
            print ("error with lane_ids: '{}', l_id:'{}' junction_id:'{}'".format(lane_ids_as_string,
                   l_id, row[0]))
            raise
        
    row.append(np.average(lane_speeds))
    row.append(np.std(lane_speeds))
    row.append(np.average(lane_lengths))
    row.append(np.std(lane_lengths))
    if isIncomingLane:        
        row.append(edge_types)
        row.append(np.average(edge_prios))
    else:
        row.append(None)
        row.append(-1)
    row.append(len(lane_id_list))
Developer: danielpaulus, Project: udacity, Lines: 35, Source file: dataset-import.py


Example 14: AverageBar

def AverageBar(indir='/Volumes/Documents/colbrydi/Documents/DirksWork/chamview/ChamB/'):
    tot = 0.0
    R = np.array([0, 0, 0])
    G = np.array([0, 0, 0])
    B = np.array([0, 0, 0])
    for root, dirs, filenames in os.walk(indir):
        filenames.sort()
        for f in filenames:
            if fnmatch.fnmatch(f,'0*.jpeg'):
                im = readim(os.path.join(root,f))
                sz = im.shape[0]
                #print(im.shape)
                r = np.zeros((sz,1))
                g = np.zeros((sz,1))
                b = np.zeros((sz,1))
                r[:,0] = np.average(im[:,:,0],1)
                g[:,0] = np.average(im[:,:,1],1)
                b[:,0] = np.average(im[:,:,2],1)
                if tot==0:
                    R = r
                    G = g
                    B = b
                else:
                    R = np.append(R, r, axis=1)
                    G = np.append(G, g, axis=1)
                    B = np.append(B, b, axis=1)
                tot=tot+1
    if tot==0:
        print('ERROR - No files found in '+indir)
        return '' 
    im3 = np.zeros((R.shape[0],R.shape[1], 3))
    im3[:,:,0] = R
    im3[:,:,1] = G
    im3[:,:,2] = B 
    return im3
Developer: colbrydi, Project: VideoBar, Lines: 35, Source file: AverageBar.py


Example 15: tabular_td_lambda_offline

def tabular_td_lambda_offline(states, actions, generator_class, generator_args, l, alpha):
	gamma = 1
	rms_error = np.zeros(100)
	for i in range(100):
		values = {state: 0 for state in states}
		policies = {state: {action: 1.0/len(actions) for action in actions} for state in states}
		errors = []
		for j in range(10):
			episode_states = []
			rewards = []
			generator = generator_class(*generator_args)
			current_state = generator.state
			while True:
				action, next_state, reward = generator.step(policies, current_state)
				episode_states.append(current_state)
				rewards.append(reward)
				if next_state is None:
					break
				current_state = next_state
			# offline returns
			new_values = {state: values[state] for state in states}
			z = {state: 0 for state in states}
			for t, state in enumerate(episode_states):
				z[state] += 1
				if t < len(episode_states) - 1:
					delta = rewards[t]+gamma*values[episode_states[t+1]]-values[state]
				else:
					delta = rewards[t]-values[state]
				for state in states:
					new_values[state] += alpha*delta*z[state]
					z[state] *= (gamma*l)
			values = new_values
			errors.append(np.average([(values[state]-(state+1)/10.0+1)**2 for state in states])**0.5)
		rms_error[i] = np.average(errors)
	return np.average(rms_error)
Developer: jaisanliang, Project: Machine-Learning, Lines: 35, Source file: chapter7.py


Example 16: Bplot

def Bplot(data,label,ylabel,trueVal):
    fig = plt.figure(dpi=600)
    ax = plt.subplot(111)
    bp = plt.boxplot(data, notch=0, sym='o', vert=1, whis=1.5,patch_artist=True)
    plt.setp(bp['boxes'], color='black',linewidth=1.5,facecolor='darkkhaki')
    plt.setp(bp['whiskers'], color='black',linewidth=1.5)
    plt.setp(bp['caps'], color='black',linewidth=1.5)
    plt.setp(bp['medians'], color='darkgreen',linewidth=1.5)
    plt.setp(bp['fliers'], color='grey', marker='o')
    ax.axhline(y=trueVal,xmin=0,xmax=1,c="r",linewidth=2.0,zorder=0,linestyle='--')

    ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',alpha=0.8)
    ax.set_axisbelow(True)
    ax.set_ylabel(ylabel,fontsize = 24)
#     ax.set_xlabel(r'Variability',fontsize = 24)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize(18)
    ax.set_xticklabels(label,fontsize=18)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
       
    for i in range(len(data)):
        med = bp['medians'][i]
        plt.plot([np.average(med.get_xdata())], [np.average(data[i])],color='r', marker='*', markeredgecolor='k',markersize=10,label="Mean")
    
    fig.tight_layout()
    savingTitle = ylabel.translate(str.maketrans('', '', '${}'))  # strip '$', '{', '}' (Python 3 str.translate)
    fig.savefig(''.join(['Plots/',funcFolder,'/Boxplots/%s.eps'])%(savingTitle),format='eps')
Developer: tmawyin, Project: SensitivityMaterials, Lines: 28, Source file: MetalPlots.py


Example 17: main

def main():
    train = pd.read_csv('train.csv', index_col=0)  # DataFrame.from_csv was removed from pandas
    places_index = train['place_id'].values

    places_loc_sqr_wei = []
    for i, place_id in enumerate(train['place_id'].unique()):
        if not i % 100:
            print(i)
        place_df = train.iloc[places_index == place_id]
        place_weights_acc_sqred = 1 / (place_df['accuracy'].values ** 2)

        places_loc_sqr_wei.append([place_id,
                                   np.average(place_df['x'].values, weights=place_weights_acc_sqred),
                                   np.std(place_df['x'].values),
                                   np.average(place_df['y'].values, weights=place_weights_acc_sqred),
                                   np.std(place_df['y'].values),
                                   np.average(np.log(place_df['accuracy'].values)),
                                   np.std(np.log(place_df['accuracy'].values)),
                                   place_df.shape[0]])

        # print(places_loc_sqr_wei[-1])
        # plt.hist2d(place_df['x'].values, place_df['y'].values, bins=100)
        # plt.show()
        plt.hist(np.log(place_df['accuracy'].values), bins=20)
        plt.show()
    places_loc_sqr_wei = np.array(places_loc_sqr_wei)
    column_names = ['x_mean', 'x_sd', 'y_mean', 'y_sd', 'accuracy_mean', 'accuracy_sd', 'n_persons']
    places_loc_sqr_wei = pd.DataFrame(data=places_loc_sqr_wei[:, 1:], index=places_loc_sqr_wei[:, 0],
                                      columns=column_names)

    now = str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M"))
    places_loc_sqr_wei.to_csv('places_loc_sqr_weights_%s.csv' % now)
Developer: yairbeer, Project: kaggle_fb5, Lines: 32, Source file: places_calculation_v2.py


Example 18: read_midi_oifits

def read_midi_oifits(f,lam,dlam,phot=False):
	hdu=fits.open(f)
	w=hdu[3].data
	ww=w["EFF_WAVE"]
	ix=(ww>lam-dlam)&(ww<lam+dlam)

	v=hdu[4].data

	if phot:
		vv=v["CFLUX"]
		vv/=phot
		vv_noise=v["CFLUXERR"]
		vv_noise/=phot
	else:
		vv = v["VISAMP"]
		vv_noise = v["VISAMPERR"]

	vis = np.average(vv[:,ix],axis=1)
	## average noise and divide by sqrt(n) for sample average
	vis_noise = np.average(vv_noise[:,ix],axis=1)/np.sqrt(np.sum(ix))
	
	u=v["UCOORD"]
	v=v["VCOORD"]
	bl=np.sqrt(u**2+v**2)
	pa=np.rad2deg(np.arctan(u/v))
	return(bl,pa,u,v,vis,vis_noise)
Developer: astroleo, Project: img2vis, Lines: 26, Source file: img2vis.py


Example 19: _fitter_worker

    def _fitter_worker(self, tasks, coords, subset_coords, masses, subset_masses, rmsdmat, pbar_counter):
        '''
        Fitter RMSD Matrix calculator. See encore.confdistmatrix.RMSDMatrixGenerator._fitter_worker for details.
        '''

        if subset_coords is None:
            for i,j in trm_indeces(tasks[0],tasks[1]):
                coords[i] -= average(coords[i], axis=0, weights=masses)
                coords[j] -= average(coords[j], axis=0, weights=masses)
                weights = asarray(masses)/mean(masses)
                # Triangular-matrix index must be an int: use // (Python 3's / gives a float).
                rmsdmat[(i+1)*i//2+j] = - rmsd(coords[i],coords[j],weights=weights)
                pbar_counter.value += 1
        else:
            for i,j in trm_indeces(tasks[0],tasks[1]):
                #masses = asarray(masses)/mean(masses)
                summasses = sum(masses)
                com_i = average(subset_coords[i], axis=0, weights=subset_masses)
                translated_i = coords[i] - com_i
                subset1_coords = subset_coords[i] - com_i
                com_j = average(subset_coords[j], axis=0, weights=subset_masses)
                translated_j = coords[j] - com_j
                subset2_coords = subset_coords[j] - com_j
                rotamat = rotation_matrix(subset1_coords, subset2_coords, subset_masses)[0]
                rotated_i = transpose(dot(rotamat, transpose(translated_i)))
                rmsdmat[(i+1)*i//2+j] = MinusRMSD(rotated_i.astype(float64), translated_j.astype(float64), coords[j].shape[0], masses, summasses)
                pbar_counter.value += 1
Developer: encore-similarity, Project: mdanalysis, Lines: 26, Source file: confdistmatrix.py


Example 20: estimate

    def estimate(self):
        """ returns mean and variance """
        pos = self.particles[:, 0:2]
        mu = np.average(pos, weights=self.weights, axis=0)
        var = np.average((pos - mu)**2, weights=self.weights, axis=0)

        return mu, var
Developer: GeorgeMcIntire, Project: Kalman-and-Bayesian-Filters-in-Python, Lines: 7, Source file: pf_internal.py



Note: The numpy.average examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms; the snippets were selected from open-source projects contributed by various developers. Copyright of the code remains with the original authors; refer to each project's License before redistributing or reusing it. Do not reproduce this article without permission.

