
Python numpy.apply_along_axis Function Code Examples


This article collects typical usage examples of Python's numpy.apply_along_axis function. If you are wondering what apply_along_axis does, how to call it, or what real-world usage looks like, the curated code examples below should help.



The following shows 20 code examples of the apply_along_axis function, ordered by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Python code examples.
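Before the project examples, here is a minimal orientation sketch (added for context, not taken from any of the projects below): np.apply_along_axis(func1d, axis, arr) applies a function that expects a 1-D array to every 1-D slice of arr taken along the given axis.

import numpy as np

# Minimal illustration of np.apply_along_axis (orientation only, not from the projects below).
arr = np.array([[1, 2, 3],
                [4, 5, 6]])

# axis=1: func1d sees each row; axis=0: func1d sees each column.
row_range = np.apply_along_axis(lambda x: x.max() - x.min(), 1, arr)  # array([2, 2])
col_sum = np.apply_along_axis(np.sum, 0, arr)                         # array([5, 7, 9])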

Example 1: handle_message

def handle_message(message):
	cols = list(message['data'].keys())
	
	x = message['data']

	df_cos = df_sub[cols].append(x, ignore_index = True)


	X = df_cos.values

	user_array = X[-1]
	hood_matrix = X[:-1]

	max_array = hood_matrix.max(axis = 1)
	min_array = hood_matrix.min(axis = 1)

	def translate(user_value, col_min, col_max):
		NewRange = col_max - col_min
		return (((user_value - (-1)) * NewRange) / 2) + col_min	

	user_array = [translate(x,y,z) for x,y,z in zip(user_array,min_array, max_array)]

	if len(cols) == 1:
		cs_array = np.apply_along_axis(lambda x: abs(x[0] - user_array[0]), 1, hood_matrix)
	else:
		cs_array = np.apply_along_axis(lambda x: euclidean(x, user_array), 1, hood_matrix)

	print cs_array
	max_val, min_val = max(cs_array), min(cs_array)
	color_values = np.linspace(min_val, max_val, 10)

	map_data = dict(zip(ids, cs_array))
	emit('new clusters',  map_data, color_values.tolist())
Developer: easadler, Project: hood_app, Lines: 33, Source: routes.py


Example 2: predict_log_proba

 def predict_log_proba(self, XB=None, XN=None ):
     if XB is not None and XN is not None:
         return np.array([self.predicao_log_prob(XB[i], XN[i]) for i in range(XB.shape[0])])
     elif XB is not None:
         return np.apply_along_axis(self.pred_log_prob_Bernoulli, 1,XB )
     else:
         return np.apply_along_axis(self.pred_log_prob_Normal, 1, XN)
Developer: TeoCalvo, Project: MyPython, Lines: 7, Source: webayes.py


Example 3: cont_c

    def cont_c(self, percent=0.9, N=None):  # bug? check axis number 0 vs 1 here
        """Return the contribution of each column."""

        if not hasattr(self, 'G'):
            self.fs_c(N=self.rank)  # generate G
        return np.apply_along_axis(lambda _: _/self.L[:N], 1,
                np.apply_along_axis(lambda _: _*self.c, 0, self.G[:, :N]**2))
Developer: WojciechMigda, Project: mca, Lines: 7, Source: mca.py


Example 4: getMostInformativeInstances

	def getMostInformativeInstances(self,classifier,unlabelledSet,vec):
		sorted_collated=[]
		if self.__uncertaintyMeasure=="randomSampling":
			# random sampling 
			if len(unlabelledSet)>=250:
				return random.sample(zip(unlabelledSet,unlabelledSet),self.__numberEachIteration)
			else:
				return zip(unlabelledSet,unlabelledSet)
		if self.__learnerType=="LogisticRegression":
			unlabelledSet_fitted = vec.transform(unlabelledSet)
			probs = classifier.predict_proba(unlabelledSet_fitted) 
			uncertainties = np.apply_along_axis( self.determineUncertainty, axis=1, arr=probs )

			collated = zip(unlabelledSet,uncertainties)
			if self.__uncertaintyMeasure=="entropy" or self.__uncertaintyMeasure=="leastConfident":
				sorted_collated = sorted(collated, key=lambda tup: tup[1], reverse=True)
			elif self.__uncertaintyMeasure=="smallestMargin":
				sorted_collated = sorted(collated, key=lambda tup: tup[1])
			return sorted_collated[:self.__numberEachIteration]
		elif self.__uncertaintyMeasure=="hyperplane":
			unlabelledSet_fitted = vec.transform(unlabelledSet)
			distances = classifier.decision_function(unlabelledSet_fitted) 
			uncertainties = np.apply_along_axis( self.determineUncertainty, axis=1, arr=distances )
			collated = zip(unlabelledSet,uncertainties)
			sorted_collated = sorted(collated, key=lambda tup: tup[1])
			return sorted_collated[:self.__numberEachIteration]
Developer: jleu1656, Project: ENTX, Lines: 26, Source: ActiveLearner.py


Example 5: build_seq_block

def build_seq_block(sub_num, stims, sub_A_sd, sub_B_sd, block_size):
    # block stimulus list and shuffle within each block
    q = len(stims.index)
    stims = [stims.iloc[:q//2,], stims.iloc[q//2:,]]
    stims = [x.reindex(np.random.permutation(x.index)) for x in stims]
    shuffle(stims)
    stims = [[x.iloc[k:(k+block_size),] for k in range(0, q//2, block_size)] for x in stims]
    stims = pd.concat([val for pair in zip(stims[0], stims[1]) for val in pair])

    # inter-stimulus interval is randomly selected from [1,2,3,4]
    # the first ISI is removed (so sequence begins with a stim presentation)
    ISI = np.delete(np.repeat(2, len(stims.index), axis=0), 0)

    # create matrix of stimulus predictors and add ISIs
    X = np.diag(stims['effect'])
    X = np.apply_along_axis(func1d=insert_ISI, axis=0, arr=X, ISI=ISI)

    # reorder the columns so they are in the same order (0-39) for everyone
    X = X[:,[list(stims['stim']).index([i]) for i in range(len(stims.index))]]

    # now convolve all predictors with double gamma HRF
    X = np.apply_along_axis(func1d=np.convolve, axis=0, arr=X, v=spm_hrf(1))

    # build and return this subject's dataframe
    df = pd.DataFrame(X)
    df['time'] = range(len(df.index))
    df['sub_num'] = sub_num
    # df['sub_intercept'] = np.asscalar(np.random.normal(size=1))
    df['sub_A'] = np.asscalar(np.random.normal(size=1, scale=sub_A_sd))
    df['sub_B'] = np.asscalar(np.random.normal(size=1, scale=sub_B_sd))
    return df
Developer: tyarkoni, Project: nipymc, Lines: 31, Source: xsim.py


Example 6: test_reproject

 def test_reproject(self):
     s = self.optgp.sample(10, fluxes=False).as_matrix()
     proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
     assert all(self.optgp.validate(proj) == "v")
     s = numpy.random.rand(10, self.optgp.warmup.shape[1])
     proj = numpy.apply_along_axis(self.optgp._reproject, 1, s)
     assert all(self.optgp.validate(proj) == "v")
Developer: jlerman44, Project: cobrapy_main, Lines: 7, Source: test_flux_analysis.py


Example 7: close_gripper

    def close_gripper(self, lr, step_viewer=1, max_vel=.02, close_dist_thresh=0.004, grab_dist_thresh=0.005):
        print 'CLOSING GRIPPER'
        # generate gripper finger trajectory
        joint_ind = self.robot.GetJoint("%s_gripper_l_finger_joint" % lr).GetDOFIndex()
        start_val = self.robot.GetDOFValues([joint_ind])[0]
        print 'start_val: ', start_val
        # execute gripper finger trajectory
        dyn_bt_objs = [bt_obj for sim_obj in self.dyn_sim_objs for bt_obj in sim_obj.get_bullet_objects()]
        next_val = start_val
        while next_val:
            flr2finger_pts_grid = self._get_finger_pts_grid(lr)
            ray_froms, ray_tos = flr2finger_pts_grid['l'], flr2finger_pts_grid['r']

            # stop closing if any ray hits a dynamic object within a distance of close_dist_thresh from both sides
            next_vel = max_vel
            for bt_obj in dyn_bt_objs:
                from_to_ray_collisions = self.bt_env.RayTest(ray_froms, ray_tos, bt_obj)
                to_from_ray_collisions = self.bt_env.RayTest(ray_tos, ray_froms, bt_obj)
                rays_dists = np.inf * np.ones((len(ray_froms), 2))
                for rc in from_to_ray_collisions:
                    ray_id = np.argmin(np.apply_along_axis(np.linalg.norm, 1, ray_froms - rc.rayFrom))
                    rays_dists[ray_id, 0] = np.linalg.norm(rc.pt - rc.rayFrom)
                for rc in to_from_ray_collisions:
                    ray_id = np.argmin(np.apply_along_axis(np.linalg.norm, 1, ray_tos - rc.rayFrom))
                    rays_dists[ray_id, 1] = np.linalg.norm(rc.pt - rc.rayFrom)
                colliding_rays_inds = np.logical_and(rays_dists[:, 0] != np.inf, rays_dists[:, 1] != np.inf)
                if np.any(colliding_rays_inds):
                    rays_dists = rays_dists[colliding_rays_inds, :]
                    if np.any(np.logical_and(rays_dists[:, 0] < close_dist_thresh,
                                             rays_dists[:, 1] < close_dist_thresh)):
                        next_vel = 0
                    else:
                        next_vel = np.minimum(next_vel, np.min(rays_dists.sum(axis=1)))
            if next_vel == 0:
                break
            next_val = np.maximum(next_val - next_vel, 0)

            self.robot.SetDOFValues([next_val], [joint_ind])
            self.step()
            if self.viewer and step_viewer:
                self.viewer.Step()
        handles = []
        # add constraints at the points where a ray hits a dynamic link within a distance of grab_dist_thresh
        for bt_obj in dyn_bt_objs:
            from_to_ray_collisions = self.bt_env.RayTest(ray_froms, ray_tos, bt_obj)
            to_from_ray_collisions = self.bt_env.RayTest(ray_tos, ray_froms, bt_obj)
            
            for i in range(ray_froms.shape[0]):
                self.viewer.Step()
            ray_collisions = [rc for rcs in [from_to_ray_collisions, to_from_ray_collisions] for rc in rcs]

            for rc in ray_collisions:
                if rc.link == bt_obj.GetKinBody().GetLink('rope_59'):
                    self.viewer.Step()
                if np.linalg.norm(rc.pt - rc.rayFrom) < grab_dist_thresh:
                    link_tf = rc.link.GetTransform()
                    link_tf[:3, 3] = rc.pt
                    self._add_constraints(lr, rc.link, link_tf)
        if self.viewer and step_viewer:
            self.viewer.Step()
Developer: antingshen, Project: lfd, Lines: 60, Source: simulation.py


Example 8: center_and_norm_table

    def center_and_norm_table(self,table,col_mean=None, col_norm=None,
                    row_mean=None, row_norm=None, table_norm=None):
        """
        Using the norming parameters set in the constructor preprocess each
        subtable.

        Parameters
        ----------
        table       : any subtable
        col_mean    : An optional row vector of column means
        col_norm    : An optional row vector of row norms
        row_mean    : an optional column vector of row means
        row_norm    : an optional column vector of row norms
        table_norm  : optional value to divide entire table by for normalization
        """
        if table.shape[0] == 0:
            return (table,)
        t = table.samples
        # first columns
        if self._col_center:
            if col_mean is not None:
                pass
            else:
                col_mean = np.mean(t, 0)
            t = t - col_mean
        if self._col_norm:
            if col_norm is not None:
                pass
            elif self._col_norm=='l2':
                col_norm = np.apply_along_axis(np.linalg.norm,0,t)
            elif self._col_norm=='std':
                col_norm = np.apply_along_axis(np.std,0,t)
            t = t / col_norm
        # then rows
        if self._row_center:
            if row_mean is not None:
                pass
            else:
                row_mean = np.mean(t.T, 0)
            t = (t.T - row_mean).T
        if self._row_norm:
            if row_norm is not None:
                pass
            elif self._row_norm=='l2':
                row_norm = np.apply_along_axis(np.linalg.norm,0,t.T)
            elif self._row_norm=='std':
                row_norm = np.apply_along_axis(np.std,0,t.T)
            t = (t.T / row_norm).T

        # whole table norm
        if self._table_norm:
            if table_norm is not None:
                pass
            elif self._table_norm == 'l2':
                table_norm = np.linalg.norm(t)
            elif self._table_norm == 'std':
                table_norm = np.std(t)
            t = t / table_norm
        table.samples = t
        return table, col_mean, col_norm, row_mean, row_norm, table_norm
Developer: mfalkiewicz, Project: PyMVPA, Lines: 60, Source: statis.py


Example 9: testLBP

def testLBP (format, formatMask, path, output) :
    dataset = pd.read_csv(path)
    idxCls = dataset['idx']
   # cnts = dataset['Cnt']
    fnList = dataset['path']
  #  out = open(output, 'w')
    lbps = list(map(lambda x: local_binary_pattern(cv2.bitwise_and(imread(format.format(x)),imread(formatMask.format(x))), lbpP, lbpR, lbpMethod), fnList))
    histograms = list(map(lambda x:  np.histogram(x, bins=range(int(np.max(lbps)) + 1))[0], lbps))
    distances = prw.pairwise_distances(histograms, metric='l1')
    np.fill_diagonal(distances, math.inf)
    guessedClasses = np.apply_along_axis(lambda x: np.argmin(x), 1, distances)
    scores = np.apply_along_axis(lambda x: np.min(x), 1, distances)
    correct = list(map(lambda i: idxCls[guessedClasses[i]] == idxCls[i], range(0, np.alen(idxCls))))
   # out.write(str(np.average(correct)))
  #  fpr, tpr, thresholds = roc_curve(correct, scores, pos_label=1)
  #  pyplot.plot(tpr, fpr)
   # pyplot.show()
    with open(output + 'lbp_distances.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerows(distances)

    with open(output + 'lbp_guessedClasses.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(guessedClasses)

    with open(output + 'lbp_correct.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(correct)

    with open(output + 'lbp_real.csv', 'w', newline='') as fp:
        a = csv.writer(fp, delimiter=',')
        a.writerow(idxCls)
Developer: mariaTatarintseva, Project: Diploma, Lines: 32, Source: LBPSpeedUp.py


Example 10: observe_cloud

def observe_cloud(pts, radius, upsample=0, upsample_rad=1):
    """
    If upsample > 0, the number of points along the rope's backbone is resampled to be upsample points
    If upsample_rad > 1, the number of points perpendicular to the backbone points is resampled to be upsample_rad points, around the rope's cross-section
    The total number of points is then: (upsample if upsample > 0 else len(self.rope.GetControlPoints())) * upsample_rad
    """
    if upsample > 0:
        lengths = np.r_[0, np.apply_along_axis(np.linalg.norm, 1, np.diff(pts, axis=0))]
        summed_lengths = np.cumsum(lengths)
        assert len(lengths) == len(pts)
        pts = math_utils.interp2d(np.linspace(0, summed_lengths[-1], upsample), summed_lengths, pts)
    if upsample_rad > 1:
        # add points perpendicular to the points in pts around the rope's cross-section
        vs = np.diff(pts, axis=0) # vectors between the current and next points
        vs /= np.apply_along_axis(np.linalg.norm, 1, vs)[:,None]
        perp_vs = np.c_[-vs[:,1], vs[:,0], np.zeros(vs.shape[0])] # perpendicular vectors between the current and next points in the xy-plane
        perp_vs /= np.apply_along_axis(np.linalg.norm, 1, perp_vs)[:,None]
        vs = np.r_[vs, vs[-1,:][None,:]] # define the vector of the last point to be the same as the second to last one
        perp_vs = np.r_[perp_vs, perp_vs[-1,:][None,:]] # define the perpendicular vector of the last point to be the same as the second to last one
        perp_pts = []
        from openravepy import matrixFromAxisAngle
        for theta in np.linspace(0, 2*np.pi, upsample_rad, endpoint=False): # uniformly around the cross-section circumference
            for (center, rot_axis, perp_v) in zip(pts, vs, perp_vs):
                rot = matrixFromAxisAngle(rot_axis, theta)[:3,:3]
                perp_pts.append(center + rot.T.dot(radius * perp_v))
        pts = np.array(perp_pts)
    return pts
Developer: amoliu, Project: lfd, Lines: 27, Source: ropesim.py


Example 11: plot

    def plot(self, filedir=None, file_format='pdf'):
        if filedir is None:
            filedir = self.workdir
        import matplotlib.pyplot as plt
        plt.switch_backend('agg')

        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=0.1, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
        forces = np.array(self.output['forces'])
        maxforce = [np.max(np.apply_along_axis(np.linalg.norm, 1, x)) for x in forces]
        avgforce = [np.mean(np.apply_along_axis(np.linalg.norm, 1, x)) for x in forces]

        if np.max(maxforce) > 0.0 and np.max(avgforce) > 0.0:
            plt.semilogy(maxforce, 'b.-', label='Max force')
            plt.semilogy(avgforce, 'r.-', label='Mean force')
        else:
            plt.plot(maxforce, 'b.-', label='Max force')
            plt.plot(avgforce, 'r.-', label='Mean force')
        plt.xlabel('Ion movement iteration')
        plt.ylabel('Max Force')
        plt.savefig(filedir + os.sep + 'forces.' + file_format)
        plt.clf()

        plt.figure(figsize=(8, 6))
        plt.subplots_adjust(left=0.1, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
        stress = np.array(self.output['stress'])
        diag_stress = [np.trace(np.abs(x)) for x in stress]
        offdiag_stress = [np.sum(np.abs(np.triu(x, 1).flatten())) for x in stress]
        plt.semilogy(diag_stress, 'b.-', label='diagonal')
        plt.semilogy(offdiag_stress, 'r.-', label='off-diagonal')
        plt.legend()
        plt.xlabel('Ion movement iteration')
        plt.ylabel(r'$\sum |stress|$ (diag, off-diag)')
        plt.savefig(filedir + os.sep + 'stress.' + file_format)
Developer: apayne9, Project: PyChemia, Lines: 34, Source: relax.py


Example 12: LS_intersection

    def LS_intersection(self):
        """
        self.line_array represents a system of 2d line equations. Each row represents a different
        observation of a line in map frame on which the pinger lies. Row structure: [x1, y1, x2, y2]
        Calculates the point in the plane with the least cummulative distance to every line
        in self.line_array. For more information, see:
        https://en.wikipedia.org/wiki/Line-line_intersection#In_two_dimensions_2
        """

        def line_segment_norm(line_end_pts):
            assert len(line_end_pts) == 4
            return npl.norm(line_end_pts[2:] - line_end_pts[:2])

        begin_pts = self.line_array[:, :2]
        diffs = self.line_array[:, 2:4] - begin_pts
        norms = np.apply_along_axis(line_segment_norm, 1, self.line_array).reshape(diffs.shape[0], 1)
        rot_left_90 = np.array([[0, -1], [1, 0]])
        perp_unit_vecs = np.apply_along_axis(lambda unit_diffs: rot_left_90.dot(unit_diffs), 1, diffs / norms)
        A_sum = np.zeros((2, 2))
        Ap_sum = np.zeros((2, 1))

        for x, y in zip(begin_pts, perp_unit_vecs):
            begin = x.reshape(2, 1)
            perp_vec = y.reshape(2, 1)
            A = perp_vec.dot(perp_vec.T)
            Ap = A.dot(begin)
            A_sum += A
            Ap_sum += Ap

        res = npl.inv(A_sum).dot(Ap_sum)
        self.pinger_position = Point(res[0], res[1], 0)
        return self.pinger_position
Developer: uf-mil, Project: Navigator, Lines: 32, Source: pinger_finder.py


Example 13: standardize_design

def standardize_design(G, mean_var=None):
    if mean_var is None:
        mean_var = (0., 1./G.shape[1])
        np.apply_along_axis(lambda x: _change_sample_stats(x, (0., 1.)), 0, G)
    else:
        G -= mean_var[0]
        G /= np.sqrt(mean_var[1])
Developer: PMBio, Project: limix, Lines: 7, Source: sampler.py


Example 14: multiStepMC

def multiStepMC(z, price_evolution, anti = False
                , tracker = lambda S_ts : S_ts):
    '''
    multi-step-mc:
    ***NOTE THE STEPS IS DETERMINED BY THE DIMENSION OF Z (which is a np.array)***

    assume equally spaced time steps


    price_evolution: a function that takes an 1d array of Z slice and
                     returns 1d array (+1 size to include s0) of the evlotion
                     of underlyings which based on the Zs

    tracker: a function (takes an array of evolution of underlyings)
            that keep track of features of the evolution of the
            stock price, which could be max/min, or whether a boundary is hitted
    '''

    if anti:
        z = -z

    ## generate the evolution of underlyings for all pathes
    evolutions_ = np.apply_along_axis(price_evolution, 1, z)

    return evolutions_[:,-1], np.apply_along_axis(tracker, 1, evolutions_)
Developer: FranktheTank123, Project: MFE230I-Library, Lines: 25, Source: Monte_Carlo_Fixed_Income.py


Example 15: warn_other_module

 def warn_other_module():
     # Apply along axis is implemented in python; stacklevel=2 means
     # we end up inside its module, not ours.
     def warn(arr):
         warnings.warn("Some warning", stacklevel=2)
         return arr
     np.apply_along_axis(warn, 0, [0])
Developer: nolta, Project: numpy, Lines: 7, Source: test_utils.py


Example 16: preprocess

def preprocess(y, fs, flength, fshift, fnum):
	u"""音響信号の前処理(フレーム化・窓掛け・FFT)をする
    y: 音響信号
    fs: サンプリング周波数
    flength: フレーム長
    fshift: フレームシフト
    fnum: フレーム数
    """
	X = np.zeros([fnum, flength])    # フレーム化後のデータを格納する
	binSize = int(math.floor(fs / X.shape[1]))
	freqBin = np.arange(0, fs, binSize)

	start = 0       # 切り取り開始点
	end = flength - 1   # 切り取り終了点
	for t in range(fnum):   # フレーム化する
		X[t, 0:(flength - 1)] = y[start:end]
		start += fshift
		end += fshift

	""" 関数ham """
	W = np.apply_along_axis(ham, 1, X)          # 窓を掛ける

	S = np.apply_along_axis(np.fft.fft, 1, W)   # FFTする
	spe = np.absolute(S)        # 振幅スペクトル
	angdft = np.angle(S)        # 位相角
	specSum = np.apply_along_axis(np.sum, 1, spe)
	specRate = spe / specSum.reshape((fnum, 1)) * 100   # 振幅比率

	return [S, spe, specRate, angdft, freqBin]
Developer: matsutomo, Project: python_program, Lines: 29, Source: music.py


Example 17: get_taxa_coords

def get_taxa_coords(tax_counts, sample_coords):
    """Returns the PCoA coords of each taxon based on the coords of the samples."""
    # normalize taxa counts along each row/sample (i.e. to get relative abundance)
    tax_counts = apply_along_axis(lambda x: x / float(sum(x)), 0, tax_counts)
    # normalize taxa counts along each column/taxa (i.e. to make PCoA score contributions sum to 1)
    tax_ratios = apply_along_axis(lambda x: x / float(sum(x)), 1, tax_counts)
    return dot(tax_ratios, sample_coords)
Developer: kartoffelpuffer, Project: qiime, Lines: 7, Source: biplots.py


Example 18: get_divergence_diversity_sliding

def get_divergence_diversity_sliding(aft, block_length, VERBOSE=0):
    '''Get local divergence and diversity in a sliding window'''
    cons_ind = Patient.get_initial_consensus_noinsertions(aft, return_ind=True)
    ind_N = cons_ind == 5
    cons_ind[ind_N] = 0
    aft_nonanc = 1.0 - aft[:, cons_ind, np.arange(aft.shape[2])]
    aft_nonanc[:, ind_N] = 0

    aft_var = (aft * (1 - aft)).sum(axis=1)

    struct = np.ones(block_length)

    dg = np.ma.array(np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                                         axis=1, arr=aft_nonanc), hard_mask=True)
    ds = np.ma.array(np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                                         axis=1, arr=aft_var), hard_mask=True)

    # NOTE: normalization happens based on actual coverage
    norm = np.apply_along_axis(lambda x: np.convolve(x, struct, mode='valid'),
                               axis=1, arr=(-aft[:, 0].mask))

    dg.mask = norm < block_length
    dg /= norm

    ds.mask = norm < block_length
    ds /= norm

    x = np.arange(dg.shape[1]) + (block_length - 1) / 2.0

    return (x, dg, ds)
Developer: iosonofabio, Project: hivwholeseq, Lines: 30, Source: store_divergence_diversity_local.py


Example 19: calc_latency_by_stim

def calc_latency_by_stim(rast, stimparams):
	
	stim_psth, _ = Spikes.calc_psth_by_stim(rast, stimparams, bins = np.arange(0, 0.334, 0.001))
	stim_psth_smoo = np.apply_along_axis(myconv, 2, stim_psth)
	stim_peak_times = np.apply_along_axis(np.argmax, 2, stim_psth_smoo)
	
	return stim_peak_times
Developer: r-b-g-b, Project: Lab, Lines: 7, Source: RF.py


Example 20: _spearmanr2

def _spearmanr2(a, b, axis=0):
    """
    Compute all pairwise spearman rank moment correlations between rows
    or columns of a and b

    Parameters
    ----------
    a : (N, M) numpy.ndarray
        The input cases a.
    b : (J, K) numpy.ndarray
        The input cases b. In case of axis == 0: J must equal N;
        otherwise if axis == 1 then K must equal M.
    axis : int
        If 0 the correlation are computed between a and b's columns.
        Otherwise if 1 the correlations are computed between rows.

    Returns
    -------
    cor : (N, J) or (M, K) nd.array
        If axis == 0 then (N, J) matrix of correlations between a x b columns
        else a (N, J) matrix of correlations between a x b rows.

    See Also
    --------
    scipy.stats.spearmanr
    """
    a, b = np.atleast_2d(a, b)
    assert a.shape[axis] == b.shape[axis]
    ar = np.apply_along_axis(stats.rankdata, axis, a)
    br = np.apply_along_axis(stats.rankdata, axis, b)

    return _corrcoef2(ar, br, axis=axis)
Developer: acopar, Project: orange3, Lines: 32, Source: distance.py



Note: The numpy.apply_along_axis examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; for distribution and use, please refer to the corresponding project's license. Do not republish without permission.


