
Python scipy.sum Function Code Examples


This article collects typical usage examples of the scipy.sum function in Python. If you are struggling with questions such as: How exactly is the Python sum function used? How do I call it? What does example code look like? Then the curated function examples below may help.



Below are 20 code examples of the sum function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps our system recommend better Python code examples.
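Before diving into the examples, a minimal sketch of scipy.sum itself may help. Historically scipy.sum was simply a re-export of numpy.sum, and newer SciPy releases have deprecated or removed these top-level NumPy aliases, so the sketch below calls numpy.sum directly; on an older SciPy, import scipy as sp followed by sp.sum(...) behaves the same way. The array values are made up for illustration.

import numpy as np

a = np.array([[1.0, 2.0], [3.0, 4.0]])
np.sum(a)             # sum over all elements -> 10.0
np.sum(a, axis=0)     # column sums -> array([4., 6.])
np.sum(a, axis=1)     # row sums -> array([3., 7.])
np.sum(a, dtype=int)  # accumulate with an integer dtype -> 10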

Example 1: generate_proc_sim

def generate_proc_sim(input_file, weightfile, output_file,
                      meansub=False, degrade=False):
    r"""make the maps with various combinations of beam conv/meansub"""
    print "%s -> %s (beam, etc.)" % (input_file, output_file)
    simmap = algebra.make_vect(algebra.load(input_file))

    if degrade:
        print "performing common resolution convolution"
        beam_data = sp.array([0.316148488246, 0.306805630985, 0.293729620792,
                 0.281176247549, 0.270856788455, 0.26745856078,
                 0.258910010848, 0.249188429031])
        freq_data = sp.array([695, 725, 755, 785, 815, 845, 875, 905],
                             dtype=float)
        freq_data *= 1.0e6
        beam_diff = sp.sqrt(max(1.1 * beam_data) ** 2 - (beam_data) ** 2)
        common_resolution = beam.GaussianBeam(beam_diff, freq_data)
        # Convolve to a common resolution.
        simmap = common_resolution.apply(simmap)

    if meansub:
        print "performing mean subtraction"
        noise_inv = algebra.make_vect(algebra.load(weightfile))
        means = sp.sum(sp.sum(noise_inv * simmap, -1), -1)
        means /= sp.sum(sp.sum(noise_inv, -1), -1)
        means.shape += (1, 1)
        simmap -= means
        # the weights will be zero in some places
        simmap[noise_inv < 1.e-20] = 0.

    # extra sanity?
    simmap[np.isinf(simmap)] = 0.
    simmap[np.isnan(simmap)] = 0.

    print "saving to" + output_file
    algebra.save(output_file, simmap)
Developer: OMGitsHongyu, Project: analysis_IM, Lines: 35, Source: simulate_gbt_aux.py
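The mean-subtraction step in Example 1 computes one weighted mean per frequency slice by summing over both sky axes, then broadcasts it back over the map. Below is a minimal, self-contained sketch of that pattern of nested sum calls, using a made-up (freq, ra, dec) cube and made-up inverse-noise weights, and using numpy so it runs on a current SciPy/NumPy stack.

import numpy as np

rng = np.random.default_rng(1)
simmap = rng.normal(size=(8, 16, 16))       # toy (freq, ra, dec) data cube
noise_inv = rng.uniform(size=simmap.shape)  # toy inverse-noise weights

means = np.sum(np.sum(noise_inv * simmap, -1), -1)  # weighted sum over both sky axes
means /= np.sum(np.sum(noise_inv, -1), -1)          # normalize -> one mean per frequency
simmap -= means[:, None, None]                      # subtract, broadcasting over the map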


Example 2: determine_sign_of_emat

def determine_sign_of_emat(emat,wt_seq):
    """determine what the correct sign is for an energy matrix. We will
    use the assumption that the wild type sequence must be better
    binding than a random sequence.

    INPUTS:
    emat: energy matrix
    wt_seq: wild type sequence of energy matrix

    OUTPUT:
    emat: energy matrix with correct sign
    """
    n_rand = 1000 # number of random sequences to check
    e_rand = sp.zeros(n_rand)
    # convert sequence to matrix
    seq_mat = seq2mat(wt_seq)
    e_wt = sp.sum(emat*seq_mat)

    for i in range(n_rand):
        seq_rand = sp.zeros((4,len(wt_seq)))

        for j in range(len(wt_seq)):
            seq_rand[sp.random.randint(4),j] = 1
        e_rand[i] = sp.sum(emat*seq_rand)
    if e_wt < sp.mean(e_rand):
        return emat
    else:
        return -emat
Developer: irelandb, Project: sortseq, Lines: 28, Source: MCMC_utils.py
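Example 2 scores a sequence by summing an energy matrix against a one-hot encoding of the sequence (sp.sum(emat*seq_mat)) and compares the wild-type score with scores of random sequences. The sketch below restates that idea in a self-contained way; the 4 x L toy energy matrix, the random_onehot helper, and the sequence length are all illustrative stand-ins for the project's own emat, seq2mat and wt_seq.

import numpy as np

rng = np.random.default_rng(0)
seq_len = 10
emat = rng.normal(size=(4, seq_len))        # toy energy matrix, one row per base

def random_onehot(n):
    """One-hot encode a random length-n sequence as a 4 x n matrix."""
    m = np.zeros((4, n))
    m[rng.integers(4, size=n), np.arange(n)] = 1
    return m

wt_mat = random_onehot(seq_len)                          # stand-in for seq2mat(wt_seq)
e_wt = np.sum(emat * wt_mat)                             # energy of the "wild type"
e_rand = np.array([np.sum(emat * random_onehot(seq_len)) for _ in range(1000)])
sign_ok = e_wt < e_rand.mean()                           # same sign test as the example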


Example 3: _do_one_inner_iteration

    def _do_one_inner_iteration(self, inv_val):
        r"""
        Determine which throats are invaded at a given applied capillary
        pressure.

        """
        # Generate a tlist containing boolean values for throat state
        Tinvaded = self['throat.entry_pressure'] <= inv_val
        # Find all pores that can be invaded at specified pressure
        [pclusters, tclusters] = self._net.find_clusters2(mask=Tinvaded,
                                                          t_labels=True)
        if self._AL:
            # Identify clusters connected to invasion sites
            inv_clusters = sp.unique(pclusters[self['pore.inlets']])
        else:
            # All clusters are invasion sites
            inv_clusters = pclusters
        inv_clusters = inv_clusters[inv_clusters >= 0]
        # Find pores on the invading clusters
        pmask = np.in1d(pclusters, inv_clusters)
        # Store current applied pressure in newly invaded pores
        pinds = (self['pore.inv_Pc'] == sp.inf) * (pmask)
        self['pore.inv_Pc'][pinds] = inv_val
        # Find throats on the invading clusters
        tmask = np.in1d(tclusters, inv_clusters)
        # Store current applied pressure in newly invaded throats
        tinds = (self['throat.inv_Pc'] == sp.inf) * (tmask)
        self['throat.inv_Pc'][tinds] = inv_val
        # Store total network saturation
        tsat = sp.sum(self._net['throat.volume'][self['throat.inv_Pc'] <= inv_val])
        psat = sp.sum(self._net['pore.volume'][self['pore.inv_Pc'] <= inv_val])
        total = sp.sum(self._net['throat.volume']) + sp.sum(self._net['pore.volume'])
        self['pore.inv_sat'][pinds] = (tsat + psat)/total
        self['throat.inv_sat'][tinds] = (tsat + psat)/total
Developer: MichaelHoeh, Project: OpenPNM, Lines: 34, Source: __OrdinaryPercolation__.py


Example 4: computeOpenMaxProbability

def computeOpenMaxProbability(openmax_fc8, openmax_score_u):
    """ Convert the scores in probability value using openmax
    
    Input:
    ---------------
    openmax_fc8 : modified FC8 layer from Weibull based computation
    openmax_score_u : degree

    Output:
    ---------------
    modified_scores : probability values modified using OpenMax framework,
    by incorporating degree of uncertainty/openness for a given class
    
    """
    prob_scores, prob_unknowns = [], []
    for channel in range(NCHANNELS):
        channel_scores, channel_unknowns = [], []
        for category in range(NCLASSES):
            channel_scores += [sp.exp(openmax_fc8[channel, category])]
                    
        total_denominator = sp.sum(sp.exp(openmax_fc8[channel, :])) + sp.exp(sp.sum(openmax_score_u[channel, :]))
        prob_scores += [channel_scores/total_denominator ]
        prob_unknowns += [sp.exp(sp.sum(openmax_score_u[channel, :]))/total_denominator]
        
    prob_scores = sp.asarray(prob_scores)
    prob_unknowns = sp.asarray(prob_unknowns)

    scores = sp.mean(prob_scores, axis = 0)
    unknowns = sp.mean(prob_unknowns, axis=0)
    modified_scores =  scores.tolist() + [unknowns]
    assert len(modified_scores) == 1001
    return modified_scores
Developer: abhijitbendale, Project: OSDN, Lines: 32, Source: compute_openmax.py
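For each channel, the normalization in Example 4 is a softmax over the modified class scores with one extra "unknown" term built from the summed uncertainty scores. A minimal single-channel sketch with made-up score vectors (five classes here, purely for illustration) shows that the class probabilities and the unknown probability sum to one.

import numpy as np

fc8_channel = np.array([2.0, 0.5, -1.0, 0.3, 1.2])  # toy modified FC8 scores for one channel
u_channel = np.array([0.1, 0.2, 0.05, 0.3, 0.15])   # toy OpenMax uncertainty scores

denominator = np.sum(np.exp(fc8_channel)) + np.exp(np.sum(u_channel))
class_probs = np.exp(fc8_channel) / denominator       # per-class probabilities
unknown_prob = np.exp(np.sum(u_channel)) / denominator
assert np.isclose(class_probs.sum() + unknown_prob, 1.0)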


Example 5: LML

    def LML(self,params=None,*kw_args):
        """
        calculate LML
        """
        if params is not None:
            self.setParams(params)

        self._update_cache()
        
        start = TIME.time()

        #1. const term
        lml  = self.N*self.P*SP.log(2*SP.pi)

        #2. logdet term
        lml += SP.sum(SP.log(self.cache['Sc2']))*self.N
        lml += 2*SP.log(SP.diag(self.cache['cholB'])).sum()

        #3. quadratic term
        lml += SP.sum(self.cache['LY']*self.cache['LY'])
        lml -= SP.sum(self.cache['WLY']*self.cache['BiWLY'])

        lml *= 0.5

        smartSum(self.time,'lml',TIME.time()-start)
        smartSum(self.count,'lml',1)

        return lml
Developer: PMBio, Project: mtSet, Lines: 28, Source: gp3kronSumLR.py


Example 6: execute

 def execute(self):
     self.power_mat, self.thermal_expectation = self.full_calculation()
     n_chan = self.power_mat.shape[1]
     n_freq = self.power_mat.shape[0]
     # Calculate the mean channel correlations at low frequencies.
     low_f_mat = sp.mean(self.power_mat[1:4 * n_chan + 1,:,:], 0).real
     # Factorize it into principal components.
     e, v = linalg.eigh(low_f_mat)
     self.low_f_mode_values = e
     # Make sure the eigenvalues are sorted.
     if sp.any(sp.diff(e) < 0):
         raise RuntimeError("Eigenvalues not sorted.")
     self.low_f_modes = v
     # Now subtract out the noisiest channel modes and see what is left.
     n_modes_subtract = 10
     mode_subtracted_power_mat = sp.copy(self.power_mat.real)
     mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
     for ii in range(n_modes_subtract):
         mode = v[:,-ii]
         amp = sp.sum(mode[:,None] * mode_subtracted_power_mat, 1)
         amp = sp.sum(amp * mode, 1)
         to_subtract = amp[:,None,None] * mode[:,None] * mode
         mode_subtracted_power_mat -= to_subtract
         auto_power = mode_subtracted_power_mat.view()
         auto_power.shape = (n_freq, n_chan**2)
         auto_power = auto_power[:,::n_chan + 1]
         mode_subtracted_auto_power[ii,:] = sp.mean(auto_power, -1)
     self.subtracted_auto_power = mode_subtracted_auto_power
Developer: OMGitsHongyu, Project: analysis_IM, Lines: 28, Source: noise_power.py


Example 7: tfidf

def tfidf(termFrequency):
	""" The student must code this. """
	gf = sp.sum(termFrequency,axis=1).astype(float)
	p = (termFrequency.T/gf).T
	g = sp.sum(p*sp.log(p+1)/sp.log(len(p[0,:])),axis=1) + 1
	a = (sp.log(termFrequency + 1).T*g).T
	return a
Developer: KathleenF, Project: numerical_computing, Lines: 7, Source: LSI.py
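Example 7 is a log-entropy weighting rather than classic TF-IDF: gf is each term's global frequency, p is the share of a term's occurrences falling in each document, g is an entropy-style global weight, and a is the weighted log term-frequency matrix. A self-contained restatement in numpy, with a made-up 3 x 4 term-by-document count matrix, shows what each line computes.

import numpy as np

termFrequency = np.array([[2., 0., 1., 0.],
                          [1., 1., 0., 3.],
                          [0., 2., 2., 1.]])  # rows = terms, columns = documents (toy counts)

gf = np.sum(termFrequency, axis=1)                          # global frequency of each term
p = (termFrequency.T / gf).T                                # fraction of each term per document
n_docs = termFrequency.shape[1]
g = np.sum(p * np.log(p + 1) / np.log(n_docs), axis=1) + 1  # entropy-style global weight
a = (np.log(termFrequency + 1).T * g).T                     # weighted log term frequencies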


Example 8: decode

def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind+1, 0] - 3
    d_bottom = q[ind-1, 1] - 3

    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left)/s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top)/s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s))+1

    region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))

    return hamming.bin_to_str(str2)
Developer: aroundnothing, Project: optar, Lines: 30, Source: picture.py


Example 9: sum2

def sum2(input, dtype=None):
    """
    Returns sum of all non-masked :obj:`Dds` elements-squared.
    
    :type input: :obj:`Dds`
    :param input: Input elements for which sum of squared-elements is calculated.
    :type dtype: :obj:`numpy.dtype` or dtype :obj:`str`
    :param dtype: Type used for summation of elements.
    
    :rtype: scalar
    :return: Sum of the squared-elements (i.e. :samp:`scipy.sum((input.asarray())**2, dtype)`).
    """
    mskVal = None
    if (hasattr(input, "mtype") and (input.mtype != None)):
        mskVal = input.mtype.maskValue()
    
    mpiComm = None
    if (hasattr(input, "mpi") and hasattr(input.mpi, "comm") and (input.mpi.comm != None)):
        mpiComm = input.mpi.comm

    inArr = input.subd.asarray()
    if (mskVal != None):
        s = sp.sum(sp.where(inArr != mskVal, inArr**2, 0), dtype=dtype)
    else:
        s = sp.sum(inArr**2, dtype=dtype)
    
    if (mpiComm != None):
        s = mpiComm.allreduce(s, mango.mpi.SUM)

    return s
Developer: pymango, Project: pymango, Lines: 30, Source: _core.py


Example 10: _msge_with_gradient_underdetermined

    def _msge_with_gradient_underdetermined(self, data, delta, xvschema, skipstep):
        """ Calculate the mean squared generalization error and it's gradient for underdetermined equation system.
        """
        (l, m, t) = data.shape
        d = None
        j, k = 0, 0
        nt = sp.ceil(t / skipstep)
        for s in range(0, t, skipstep):
            trainset, testset = xvschema(s, t)

            (a, b) = self._construct_eqns(sp.atleast_3d(data[:, :, trainset]))
            (c, d) = self._construct_eqns(sp.atleast_3d(data[:, :, testset]))

            e = sp.linalg.inv(sp.eye(a.shape[0]) * delta ** 2 + a.dot(a.transpose()))

            cc = c.transpose().dot(c)

            be = b.transpose().dot(e)
            bee = be.dot(e)
            bea = be.dot(a)
            beea = bee.dot(a)
            beacc = bea.dot(cc)
            dc = d.transpose().dot(c)

            j += sp.sum(beacc * bea - 2 * bea * dc) + sp.sum(d ** 2)
            k += sp.sum(beea * dc - beacc * beea) * 4 * delta

        return j / (nt * d.size), k / (nt * d.size)
Developer: BioinformaticsArchive, Project: SCoT, Lines: 28, Source: var.py


Example 11: _msge_with_gradient_overdetermined

    def _msge_with_gradient_overdetermined(self, data, delta, xvschema, skipstep):
        """ Calculate the mean squared generalization error and it's gradient for overdetermined equation system.
        """
        (l, m, t) = data.shape
        d = None
        l, k = 0, 0
        nt = sp.ceil(t / skipstep)
        for s in range(0, t, skipstep):
            #print(s,drange)
            trainset, testset = xvschema(s, t)

            (a, b) = self._construct_eqns(sp.atleast_3d(data[:, :, trainset]))
            (c, d) = self._construct_eqns(sp.atleast_3d(data[:, :, testset]))

            #e = sp.linalg.inv(np.eye(a.shape[1])*delta**2 + a.transpose().dot(a), overwrite_a=True, check_finite=False)
            e = sp.linalg.inv(sp.eye(a.shape[1]) * delta ** 2 + a.transpose().dot(a))

            ba = b.transpose().dot(a)
            dc = d.transpose().dot(c)
            bae = ba.dot(e)
            baee = bae.dot(e)
            baecc = bae.dot(c.transpose().dot(c))

            l += sp.sum(baecc * bae - 2 * bae * dc) + sp.sum(d ** 2)
            k += sp.sum(baee * dc - baecc * baee) * 4 * delta

        return l / (nt * d.size), k / (nt * d.size)
Developer: BioinformaticsArchive, Project: SCoT, Lines: 27, Source: var.py


Example 12: test_Event_Activity_ground_motion_model_logic_split

 def test_Event_Activity_ground_motion_model_logic_split(self):      
     num_events = 6
     max_weights = 5
     ea = Event_Activity(num_events)
     indexes = arange(6)
     activity = array((indexes*10, indexes*20))
     
     ea.set_event_activity(activity, indexes)
     atten_model_weights = [array([.4, .6]),array([.1, .4, .5])]
     a = DummyEventSet()
     b = DummyEventSet()
     source_model = [a, b]
     #event_set_indexes = [array([0,1,3]), array([2,4])]
     event_set_indexes = [[0,1,3], [2,4]]
     for sp, esi, amw in map(None, source_model, event_set_indexes,
                             atten_model_weights):
         sp.atten_model_weights = amw
         sp.event_set_indexes = esi
     source_model = Source_Model(source_model) 
         
     ea.ground_motion_model_logic_split(source_model, apply_weights=True)   
     self.assert_(allclose(sum(ea.event_activity), sum(activity)))
     self.assert_(allclose(ea.event_activity[0, 0, 0, 3], 12.))
     self.assert_(allclose(ea.event_activity[0, 0, 0, 4], 4.))
     self.assert_(allclose(ea.event_activity[0, 0, 1, 3], 24.))
     self.assert_(allclose(ea.event_activity[0, 0, 1, 4], 8.))
Developer: dynaryu, Project: eqrm, Lines: 26, Source: test_event_set.py


Example 13: _update_network

def _update_network(network, net):
    # Infer Np and Nt from length of given prop arrays in file
    for element in ['pore', 'throat']:
        N = [_sp.shape(net[i])[0] for i in net.keys() if i.startswith(element)]
        if N:
            N = _sp.array(N)
            if _sp.all(N == N[0]):
                if (network._count(element) == N[0]) \
                        or (network._count(element) == 0):
                    network.update({element+'.all': _sp.ones((N[0],),
                                                             dtype=bool)})
                    net.pop(element+'.all', None)
                else:
                    raise Exception('Length of '+element+' data in file ' +
                                    'does not match network')
            else:
                raise Exception(element+' data in file have inconsistent ' +
                                'lengths')

    # Add data on dummy net to actual network
    for item in net.keys():
        # Try to infer array types and change if necessary
        # Check for booleans disguised as 1's and 0's
        num0s = _sp.sum(net[item] == 0)
        num1s = _sp.sum(net[item] == 1)
        if (num1s + num0s) == _sp.shape(net[item])[0]:
            net[item] = net[item].astype(bool)
        # Write data to network object
        if item not in network:
            network.update({item: net[item]})
        else:
            logger.warning('\''+item+'\' already present')
    return network
Developer: TomTranter, Project: OpenPNM, Lines: 33, Source: IO.py


Example 14: score_samples

    def score_samples(self, X, y=None):
        """Compute the negative weighted log probabilities for each sample.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        log_prob : array, shape (n_samples, n_clusters)
            Log probabilities of each data point in X.
        """
        X = check_array(X, copy=False, order='C', dtype=sp.float64)
        nt, d = X.shape
        K = sp.empty((nt, self.C))

        # Start the prediction for each class
        for c in xrange(self.C):
            # Compute the constant term
            K[:, c] = self.logdet[c] - 2*sp.log(self.prop[c]) + self.cst

            # Remove the mean
            Xc = X - self.mean[c]

            # Do the projection
            Px = sp.dot(Xc,
                        sp.dot(self.Q[c], self.Q[c].T))
            temp = sp.dot(Px, self.Q[c]/sp.sqrt(self.a[c]))
            K[:, c] += sp.sum(temp**2, axis=1)
            K[:, c] += sp.sum((Xc - Px)**2, axis=1)/self.b[c]

        return -K
Developer: mfauvel, Project: HDDA, Lines: 34, Source: hdda.py


Example 15: test_set_boundary_conditions_bctypes

    def test_set_boundary_conditions_bctypes(self):
        self.alg.setup(invading_phase=self.water,
                       defending_phase=self.air,
                       trapping=True)
        Ps = sp.random.randint(0, self.net.Np, 10)

        self.alg.set_boundary_conditions(pores=Ps, bc_type='inlets')
        assert sp.sum(self.alg['pore.inlets']) == sp.size(sp.unique(Ps))
        self.alg['pore.inlets'] = False

        self.alg.set_boundary_conditions(pores=Ps, bc_type='outlets')
        assert sp.sum(self.alg['pore.outlets']) == sp.size(sp.unique(Ps))
        self.alg['pore.outlets'] = False

        self.alg.set_boundary_conditions(pores=Ps, bc_type='residual')
        assert sp.sum(self.alg['pore.residual']) == sp.size(sp.unique(Ps))
        self.alg['pore.residual'] = False

        flag = False
        try:
            self.alg.set_boundary_conditions(pores=Ps, bc_type='bad_type')
        except:
            flag = True
        assert flag

        flag = False
        try:
            self.alg.set_boundary_conditions(bc_type=None, mode='bad_type')
        except:
            flag = True
        assert flag
Developer: MichaelHoeh, Project: OpenPNM, Lines: 31, Source: DrainageTest.py


Example 16: get_fluid_image

    def get_fluid_image(self, size=None, saturation=None):
        r"""
        Returns a binary image of the invading fluid configuration

        Parameters
        ----------
        size : scalar
            The size of invaded pores, so these and all larger pores will be
            filled, if they are accessible.

        saturation : scalar
            The fractional filling of the pore space to return.  The size of
            the invaded pores are adjusted by trial and error until this
            value is reached. If size is sent then saturation is ignored.

        """
        if size is not None:
            im = self._iminv >= size
        else:
            Vp = sp.sum(self.image)
            for r in sp.unique(self._iminv):
                im = self._iminv >= r
                if sp.sum(im)/Vp >= saturation:
                    break
        return im
Developer: zhangwise, Project: porespy, Lines: 25, Source: __mio__.py


Example 17: __init__

	def __init__(self, grid, fArray, zDrawsSorted):
		assert(len(grid) == len(fArray))
		(self.grid, self.fArray) = (grid, fArray)
		self.zDraws = zDrawsSorted
		self.slopes = scipy.zeros(len(grid) - 1)
		self.dx = grid[1] - grid[0]
		for i in range(len(grid) - 1):
			self.slopes[i] = (fArray[i+1] - fArray[i]) / self.dx
		# set up sums
		self.cellSums = scipy.zeros(len(grid) + 1)
		self.boundaryIndices = [len(zDraws)] * len(grid)
		for (i, x) in enumerate(grid):
			indices = scipy.nonzero(self.zDraws >= x)[0]
			if (len(indices) > 0):
				self.boundaryIndices[i] = indices[0]
		self.cellSums[0] = scipy.sum(self.zDraws[0:self.boundaryIndices[0]])
		for i in range(1, len(self.cellSums)-1):
			self.cellSums[i] = scipy.sum(self.zDraws[self.boundaryIndices[i-1] : self.boundaryIndices[i]])
		self.cellSums[-1] = scipy.sum(self.zDraws[self.boundaryIndices[-1] : ])
		
		diff = scipy.sum(self.zDraws) - scipy.sum(self.cellSums)
		print("diff: %f" % diff)
		for i in range(len(grid)):
			if (self.boundaryIndices[i] < len(self.zDraws)):
				print("grid point %f, boundary %f" % (self.grid[i], self.zDraws[self.boundaryIndices[i]]))
			else:
				print("grid point %f, no draws to right" % self.grid[i])
Developer: Twizanex, Project: bellman, Lines: 27, Source: incrMonteCarlo.py


Example 18: from_gene

    def from_gene(self, gene): 

        sg = gene.splicegraph.vertices
        breakpoints = sp.unique(sg.ravel())
        self.segments = sp.zeros((2, 0), dtype='int')
        for j in range(1, breakpoints.shape[0]):
            s = sp.sum(sg[0, :] < breakpoints[j])
            e = sp.sum(sg[1, :] < breakpoints[j])
            if s > e:
                self.segments = sp.c_[self.segments, [breakpoints[j-1], breakpoints[j]]]

        ### match nodes to segments
        self.seg_match = sp.zeros((0, sg.shape[1]), dtype='bool')
        for j in range(sg.shape[1]):
            tmp = ((sg[0, j] <= self.segments[0, :]) & (sg[1, j] >= self.segments[1, :]))
            if self.seg_match.shape[0] == 0:
                self.seg_match = tmp.copy().reshape((1, tmp.shape[0]))
            else:
                self.seg_match = sp.r_[self.seg_match, tmp.reshape((1, tmp.shape[0]))]

        ### create edge graph between segments
        self.seg_edges = sp.zeros((self.segments.shape[1], self.segments.shape[1]), dtype='bool')
        k, l = sp.where(sp.triu(gene.splicegraph.edges))

        for m in range(k.shape[0]):
            ### donor segment
            d = sp.where(self.seg_match[k[m], :])[0][-1]
            ### acceptor segment
            a = sp.where(self.seg_match[l[m], :])[0][0]
            self.seg_edges[d, a] = True
Developer: ratschlab, Project: spladder, Lines: 30, Source: segmentgraph.py


Example 19: _maximum_likelihood

    def _maximum_likelihood(self, X):
        n_samples, n_features = X.shape if X.ndim > 1 else (1, X.shape[0])
        n_components = self.n_components

        # Predict mean
        mu = X.mean(axis=0)

        # Predict covariance
        cov = sp.cov(X, rowvar=0)
        eigvals, eigvecs = self._eig_decomposition(cov)
        sigma2 = ((sp.sum(cov.diagonal()) - sp.sum(eigvals.sum())) /
                  (n_features - n_components))  # FIXME: M < D?

        weight = sp.dot(eigvecs, sp.diag(sp.sqrt(eigvals - sigma2)))
        M = sp.dot(weight.T, weight) + sigma2 * sp.eye(n_components)
        inv_M = spla.inv(M)

        self.eigvals = eigvals
        self.eigvecs = eigvecs
        self.predict_mean = mu
        self.predict_cov = sp.dot(weight, weight.T) + sigma2 * sp.eye(n_features)
        self.latent_mean = sp.transpose(sp.dot(inv_M, sp.dot(weight.T, X.T - mu[:, sp.newaxis])))
        self.latent_cov = sigma2 * inv_M
        self.sigma2 = sigma2    # FIXME!
        self.weight = weight
        self.inv_M = inv_M

        return self.latent_mean
Developer: Yevgnen, Project: prml, Lines: 28, Source: pca.py


Example 20: quality

def quality(func, mesh, interpolator='nn', n=33):
    """Compute a quality factor (the quantity r**2 from TOMS792).

    interpolator must be in ('linear', 'nn').
    """
    fz = func(mesh.x, mesh.y)
    tri = Triangulation(mesh.x, mesh.y)
    intp = getattr(tri, interpolator+'_extrapolator')(fz, bbox=(0.,1.,0.,1.))
    Y, X = sp.mgrid[0:1:complex(0,n),0:1:complex(0,n)]
    Z = func(X, Y)
    iz = intp[0:1:complex(0,n),0:1:complex(0,n)]
    #nans = sp.isnan(iz)
    #numgood = n*n - sp.sum(sp.array(nans.flat, sp.int32))
    numgood = n*n

    SE = (Z - iz)**2
    SSE = sp.sum(SE.flat)
    meanZ = sp.sum(Z.flat) / numgood
    SM = (Z - meanZ)**2
    SSM = sp.sum(SM.flat)


    r2 = 1.0 - SSE/SSM
    print func.func_name, r2, SSE, SSM, numgood
    return r2
Developer: jmsole-METEOSIM, Project: pyroms, Lines: 25, Source: testfuncs.py
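The quality factor returned by Example 20 is the usual coefficient of determination: one minus the ratio of the summed squared interpolation errors (SSE) to the total sum of squares about the mean (SSM). A tiny sketch with made-up true and interpolated values:

import numpy as np

Z = np.array([1.0, 2.0, 3.0, 4.0])    # toy true values on the grid
iz = np.array([1.1, 1.9, 3.2, 3.8])   # toy interpolated values

SSE = np.sum((Z - iz) ** 2)           # sum of squared interpolation errors
SSM = np.sum((Z - Z.mean()) ** 2)     # total sum of squares about the mean
r2 = 1.0 - SSE / SSM                  # quality factor, as in the example above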



Note: The scipy.sum examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's License. Do not reproduce this article without permission.

