• 设为首页
  • 点击收藏
  • 手机版
    手机扫一扫访问
    迪恩网络手机版
  • 关注官方公众号
    微信扫一扫关注
    迪恩网络公众号

Python misc.logsumexp函数代码示例

原作者: [db:作者] 来自: [db:来源] 收藏 邀请

本文整理汇总了Python中scipy.misc.logsumexp函数的典型用法代码示例。如果您正苦于以下问题:Python logsumexp函数的具体用法?Python logsumexp怎么用?Python logsumexp使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。



在下文中一共展示了logsumexp函数的20个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。

示例1: messages_backwards2

    def messages_backwards2(self):
        """Compute HSMM backward messages via an embedded-HMM construction.

        Each HSMM state i is expanded into self.rs[i] consecutive HMM
        sub-states; backward messages are propagated through the expanded
        chain and then collapsed back to per-HSMM-state messages.

        Returns:
            (betal, betastarl): two (T, num_states) arrays of log backward
            messages.
        """
        # this method is just for numerical testing
        # returns HSMM messages using HMM embedding. the way of the future!
        Al = np.log(self.trans_matrix)
        T, num_states = self.T, self.num_states

        betal = np.zeros((T,num_states))
        betastarl = np.zeros((T,num_states))

        # starts/ends delimit the sub-state index range [starts[i], ends[i])
        # occupied by HSMM state i in the expanded chain.
        starts = cumsum(self.rs,strict=True)
        ends = cumsum(self.rs,strict=False)
        # foo: each HSMM state's entry distribution over its own sub-state
        # block; bar: block-diagonal (within-state) part of the expanded
        # backward transition matrix.
        foo = np.zeros((num_states,ends[-1]))
        for idx, row in enumerate(self.bwd_enter_rows):
            foo[idx,starts[idx]:ends[idx]] = row
        bar = np.zeros_like(self.hmm_bwd_trans_matrix)
        for start, end in zip(starts,ends):
            bar[start:end,start:end] = self.hmm_bwd_trans_matrix[start:end,start:end]

        # pmess accumulates log backward messages over the expanded sub-states.
        pmess = np.zeros(ends[-1])

        # betal[-1] is 0
        for t in range(T-1,-1,-1):
            pmess += self.hmm_aBl[t]
            betastarl[t] = logsumexp(np.log(foo) + pmess, axis=1)
            # NOTE: at t == 0 this writes betal[-1] (Python index wraparound);
            # that row is deliberately reset to 0 after the loop.
            betal[t-1] = logsumexp(Al + betastarl[t], axis=1)

            pmess = logsumexp(np.log(bar) + pmess, axis=1)
            # Sub-states at the end of each block can leave the block; mix in
            # the exit contribution weighted by (1 - self.ps) in log space.
            pmess[ends-1] = np.logaddexp(pmess[ends-1],betal[t-1] + np.log(1-self.ps))
        betal[-1] = 0.

        return betal, betastarl
开发者ID:chiaolun,项目名称:pyhsmm,代码行数:31,代码来源:hsmm_inb_states.py


示例2: bayesFactors

def bayesFactors(model1, model2):
    """
    Computes the Bayes factor for two competing models.

    The computation is based on Newton & Raftery 1994 (Eq. 13), i.e., the
    harmonic-mean estimate of each model's marginal likelihood, derived
    from the posterior deviance traces.

    Parameters
    ----------
    model1 : PyAstronomy.funcFit.TraceAnalysis instance
        TraceAnalysis for the first model.
    model2 : PyAstronomy.funcFit.TraceAnalysis instance
        TraceAnalysis for the second model.

    Returns
    -------
    BF : float
        The Bayes factor BF (neither log10(BF) nor ln(BF)).
        Note that a small value means that the first model
        is favored, i.e., BF=p(M2)/p(M1).

    """
    # deviance = -2 * ln(posterior), so recover the ln(posterior) samples.
    logp1 = -0.5 * model1["deviance"]
    logp2 = -0.5 * model2["deviance"]

    # Log of the harmonic-mean marginal-likelihood estimate for each model:
    # ln((1/n) * sum_i exp(-l_i)), evaluated stably with logsumexp.
    log_hm1 = sm.logsumexp(-logp1) - numpy.log(len(logp1))
    log_hm2 = sm.logsumexp(-logp2) - numpy.log(len(logp2))

    return numpy.exp(log_hm1 - log_hm2)
开发者ID:sczesla,项目名称:PyAstronomy,代码行数:29,代码来源:bayesFactors.py


示例3: observe

 def observe(self, population):
     """Update running per-proposal statistics (kept in log space) from a
     weighted sample population, then refresh the proposal-selection
     distribution self.prop_dist.

     Args:
         population: iterable of samples, each exposing .lweight (log
             importance weight) and .prop_obj (the proposal that produced it).
     """
     lweights = np.array([s.lweight for s in population])
     #print(lweights)
     # Normalize the log weights so they sum to 1 in linear space.
     lweights = lweights - logsumexp(lweights) #+ 1000
     #print(lweights)
     indices = np.array([self.prop2idx[s.prop_obj] for s in population])
     for i in range(len(lweights)):
         prop_idx = indices[i]
         # Per-proposal tallies in log space: count, sum of weights, and sum
         # of squared weights (2*lweight == log(weight^2)).
         self.num_samp[prop_idx] = self.num_samp[prop_idx] + 1
         self.sum[prop_idx] = logsumexp((self.sum[prop_idx], lweights[i]))
         self.sqr_sum[prop_idx] = logsumexp((self.sqr_sum[prop_idx], 2*lweights[i]))
     lnum_samp = log(self.num_samp)
     # Dispersion statistic per proposal, computed in log space.
     # NOTE(review): this adds sum and sqr_sum/num_samp rather than forming
     # E[w^2] - E[w]^2 — presumably intentional; confirm against the
     # accompanying derivation.
     self.var = exp(logsumexp([self.sum, self.sqr_sum - lnum_samp], 0) - lnum_samp)
     #self.var = exp(self.var - logsumexp(self.var))

     if self.var.size > 1:
         tmp = self.var.sum()
         if tmp == 0 or np.isnan(tmp):
             # Degenerate or NaN dispersion: fall back to a uniform choice.
             prop_prob = np.array([1./self.var.size] * self.var.size)
         else:
             # Favor proposals with LOW dispersion (each gets the mass of the
             # others), mixed 50/50 with a Dirichlet draw for exploration.
             prop_prob = (self.var.sum() - self.var)
             prop_prob = prop_prob/prop_prob.sum()/2 + np.random.dirichlet(1 + self.num_samp)/2
     else:
         prop_prob = np.array([1./self.var.size] * self.var.size)
     self.prop_dist = categorical(prop_prob)
开发者ID:ingmarschuster,项目名称:ModelSelection,代码行数:25,代码来源:proposal_oracle.py


示例4: predictive_log_likelihood

    def predictive_log_likelihood(self, X_pred, data_index=0, M=100):
        """
        Hacky way of computing the predictive log likelihood: average the
        emission likelihood of X_pred over M sampled latent-state
        trajectories (a Monte Carlo logsumexp estimate) and attach bootstrap
        error bars.

        :param X_pred: observations to score (first axis is time).
        :param data_index: which dataset in self.data_list to condition on.
        :param M: number of sampled state trajectories.
        :return: (hll, std_hll) — the estimate and its bootstrap std. dev.
        """
        Tpred = X_pred.shape[0]
        data = self.data_list[data_index]

        # Posterior over the latent states given the conditioning data.
        cond_mean = self.emission_distn.conditional_mean(data)
        cond_cov = self.emission_distn.conditional_cov(data, flat=True)

        # Draw M predicted state trajectories and score X_pred under each.
        z_preds = data["states"].predict_states(
                cond_mean, cond_cov, Tpred=Tpred, Npred=M)
        lls = [self.emission_distn.log_likelihood(
                   {"z": z_preds[..., m], "x": X_pred})
               for m in range(M)]

        # Monte Carlo average in log space.
        hll = logsumexp(lls) - np.log(M)

        # Bootstrap resampling of the per-trajectory likelihoods for an
        # error estimate.
        samples = np.random.choice(lls, size=(100, M), replace=True)
        hll_samples = logsumexp(samples, axis=1) - np.log(M)
        std_hll = hll_samples.std()

        return hll, std_hll
开发者ID:HIPS,项目名称:pgmult,代码行数:31,代码来源:lds.py


示例5: _get_predictive_likelihoods

def _get_predictive_likelihoods(k):
    """Return the k-step-ahead log predictive likelihoods, i.e. the log ratio
    of the propagated-forward evidence to the past evidence, per time step.

    NOTE(review): relies on module-level arrays (scaled_alphal, cmaxes, aBl,
    alphal, trans_matrix) set up elsewhere in this module — confirm at call
    site.
    """
    # Propagate the scaled forward messages k steps through the transition
    # matrix, restore the log scale, and add the emission terms at t+k.
    propagated = scaled_alphal[:-k].dot(np.linalg.matrix_power(trans_matrix, k))
    joint = np.log(propagated) + cmaxes[:-k, None] + aBl[k:]
    future_likelihoods = logsumexp(joint, axis=1)

    past_likelihoods = logsumexp(alphal[:-k], axis=1)
    return future_likelihoods - past_likelihoods
开发者ID:WilsonKong,项目名称:pyhsmm,代码行数:7,代码来源:parallel.py


示例6: compute_forward_messages

def compute_forward_messages(optimizer, preorder_node_lst, gen_per_len, subtree_data_likelihoods):
    """Compute forward (pre-order) messages for each non-root node of a tree.

    For each node, visited in pre-order so parents are processed first,
    combines in log space:
      * the transition matrix along the branch leading to the node,
      * the forward message already computed for its parent (if any), and
      * the subtree data likelihoods of its siblings (if any),
    then normalizes to per-state log posteriors.

    Args:
        optimizer: object exposing get_transition_matrix(num_generations),
            returning a linear-scale transition probability matrix.
        preorder_node_lst: tree nodes in pre-order; each node exposes .oid,
            .parent_node, .edge_length and .child_nodes().
        gen_per_len: scale factor converting branch length to generations.
        subtree_data_likelihoods: dict mapping parent oid -> {child oid ->
            per-state log-likelihood vector for the data below that child}.

    Returns:
        dict mapping node oid -> per-state log posterior vector; nodes with
        no incoming information are omitted.
    """
    node_likelihoods = {}
    for node in preorder_node_lst:
        # The root has no incoming branch; skip it. (A dead store of the
        # root's oid into an unused local was removed here.)
        if node.parent_node is None:
            continue

        have_data = False
        num_generations = int(node.edge_length * gen_per_len)
        trans_matrix = numpy.log(optimizer.get_transition_matrix(num_generations)).transpose()

        # Forward message from the parent, if one was computed.
        if node.parent_node.oid in node_likelihoods:
            trans_matrix += node_likelihoods[node.parent_node.oid]
            have_data = True

        # Data likelihoods from the subtrees of the node's siblings.
        if node.parent_node.oid in subtree_data_likelihoods:
            sibling_lls = subtree_data_likelihoods[node.parent_node.oid]
            for sibling in node.parent_node.child_nodes():
                if sibling.oid == node.oid:
                    continue
                if sibling.oid in sibling_lls:
                    have_data = True
                    trans_matrix += sibling_lls[sibling.oid]

        if have_data:
            # Marginalize over the source state and normalize.
            tot_probs = logsumexp(trans_matrix, axis=1)
            norm_factor = logsumexp(tot_probs)
            node_likelihoods[node.oid] = tot_probs - norm_factor

    return node_likelihoods
开发者ID:mgymrek,项目名称:MUTEA,代码行数:29,代码来源:tree_posterior_inference.py


示例7: magic_function

def magic_function(encoding, train_toks, classifier):
    """Evaluate a two-class ('n' vs 'v') bilinear classifier on train_toks.

    Args:
        encoding: object whose encode(train_toks) returns matrices (V, N, M).
        train_toks: non-empty sequence of (token, tag) pairs with tag in
            {'n', 'v'}; at most as many pairs as the score-matrix diagonal.
        classifier: object exposing weight_n(), weight_v() and getemp().

    Returns:
        acc: fraction of tokens whose argmax diagonal score matches the tag.
        negative mean per-token probability of the gold class.
        grad_n, grad_v: feature-expectation-minus-empirical-count vectors
            for the 'n' and 'v' weights.
    """
    V, N, M = encoding.encode(train_toks)
    Np = np.dot(N, np.dot(classifier.weight_n(), M.transpose()))
    Vp = np.dot(V, np.dot(classifier.weight_v(), M.transpose()))

    # Per-token prediction from the diagonal scores ('v' wins ties).
    diag_n = np.diag(Np)
    diag_v = np.diag(Vp)
    aclist = ['n' if ns > vs else 'v' for ns, vs in zip(diag_n, diag_v)]

    Z = np.exp(Np) + np.exp(Vp)
    # BUG FIX: nprob previously used Vp (copy-paste error, making nprob and
    # vprob identical); it must be based on Np.
    # NOTE(review): dividing a log-sum by Z is unusual but preserved from the
    # original apart from the Np fix.
    nprob = sm.logsumexp(Np) / Z
    vprob = sm.logsumexp(Vp) / Z

    ll = []
    total = 0
    correct = 0
    for tok, tag in train_toks:
        # Gold-class probability, computed stably with logaddexp instead of
        # the original log(exp(.)) round trips.
        denom = np.logaddexp(diag_v[total], diag_n[total])
        if tag == 'n':
            ll.append(np.exp(diag_n[total] - denom))
        elif tag == 'v':
            ll.append(np.exp(diag_v[total] - denom))
        if aclist[total] == tag:
            correct += 1
        total += 1
    acc = float(correct) / total

    empn, empv = classifier.getemp()
    nfeat = np.dot(np.exp(nprob), empn)
    vfeat = np.dot(np.exp(vprob), empv)
    grad_n = nfeat - empn
    grad_v = vfeat - empv
    return acc, -float(sum(ll)) / len(ll), grad_n, grad_v
开发者ID:f00barin,项目名称:bilppattach,代码行数:26,代码来源:bilme.py


示例8: get_segment_quality_end

    def get_segment_quality_end(self, end_index: int, call_state: int) -> float:
        """Calculates the complement of phred-scaled posterior probability that a site marks the end of a segment.
        This is done by directly summing the probability of the following complementary paths in the log space:

            ...      [end_index]                           [end_index+1]          ...
                     call_state                        =>  call_state
                     (any state except for call_state) =>  (any state)

        Args:
            end_index: right breakpoint index of a segment
            call_state: segment call state index

        Returns:
            a phred-scaled probability
        """
        assert 0 <= end_index < self.num_sites

        all_other_states_list = self.leave_one_out_state_lists[call_state]

        if end_index == self.num_sites - 1:
            # Last site: the segment cannot extend further, so the complement
            # is the posterior mass of all states other than call_state.
            logp = logsumexp(self.log_posterior_prob_tc[self.num_sites - 1, all_other_states_list])
        else:
            # Joint log-probability of a path (s_t = i, s_{t+1} = j) given the
            # data is alpha_t(i) + log a_ij + log e_{t+1}(j) + beta_{t+1}(j)
            # - log p(data).
            # BUG FIX: the backward term must be evaluated at the *next*
            # state, beta_tc[end_index + 1, next_state]; it previously used
            # end_state, which is inconsistent with the appended
            # call_state -> call_state term below.
            complementary_paths_unnorm_logp = [(self.alpha_tc[end_index, end_state] +
                                                self.log_trans_tcc[end_index, end_state, next_state] +
                                                self.log_emission_tc[end_index + 1, next_state] +
                                                self.beta_tc[end_index + 1, next_state])
                                               for end_state in all_other_states_list
                                               for next_state in self.all_states_list]
            complementary_paths_unnorm_logp.append((self.alpha_tc[end_index, call_state] +
                                                    self.log_trans_tcc[end_index, call_state, call_state] +
                                                    self.log_emission_tc[end_index + 1, call_state] +
                                                    self.beta_tc[end_index + 1, call_state]))
            logp = logsumexp(np.asarray(complementary_paths_unnorm_logp)) - self.log_data_likelihood

        return logp_to_phred(logp)
开发者ID:frank-y-liu,项目名称:gatk,代码行数:35,代码来源:segment_quality_utils.py


示例9: test_mixture_weight_init

def test_mixture_weight_init():
    """Compare mean mixture log-likelihoods under uniform vs. random
    component weights, using pre-computed per-component log-likelihood files.
    """
    train_m_file = 'nltcs_2015-01-29_18-39-06/train.m.log'
    valid_m_file = 'nltcs_2015-01-29_18-39-06/valid.m.log'
    test_m_file = 'nltcs_2015-01-29_18-39-06/test.m.log'

    logging.basicConfig(level=logging.DEBUG)

    # Each file holds per-instance, per-component log-likelihoods.
    train, valid, test = (dataset.csv_2_numpy(f, path='', type='float32')
                          for f in (train_m_file, valid_m_file, test_m_file))

    k_components = train.shape[1]

    # Uniform mixture weights.
    unif_weights = numpy.ones(k_components) / k_components

    # Random, normalized mixture weights.
    rand_weights = numpy.random.rand(k_components)
    rand_weights = rand_weights / rand_weights.sum()

    # Mean log-likelihood of each weighted mixture, computed in log space.
    unif_mixture = logsumexp(train + numpy.log(unif_weights), axis=1).mean()
    rand_mixture = logsumexp(train + numpy.log(rand_weights), axis=1).mean()

    print('UNIF W LL', unif_mixture)
    print('RAND W LL', rand_mixture)
开发者ID:arranger1044,项目名称:spyn,代码行数:29,代码来源:test_sgd.py


示例10: transitioncounts

def transitioncounts(fwdlattice, bwdlattice, framelogprob, log_transmat):
    """Compute expected HMM transition counts (xi) from forward/backward
    lattices, as in the Baum-Welch E-step.

    Args:
        fwdlattice: (n_observations, n_components) log forward messages.
        bwdlattice: (n_observations, n_components) log backward messages.
        framelogprob: (n_observations, n_components) per-frame log emission
            probabilities.
        log_transmat: (n_components, n_components) log transition matrix.

    Returns:
        (n_components, n_components) array of expected transition counts
        summed over time.

    NOTE(review): Python 2 code (print statements); scipy.misc.logsumexp was
    removed from modern SciPy in favor of scipy.special.logsumexp.
    """
    n_observations, n_components = fwdlattice.shape
    lneta = np.zeros((n_observations-1, n_components, n_components))
    from scipy.misc import logsumexp
    # Total data log-likelihood: logsumexp over the final forward column.
    logprob = logsumexp(fwdlattice[n_observations-1, :])
    print 'logprob', logprob

    # xi_t(i,j) = alpha_t(i) + log a_ij + log b_j(o_{t+1}) + beta_{t+1}(j) - log P(O)
    for t in range(n_observations - 1):
        for i in range(n_components):
            for j in range(n_components):
                lneta[t, i, j] = fwdlattice[t, i] + log_transmat[i, j] \
                    + framelogprob[t + 1, j] + bwdlattice[t + 1, j] - logprob

    # Debug output below; retained from the original test helper.
    print framelogprob

    print 'fwdlattice[:, 0]'
    print fwdlattice[:, 0]
    print 'logtransmat[0,0]'
    print log_transmat[0,0]
    print 'framelogprob'
    print framelogprob[:, 0]
    print 'bwdlattice[:, 0]'
    print bwdlattice[:, 0]
    print 'lneta{0,0}'
    print lneta[:, 0, 0]
    # Sum expected transitions over time (logsumexp over axis 0) and return
    # them in linear space.
    return np.exp(logsumexp(lneta, 0))
开发者ID:gkiss,项目名称:mixtape,代码行数:27,代码来源:test_GaussianHMMCUDAImpl.py


示例11: hsmm_messages_forwards_log

def hsmm_messages_forwards_log(
    trans_potential, initial_state_potential,
    reverse_cumulative_obs_potentials, reverse_dur_potentials, reverse_dur_survival_potentials,
    alphal, alphastarl,
    left_censoring=False, right_censoring=True):
    """Compute HSMM forward messages in log space, filling alphal and
    alphastarl in place.

    Args:
        trans_potential: callable t -> log transition potentials at time t.
        initial_state_potential: log initial state distribution.
        reverse_cumulative_obs_potentials: callable t -> cumulative log
            observation potentials for segments ending at t.
        reverse_dur_potentials: callable t -> log duration potentials aligned
            with the cumulative observation potentials.
        reverse_dur_survival_potentials: unused in this routine (kept for
            interface parity with the corresponding backwards pass).
        alphal, alphastarl: (T, num_states) output arrays, written in place.
        left_censoring: not implemented; raises NotImplementedError if True.
        right_censoring: if True, the normalizer is left as None (TODO).

    Returns:
        (alphal, alphastarl, normalizer).

    NOTE(review): Python 2 code (xrange); `na` is presumably a module-level
    alias for np.newaxis — confirm against the module imports.
    """
    T, _ = alphal.shape

    alphastarl[0] = initial_state_potential
    for t in xrange(T-1):
        cB = reverse_cumulative_obs_potentials(t)
        # Sum over possible segment start times for a segment ending at t.
        alphal[t] = logsumexp(
            alphastarl[t+1-cB.shape[0]:t+1] + cB + reverse_dur_potentials(t), axis=0)
        if left_censoring:
            raise NotImplementedError
        # Propagate through the transitions to get entry messages for t+1.
        alphastarl[t+1] = logsumexp(
            alphal[t][:,na] + trans_potential(t), axis=0)
    # Final time step: no further transition to propagate.
    t = T-1
    cB = reverse_cumulative_obs_potentials(t)
    alphal[t] = logsumexp(
        alphastarl[t+1-cB.shape[0]:t+1] + cB + reverse_dur_potentials(t), axis=0)

    if not right_censoring:
        normalizer = logsumexp(alphal[t])
    else:
        normalizer = None # TODO

    return alphal, alphastarl, normalizer
开发者ID:WilsonKong,项目名称:pyhsmm,代码行数:28,代码来源:hsmm_states.py


示例12: log_weighted_ave

def log_weighted_ave(arrs, weights):
    """Weighted average of log-space arrays.

    Stacks `arrs` along axis 0, normalizes `weights` in log space, and
    returns log(sum_i w_i * exp(arrs[i])) computed stably with logsumexp.
    """
    arrs = np.array(arrs)
    log_w = np.log(weights)
    log_w = log_w - logsumexp(log_w)  # normalize so weights sum to 1
    # Reshape so the weights broadcast over the trailing axes of `arrs`.
    log_w = log_w.reshape(log_w.shape + (1,) * (arrs.ndim - 1))
    return logsumexp(arrs + log_w, axis=0)
开发者ID:futurulus,项目名称:colors-in-context,代码行数:7,代码来源:blending.py


示例13: e_step

def e_step(x, N, d):
    """E-step of a two-component mixture EM: hard-assign each data point.

    For each of the N points, evaluates the (externally defined) ``Gaussian``
    helper, which returns per-component log terms ``t`` and weights ``w``,
    and computes the posterior responsibility of component 0 via stable
    logsumexp evaluations.

    Args:
        x: indexable sequence of data points (length >= N).
        N: number of points to process.
        d: data dimensionality; unused here, kept for interface compatibility.

    Returns:
        numpy array of length N with hard labels: 1.0 where the posterior of
        component 0 is >= 0.5, else 0.0.

    NOTE: the original declared ``global miu, p, sigma`` without ever
    assigning them; those dead declarations have been removed.
    """
    soft_p = np.zeros(N)
    labels = np.zeros(N)
    for i in range(N):
        t, w = Gaussian(x[i])
        # Stable evaluation of each component's (unnormalized) likelihood.
        num1 = np.exp(logsumexp(t[0], b=w[0]))
        num2 = np.exp(logsumexp(t[1], b=w[1]))
        soft_p[i] = num1 / (num1 + num2)

        # labels is zero-initialized, so only the >= 0.5 case needs a write.
        if soft_p[i] >= 0.5:
            labels[i] = 1
    return labels
开发者ID:zoezou2015,项目名称:ML_hm3,代码行数:27,代码来源:mix_em.py


示例14: sample_lpost_based

def sample_lpost_based(num_samples, initial_particles, proposal_method, population_size = 20):
    """Population Monte Carlo style sampler: repeatedly proposes populations
    around ancestors chosen by (log-)posterior weight and importance-resamples
    them until at least num_samples samples have been collected.

    Args:
        num_samples: minimum number of samples to collect.
        initial_particles: particles used to seed the ancestor set.
        proposal_method: object providing process_initial_samples() and
            gen_proposal(); proposals expose .lweight, .lpost and .sample.
        population_size: proposals generated (and resampled) per iteration.

    Returns:
        Tuple (samples, lposts) as numpy arrays over all collected proposals.
    """
    rval = []
    anc = proposal_method.process_initial_samples(initial_particles)
    # NOTE(review): rval is empty at this point, so num_initial is always 0.
    num_initial = len(rval)

    while len(rval) - num_initial < num_samples:
        # Ancestor distribution: categorical proportional to exp(lpost),
        # normalized in log space (NOT uniform).
        ancest_dist = np.array([a.lpost for a in anc])
        ancest_dist = categorical(ancest_dist - logsumexp(ancest_dist), p_in_logspace = True)

        pop = [proposal_method.gen_proposal(anc[ancest_dist.rvs()])
                for _ in range(population_size)]

        # Self-normalized importance weights of the new population.
        prop_w = np.array([s.lweight for s in pop])
        prop_w = exp(prop_w - logsumexp(prop_w))

        # Importance Resampling. np.random.multinomial raises ValueError when
        # rounding makes the probabilities sum past 1; renormalize and retry.
        while True:
            try:
                draws = np.random.multinomial(population_size, prop_w)
                break
            except ValueError:
                prop_w /= prop_w.sum()

        # Replicate each proposal by its draw count and grow the ancestor set.
        for idx in range(len(draws)):
            rval.extend([pop[idx]] * draws[idx])
            anc.append(pop[idx])

    return (np.array([s.sample for s in rval]), np.array([s.lpost for s in rval]))
开发者ID:ingmarschuster,项目名称:ModelSelection,代码行数:30,代码来源:pmc.py


示例15: basis_log_like

 def basis_log_like(beta):
   """Numerically stable Poisson log-likelihood of the basis-count data
   Zbasis[k] under log-rates built from softmax-normalized basis weights.

   NOTE(review): relies on enclosing-scope variables (logW, k, Zbasis, self)
   not visible in this excerpt; beta[0] acts as a global offset added to the
   remaining coefficients — confirm against the enclosing method.
   """
   beta = beta[0] + beta[1:]
   # Log of the normalized basis (softmax over beta) per unit length dx.
   logBk = beta - logsumexp(beta) - np.log(self._dx)
   # Log intensities: total weight times basis times the dt*dx cell measure.
   logLams = logsumexp(logW[k,:]) + logBk + np.log(self._dt) + np.log(self._dx)
   # Poisson log-likelihood up to a constant: sum z*log(lam) - lam.
   stable_ll = np.sum(logLams*Zbasis[k] - np.exp(logLams))
   #print "!!!!!!!!!! basis_log_like %2.5f, %2.5f"%(ll, stable_ll)
   return stable_ll
开发者ID:andymiller,项目名称:flecks,代码行数:7,代码来源:st_mix_lgcp.py


示例16: recalc_log_gt_posteriors

def recalc_log_gt_posteriors(log_gt_priors, down, up, p_geom, read_counts_array, nalleles, allele_sizes, diploid=False, norm=False):
    """Recompute per-sample log genotype posteriors under a geometric stutter
    model.

    For a true allele a, a read observed at allele index x contributes
    log(down or up) + GeomLogPMF(|size(x) - size(a)|) for lower/higher
    indices, or log(1 - down - up) for an exact match.

    Args:
        log_gt_priors: log prior per genotype (length nalleles, or
            nalleles**2 flattened when diploid).
        down, up: stutter step probabilities toward lower/higher allele
            indices.
        p_geom: geometric step-size distribution parameter.
        read_counts_array: (nsamples, nalleles) read counts per allele.
        nalleles: number of alleles.
        allele_sizes: size of each allele, indexed by allele index.
        diploid: if True, enumerate ordered diploid genotypes (a1, a2) and
            mix the two alleles' stutter distributions 50/50.
        norm: if True, return the total log normalizer summed over samples
            instead of the normalized posteriors.

    Returns:
        If norm: scalar sum over samples of logsumexp of the genotype LLs;
        otherwise an (nsamples, num_genotypes) array of log posteriors.

    NOTE(review): Python 2 code (xrange); depends on module-level
    log_one_half and DEBUG_HAPLOID, and on `geom` (presumably
    scipy.stats.geom) — confirm against the module imports.
    """
    stutter_dist = geom(p_geom)
    nsamples     = read_counts_array.shape[0]
    log_down, log_eq, log_up = map(numpy.log, [down, 1-down-up, up])
    if diploid:
        num_gts = nalleles**2
        LLs = numpy.zeros((nsamples, num_gts)) + log_gt_priors
        gtind = 0
        for a1 in xrange(nalleles):
            for a2 in xrange(nalleles):
                # Debug switch: zero out heterozygous genotypes entirely.
                if a1 != a2 and DEBUG_HAPLOID:
                    LLs[:,gtind] = numpy.log(0)
                    gtind += 1
                    continue
                # Per-allele stutter distributions over observed allele index.
                step_probs1 = numpy.hstack(([log_down + stutter_dist.logpmf(abs(allele_sizes[x]-allele_sizes[a1])) for x in range(0, a1)],
                                            [log_eq],
                                            [log_up   + stutter_dist.logpmf(abs(allele_sizes[x]-allele_sizes[a1])) for x in range(a1+1, nalleles)]))
                step_probs2 = numpy.hstack(([log_down + stutter_dist.logpmf(abs(allele_sizes[x]-allele_sizes[a2])) for x in range(0, a2)],
                                            [log_eq],
                                            [log_up   + stutter_dist.logpmf(abs(allele_sizes[x]-allele_sizes[a2])) for x in range(a2+1, nalleles)]))
                # 50/50 mixture of the two haplotypes' stutter distributions.
                step_probs = numpy.logaddexp(step_probs1+log_one_half, step_probs2+log_one_half)
                LLs[:,gtind] += numpy.sum(read_counts_array*step_probs, axis=1)
#                if a1 == a2: LLs[:,gtind]+= numpy.log(2) # account for phase
                gtind += 1
    else:
        LLs      = numpy.zeros((nsamples, nalleles)) + log_gt_priors
        for j in xrange(nalleles):
            step_probs = numpy.hstack(([log_down + stutter_dist.logpmf(abs(allele_sizes[x]-allele_sizes[j])) for x in range(0, j)],
                                       [log_eq],
                                       [log_up   + stutter_dist.logpmf(abs(allele_sizes[x]-allele_sizes[j])) for x in range(j+1, nalleles)]))
            LLs [:,j] += numpy.sum(read_counts_array*step_probs, axis=1)
    if norm: return numpy.sum(logsumexp(LLs, axis=1))
    else:
        # Normalize each sample's genotype distribution in log space.
        log_samp_totals = logsumexp(LLs, axis=1)[numpy.newaxis].T
        return LLs - log_samp_totals
开发者ID:mgymrek,项目名称:MUTEA,代码行数:35,代码来源:geom_stutter_em.py


示例17: _whitened_logpdf

 def _whitened_logpdf(self, X, pool=None):
     logpdfs = [logweight + kde(X, pool=pool)
                for logweight, kde in zip(self._logweights, self._kdes)]
     if len(X.shape) == 1:
         return logsumexp(logpdfs)
     else:
         return logsumexp(logpdfs, axis=0)
开发者ID:drudd,项目名称:kombine,代码行数:7,代码来源:clustered_kde.py


示例18: up_tree_pass

 def up_tree_pass(self,X,nodes):
     """Upward (bottom-up) message pass for this node of the HME.

     Evaluates this node's prior over its children and accumulates each
     child's contribution into self.responsibilities, then computes the
     per-sample log normaliser with logsumexp to prevent underflow.

     Parameters
     ----------
     X: numpy array of size 'n x m'
         Explanatory variables

     nodes: list of size equal number of nodes in HME
         List with all nodes of HME
     """
     self._prior(X)
     children = self.get_childrens(nodes)

     # All children must share a single node type.
     child_types = set([child.node_type for child in children])
     if len(child_types) != 1:
         raise ValueError("Children nodes should have the same node type")

     # Fold each child's message into this node's responsibilities.
     for idx, child in enumerate(children):
         if child.node_type == "expert":
             self.responsibilities[:, idx] += child.weights
         elif child.node_type == "gate":
             self.responsibilities[:, idx] += logsumexp(child.responsibilities, axis=1)
         else:
             raise TypeError("Unidentified node type")

     # Per-sample normaliser computed in log space.
     self.normaliser = logsumexp(self.responsibilities, axis=1)
开发者ID:AmazaspShumik,项目名称:Mixture-Models,代码行数:34,代码来源:nodes_hme.py


示例19: predictStar

def predictStar(clfstar, clfgalaxy, X, Xerr, index):
    """Binary star/galaxy prediction for one object from two density models.

    Computes P(star | X, Xerr) via Bayes' rule from the two models'
    log-probabilities and the module-level priors PStar and PGalaxy, then
    thresholds at 0.5.

    Args:
        clfstar, clfgalaxy: fitted models exposing logprob_a(X, Xerr).
        X, Xerr: features and their uncertainties for the object.
        index: identifier printed when Xerr contains NaNs (debug aid).

    Returns:
        1 if P(star | X, Xerr) >= 0.5, else 0.

    Raises:
        Exception: if the computed fraction is NaN or outside [0, 1].

    NOTE(review): Python 2 code (print statement); PStar and PGalaxy are
    module-level priors not visible in this excerpt.
    """
    if np.any(np.isnan(Xerr)):
        print index
    #numerator = PStar * np.exp(clfstar.logprob_a(X, Xerr))
    #denominator = PStar * np.exp(clfstar.logprob_a(X, Xerr)) + PGalaxy * np.exp(clfgalaxy.logprob_a(X, Xerr))
    #P(Star|X, XErr)

    # Log-evidence of the data under each class model.
    logcondstar = misc.logsumexp(clfstar.logprob_a(X, Xerr))
    logcondgal = misc.logsumexp(clfgalaxy.logprob_a(X, Xerr))

    # log P(star | data), evaluated stably with logaddexp before
    # exponentiating.
    fraction = np.log(PStar) + logcondstar \
            - np.logaddexp(np.log(PStar) +  logcondstar, np.log(PGalaxy) + logcondgal)

    fraction = np.exp(fraction)

    if np.isnan(fraction):
        raise Exception('Invalid Fractions Nan')
    if fraction > 1 or fraction < 0:
        raise Exception('Invalid Fraction Range: {0}'.format(fraction))

    if fraction >= 0.5:
        return 1
    else:
        return 0
开发者ID:NabeelSarwar,项目名称:Thesis,代码行数:25,代码来源:deconv.py


示例20: smoother

    def smoother(self, next_state, filtered_state):
        """One backward smoothing step of the switching Kalman filter (GPB
        approximation): combine the filtered state at time t with the
        smoothed state at t+1, then collapse each model's mixture.

        Args:
            next_state: smoothed SwitchingKalmanState for time t+1.
            filtered_state: filtered SwitchingKalmanState for time t.

        Returns:
            Smoothed SwitchingKalmanState for time t.

        NOTE(review): Python 2 code (xrange).
        """
        gpb_ = self._init_gpb()
        state = SwitchingKalmanState(n_models=self.n_models)

        # Pairwise RTS smoothing of every (current model j, next model k)
        # combination.
        for k in xrange(self.n_models):
            kalman = KalmanFilter(model=self.models[k])
            for j in xrange(self.n_models):
                # Smoothing step
                (gpb_[k].m[:,j], gpb_[k].P[:,:,j]) = kalman._smoother(\
                    filtered_state.model(j), next_state.model(j), self.embeds[j][k])

        # Posterior Transition
        # p(s_t=j | s_t+1=k, y_1:T) \approx \propto p(s_t+1=k | s_t=j) * p(s_t=j | y_1:t)
        U = self.log_transmat.T + filtered_state.M
        U = U.T - logsumexp(U, axis=1)
        # p(s_t=j, s_t+1=k | y_1:T) = p(s_t=j | s_t+1=k, y_1:T) * p(s_t+1=k | y_1:T)
        M = U + next_state.M
        # p(s_t=j | y1:T) = \sum_k p(s_t=j, s_t+1=k | y_1:T)
        state.M = logsumexp(M, axis=1)
        # p(s_t+1=k | s_t=j, y_1:T) = p(s_t=j, s_t+1=k | y_1:T) / p(s_t=j | y_1:T)
        W = np.exp(M.T - state.M) # WARNING: W is W.T in Murphy's paper

        # Collapse step: moment-match each model's Gaussian mixture back to a
        # single Gaussian using the mixing weights W.
        for j in xrange(self.n_models):
            # (state.m[:,j], state.P[:,:,j]) = self._collapse(m_[:,:,j], P_[:,:,:,j], W[:,j])
            m, P = self._collapse(gpb_[j].m, gpb_[j].P, W[:,j], self.masks[j])
            state._states[j] = KalmanState(mean=m, covariance=P)

        return state
开发者ID:deeplearning-ai-research,项目名称:switching-kalman-filter,代码行数:29,代码来源:switchingkalmanfilter.py



注:本文中的scipy.misc.logsumexp函数示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。


鲜花

握手

雷人

路过

鸡蛋
该文章已有0人参与评论

请发表评论

全部评论

专题导读
上一篇:
Python misc.toimage函数代码示例发布时间:2022-05-27
下一篇:
Python misc.lena函数代码示例发布时间:2022-05-27
热门推荐
阅读排行榜

扫描微信二维码

查看手机版网站

随时了解更新最新资讯

139-2527-9053

在线客服(服务时间 9:00~18:00)

在线QQ客服
地址:深圳市南山区西丽大学城创智工业园
电邮:jeky_zhao#qq.com
移动电话:139-2527-9053

Powered by 互联科技 X3.4© 2001-2023 极客世界.|Sitemap