
Python linalg.expm_frechet Function Code Examples


This article collects typical usage examples of the scipy.linalg.expm_frechet function in Python. If you are wondering what expm_frechet does, how to call it, or where to find worked examples, the curated code samples below should help.



A total of 15 expm_frechet code examples are shown below, sorted by popularity by default.
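Before the examples, here is a minimal sketch of the basic call. The matrices are arbitrary values chosen only for illustration and do not come from any of the projects listed below:

import numpy as np
from scipy.linalg import expm, expm_frechet

# A small matrix A and a perturbation direction E (arbitrary values).
A = np.array([[1.0, 2.0],
              [3.0, 4.0]])
E = np.array([[0.0, 1.0],
              [1.0, 0.0]])

# By default expm_frechet returns both expm(A) and the Frechet derivative
# of the matrix exponential at A in the direction E.
expm_A, frechet_AE = expm_frechet(A, E)

# The result can be checked against the block-matrix identity
# expm([[A, E], [0, A]]) = [[expm(A), L(A, E)], [0, expm(A)]].
n = A.shape[0]
M = np.block([[A, E], [np.zeros_like(A), A]])
assert np.allclose(expm_A, expm(A))
assert np.allclose(frechet_AE, expm(M)[:n, n:])

# If only the derivative is needed, skip the extra expm output.
frechet_only = expm_frechet(A, E, compute_expm=False)
assert np.allclose(frechet_only, frechet_AE)

Several of the examples below (4, 5, and 8) use exactly this block-matrix identity as their reference implementation.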

Example 1: em_objective_for_aitken

def em_objective_for_aitken(
        T, node_to_idx, site_weights,
        m,
        transq_unscaled, transp,
        interact_trans, interact_dwell,
        data,
        root_distn1d,
        trans_out, dwell_out,
        scale,
        ):
    """
    Recast EM as a fixed-point problem.
    
    This approach is inspired by the introduction of the following paper.
    A QUASI-NEWTON ACCELERATION OF THE EM ALGORITHM
    Kenneth Lange
    1995

    """
    # Unpack some stuff.
    nsites, nnodes, nstates = data.shape
    n = nstates

    # Scale the rate matrices according to the edge ratios.
    transq = transq_unscaled * scale[:, None, None]

    # Compute the probability transition matrix arrays
    # and the interaction matrix arrays.
    trans_indicator = np.ones((n, n)) - np.identity(n)
    dwell_indicator = np.identity(n)
    for edge in T.edges():
        na, nb = edge
        eidx = node_to_idx[nb] - 1
        Q = transq[eidx]
        transp[eidx] = expm(Q)
        interact_trans[eidx] = expm_frechet(
                Q, Q * trans_indicator, compute_expm=False)
        interact_dwell[eidx] = expm_frechet(
                Q, Q * dwell_indicator, compute_expm=False)

    # Compute the expectations.
    validation = 0
    expectation_step(
            m.indices, m.indptr,
            transp, transq,
            interact_trans, interact_dwell,
            data,
            root_distn1d,
            trans_out, dwell_out,
            validation,
            )

    # Compute the per-edge ratios.
    trans_sum = (trans_out * site_weights[:, None]).sum(axis=0)
    dwell_sum = (dwell_out * site_weights[:, None]).sum(axis=0)
    scaling_ratios = trans_sum / -dwell_sum

    # Return the new scaling factors.
    return scale * scaling_ratios
Author: argriffing, Project: npctmctree, Lines of code: 59, Source file: run.py


Example 2: test_medium_matrix

 def test_medium_matrix(self):
     # profile this to see the speed difference
     n = 1000
     A = np.random.exponential(size=(n, n))
     E = np.random.exponential(size=(n, n))
     sps_expm, sps_frechet = expm_frechet(A, E, method="SPS")
     blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(A, E, method="blockEnlarge")
     assert_allclose(sps_expm, blockEnlarge_expm)
     assert_allclose(sps_frechet, blockEnlarge_frechet)
Author: hildensia, Project: scipy, Lines of code: 9, Source file: test_matfuncs.py


Example 3: test_problematic_matrix

 def test_problematic_matrix(self):
     # this test case uncovered a bug which has since been fixed
     A = np.array([[1.50591997, 1.93537998], [0.41203263, 0.23443516]], dtype=float)
     E = np.array([[1.87864034, 2.07055038], [1.34102727, 0.67341123]], dtype=float)
     A_norm_1 = scipy.linalg.norm(A, 1)
     sps_expm, sps_frechet = expm_frechet(A, E, method="SPS")
     blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(A, E, method="blockEnlarge")
     assert_allclose(sps_expm, blockEnlarge_expm)
     assert_allclose(sps_frechet, blockEnlarge_frechet)
Author: hildensia, Project: scipy, Lines of code: 9, Source file: test_matfuncs.py


Example 4: test_fuzz

 def test_fuzz(self):
     # try a bunch of crazy inputs
     rfuncs = (
             np.random.uniform,
             np.random.normal,
             np.random.standard_cauchy,
             np.random.exponential)
     ntests = 100
     for i in range(ntests):
         rfunc = random.choice(rfuncs)
         target_norm_1 = random.expovariate(1.0)
         n = random.randrange(2, 16)
         A_original = rfunc(size=(n,n))
         E_original = rfunc(size=(n,n))
         A_original_norm_1 = scipy.linalg.norm(A_original, 1)
         scale = target_norm_1 / A_original_norm_1
         A = scale * A_original
         E = scale * E_original
         M = np.vstack([
             np.hstack([A, E]),
             np.hstack([np.zeros_like(A), A])])
         expected_expm = scipy.linalg.expm(A)
         expected_frechet = scipy.linalg.expm(M)[:n, n:]
         observed_expm, observed_frechet = expm_frechet(A, E)
         assert_allclose(expected_expm, observed_expm)
         assert_allclose(expected_frechet, observed_frechet)
Author: ymarfoq, Project: outilACVDesagregation, Lines of code: 26, Source file: test_matfuncs.py


Example 5: test_small_norm_expm_frechet

 def test_small_norm_expm_frechet(self):
     # methodically test matrices with a range of norms, for better coverage
     M_original = np.array([
         [1, 2, 3, 4],
         [5, 6, 7, 8],
         [0, 0, 1, 2],
         [0, 0, 5, 6],
         ], dtype=float)
     A_original = np.array([
         [1, 2],
         [5, 6],
         ], dtype=float)
     E_original = np.array([
         [3, 4],
         [7, 8],
         ], dtype=float)
     A_original_norm_1 = scipy.linalg.norm(A_original, 1)
     selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
     m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
     for ma, mb in m_neighbor_pairs:
         ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
         ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
         target_norm_1 = 0.5 * (ell_a + ell_b)
         scale = target_norm_1 / A_original_norm_1
         M = scale * M_original
         A = scale * A_original
         E = scale * E_original
         expected_expm = scipy.linalg.expm(A)
         expected_frechet = scipy.linalg.expm(M)[:2, 2:]
         observed_expm, observed_frechet = expm_frechet(A, E)
         assert_allclose(expected_expm, observed_expm)
         assert_allclose(expected_frechet, observed_frechet)
Author: ymarfoq, Project: outilACVDesagregation, Lines of code: 32, Source file: test_matfuncs.py


Example 6: _check_hky_transition_expectations

def _check_hky_transition_expectations(t):
    n = 4
    kappa = 3.3
    nt_probs = np.array([0.1, 0.2, 0.3, 0.4])

    # Get an HKY rate matrix with arbitrary expected rate.
    pre_Q = hkymodel.get_pre_Q(kappa, nt_probs)

    # Rescale the rate matrix to have expected rate of 1.0.
    rates = pre_Q.sum(axis=1)
    expected_rate = rates.dot(nt_probs)
    pre_Q /= expected_rate

    # Convert the pre-rate matrix to an actual rate matrix
    # by subtracting row sums from the diagonal.
    rates = pre_Q.sum(axis=1)
    Q = pre_Q - np.diag(rates)

    # Create the transition probability matrix over time t.
    P = expm(Q*t)
    assert_allclose(P.sum(axis=1), 1)

    # Create a joint state distribution matrix.
    J = np.diag(nt_probs).dot(P)
    assert_allclose(J.sum(), 1)

    # Get the expm frechet matrix.
    C = pre_Q * t
    S = expm_frechet(Q*t, C, compute_expm=False)

    # Get the weighted sum of entries of S.
    expectation_a = ((S / P) * J).sum()
    assert_allclose(expectation_a, t)

    # Try an equivalent calculation which does not use P or J.
    expectation_b = np.diag(nt_probs).dot(S).sum()
    assert_allclose(expectation_b, t)

    # Use the library function.
    T = nx.DiGraph()
    root = 'N0'
    edge = ('N0', 'N1')
    T.add_edge(*edge)
    edge_to_Q = {edge : Q * t}
    edge_to_combination = {edge : pre_Q * t}
    root_distn = nt_probs
    data_weight_pairs = []
    for sa, sb in product(range(n), repeat=2):
        vec_a = np.zeros(n)
        vec_a[sa] = 1
        vec_b = np.zeros(n)
        vec_b[sb] = 1
        data = {'N0' : vec_a, 'N1' : vec_b}
        weight = J[sa, sb]
        data_weight_pairs.append((data, weight))
    edge_to_expectation = get_edge_to_expectation(
            T, root, edge_to_Q, edge_to_combination,
            root_distn, data_weight_pairs)
    expectation_c = edge_to_expectation[edge]
    assert_allclose(expectation_c, t)
Author: argriffing, Project: npctmctree, Lines of code: 60, Source file: test_transition_expectations.py


Example 7: compute_prop_grad

    def compute_prop_grad(self, k, j, compute_prop=True):
        """
        Calculate the gradient of the propagator wrt the control amplitude
        in the timeslot using the expm_frechet method.
        The propagator is calculated (almost) for 'free' in this method,
        and hence it is returned if compute_prop==True.
        Returns:
            [prop], prop_grad
        """
        dyn = self.parent
        A = dyn.get_dyn_gen(k)*dyn.tau[k]
        E = dyn.get_ctrl_dyn_gen(j)*dyn.tau[k]

        if compute_prop:
            prop, propGrad = la.expm_frechet(A, E)
            return prop, propGrad
        else:
            propGrad = la.expm_frechet(A, E, compute_expm=False)
            return propGrad
Author: chemouda, Project: qutip, Lines of code: 19, Source file: propcomp.py


Example 8: test_expm_frechet

 def test_expm_frechet(self):
     # a test of the basic functionality
     M = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [0, 0, 1, 2], [0, 0, 5, 6]], dtype=float)
     A = np.array([[1, 2], [5, 6]], dtype=float)
     E = np.array([[3, 4], [7, 8]], dtype=float)
     expected_expm = scipy.linalg.expm(A)
     expected_frechet = scipy.linalg.expm(M)[:2, 2:]
     for kwargs in ({}, {"method": "SPS"}, {"method": "blockEnlarge"}):
         observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
         assert_allclose(expected_expm, observed_expm)
         assert_allclose(expected_frechet, observed_frechet)
Author: hildensia, Project: scipy, Lines of code: 11, Source file: test_matfuncs.py


Example 9: test_expm_jacobian_vector_product

    def test_expm_jacobian_vector_product(self):
        n = 4
        x = numpy.random.randn(n, n)
        E = numpy.random.randn(n, n)

        # use algopy to get the jacobian vector product
        ax = UTPM.init_jac_vec(x.flatten(), E.flatten())
        ay = expm(ax.reshape((n, n))).reshape((n*n,))
        g1 = UTPM.extract_jac_vec(ay)

        # compute the jacobian vector product directly using expm_frechet
        M = expm_frechet(x, E, compute_expm=False).flatten()

        assert_allclose(g1, M, rtol=1e-6)
Author: b45ch1, Project: algopy, Lines of code: 14, Source file: test_linalg.py


Example 10: get_traj_stats

    def get_traj_stats(self, J_other):
        n = self.nstates

        # compute the observed initial distribution
        distn = J_other.sum(axis=1)

        # compute conditional expected dwell times
        dwell = np.zeros(n)
        for i in range(n):
            E = np.zeros((n, n), dtype=float)
            E[i, i] = 1
            interact = expm_frechet(self.Q, E, compute_expm=False)
            dwell[i] = (J_other * interact / self.P).sum()
        assert_allclose(dwell.sum(), 1)

        # compute conditional expected transition counts
        trans = np.zeros((n, n), dtype=float)
        for i in range(n):
            E = np.zeros((n, n), dtype=float)
            E[i, 1-i] = 1
            interact = expm_frechet(self.Q, self.Q*E, compute_expm=False)
            trans[i, 1-i] = (J_other * interact / self.P).sum()

        return distn, dwell, trans
Author: argriffing, Project: ctmczoo, Lines of code: 24, Source file: model2s.py


Example 11: test_expm_jacobian

    def test_expm_jacobian(self):
        n = 4
        x = numpy.random.randn(n, n)

        # use algopy to get the jacobian
        ax = UTPM.init_jacobian(x)
        ay = expm(ax)
        g1 = UTPM.extract_jacobian(ay)

        # compute the jacobian directly using expm_frechet
        M = numpy.zeros((n, n, n*n))
        ident = numpy.identity(n*n)
        for i in range(n*n):
            E = ident[i].reshape(n, n)
            M[:, :, i] = expm_frechet(x, E, compute_expm=False)

        assert_allclose(g1, M, rtol=1e-6)
Author: b45ch1, Project: algopy, Lines of code: 17, Source file: test_linalg.py


Example 12: _compute_prop_grad

    def _compute_prop_grad(self, k, j, compute_prop=True):
        """
        Calculate the gradient of the propagator wrt the control amplitude
        in the timeslot using the expm_frechet method.
        The propagator is calculated (almost) for 'free' in this method,
        and hence it is returned if compute_prop==True.
        Returns:
            [prop], prop_grad
        """
        dyn = self.parent

        if dyn.oper_dtype == Qobj:
            A = dyn._get_phased_dyn_gen(k).full()*dyn.tau[k]
            E = dyn._get_phased_ctrl_dyn_gen(k, j).full()*dyn.tau[k]
            if compute_prop:
                prop_dense, prop_grad_dense = la.expm_frechet(A, E)
                prop = Qobj(prop_dense, dims=dyn.dyn_dims)
                prop_grad = Qobj(prop_grad_dense,
                                            dims=dyn.dyn_dims)
            else:
                prop_grad_dense = la.expm_frechet(A, E, compute_expm=False)
                prop_grad = Qobj(prop_grad_dense,
                                            dims=dyn.dyn_dims)
        elif dyn.oper_dtype == np.ndarray:
            A = dyn._get_phased_dyn_gen(k)*dyn.tau[k]
            E = dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k]
            if compute_prop:
                prop, prop_grad = la.expm_frechet(A, E)
            else:
                prop_grad = la.expm_frechet(A, E,
                                                    compute_expm=False)
        else:
            # Assuming some sparse matrix
            spcls = dyn._dyn_gen[k].__class__
            A = (dyn._get_phased_dyn_gen(k)*dyn.tau[k]).toarray()
            E = (dyn._get_phased_ctrl_dyn_gen(k, j)*dyn.tau[k]).toarray()
            if compute_prop:
                prop_dense, prop_grad_dense = la.expm_frechet(A, E)
                prop = spcls(prop_dense)
                prop_grad = spcls(prop_grad_dense)
            else:
                prop_grad_dense = la.expm_frechet(A, E, compute_expm=False)
                prop_grad = spcls(prop_grad_dense)

        if compute_prop:
            return prop, prop_grad
        else:
            return prop_grad
Author: NunoEdgarGub1, Project: qutip, Lines of code: 48, Source file: propcomp.py


Example 13: do_em

def do_em(T, root, edge_to_rate, edge_to_Q, root_distn1d,
        data_prob_pairs, guess_edge_to_rate):
    # Extract the number of states.
    n = root_distn1d.shape[0]

    # Do the EM iterations.
    nsteps = 3
    for em_iteration_index in range(nsteps):
        
        # Compute the scaled edge-specific transition rate matrices.
        edge_to_scaled_Q = {}
        for edge in T.edges():
            rate = guess_edge_to_rate[edge]
            Q = edge_to_Q[edge]
            edge_to_scaled_Q[edge] = rate * Q

        # Compute the edge-specific transition probability matrices.
        edge_to_P = {}
        for edge in T.edges():
            scaled_Q = edge_to_scaled_Q[edge]
            #print(edge, 'Q:')
            #print(scaled_Q)
            P = expm(scaled_Q)
            edge_to_P[edge] = P

        # For each edge, compute the interaction matrices
        # corresponding to all transition counts and dwell times.
        trans_indicator = np.ones((n, n)) - np.identity(n)
        dwell_indicator = np.identity(n)
        edge_to_interact_trans = {}
        edge_to_interact_dwell = {}
        for edge in T.edges():

            # extract edge-specific transition matrices
            Q = edge_to_scaled_Q[edge]
            P = edge_to_P[edge]

            # compute the transition interaction matrix
            interact = expm_frechet(Q, Q * trans_indicator, compute_expm=False)
            edge_to_interact_trans[edge] = interact

            # compute the dwell interaction matrix
            interact = expm_frechet(Q, Q * dwell_indicator, compute_expm=False)
            edge_to_interact_dwell[edge] = interact

        # Initialize edge-specific summaries.
        edge_to_trans_expectation = defaultdict(float)
        edge_to_dwell_expectation = defaultdict(float)

        # Compute the edge-specific summaries
        # conditional on the current edge-specific rate guesses
        # and on the leaf state distribution computed
        # from the true edge-specific rates.
        for node_to_data_fvec1d, lhood in data_prob_pairs:

            # Compute the joint endpoint state distribution for each edge.
            edge_to_J = get_edge_to_distn2d(
                    T, edge_to_P, root, root_distn1d, node_to_data_fvec1d)
            
            # For each edge, compute the transition expectation contribution
            # and compute the dwell expectation contribution.
            # These will be scaled by the data likelihood.
            for edge in T.edges():

                # extract some edge-specific matrices
                P = edge_to_P[edge]
                J = edge_to_J[edge]
                #print(edge)
                #print(P)
                #print(J)
                #print()

                # transition contribution
                interact = edge_to_interact_trans[edge]
                total = 0
                for i in range(n):
                    for j in range(n):
                        if J[i, j]:
                            coeff = J[i, j] / P[i, j]
                            total += coeff * interact[i, j]
                edge_to_trans_expectation[edge] += lhood * total

                # dwell contribution
                interact = edge_to_interact_dwell[edge]
                total = 0
                for i in range(n):
                    for j in range(n):
                        if J[i, j]:
                            coeff = J[i, j] / P[i, j]
                            total += coeff * interact[i, j]
                edge_to_dwell_expectation[edge] += lhood * total

        # According to EM, update each edge-specific rate guess
        # using a ratio of transition and dwell expectations.
        for edge in T.edges():
            trans = edge_to_trans_expectation[edge]
            dwell = edge_to_dwell_expectation[edge]
            ratio = trans / -dwell
            old_rate = guess_edge_to_rate[edge] 
            new_rate = old_rate * ratio
#......... part of the code is omitted here .........
Author: argriffing, Project: npctmctree, Lines of code: 101, Source file: run.py


Example 14: do_cythonized_em

def do_cythonized_em(T, root,
        edge_to_rate, edge_to_Q, root_distn1d,
        data_prob_pairs, guess_edge_to_rate):
    """
    Try the Cython implementation.
    def expectation_step(
            idx_t[:] csr_indices, # (nnodes-1,)
            idx_t[:] csr_indptr, # (nnodes+1,)
            cnp.float_t[:, :, :] transp, # (nnodes-1, nstates, nstates)
            cnp.float_t[:, :, :] transq, # (nnodes-1, nstates, nstates)
            cnp.float_t[:, :, :] interact_trans, # (nnodes-1, nstates, nstates)
            cnp.float_t[:, :, :] interact_dwell, # (nnodes-1, nstates, nstates)
            cnp.float_t[:, :, :] data, # (nsites, nnodes, nstates)
            cnp.float_t[:] root_distn, # (nstates,)
            cnp.float_t[:, :] trans_out, # (nsites, nnodes-1)
            cnp.float_t[:, :] dwell_out, # (nsites, nnodes-1)
            int validation=1,
            ):

    """
    # Define a toposort node ordering and a corresponding csr matrix.
    nodes = nx.topological_sort(T, [root])
    node_to_idx = dict((na, i) for i, na in enumerate(nodes))
    m = nx.to_scipy_sparse_matrix(T, nodes)

    # Stack the transition rate matrices into a single array.
    nnodes = len(nodes)
    nstates = root_distn1d.shape[0]
    n = nstates
    transq = np.empty((nnodes-1, nstates, nstates), dtype=float)
    for (na, nb), Q in edge_to_Q.items():
        edge_idx = node_to_idx[nb] - 1
        transq[edge_idx] = Q

    # Allocate a transition probability matrix array
    # and some interaction matrix arrays.
    transp = np.empty_like(transq)
    interact_trans = np.empty_like(transq)
    interact_dwell = np.empty_like(transq)

    # Stack the data into a single array,
    # and construct an array of site weights.
    nsites = len(data_prob_pairs)
    datas, probs = zip(*data_prob_pairs)
    site_weights = np.array(probs, dtype=float)
    data = np.empty((nsites, nnodes, nstates), dtype=float)
    for site_index, site_data in enumerate(datas):
        for i, na in enumerate(nodes):
            data[site_index, i] = site_data[na]

    # Initialize expectation arrays.
    trans_out = np.empty((nsites, nnodes-1), dtype=float)
    dwell_out = np.empty((nsites, nnodes-1), dtype=float)

    # Initialize the per-edge rate matrix scaling factor guesses.
    scaling_guesses = np.empty(nnodes-1, dtype=float)
    scaling_ratios = np.ones(nnodes-1, dtype=float)
    for (na, nb), rate in guess_edge_to_rate.items():
        eidx = node_to_idx[nb] - 1
        scaling_guesses[eidx] = rate

    # Pre-scale the rate matrix.
    transq *= scaling_guesses[:, None, None]

    # Do the EM iterations.
    nsteps = 1000
    for em_iteration_index in range(nsteps):

        # Scale the rate matrices according to the edge ratios.
        transq *= scaling_ratios[:, None, None]

        # Compute the probability transition matrix arrays
        # and the interaction matrix arrays.
        trans_indicator = np.ones((n, n)) - np.identity(n)
        dwell_indicator = np.identity(n)
        for edge in T.edges():
            na, nb = edge
            eidx = node_to_idx[nb] - 1
            Q = transq[eidx]
            #print(edge, 'Q:')
            #print(Q)
            transp[eidx] = expm(Q)
            interact_trans[eidx] = expm_frechet(
                    Q, Q * trans_indicator, compute_expm=False)
            interact_dwell[eidx] = expm_frechet(
                    Q, Q * dwell_indicator, compute_expm=False)

        # Compute the expectations.
        validation = 1
        expectation_step(
                m.indices, m.indptr,
                transp, transq,
                interact_trans, interact_dwell,
                data,
                root_distn1d,
                trans_out, dwell_out,
                validation,
                )

        # Compute the per-edge ratios.
#......... part of the code is omitted here .........
Author: argriffing, Project: npctmctree, Lines of code: 101, Source file: run.py


Example 15: em_function

def em_function(
        T, node_to_idx, site_weights,
        m,
        transq_unscaled,
        data,
        root_distn1d,
        mem,
        use_log_scale,
        scale,
        ):
    """
    This is purely for edge rate scale and does not consider other parameters.

    Parameters
    ----------
    T : x
        x
    node_to_idx : x
        x
    site_weights : x
        x
    m : x
        x
    transq_unscaled : x
        x
    data : x
        x
    root_distn1d : x
        x
    mem : x
        x
    use_log_scale : bool
        True if the scale argument is in logarithmic units.
    scale : 1d float ndarray
        Edge rate scaling factors, possibly in logarithmic units.

    Returns
    -------
    scale : 1d float ndarray
        Updated edge rate scaling factors, possibly in logarithmic units.

    """
    #TODO improve docstring and add unit tests

    # Transform the scaling factors if necessary.
    if use_log_scale:
        log_scale = scale
        scale = np.exp(scale)

    # Unpack some stuff.
    nsites, nnodes, nstates = data.shape

    # Scale the rate matrices according to the edge ratios.
    # Use in-place operations to avoid unnecessary memory copies.
    #transq = transq_unscaled * scale[:, None, None]
    mem.transq[...] = transq_unscaled
    mem.transq *= scale[:, None, None]

    # Compute the probability transition matrix arrays
    # and the interaction matrix arrays.
    for edge in T.edges():
        na, nb = edge
        eidx = node_to_idx[nb] - 1
        Q = mem.transq[eidx]
        mem.transp[eidx] = expm(Q)
        mem.interact_trans[eidx] = expm_frechet(
                Q, Q * mem.trans_indicator, compute_expm=False)
        mem.interact_dwell[eidx] = expm_frechet(
                Q, Q * mem.dwell_indicator, compute_expm=False)

    # Compute the expectations using Cython.
    validation = 0
    expectation_step(
            m.indices, m.indptr,
            mem.transp, mem.transq,
            mem.interact_trans, mem.interact_dwell,
            data,
            root_distn1d,
            mem.trans_out, mem.dwell_out,
            validation,
            )

    # Compute the per-edge ratios.
    trans_sum = (mem.trans_out * site_weights[:, None]).sum(axis=0)
    dwell_sum = (mem.dwell_out * site_weights[:, None]).sum(axis=0)
    if use_log_scale:
        log_scaling_ratios = np.log(trans_sum) - np.log(-dwell_sum)
        return log_scale + log_scaling_ratios
    else:
        scaling_ratios = trans_sum / -dwell_sum
        return scale * scaling_ratios
Author: argriffing, Project: npctmctree, Lines of code: 91, Source file: em.py



Note: The scipy.linalg.expm_frechet examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors; please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.

