
Python tensor.mul Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.mul. If you have been wondering how exactly mul is used, or what it looks like in real code, the curated examples below should help.



Below are 20 code examples of the mul function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.

Example 1: beta_div

import theano.tensor as T
from theano.ifelse import ifelse


def beta_div(X, W, H, beta):
    """Compute beta divergence D(X|WH)

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        Bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar
        the beta parameter of the divergence

    Returns
    -------
    div : Theano scalar
        beta divergence D(X|WH)"""
    div = ifelse(
      T.eq(beta, 2),
      T.sum(1. / 2 * T.power(X - T.dot(H, W), 2)),
      ifelse(
        T.eq(beta, 0),
        T.sum(X / T.dot(H, W) - T.log(X / T.dot(H, W)) - 1),
        ifelse(
          T.eq(beta, 1),
          T.sum(T.mul(X, (T.log(X) - T.log(T.dot(H, W)))) + T.dot(H, W) - X),
          T.sum(1. / (beta * (beta - 1.)) * (T.power(X, beta) +
                (beta - 1.) * T.power(T.dot(H, W), beta) -
                beta * T.power(T.mul(X, T.dot(H, W)), (beta - 1)))))))
    return div
Developer: rserizel | Project: beta_nmf | Lines: 31 | Source: costs.py
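
As a quick sanity check, the function above can be compiled into a callable with theano.function. The following is a minimal usage sketch with assumed shapes and variable names (not taken from the beta_nmf project); note that X is approximated by the product H·W:

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')        # (n_samples, n_features) data
W = T.matrix('W')        # (n_components, n_features) bases
H = T.matrix('H')        # (n_samples, n_components) activations
beta = T.scalar('beta')

div_fn = theano.function([X, W, H, beta], beta_div(X, W, H, beta))

rng = np.random.RandomState(0)
Xv = rng.rand(8, 5).astype(theano.config.floatX)
Wv = rng.rand(3, 5).astype(theano.config.floatX)
Hv = rng.rand(8, 3).astype(theano.config.floatX)
print(div_fn(Xv, Wv, Hv, 2.0))  # beta=2 gives the squared Euclidean cost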


Example 2: beta_H_groupSparse

import theano
import theano.tensor as T
from theano.ifelse import ifelse


def beta_H_groupSparse(X, W, H, beta, l_sp, start, stop):
    """Update activation with beta divergence

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        Bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar
        the beta parameter of the divergence
    l_sp : Theano scalar
        group sparsity penalty weight
    start : Theano vector
        index of the first column of each group
    stop : Theano vector
        index one past the last column of each group

    Returns
    -------
    H : Theano tensor
        Updated version of the activations
    """
    results, _ = theano.scan(fn=lambda start_i, stop_i, prior_results, H:
                             T.set_subtensor(
                                prior_results[:, start_i:stop_i].T,
                                H[:, start_i:stop_i].T /
                                H[:, start_i:stop_i].norm(2, axis=1)).T,
                             outputs_info=T.zeros_like(H),
                             sequences=[start, stop],
                             non_sequences=H)
    cst = results[-1]
    up = ifelse(T.eq(beta, 2), (T.dot(X, W)) / (T.dot(T.dot(H, W.T), W) +
                                                l_sp * cst),
                               (T.dot(T.mul(T.power(T.dot(H, W.T),
                                            (beta - 2)), X), W)) /
                               (T.dot(T.power(T.dot(H, W.T), (beta-1)), W) +
                                l_sp * cst))
    return T.mul(H, up)
Developer: mikimaus78 | Project: groupNMF | Lines: 34 | Source: updates.py


Example 3: t_forward_step

    def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ifco, b_ifco,
                       ln_b1, ln_s1, ln_b2, ln_s2, ln_b3, ln_s3, t_n_out):

        cur_w_in_sig_ln = self.ln(cur_w_in_sig, ln_b1, ln_s1)

        pre_w_out_sig = T.dot(pre_out_sig, w_ifco)
        pre_w_out_sig_ln = self.ln(pre_w_out_sig, ln_b2, ln_s2)

        preact = T.add(cur_w_in_sig_ln, pre_w_out_sig_ln, b_ifco)

        inner_act = self.activation  # e.g. T.tanh or T.nnet.hard_sigmoid
        gate_act = self.sigmoid()  # e.g. T.nnet.sigmoid or T.nnet.hard_sigmoid

        # Input Gate
        ig_t1 = gate_act(preact[:, 0:t_n_out])
        # Forget Gate
        fg_t1 = gate_act(preact[:, 1 * t_n_out:2 * t_n_out])
        # Cell State
        cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act(preact[:, 2 * t_n_out:3 * t_n_out])))

        mask = T.addbroadcast(mask, 1)
        cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig

        cs_t1_ln = self.ln(cs_t1, ln_b3, ln_s3)
        # Output Gate
        og_t1 = gate_act(preact[:, 3 * t_n_out:4 * t_n_out])
        # Output LSTM
        out_sig = T.mul(og_t1, inner_act(cs_t1_ln))

        out_sig = mask * out_sig + (1. - mask) * pre_out_sig

        return [out_sig, cs_t1]
Developer: dzungcamlang | Project: recnet | Lines: 34 | Source: ln_reccurent_layer.py
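
A note on the gate layout used here and in example 4 below: the fused weight matrix w_ifco produces a preactivation whose columns pack the input, forget, cell, and output blocks side by side, so each gate is recovered by slicing columns in chunks of t_n_out. A minimal NumPy illustration of that slicing (shapes assumed purely for demonstration):

import numpy as np

t_n_out = 3
preact = np.arange(2 * 4 * t_n_out).reshape(2, 4 * t_n_out)  # batch of 2, four fused gate blocks

ig = preact[:, 0 * t_n_out:1 * t_n_out]  # input gate block
fg = preact[:, 1 * t_n_out:2 * t_n_out]  # forget gate block
cs = preact[:, 2 * t_n_out:3 * t_n_out]  # cell candidate block
og = preact[:, 3 * t_n_out:4 * t_n_out]  # output gate block
print(ig.shape, fg.shape, cs.shape, og.shape)  # all (2, 3)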


Example 4: t_forward_step

    def t_forward_step(self, mask, cur_w_in_sig, pre_out_sig, pre_cell_sig, w_ifco, b_ifco,
                       t_n_out):

        ifco = T.add(T.dot(pre_out_sig, w_ifco), b_ifco)

        inner_act = self.activation
        gate_act = self.sigmoid()

        # Input Gate
        ig_t1 = gate_act(T.add(ifco[:, 0:t_n_out], cur_w_in_sig[:, 0:t_n_out]))
        # Forget Gate
        fg_t1 = gate_act(T.add(ifco[:, 1 * t_n_out:2 * t_n_out],
                               cur_w_in_sig[:, 1 * t_n_out:2 * t_n_out]))
        # Cell State
        cs_t1 = T.add(T.mul(fg_t1, pre_cell_sig), T.mul(ig_t1, inner_act(
            T.add(ifco[:, 2 * t_n_out:3 * t_n_out], cur_w_in_sig[:, 2 * t_n_out:3 * t_n_out]))))

        mask = T.addbroadcast(mask, 1)
        cs_t1 = mask * cs_t1 + (1. - mask) * pre_cell_sig
        # equivalent: cs_t1 = T.switch(mask, cs_t1, pre_cell_sig)

        # Output Gate
        og_t1 = gate_act(
            T.add(ifco[:, 3 * t_n_out:4 * t_n_out], cur_w_in_sig[:, 3 * t_n_out:4 * t_n_out]))
        # Output LSTM
        out_sig = T.mul(og_t1, inner_act(cs_t1))

        out_sig = mask * out_sig + (1. - mask) * pre_out_sig

        return [out_sig, cs_t1]
Developer: dzungcamlang | Project: recnet | Lines: 30 | Source: recurrent_layer.py


Example 5: beta_H_Sparse

import theano.tensor as T
from theano.ifelse import ifelse


def beta_H_Sparse(X, W, H, beta, l_sp):
    """Update activation with beta divergence

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        Bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar
        the beta parameter of the divergence
    l_sp : Theano scalar
        sparsity penalty weight

    Returns
    -------
    H : Theano tensor
        Updated version of the activations
    """
    up = ifelse(T.eq(beta, 2), (T.dot(X, W)) / (T.dot(T.dot(H, W.T), W) +
                                                l_sp),
                               (T.dot(T.mul(T.power(T.dot(H, W.T),
                                            (beta - 2)), X), W)) /
                               (T.dot(T.power(T.dot(H, W.T), (beta-1)), W) +
                                l_sp))
    return T.mul(H, up)
Developer: mikimaus78 | Project: groupNMF | Lines: 25 | Source: updates.py
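
The update above is multiplicative: the new activations are the old ones times a ratio of nonnegative terms, which keeps H nonnegative. A hedged sketch of how it might be iterated, with H and W as shared variables (shapes and names are assumptions, not from the groupNMF project; note this project stores W as (features, components), so the reconstruction is T.dot(H, W.T)):

import numpy as np
import theano
import theano.tensor as T

rng = np.random.RandomState(0)
n, f, k = 20, 10, 4
Xv = rng.rand(n, f).astype(theano.config.floatX)
H = theano.shared(rng.rand(n, k).astype(theano.config.floatX), name='H')
W = theano.shared(rng.rand(f, k).astype(theano.config.floatX), name='W')

X = T.matrix('X')
beta = T.scalar('beta')
l_sp = T.scalar('l_sp')

# one multiplicative update of the activations per call
step = theano.function([X, beta, l_sp], [],
                       updates=[(H, beta_H_Sparse(X, W, H, beta, l_sp))])
for _ in range(50):
    step(Xv, 2.0, 0.1)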


Example 6: W_beta_sub_withcst

import theano.tensor as T
from theano.ifelse import ifelse


def W_beta_sub_withcst(X, W, Wsub, H, Hsub, beta, sum_grp, lambda_grp, card_grp):
    """Update group bases with beta divergence

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        Bases
    Wsub : Theano tensor
        group Bases        
    H : Theano tensor
        activation matrix
    Hsub : Theano tensor
        group activation matrix
    beta : Theano scalar
        the beta parameter of the divergence
    sum_grp : Theano tensor
        sum of the bases that belong to the group
    lambda_grp : Theano scalar
        weight of the group constraint
    card_grp : Theano scalar
        number of bases in the group

    Returns
    -------
    Wsub : Theano tensor
        Updated version of the group bases
    """
    up = ifelse(T.eq(beta, 2), (T.dot(X.T, Hsub) + lambda_grp * sum_grp) /
                               (T.dot(T.dot(H, W.T).T, Hsub) + lambda_grp * card_grp * Wsub),
                (T.dot(T.mul(T.power(T.dot(H, W.T), (beta - 2)), X).T, Hsub)+
                 lambda_grp * sum_grp) /
                (T.dot(T.power(T.dot(H, W.T), (beta-1)).T, Hsub) +
                 lambda_grp * card_grp * Wsub))
    return T.mul(Wsub, up)
Developer: mikimaus78 | Project: groupNMF | Lines: 29 | Source: updates.py


Example 7: H_beta_sub

import theano.tensor as T
from theano.ifelse import ifelse


def H_beta_sub(X, W, Wsub, H, Hsub, beta):
    """Update group activation with beta divergence

    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        Bases
    Wsub : Theano tensor
        group Bases        
    H : Theano tensor
        activation matrix
    Hsub : Theano tensor
        group activation matrix
    beta : Theano scalar
        the beta parameter of the divergence

    Returns
    -------
    Hsub : Theano tensor
        Updated version of the group activations
    """
    up = ifelse(T.eq(beta, 2), (T.dot(X, Wsub)) / (T.dot(T.dot(H, W.T), Wsub)),
                (T.dot(T.mul(T.power(T.dot(H, W.T), (beta - 2)), X), Wsub)) /
                (T.dot(T.power(T.dot(H, W.T), (beta-1)), Wsub)))
    return T.mul(Hsub, up)
Developer: mikimaus78 | Project: groupNMF | Lines: 26 | Source: updates.py


Example 8: get_cost_updates

    def get_cost_updates(self, corruption_level, learning_rate):
        """ This function computes the cost and the updates for one trainng
        step of the dA """

        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)

        L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        # Calculate cross-entropy cost (as alternative to MSE) of the reconstruction of the minibatch.

        weight_decay = 0.5 * self.lamda * (T.sum(T.mul(self.W, self.W)) + T.sum(T.mul(self.W_prime, self.W_prime)))
        # Calculate weight decay term to prevent overfitting

        rho_hat = T.sum(y, axis=1) / tilde_x.shape[1]
        KL_divergence = self.beta * T.sum(self.rho * T.log(self.rho / rho_hat) + (1-self.rho) * T.log((1 - self.rho)/(1-rho_hat)))
        # KL divergence sparsity term

        # Calculate overall errors
        cost = T.mean(L) + weight_decay + KL_divergence

        # Compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)

        # Generate the list of updates
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        return (cost, updates)
Developer: TakuTsuzuki | Project: Hackathon2015 | Lines: 32 | Source: sAE.py


Example 9: grad

    def grad(self, inputs, g_outputs):
        (rho,) = inputs
        (gz,) = g_outputs
        A = self.Id - tt.mul(rho, self.Wd)
        dinv = tt.nlinalg.matrix_inverse(A).T
        out = tt.mul(dinv, -self.Wd)
        return [tt.as_tensor(tt.sum(tt.mul(out, gz)), ndim=1)]
Developer: jGaboardi | Project: pysal | Lines: 7 | Source: ops.py
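
For context, this grad (from pysal's spatial-lag Op) appears to apply the identity d/d(rho) log|det(I - rho*W)| = -trace((I - rho*W)^-1 W): the elementwise product of inverse(A).T with -W followed by a sum is exactly that trace, scaled by the incoming gradient gz. A small NumPy check of the identity (all values assumed for illustration):

import numpy as np

rng = np.random.RandomState(0)
W = rng.rand(4, 4) * 0.1
rho, eps = 0.5, 1e-6

def logdet(r):
    return np.log(abs(np.linalg.det(np.eye(4) - r * W)))

analytic = np.sum(np.linalg.inv(np.eye(4) - rho * W).T * (-W))
numeric = (logdet(rho + eps) - logdet(rho - eps)) / (2 * eps)
print(analytic, numeric)  # should agree to ~6 decimal places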


Example 10: minus_corr

import theano.tensor as T


def minus_corr(u, v):
    um = T.sub(u, T.mean(u))
    vm = T.sub(v, T.mean(v))
    r_num = T.sum(T.mul(um, vm))
    r_den = T.sqrt(T.mul(T.sum(T.sqr(um)), T.sum(T.sqr(vm))))
    r = T.true_div(r_num, r_den)
    r = T.neg(r)
    return r
Developer: dp00143 | Project: NeuralCorrelation | Lines: 8 | Source: functions.py
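
minus_corr returns the negative Pearson correlation, which is convenient as a loss to minimize when the goal is maximal correlation (as in CCA-style training). A minimal usage sketch (input names assumed):

import numpy as np
import theano
import theano.tensor as T

u = T.vector('u')
v = T.vector('v')
neg_corr = theano.function([u, v], minus_corr(u, v),
                           allow_input_downcast=True)

a = np.array([1., 2., 3., 4.])
print(neg_corr(a, 2 * a + 1))   # perfectly correlated -> ~ -1.0
print(neg_corr(a, -a))          # perfectly anti-correlated -> ~ 1.0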


Example 11: f1_score

    def f1_score(self, y):
        n_total = y.shape[0]
        n_relevant_documents_predicted = T.sum(T.eq(T.ones(self.y_pred.shape), self.y_pred))
        two_vector = T.add(T.ones(self.y_pred.shape), T.ones(self.y_pred.shape))
        n_relevant_predicted_correctly = T.sum(T.eq(T.add(self.y_pred, y), two_vector))
        precision = T.true_div(n_relevant_predicted_correctly, n_relevant_documents_predicted)
        recall = T.true_div(n_relevant_predicted_correctly, n_total)
        f1_score = T.mul(2.0, T.true_div(T.mul(precision, recall), T.add(precision, recall)))
        return [f1_score, precision, recall]
Developer: ericrincon | Project: Deep-Learning-NLP | Lines: 9 | Source: LogisticRegression.py
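
The method above counts predicted positives by comparing y_pred to a vector of ones, and true positives by checking where y_pred + y equals two. A standalone sketch of the same arithmetic outside the class (names are hypothetical; note that, as in the original, recall divides by the total number of examples rather than by the number of actually relevant ones):

import numpy as np
import theano
import theano.tensor as T

y_pred = T.vector('y_pred')
y = T.vector('y')

n_total = y.shape[0]
n_pred_pos = T.sum(T.eq(T.ones(y_pred.shape), y_pred))
two_vector = T.add(T.ones(y_pred.shape), T.ones(y_pred.shape))
tp = T.sum(T.eq(T.add(y_pred, y), two_vector))   # predicted 1 and labeled 1

precision = T.true_div(tp, n_pred_pos)
recall = T.true_div(tp, n_total)
f1 = T.mul(2.0, T.true_div(T.mul(precision, recall), T.add(precision, recall)))

f1_fn = theano.function([y_pred, y], [f1, precision, recall],
                        allow_input_downcast=True)
print(f1_fn(np.array([1., 1., 0.]), np.array([1., 0., 1.])))  # [0.4, 0.5, 0.333...]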


Example 12: lmul_T

    def lmul_T(self, x):
        CC, RR = self.split_right_shape(tuple(x.shape), T=True)
        x_WT = theano.dot(
            x.reshape((tensor.mul(*CC), tensor.mul(*RR))),
            self._W.T)
        cshape = self.col_shape()
        yshp = tensor.stack(*(CC + cshape))
        rval = x_WT.reshape(yshp, ndim=len(CC) + len(cshape))
        return rval
Developer: HaniAlmousli | Project: pylearn | Lines: 9 | Source: matrixmul.py


Example 13: set_dropout

    def set_dropout(self, dropout, activation_function):
        action_with_drop = None
        if dropout > 0:
            action_with_drop = lambda X: T.mul(activation_function(X), self.dropout_function)
            self.activation_cv_dropout = lambda X: T.mul(activation_function(X), self.dropout_function_cv)
        else:
            action_with_drop = activation_function
            self.activation_cv_dropout = activation_function

        return action_with_drop
Developer: ANB2 | Project: MachineLearning | Lines: 10 | Source: neural_network_layer.py


Example 14: __objective_triple

    def __objective_triple(self, triple):
        """
        form the objective function value of a triple
        :param triple: (entity_l, entity_r, relation)
        :return: the L1 norm of the difference between the two projected entity embeddings
        """
        l_index, r_index, relation_index = triple
        return T.nlinalg.norm(T.mul(self.Relation_L[relation_index, :, :], self.Entity[:, l_index]) -
                              T.mul(self.Relation_R[relation_index, :, :], self.Entity[:, r_index]),
                              ord=1)
Developer: subhadeepmaji | Project: ml_algorithms | Lines: 10 | Source: RelationEmbedding.py


Example 15: lmul

    def lmul(self, x):
        # dot(x, A)
        RR, CC = self.split_left_shape(tuple(x.shape), T=False)
        xW = theano.dot(
            x.reshape((tensor.mul(*RR), tensor.mul(*CC))),
            self._W)
        rshape = self.row_shape()
        yshp = tensor.stack(*(RR + rshape))
        rval = xW.reshape(yshp, ndim=len(RR) + len(rshape))
        return rval
Developer: HaniAlmousli | Project: pylearn | Lines: 10 | Source: matrixmul.py


Example 16: sequence_iteration

    def sequence_iteration(self, output, mask, use_dropout=0, dropout_value=0.5):

        dot_product = T.dot(output, self.t_w_out)

        linear_o = T.add(dot_product, self.t_b_out)

        mask = T.addbroadcast(mask, 2)  # TODO: is this broadcast necessary?
        output = T.mul(mask, linear_o) + T.mul((1. - mask), 1e-6)

        return output
Developer: dzungcamlang | Project: recnet | Lines: 11 | Source: output_layer.py


Example 17: square_dist

    def square_dist(self, X, Xs):
        X = tt.mul(X, 1.0 / self.ls)
        X2 = tt.sum(tt.square(X), 1)
        if Xs is None:
            sqd = (-2.0 * tt.dot(X, tt.transpose(X))
                   + (tt.reshape(X2, (-1, 1)) + tt.reshape(X2, (1, -1))))
        else:
            Xs = tt.mul(Xs, 1.0 / self.ls)
            Xs2 = tt.sum(tt.square(Xs), 1)
            sqd = (-2.0 * tt.dot(X, tt.transpose(Xs))
                   + (tt.reshape(X2, (-1, 1)) + tt.reshape(Xs2, (1, -1))))
        return tt.clip(sqd, 0.0, np.inf)
Developer: springcoil | Project: pymc3 | Lines: 12 | Source: cov.py
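
Both square_dist variants (this example and example 18 below, from two pymc3 revisions) use the expansion ||x - z||^2 = ||x||^2 + ||z||^2 - 2*x.z to get all pairwise squared distances from one matrix product, then clip tiny negative values caused by floating-point cancellation. A self-contained sketch of the same trick without the class (names assumed):

import numpy as np
import theano
import theano.tensor as tt

X = tt.matrix('X')                       # (n, d) points, one per row
X2 = tt.sum(tt.square(X), 1)             # squared norms, shape (n,)
sqd = (-2.0 * tt.dot(X, tt.transpose(X))
       + (tt.reshape(X2, (-1, 1)) + tt.reshape(X2, (1, -1))))
pairwise = theano.function([X], tt.clip(sqd, 0.0, np.inf),
                           allow_input_downcast=True)

pts = np.array([[0., 0.], [3., 4.]])
print(pairwise(pts))                     # [[0., 25.], [25., 0.]]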


Example 18: square_dist

    def square_dist(self, X, Z):
        X = tt.mul(X, 1.0 / self.lengthscales)
        Xs = tt.sum(tt.square(X), 1)
        if Z is None:
            sqd = -2.0 * tt.dot(X, tt.transpose(X)) +\
                  (tt.reshape(Xs, (-1, 1)) + tt.reshape(Xs, (1, -1)))
        else:
            Z = tt.mul(Z, 1.0 / self.lengthscales)
            Zs = tt.sum(tt.square(Z), 1)
            sqd = -2.0 * tt.dot(X, tt.transpose(Z)) +\
                  (tt.reshape(Xs, (-1, 1)) + tt.reshape(Zs, (1, -1)))
        return tt.clip(sqd, 0.0, np.inf)
Developer: aasensio | Project: pymc3 | Lines: 12 | Source: cov.py


Example 19: beta_div

import theano.tensor as T
from theano.ifelse import ifelse


def beta_div(X, W, H, beta):
    """Compute beta divergence D(X|WH)"""
    div = ifelse(T.eq(beta, 2),
                 T.sum(1. / 2 * T.power(X - T.dot(H, W), 2)), 
                 ifelse(T.eq(beta, 0),
                        T.sum(X / T.dot(H, W) - T.log(X / T.dot(H, W)) - 1),
                        ifelse(T.eq(beta, 1),
                               T.sum(T.mul(X, (T.log(X) - T.log(T.dot(H, W)))) + T.dot(H, W) - X),
                                T.sum(1. / (beta * (beta - 1.)) * (T.power(X, beta) +
                                                                   (beta - 1.) *
                                                                   T.power(T.dot(H, W), beta) -
                                                                   beta *
                                                                   T.power(T.mul(X, T.dot(H, W)),
                                                                           (beta - 1)))))))
    return div
Developer: mikimaus78 | Project: groupNMF | Lines: 15 | Source: costs.py


Example 20: __init

import theano
import theano.tensor as T
# `config` here is the project's local configuration module (provides globalFloatType()).


def __init():
    dataset = T.matrix("dataset", dtype=config.globalFloatType())
    trans_dataset = T.transpose(dataset)
    dot_mul = T.dot(dataset, trans_dataset)
    l2 = T.sqrt(T.sum(T.square(dataset), axis=1))

    # outer product of the inverse row norms: 1 / (||x_i|| * ||x_j||)
    l2_inv2 = T.inv(l2).dimshuffle(['x', 0])
    l2_inv1 = T.transpose(l2_inv2)
    l2_inv = T.dot(l2_inv1, l2_inv2)

    # cosine similarity, rescaled from [-1, 1] to [0, 1]
    affinty = (T.mul(dot_mul, l2_inv) + 1) / 2
    globals()['__affinty_fun'] = theano.function(
             [dataset],
             [affinty],
             allow_input_downcast=True
             )
Developer: persistforever | Project: sentenceEmbedding | Lines: 28 | Source: affinity_matrix.py
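
The function above builds a cosine-similarity affinity matrix, rescaled from [-1, 1] to [0, 1], and caches the compiled function in globals(). A self-contained variant for experimentation (using theano.config.floatX in place of the project's config.globalFloatType(), and T.outer for the norm product; names assumed):

import numpy as np
import theano
import theano.tensor as T

dataset = T.matrix('dataset', dtype=theano.config.floatX)
dot_mul = T.dot(dataset, dataset.T)                  # pairwise dot products
l2 = T.sqrt(T.sum(T.square(dataset), axis=1))        # row norms
l2_inv = T.outer(T.inv(l2), T.inv(l2))               # 1 / (||x_i|| * ||x_j||)
affinity = (T.mul(dot_mul, l2_inv) + 1) / 2          # cosine, rescaled to [0, 1]

affinity_fn = theano.function([dataset], affinity,
                              allow_input_downcast=True)

pts = np.array([[1., 0.], [0., 1.], [1., 1.]])
print(affinity_fn(pts))   # orthogonal rows -> 0.5, identical directions -> 1.0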



Note: The theano.tensor.mul examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

