Python theano.dot Function Code Examples


This article collects typical usage examples of the Python theano.dot function. If you are unsure how to call theano.dot, what it is used for, or what real-world usage looks like, the curated code examples below should help.



The following presents 20 code examples of the dot function, sorted by popularity by default. You can upvote the examples you like or find useful; this feedback helps the site recommend better Python code examples.
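
Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the examples below) of what theano.dot itself does: it builds a symbolic matrix product that is evaluated only after the graph is compiled with theano.function.

import numpy
import theano
import theano.tensor as T

x = T.matrix('x')                # symbolic matrix; shape is fixed only at call time
W = T.matrix('W')
y = theano.dot(x, W)             # symbolic matrix-matrix product

f = theano.function([x, W], y)   # compile the symbolic graph into a callable

a = numpy.random.rand(2, 3).astype(theano.config.floatX)
b = numpy.random.rand(3, 4).astype(theano.config.floatX)
print(f(a, b).shape)             # -> (2, 4)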

Example 1: oneStep

def oneStep(u_tm4, u_t, x_tm3, x_tm1, y_tm1, W, W_in_1, W_in_2, W_feedback, W_out):
    x_t = T.tanh(theano.dot(x_tm1, W) +
                 theano.dot(u_t, W_in_1) +
                 theano.dot(u_tm4, W_in_2) +
                 theano.dot(y_tm1, W_feedback))
    y_t = theano.dot(x_tm3, W_out)
    return [x_t, y_t]
Developer: LibCorner, Project: Theano_note, Lines: 7, Source: Scan_tutial.py
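
Example 1 is the recurrent step function from the Theano scan tutorial. As a rough sketch of how it might be driven by theano.scan (the symbolic inputs below are assumptions following the tutorial's conventions, not part of the original snippet):

import theano
import theano.tensor as T   # the oneStep definition above also assumes this import

W, W_in_1, W_in_2, W_feedback, W_out = [T.matrix() for _ in range(5)]
u = T.matrix()    # input sequence, one row per time step
x0 = T.matrix()   # initial x states; must cover the oldest tap, x[t-3]
y0 = T.vector()   # initial y state, y[t-1]

# taps tell scan which current/past slices of each sequence and output
# to pass into oneStep at every time step.
[x_vals, y_vals], updates = theano.scan(
    fn=oneStep,
    sequences=dict(input=u, taps=[-4, 0]),
    outputs_info=[dict(initial=x0, taps=[-3, -1]), y0],
    non_sequences=[W, W_in_1, W_in_2, W_feedback, W_out],
    strict=True)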


Example 2: encoder

def encoder(infomatf, infomatb, htm1matf, ctm1matf, htm1matb, ctm1matb, Eenf, Eenb, Wenf, Wenb, benf, benb):
    # infomat is a matrix of shape (batch, D)
    dim = Eenf.shape[1]
    #
    xtmatf = theano.dot(infomatf, Eenf)
    xtmatb = theano.dot(infomatb, Eenb)
    #
    pretranf = T.concatenate([xtmatf, htm1matf], axis=1)
    pretranb = T.concatenate([xtmatb, htm1matb], axis=1)
    #
    posttranf = theano.dot(pretranf, Wenf) + benf
    posttranb = theano.dot(pretranb, Wenb) + benb
    #
    itmatf = T.nnet.sigmoid(posttranf[:, 0:dim])
    ftmatf = T.nnet.sigmoid(posttranf[:, dim : (2 * dim)])
    gtmatf = T.tanh(posttranf[:, (2 * dim) : (3 * dim)])
    otmatf = T.nnet.sigmoid(posttranf[:, (3 * dim) :])
    ctmatf = ftmatf * ctm1matf + itmatf * gtmatf
    #
    htmatf = otmatf * T.tanh(ctmatf)
    #
    itmatb = T.nnet.sigmoid(posttranb[:, 0:dim])
    ftmatb = T.nnet.sigmoid(posttranb[:, dim : (2 * dim)])
    gtmatb = T.tanh(posttranb[:, (2 * dim) : (3 * dim)])
    otmatb = T.nnet.sigmoid(posttranb[:, (3 * dim) :])
    ctmatb = ftmatb * ctm1matb + itmatb * gtmatb
    #
    htmatb = otmatb * T.tanh(ctmatb)
    #
    return htmatf, ctmatf, htmatb, ctmatb
Developer: hychyc07, Project: arctic-captions, Lines: 30, Source: weatherman.py


Example 3: test_specify_shape_inplace

        def test_specify_shape_inplace(self):
            # test that specify_shape doesn't break inserting an inplace op

            dtype = self.dtype
            if dtype is None:
                dtype = theano.config.floatX

            rng = numpy.random.RandomState(utt.fetch_seed())
            a = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            a = self.cast_value(a)
            a_shared = self.shared_constructor(a)
            b = numpy.asarray(rng.uniform(1, 2, [40, 40]), dtype=dtype)
            b = self.cast_value(b)
            b_shared = self.shared_constructor(b)
            s = numpy.zeros((40, 40), dtype=dtype)
            s = self.cast_value(s)
            s_shared = self.shared_constructor(s)
            f = theano.function([], updates={s_shared: theano.dot(a_shared, b_shared) + s_shared})
            topo = f.maker.env.toposort()
            f()
            # [Gemm{inplace}(<TensorType(float64, matrix)>, 0.01, <TensorType(float64, matrix)>, <TensorType(float64, matrix)>, 2e-06)]
            if theano.config.mode != "FAST_COMPILE":
                assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                assert all(
                    node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
                )
                assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
            # There is no inplace gemm for sparse
            # assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "StructuredDot")
            s_shared_specify = tensor.specify_shape(s_shared, s_shared.get_value(borrow=True).shape)

            # now test with the specify shape op in the output
            f = theano.function(
                [], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
            )
            topo = f.maker.env.toposort()
            shp = f()
            assert numpy.all(shp == (40, 40))
            if theano.config.mode != "FAST_COMPILE":
                assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                assert all(
                    node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
                )
                assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
            # now test with the specify shape op in the inputs and outputs
            a_shared = tensor.specify_shape(a_shared, a_shared.get_value(borrow=True).shape)
            b_shared = tensor.specify_shape(b_shared, b_shared.get_value(borrow=True).shape)

            f = theano.function(
                [], s_shared.shape, updates={s_shared: theano.dot(a_shared, b_shared) + s_shared_specify}
            )
            topo = f.maker.env.toposort()
            shp = f()
            assert numpy.all(shp == (40, 40))
            if theano.config.mode != "FAST_COMPILE":
                assert sum([node.op.__class__.__name__ in ["Gemm", "GpuGemm", "StructuredDot"] for node in topo]) == 1
                assert all(
                    node.op == tensor.blas.gemm_inplace for node in topo if isinstance(node.op, tensor.blas.Gemm)
                )
                assert all(node.op.inplace for node in topo if node.op.__class__.__name__ == "GpuGemm")
Developer: NicolasBouchard, Project: Theano, Lines: 60, Source: test_sharedvar.py


Example 4: apply

    def apply(self, state_below, mask_below, init_state=None, context=None):
        if state_below.ndim == 3:
            batch_size = state_below.shape[1]
            n_steps = state_below.shape[0]
        else:
            raise NotImplementedError


        if self.with_contex:
            if init_state is None:
                init_state = T.tanh(theano.dot(context, self.W_c_init))
            c_z = theano.dot(context, self.W_cz)
            c_r = theano.dot(context, self.W_cr)
            c_h = theano.dot(context, self.W_ch)
            non_sequences = [c_z, c_r, c_h]
            rval, updates = theano.scan(self._step_forward_with_context,
                                        sequences=[state_below, mask_below],
                                        outputs_info=[init_state],
                                        non_sequences=non_sequences,
                                        n_steps=n_steps
                                        )

        else:
            if init_state is None:
                init_state = T.alloc(numpy.float32(0.), batch_size, self.n_hids)
            rval, updates = theano.scan(self._step_forward,
                                        sequences=[state_below, mask_below],
                                        outputs_info=[init_state],
                                        n_steps=n_steps
                                        )
        self.output = rval
        return self.output
Developer: subercui, Project: RL_reasoning, Lines: 32, Source: model.py


Example 5: _step_forward_with_attention

    def _step_forward_with_attention(self, x_t, x_m, h_tm1, c, c_mask, c_x):
        '''
        x_t: input at time t
        x_m: mask of x_t
        h_tm1: previous state
        c_x: context of the rnn
        '''
        # attended = self.attention_layer.apply(c, c_mask, h_tm1)
        # c_z = theano.dot(attended, self.W_cz)
        # c_r = theano.dot(attended, self.W_cr)
        # c_h = theano.dot(attended, self.W_ch)

        # return [self._step_forward_with_context(x_t, x_m, h_tm1, c_z, c_r, c_h), attended]

        #### new arc
        h1 = self._step_forward(x_t, x_m, h_tm1)
        attended = self.attention_layer.apply(c, c_mask, c_x,  h1 )
        z = T.nnet.sigmoid(theano.dot(attended, self.W_cz)
                           + theano.dot(h1, self.W_hz2) + self.b_z2)
        r = T.nnet.sigmoid(theano.dot(attended, self.W_cr)
                           + theano.dot(h1, self.W_hr2) + self.b_r2)
        c_h = theano.dot(attended, self.W_ch)
        h2 = T.tanh((T.dot(h1, self.W_hh2) + self.b_h2) * r + c_h)
        h2 = h1 * z + (1. - z) * h2
        if x_m:
            h2 = x_m[:, None] * h2 + (1. - x_m)[:, None] * h1
        return h2, attended
Developer: mingxuan, Project: RnnSearch, Lines: 27, Source: trans_model.py


Example 6: one_step_no_output

 def one_step_no_output(self, x_t, h_tm1, W_xc, W_hc, b_c, W_ih, W_hh, W_ho, b_o, b_h):
     C = sigmoid(theano.dot(x_t, W_xc) + theano.dot(h_tm1, W_hc) + b_c)
     h_t_hat = T.tanh(theano.dot(x_t, W_ih) + theano.dot(h_tm1, W_hh) + b_h)
     h_t = (1 - C) * h_t_hat + C * x_t
     if self.ignore_zero:
         return [h_t, h_t], theano.scan_module.until(T.eq(T.sum(abs(x_t)), 0))
     return [h_t, h_t]
Developer: benywon, Project: bAbi, Lines: 7, Source: RNN.py


Example 7: one_step

 def one_step(self, x_t, h_tm1, W_ih, W_hh, b_h, W_ho, b_o):
     h_t = T.tanh(theano.dot(x_t, W_ih) + theano.dot(h_tm1, W_hh) + b_h)
     y_t = theano.dot(h_t, W_ho) + b_o
     y_t = sigmoid(y_t)
     if self.ignore_zero:
         return [h_t, y_t], theano.scan_module.until(T.eq(T.sum(abs(x_t)), 0))
     return [h_t, y_t]
Developer: benywon, Project: bAbi, Lines: 7, Source: RNN.py


Example 8: setL

 def setL(x, name1="w", name2="b", name3="b_", act="sigmoid"):
     w = self.seg.params[name1]
     b = self.seg.params[name2]
     b_ = self.seg.params[name3]
     activate = self.getfunc(act)
     y = activate(theano.dot(x, w) + b)
     z = activate(theano.dot(y, w.T) + b_)
     return zip([w, b, b_], theano.grad(self.lossfunc(x, z), [w, b, b_]))
Developer: sugaton, Project: theano_feedforwardNN, Lines: 8, Source: wordseg.py


Example 9: step

def step(x_t, h_t_1, W_h, W_x, W_y):
    # Add breakpoint

    h = t.tanh(theano.dot(W_h, h_t_1) + theano.dot(W_x, x_t) + b_h)
    y = (theano.dot(W_y, h) + b_y)
    e_y = t.exp(y - y.max())
    smax_y = e_y / e_y.sum()
    return h, smax_y
Developer: liuhy0908, Project: thenao_examples, Lines: 8, Source: rnn.py


Example 10: __init__

    def __init__(self, name, inp):
        eqvars = self.arrdict[name]
        w_hidden, b_hidden, w_output, b_output = eqvars

        hidden = T.dot(w_hidden.T, inp) + b_hidden
        hidden_act = M.tanh(hidden)
        output = (T.dot(w_output.T, hidden_act) + b_output)
        self.proj = output.sum()
Developer: DaiDengxin, Project: invariant, Lines: 8, Source: invar_theano.py


Example 11: __init__

 def __init__(self, input, w, b, params=[]):
     self.output=nnet.softmax(theano.dot(input, w)+b)
     self.l1=abs(w).sum()
     self.l2_sqr = (w**2).sum()
     self.argmax=theano.tensor.argmax(theano.dot(input, w)+b, axis=input.ndim-1)
     self.input = input
     self.w = w
     self.b = b
     self.params = params
Developer: yamins81, Project: simffa, Lines: 9, Source: theano_sgd.py


Example 12: hidden_cov_units_preactivation_given_v

    def hidden_cov_units_preactivation_given_v(self, v, small=0.5):
        """Return argument to the sigmoid that would give mean of covariance hid units

        See the math at the top of this file for what 'adjusted' means.

        return b - 0.5 * dot(adjusted(v), U)**2
        """
        unit_v = v / (TT.sqrt(TT.mean(v**2, axis=1)+small)).dimshuffle(0,'x') # adjust row norm
        return self.b + 0.5 * dot(dot(unit_v, self.U)**2, self.P)
Developer: GavinHwang, Project: DeepLearningTutorials, Lines: 9, Source: mcrbm.py


Example 13: build_mdn_predict

def build_mdn_predict(proj, x, tparams):
    x_diff_squared_avg = tensor.mean((x[:,1:] - x[:,:-1])**2,axis=1)
    invsigma_given_x = tensor.maximum(tensor.nnet.sigmoid(theano.dot(
        proj, tparams['U_sigma']) + tparams['b_sigma'])
                                      , 1e-8)/ x_diff_squared_avg[:, None]
    mu = theano.dot(proj, tparams['U_mu']) + tparams['b_mu']
    p_mix_given_x = tensor.maximum(tensor.minimum(tensor.nnet.softmax(
        tensor.dot(proj, tparams['U_mix']) + tparams['b_mix']), 1e-6), 1-1e-6)
    p_mix_given_x = tensor.log(p_mix_given_x / (tensor.sum(p_mix_given_x, axis=1)[:, None] + 10 * EPS) + EPS)
    return invsigma_given_x, mu, p_mix_given_x
Developer: markstoehr, Project: structured_gaussian_mixtures, Lines: 10, Source: mdn_lstm.py


Example 14: one_step

 def one_step(x_t, h_tminus1, c_tminus1):
     i_t = sigmoid(theano.dot(x_t, self.W_xi) + theano.dot(h_tminus1, self.W_hi) + self.b_i)
     f_t = sigmoid(theano.dot(x_t, self.W_xf) + theano.dot(h_tminus1, self.W_hf) + self.b_f)
     o_t = sigmoid(theano.dot(x_t, self.W_xo) + theano.dot(h_tminus1, self.W_ho) + self.b_o)
     g_t = self.activation_fun(theano.dot(x_t, self.W_xg) + theano.dot(h_tminus1, self.W_hg) + self.b_g)
     c_t = f_t * c_tminus1 + i_t * g_t
     h_t = o_t * self.activation_fun(c_t)
     y_t = sigmoid(theano.dot(h_t, self.W_hy) + self.b_y)
     return [h_t, c_t, y_t]
Developer: ayushkovsky, Project: neuralnetworks, Lines: 9, Source: lstm_model.py


Example 15: step

    def step(self, x_t, h_tm1, W_ih, W_hh, b_h, W_ho, b_o):

        # h_t = g(W_ih x_t + W_hh h_tm1 + bh)

        ### Does not work on recurrent layer, see http://arxiv.org/pdf/1311.0701v7.pdf
        h_t = self.g(theano.dot(x_t, W_ih) + theano.dot(h_tm1, W_hh) + b_h)

        # y_t = act(W_ho h_t + b_o)

        ### y_t = self.act(theano.dot(h_t, W_ho) + b_o)
        y_t = self.act(theano.dot(h_t, W_ho) + b_o)

        return [h_t, y_t]
Developer: journocode, Project: recurrency, Lines: 13, Source: recurrent.py


Example 16: image_step_val

def image_step_val(Imat, htm1mat, ctm1mat, 
                   Wcnn, Wxi, Whi, bi, Wxf, Whf, bf, 
                   Wxc, Whc, bc, Wxo, Who, bo, Why, by, forbatch):
    xtmat = theano.dot(Imat, Wcnn)
    itmat = sigma(theano.dot(xtmat,Wxi) + theano.dot(htm1mat,Whi) + T.outer(forbatch,bi) )
    ftmat = sigma(theano.dot(xtmat,Wxf) + theano.dot(htm1mat,Whf) + T.outer(forbatch,bf) )
    ctmat = ftmat * ctm1mat + itmat*act(theano.dot(xtmat,Wxc)+theano.dot(htm1mat,Whc)+T.outer(forbatch,bc) )
    otmat = sigma(theano.dot(xtmat,Wxo) + theano.dot(htm1mat,Who) + T.outer(forbatch,bo) )
    htmat = otmat * act(ctmat)
#    yt = T.concatenate([addzero,tempyt],axis=0)
    return htmat, ctmat    
Developer: lizuyao2010, Project: lstm_theano, Lines: 11, Source: ggmodeladam.py


Example 17: encoder

def encoder(wordt, htm1, ctm1, 
            Een, Wxien, Whien, bien, Wxfen, Whfen, bfen, 
            Wxcen, Whcen, bcen, Wxoen, Whoen, boen):
    xt = theano.dot(wordt, Een)
    it = sigma(theano.dot(xt,Wxien) + theano.dot(htm1,Whien) + bien )
    ft = sigma(theano.dot(xt,Wxfen) + theano.dot(htm1,Whfen) + bfen )
    ct = ft * ctm1 + it*act(theano.dot(xt,Wxcen)+theano.dot(htm1,Whcen)+bcen )
    ot = sigma(theano.dot(xt,Wxoen) + theano.dot(htm1,Whoen) + boen )
    ht = ot * act(ct)
#    yt = T.concatenate([addzero,tempyt],axis=0)
    return ht, ct
Developer: lizuyao2010, Project: lstm_theano, Lines: 11, Source: walkmanweightdecay.py


Example 18: one_lstm_step

 def one_lstm_step(x_t, h_tm1, c_tm1, W_xi, W_hi, W_xf, W_hf, W_xc, W_hc, W_xo, W_ho
                    ):
     i_t = T.nnet.sigmoid(theano.dot(x_t, W_xi) + theano.dot(h_tm1, W_hi) )
     f_t = T.nnet.sigmoid(theano.dot(x_t, W_xf) + theano.dot(h_tm1, W_hf) )
     c_t = f_t * c_tm1 + i_t * T.tanh(theano.dot(x_t, W_xc) + theano.dot(h_tm1, W_hc) ) 
     o_t = T.nnet.sigmoid(theano.dot(x_t, W_xo)+ theano.dot(h_tm1, W_ho) ) 
     h_t = o_t * T.tanh(c_t)
     return [h_t, c_t]
Developer: synetkim, Project: multi_asr, Lines: 8, Source: lstm.py


Example 19: mk_training_fn

    def mk_training_fn(self):
        """The Constant Stochastic Gradient Step Fn with Optimal Preconditioning Matrix"""
        q_size = self.q_size
        avg_C = self.avg_C
        t = self.t
        updates = self.updates
        # Trying to stick to variables names as given in the publication
        # https://arxiv.org/pdf/1704.04289v1.pdf
        S = self.batch_size
        N = self.total_size

        # inputs
        random = self.random
        inarray = self.inarray

        # gradient of log likelihood
        gt = -1 * (1. / S) * (self.dlogp_elemwise.sum(axis=0) +
                              (S / N) * self.dlog_prior)

        # update moving average of Noise Covariance
        gt_diff = (self.dlogp_elemwise - self.dlogp_elemwise.mean(axis=0))
        V = (1. / (S - 1)) * theano.dot(gt_diff.T, gt_diff)
        C_t = (1. - 1. / t) * avg_C + (1. / t) * V
        # BB^T = C 
        B = tt.switch(t < 0, tt.eye(q_size), tt.slinalg.cholesky(C_t))
        # Optimal Preconditioning Matrix
        H = (2. * S / N) * tt.nlinalg.matrix_inverse(C_t)
        # step value on the log likelihood gradient preconditioned with H
        step = -1 * theano.dot(H, gt.dimshuffle([0, 'x']))

        # sample gaussian noise dW
        dW = random.normal(
            (q_size, 1), dtype=theano.config.floatX, avg=0.0, std=1.0)
        # noise term is inversely proportional to batch size
        noise_term = (1. / np.sqrt(S)) * theano.dot(H, theano.dot(B, dW))
        # step + noise term
        dq = (step + noise_term).flatten()

        # update time and avg_C 
        updates.update({avg_C: C_t, t: t + 1})

        f = theano.function(
            outputs=dq,
            inputs=inarray,
            updates=updates,
            allow_input_downcast=True)

        return f
Developer: alexander-belikov, Project: pymc3, Lines: 48, Source: sgmcmc.py


Example 20: audcc_from_power

    def audcc_from_power(self, power, n_bands=None, n_audcc=None, dct_unitary=None,
            noise_level=None):
        """
        :type power: ndarray or NdArrayResult with ndim=2

        :param power: a power spectrogram with each frame in a row.  A frequency-scaled
        spectrogram makes sense here too.

        :type n_bands: int
        :param n_bands:  number of critical bands of power

        :type n_audcc: int
        :param n_audcc:  number of cepstral coefficients to calculate

        :type dct_unitary: Bool
        :param dct_unitary: True means apply different scaling to first coef.

        """
        n_audcc = self.n_audcc if n_audcc is None else n_audcc
        dct_unitary = self.dct_unitary if dct_unitary is None else dct_unitary
        n_bands = self.n_bands if n_bands is None else n_bands
        noise_level = self.noise_level if noise_level is None else noise_level

        dct = fourier.dct_matrix(n_audcc, n_bands, unitary=dct_unitary)

        dct = theano.tensor.as_tensor_variable(dct, name="AudioFeatures.dct<%i>"%id(dct))
        return theano.dot(theano.tensor.log(power + noise_level), dct.T)
Developer: jaberg, Project: Classifaudio, Lines: 27, Source: audiofeature.py



Note: The theano.dot examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright belongs to the original authors, and any use or redistribution should comply with each project's license. Please do not repost without permission.

