Python tensor.tensordot Function Code Examples


This article collects typical usage examples of the tensordot function from Python's theano.tensor module. If you are wondering what tensordot does, how to call it, or what real code that uses it looks like, the curated examples below should help.



Twenty code examples of the tensordot function are shown below, ordered by popularity.
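
Before the examples, here is a minimal sketch of the two calling conventions they use, written with numpy.tensordot (theano.tensor.tensordot follows the same semantics, only on symbolic variables; the array shapes here are illustrative):

import numpy as np

x = np.random.rand(2, 3, 4)
y = np.random.rand(4, 5)

# Convention 1: an integer n contracts the last n axes of x
# with the first n axes of y.
out1 = np.tensordot(x, y, 1)                  # shape (2, 3, 5)

# Convention 2: explicit axis pairs; contracting x's axis 2 with
# y's axis 0 is equivalent to the call above.
out2 = np.tensordot(x, y, axes=[(2,), (0,)])

assert out1.shape == (2, 3, 5)
assert np.allclose(out1, out2)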

Example 1: marginalize_over_v_z

    def marginalize_over_v_z(self, h):
        # energy = \sum_{i=1}^{|h|} h_i*b_i - \beta * ln(1 + e^{b_i})

        # In theory we should use the following line:
        # energy = (h * self.b).T
        # However, when broadcasting is involved, Theano's element-wise multiplication of np.NaN with 0 gives 0 instead of np.NaN,
        # so we use T.tensordot and T.diagonal as a workaround.
        # See Theano issue #3848 (https://github.com/Theano/Theano/issues/3848).
        energy = T.tensordot(h, self.b, axes=0)
        energy = T.diagonal(energy, axis1=1, axis2=2).T

        if self.penalty == "softplus_bi":
            energy = energy - self.beta * T.log(1 + T.exp(self.b))[:, None]

        elif self.penalty == "softplus0":
            energy = energy - self.beta * T.log(1 + T.exp(0))[:, None]

        else:
            raise NameError("Invalid penalty term")

        energy = T.set_subtensor(energy[(T.isnan(energy)).nonzero()], 0)  # Remove NaN
        energy = T.sum(energy, axis=0, keepdims=True).T

        ener = T.tensordot(h, self.W, axes=0)
        ener = T.diagonal(ener, axis1=1, axis2=2)
        ener = T.set_subtensor(ener[(T.isnan(ener)).nonzero()], 0)
        ener = T.sum(ener, axis=2) + self.c[None, :]
        ener = T.sum(T.log(1 + T.exp(ener)), axis=1, keepdims=True)

        return -(energy + ener)
Author: MarcCote, Project: iRBM, Lines: 30, Source: orbm.py
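
To see why the workaround in Example 1 reproduces the element-wise product, here is a small numpy check (shapes inferred from the snippet: h is BxH, b is a length-H vector): axes=0 builds the full outer product, and the diagonal over the last two axes picks out h[i, j] * b[j].

import numpy as np

B, H = 4, 3
h = np.random.rand(B, H)
b = np.random.rand(H)

energy = np.tensordot(h, b, axes=0)               # (B, H, H): outer product
energy = np.diagonal(energy, axis1=1, axis2=2).T  # (H, B)

assert np.allclose(energy, (h * b).T)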


Example 2: test_tensordot_reshape

def test_tensordot_reshape():
    '''Test that the tensordot implementation using dimshuffle, reshape and dot
    gives the same results as the default (numpy) version'''
    # define some tensors
    a = numpy.arange(20, dtype=theano.config.floatX) / 20.0
    b = numpy.arange(10, dtype=theano.config.floatX) / 10.0
    c = numpy.arange(5, dtype=theano.config.floatX) / 5.0
    d = numpy.arange(8, dtype=theano.config.floatX) / 8.0
    
    tensor1 = numpy.tensordot(a, numpy.tensordot(b, numpy.tensordot(c, d, 0), 0), 0)
    tensor2 = numpy.tensordot(c, numpy.tensordot(d, a, 0), 0)
    tensor3 = tensor2.swapaxes(1, 2).swapaxes(0, 2) # d, a, c
    
    x = T.tensor4('x')
    y = T.tensor3('y')
    
    # case 1: number of axes to sum over
    default1 = theano.function([x,y], T.tensordot(x, y, 2))(tensor1, tensor2)
    reshape1 = theano.function([x,y], B.tensordot(x, y, 2))(tensor1, tensor2)
    assert numpy.allclose(default1, reshape1)
    
    # case 2: axis pairs
    default2 = theano.function([x,y], T.tensordot(x, y, axes=[(0, 3), (1, 0)]))(tensor1, tensor3)
    reshape2 = theano.function([x,y], B.tensordot(x, y, axes=[(0, 3), (1, 0)]))(tensor1, tensor3)
    assert numpy.allclose(default2, reshape2)

    default3 = theano.function([x,y], T.tensordot(x, y, axes=[(0, 3, 2), (1, 0, 2)]))(tensor1, tensor3)
    reshape3 = theano.function([x,y], B.tensordot(x, y, axes=[(0, 3, 2), (1, 0, 2)]))(tensor1, tensor3)
    assert numpy.allclose(default3, reshape3)
Author: vlb, Project: Theano, Lines: 29, Source: test_basic_ops.py
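
The equivalence this test exercises can be sketched in numpy (shapes assumed): a tensordot over arbitrary axis pairs reduces to a transpose that groups the free and contracted axes, a reshape to matrices, a plain dot, and a reshape back.

import numpy as np

x = np.random.rand(4, 5, 6)
y = np.random.rand(6, 4, 7)

# Contract x's axes (0, 2) with y's axes (1, 0).
ref = np.tensordot(x, y, axes=[(0, 2), (1, 0)])   # (5, 7)

xt = x.transpose(1, 0, 2).reshape(5, 4 * 6)       # free axis first
yt = y.transpose(1, 0, 2).reshape(4 * 6, 7)       # contracted axes first

assert np.allclose(ref, xt.dot(yt))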


Example 3: sym_mask_logdensity_estimator_intermediate

    def sym_mask_logdensity_estimator_intermediate(self, x, mask):
        non_linearity_name = self.parameters["nonlinearity"].get_name()
        assert non_linearity_name == "sigmoid" or non_linearity_name == "RLU"
        x = x.T  # BxD
        mask = mask.T  # BxD
        output_mask = constantX(1) - mask  # BxD
        D = constantX(self.n_visible)
        d = mask.sum(1)  # d is the 1-based index of the dimension whose value is to be inferred (not the size of the context)
        masked_input = x * mask  # BxD
        h = self.nonlinearity(T.dot(masked_input, self.W1) + T.dot(mask, self.Wflags) + self.b1)  # BxH
        for l in xrange(self.n_layers - 1):
            h = self.nonlinearity(T.dot(h, self.Ws[l]) + self.bs[l])  # BxH
        z_alpha = T.tensordot(h, self.V_alpha, [[1], [1]]) + T.shape_padleft(self.b_alpha)
        z_mu = T.tensordot(h, self.V_mu, [[1], [1]]) + T.shape_padleft(self.b_mu)
        z_sigma = T.tensordot(h, self.V_sigma, [[1], [1]]) + T.shape_padleft(self.b_sigma)
        temp = T.exp(z_alpha)  # + 1e-6
        # temp += T.shape_padright(temp.sum(2)/1e-3)
        Alpha = temp / T.shape_padright(temp.sum(2))  # BxDxC
        Mu = z_mu  # BxDxC
        Sigma = T.exp(z_sigma)  # + 1e-6 #BxDxC

        # Alpha = Alpha * T.shape_padright(output_mask) + T.shape_padright(mask)
        # Mu = Mu * T.shape_padright(output_mask)
        # Sigma = Sigma * T.shape_padright(output_mask) + T.shape_padright(mask)
        # Phi = -constantX(0.5) * T.sqr((Mu - T.shape_padright(x*output_mask)) / Sigma) - T.log(Sigma) - constantX(0.5 * np.log(2*np.pi)) #BxDxC

        Phi = (
            -constantX(0.5) * T.sqr((Mu - T.shape_padright(x)) / Sigma)
            - T.log(Sigma)
            - constantX(0.5 * np.log(2 * np.pi))
        )  # BxDxC
        logdensity = (log_sum_exp(Phi + T.log(Alpha), axis=2) * output_mask).sum(1) * D / (D - d)
        return (logdensity, z_alpha, z_mu, z_sigma, Alpha, Mu, Sigma, h)
Author: Irene-Li, Project: susyML, Lines: 33, Source: OrderlessMoGNADE.py
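
The three tensordot calls in Example 3 all follow the same shape pattern; here is a numpy sketch (dimension names assumed: B=batch, H=hidden, D=visible, C=mixture components) of the contraction that produces z_alpha, z_mu and z_sigma.

import numpy as np

B, H, D, C = 2, 5, 3, 4
h = np.random.rand(B, H)
V_alpha = np.random.rand(D, H, C)

# Contract h's hidden axis with the weight tensor's hidden axis.
z_alpha = np.tensordot(h, V_alpha, axes=[[1], [1]])  # (B, D, C)

assert z_alpha.shape == (B, D, C)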


Example 4: get_output

  def get_output(self, train=False):
    input = self.get_input(train)
    proj_input = self.activation(T.tensordot(input, self.att_proj, axes=(3,0)))
    #else:
    #  proj_fun = lambda proj_i, inp: T.tensordot(inp, proj_i, axes=((1,3), (0,1)))
    #  lin_proj_input, _ = theano.scan(fn=proj_fun, sequences=self.att_proj, non_sequences=input)
    #  proj_input = self.activation(lin_proj_input.dimshuffle((1,0,2,3)))
    if self.context == 'word':
      att_scores = T.tensordot(proj_input, self.att_scorer, axes=(3, 0))
    elif self.context == 'clause':
      #att_scores = T.tensordot(proj_input, self.att_scorer, axes=(3, 1)).sum(axis=2)
      def step(a_t, h_tm1, W_in, W, sc):
        h_t = T.tanh(T.tensordot(a_t, W_in, axes=(2,0)) + T.tensordot(h_tm1, W, axes=(2,0)))
        s_t = T.tensordot(h_t, sc, axes=(2,0))
        return h_t, s_t
      [_, scores], _ = theano.scan(step, sequences=[proj_input.dimshuffle(2,0,1,3)], outputs_info=[T.zeros((proj_input.shape[0], self.td1, self.rec_hid_dim)), None], non_sequences=[self.rec_in_weights, self.rec_hid_weights, self.att_scorer])
      att_scores = scores.dimshuffle(1,2,0)
    elif self.context == 'para':
      att_scores = T.tensordot(proj_input, self.att_scorer, axes=(3, 2)).sum(axis=(1, 2))
    # Nested scans. For shame!
    def get_sample_att(sample_input, sample_att):
      sample_att_inp, _ = theano.scan(fn=lambda s_att_i, s_input_i: T.dot(s_att_i, s_input_i), sequences=[T.nnet.softmax(sample_att), sample_input])
      return sample_att_inp

    att_input, _ = theano.scan(fn=get_sample_att, sequences=[input, att_scores])
    return att_input
Author: edvisees, Project: exp-parser, Lines: 26, Source: attention.py


Example 5: test_transfer

    def test_transfer(self):
        tensor1 = self.rng.rand(20, 10, 5, 8).astype("float32")
        tensor2 = self.rng.rand(5, 8, 20).astype("float32")
        tensor3 = self.rng.rand(8, 20, 5).astype("float32")

        x = tensor.ftensor4("x")
        y = tensor.ftensor3("y")

        tdot1 = tensor.tensordot(x, y, 2)
        f1 = theano.function([x, y], tdot1, mode=mode_with_gpu)
        topo1 = f1.maker.fgraph.toposort()
        assert topo1[-1].op == cuda.host_from_gpu
        # Let DebugMode debug
        f1(tensor1, tensor2)

        tdot2 = tensor.tensordot(x, y, axes=[(0, 3), (1, 0)])
        f2 = theano.function([x, y], tdot2, mode=mode_with_gpu)
        topo2 = f2.maker.fgraph.toposort()
        assert topo2[-1].op == cuda.host_from_gpu
        f2(tensor1, tensor3)

        tdot3 = tensor.tensordot(x, y, axes=[(0, 3, 2), (1, 0, 2)])
        f3 = theano.function([x, y], tdot3, mode=mode_with_gpu)
        topo3 = f3.maker.fgraph.toposort()
        assert topo3[-1].op == cuda.host_from_gpu
        f3(tensor1, tensor3)
Author: scyoyo, Project: Theano, Lines: 26, Source: test_opt.py


Example 6: shade

    def shade(self, shape, lights, camera):
        # See: http://en.wikipedia.org/wiki/Phong_reflection_model#Description

        # Since our material params are 1d we calculate bw shadings first and
        # convert to color after
        light = lights[0]
        material = shape.material
        normals = shape.normals(camera.rays)

        ambient_light = material.ka

        # diffuse (lambertian)
        diffuse_shadings = material.kd*T.tensordot(normals, -light.normed_dir(), 1)

        # specular
        rm = 2.0*(T.tensordot(normals, -light.normed_dir(), 1).dimshuffle(
            0, 1, 'x'))*normals + light.normed_dir()
        specular_shadings = material.ks*(T.tensordot(rm, camera.look_at, 1) ** material.shininess)

        # phong
        phong_shadings = ambient_light + diffuse_shadings + specular_shadings

        colorized = phong_shadings.dimshuffle(0, 1, 'x') * material.color.dimshuffle('x', 'x', 0) * light.intensity.dimshuffle('x', 'x', 0)
        clipped = T.clip(colorized, 0, 1)
        distances = shape.distance(camera.rays)
        return broadcasted_switch(T.isinf(distances), [0., 0., 0.], clipped)
Author: lebek, Project: reversible-raytracer, Lines: 26, Source: shader.py
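
A hedged numpy sketch of the lighting math in Example 6 (shapes assumed): with an (H, W, 3) field of unit normals and a 3-vector light direction, tensordot with axes=1 contracts the trailing xyz axis, yielding one Lambertian dot product per ray.

import numpy as np

H, W = 4, 6
normals = np.random.rand(H, W, 3)
normals /= np.linalg.norm(normals, axis=2, keepdims=True)
light_dir = np.array([0.0, 0.0, -1.0])

diffuse = np.tensordot(normals, -light_dir, 1)  # (H, W)

assert diffuse.shape == (H, W)
assert np.allclose(diffuse, (normals * -light_dir).sum(axis=2))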


Example 7: sym_masked_neg_loglikelihood_gradient

    def sym_masked_neg_loglikelihood_gradient(self, x, mask):
        """ x is a matrix of column datapoints (DxB) D = n_visible, Bfloat = batch size """
        logdensity, z_alpha, z_mu, z_sigma, Alpha, Mu, Sigma, h = self.sym_mask_logdensity_estimator_intermediate(
            x, mask
        )

        #        nnz = output_mask.sum(0)
        #        sparsity_multiplier = T.shape_padright(T.shape_padleft((B+1e-6)/(nnz+1e-6)))

        #        wPhi = T.maximum(Phi + T.log(Alpha), constantX(-100.0)) #BxDxC
        #        lp_current = log_sum_exp(wPhi, axis = 2) * output_mask #BxD
        #        lp_current_sum = (lp_current.sum(1) * D / (D-d)).sum() #1

        loglikelihood = logdensity.mean(dtype=floatX)
        loss = -loglikelihood

        dp_dz_alpha = T.grad(loss, z_alpha)  # BxDxC
        gb_alpha = dp_dz_alpha.sum(0)  # DxC
        gV_alpha = T.tensordot(h.T, dp_dz_alpha, [[1], [0]]).dimshuffle((1, 0, 2))  # DxHxC

        dp_dz_mu = T.grad(loss, z_mu)  # BxDxC
        dp_dz_mu = dp_dz_mu * Sigma  # Heuristic
        gb_mu = dp_dz_mu.sum(0)  # DxC
        gV_mu = T.tensordot(h.T, dp_dz_mu, [[1], [0]]).dimshuffle((1, 0, 2))  # DxHxC

        dp_dz_sigma = T.grad(loss, z_sigma)  # BxDxC
        gb_sigma = dp_dz_sigma.sum(0)  # DxC
        gV_sigma = T.tensordot(h.T, dp_dz_sigma, [[1], [0]]).dimshuffle((1, 0, 2))  # DxHxC

        if self.n_layers > 1:
            gWs, gbs, gW1, gWflags, gb1 = T.grad(loss, [self.Ws, self.bs, self.W1, self.Wflags, self.b1])
            gradients = {
                "V_alpha": gV_alpha,
                "b_alpha": gb_alpha,
                "V_mu": gV_mu,
                "b_mu": gb_mu,
                "V_sigma": gV_sigma,
                "b_sigma": gb_sigma,
                "Ws": gWs,
                "bs": gbs,
                "W1": gW1,
                "b1": gb1,
                "Wflags": gWflags,
            }
        else:
            gW1, gWflags, gb1 = T.grad(loss, [self.W1, self.Wflags, self.b1])
            gradients = {
                "V_alpha": gV_alpha,
                "b_alpha": gb_alpha,
                "V_mu": gV_mu,
                "b_mu": gb_mu,
                "V_sigma": gV_sigma,
                "b_sigma": gb_sigma,
                "W1": gW1,
                "b1": gb1,
                "Wflags": gWflags,
            }
        # Gradients
        return (loss, gradients)
Author: Irene-Li, Project: susyML, Lines: 59, Source: OrderlessMoGNADE.py


Example 8: output

    def output(self, input_vectors):
        """
        Calculate the n_output dot product scalars of this layer
        @param input_vectors: n_input vectors (actual shape should be (n_batch, n_input, n_dimension)
        """

        return T.sum(T.tensordot(input_vectors, self.W1, [[1], [0]]) *
                     T.tensordot(input_vectors, self.W2, [[1], [0]]), axis=1)
Author: murbard, Project: vectornet, Lines: 8, Source: vectornet.py
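
A shape sketch of Example 8 in numpy (names assumed): with inputs of shape (n_batch, n_input, n_dim) and weight matrices of shape (n_input, n_output), each tensordot mixes the input vectors into (n_batch, n_dim, n_output); the element-wise product summed over the dimension axis leaves one dot-product scalar per output.

import numpy as np

n_batch, n_input, n_dim, n_output = 2, 3, 5, 4
v = np.random.rand(n_batch, n_input, n_dim)
W1 = np.random.rand(n_input, n_output)
W2 = np.random.rand(n_input, n_output)

out = np.sum(np.tensordot(v, W1, [[1], [0]]) *
             np.tensordot(v, W2, [[1], [0]]), axis=1)

assert out.shape == (n_batch, n_output)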


Example 9: output

    def output(self, train):
        X = self.get_input(train)
        X = X.dimshuffle((1,0,2))


        if self.is_entity:
            Entity = X[-1:].dimshuffle(1,0,2)
            X = X[:-1]

        b_y = self.b_y
        b_yn = T.repeat(T.repeat(b_y.reshape((1,self.output_dim)),X.shape[0],axis=0).reshape((1,X.shape[0],self.output_dim)), X.shape[1], axis=0)

        xif = T.dot(X, self.W_if) + self.b_if
        xib = T.dot(X, self.W_ib) + self.b_ib

        xff = T.dot(X, self.W_ff) + self.b_ff
        xfb = T.dot(X, self.W_fb) + self.b_fb

        xcf = T.dot(X, self.W_cf) + self.b_cf
        xcb = T.dot(X, self.W_cb) + self.b_cb

        xof = T.dot(X, self.W_of) + self.b_of
        xob = T.dot(X, self.W_ob) + self.b_ob

        [outputs_f, memories_f], updates_f = theano.scan(
            self._step,
            sequences=[xif, xff, xof, xcf],
            outputs_info=[
                alloc_zeros_matrix(X.shape[1], self.output_dim),
                alloc_zeros_matrix(X.shape[1], self.output_dim)
            ],
            non_sequences=[self.U_if, self.U_ff, self.U_of, self.U_cf],
            truncate_gradient=self.truncate_gradient
        )
        [outputs_b, memories_b], updates_b = theano.scan(
            self._step,
            sequences=[xib, xfb, xob, xcb],
            outputs_info=[
                alloc_zeros_matrix(X.shape[1], self.output_dim),
                alloc_zeros_matrix(X.shape[1], self.output_dim)
            ],
            non_sequences=[self.U_ib, self.U_fb, self.U_ob, self.U_cb],
            truncate_gradient=self.truncate_gradient
        )
        if self.return_sequences:
            y = T.add(T.add(
                    T.tensordot(outputs_f.dimshuffle((1,0,2)), self.W_yf, [[2],[0]]),
                    T.tensordot(outputs_b[::-1].dimshuffle((1,0,2)), self.W_yb, [[2],[0]])),
                b_yn)
            # y = T.add(T.tensordot(
            #     T.add(outputs_f.dimshuffle((1, 0, 2)),
            #           outputs_b[::-1].dimshuffle((1,0,2))),
            #     self.W_y,[[2],[0]]),b_yn)
            if self.is_entity:
                return T.concatenate([y, Entity], axis=1)
            else:
                return y
        return T.concatenate((outputs_f[-1], outputs_b[0]))
Author: whyjay, Project: CRCN, Lines: 58, Source: recurrent.py


Example 10: output

 def output(self, input_value):
     if self.size is not None:
         if self.dotdim is None:
             input_value = T.tensordot(input_value, self.weight, axes = [input_value.ndim - 1, 0]) + self.bias
         else:
             input_value = T.tensordot(input_value, self.weight, axes = [self.dotdim + 1, 0]) + self.bias
             if self.dotdim + 1 < input_value.ndim - 1:
                 input_value = input_value.swapaxes(input_value.ndim - 1, self.dotdim + 1)
     return self.activation_function(input_value)
Author: stczhc, Project: neupy, Lines: 9, Source: activations.py
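
The swapaxes call in Example 10 compensates for a tensordot quirk: contracting a middle axis appends the weight's output axis at the end of the result. A numpy sketch with assumed shapes:

import numpy as np

x = np.random.rand(2, 3, 4)   # contract axis 1 (i.e. dotdim + 1 == 1)
w = np.random.rand(3, 5)

y = np.tensordot(x, w, axes=[1, 0])  # (2, 4, 5): output axis lands last
y = y.swapaxes(y.ndim - 1, 1)        # (2, 5, 4): restore the axis order

assert y.shape == (2, 5, 4)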


Example 11: complex_tensordot

def complex_tensordot(a, b, axes=2):
    AR, AI = a[0, ...], a[1, ...]
    BR, BI = b[0, ...], b[1, ...]

    output = tensor.stack([
        tensor.tensordot(AR, BR, axes=axes) - tensor.tensordot(AI, BI, axes=axes),
        tensor.tensordot(AR, BI, axes=axes) + tensor.tensordot(AI, BR, axes=axes),
    ], axis=0)
    return output
Author: Nehoroshiy, Project: urnn, Lines: 9, Source: theano_complex_extension.py
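
A numpy check of the decomposition in Example 11 (matrix shapes assumed; axes=1 is used here so plain matrices work): stacking real and imaginary parts on a leading axis and combining four real tensordots agrees with a single tensordot of the complex arrays.

import numpy as np

a_c = np.random.rand(2, 3) + 1j * np.random.rand(2, 3)
b_c = np.random.rand(3, 4) + 1j * np.random.rand(3, 4)
a = np.stack([a_c.real, a_c.imag])
b = np.stack([b_c.real, b_c.imag])

AR, AI = a[0], a[1]
BR, BI = b[0], b[1]
out = np.stack([
    np.tensordot(AR, BR, 1) - np.tensordot(AI, BI, 1),
    np.tensordot(AR, BI, 1) + np.tensordot(AI, BR, 1),
])
ref = np.tensordot(a_c, b_c, 1)

assert np.allclose(out[0], ref.real) and np.allclose(out[1], ref.imag)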


Example 12: apply_mat_to_kron

def apply_mat_to_kron(x, a, b, arg_type="numpy"):
    X = x.reshape((x.shape[0], a.shape[0], b.shape[0]))
    if arg_type == "numpy":
        result = np.tensordot(np.tensordot(X, a, axes=([1], [0])), b, axes=([1], [0]))
    elif arg_type == "theano":
        result = T.tensordot(T.tensordot(X, a, axes=([1], [0])), b, axes=([1], [0]))
    else:
        raise ValueError("arg_type must be 'numpy' or 'theano'")
    return result.reshape((x.shape[0], -1))
Author: Nehoroshiy, Project: kron_layer_lasagne, Lines: 9, Source: old_kron_layer.py
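
A usage check for Example 12 (sizes assumed; this calls the apply_mat_to_kron function defined above with its default arg_type="numpy"): the double tensordot agrees with multiplying by the explicit Kronecker product, without ever materializing the (a0*b0, a1*b1) matrix.

import numpy as np

a = np.random.rand(3, 4)
b = np.random.rand(5, 6)
x = np.random.rand(2, a.shape[0] * b.shape[0])  # (2, 15)

ref = x.dot(np.kron(a, b))        # (2, 24), via the explicit Kronecker matrix
out = apply_mat_to_kron(x, a, b)  # same result, via the reshaped tensordots

assert np.allclose(ref, out)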


Example 13: contrastive_divergence_1

 def contrastive_divergence_1(self, v1):
     '''Determine the weight updates according to CD-1'''
     h1 = self.sample_h_given_v(v1)
     v2 = self.sample_v_given_h(h1)
     h2p = self.propup(v2)
     updates = T.tensordot(v1, h1, [[0],[0]]) - T.tensordot(v2, h2p, [[0],[0]])
     f = 1.0 / self.minibatch_size
     return (updates * f,
             T.sum(v1 - v2, axis=0) * f,
             T.sum(h1 - h2p, axis=0) * f)
Author: malie, Project: theano-rbm-on-word-tuples, Lines: 10, Source: rbm-minibatch.py
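
The tensordot calls in Example 13 contract the batch axes, which is just the summed outer product of visible and hidden activations; a quick numpy sanity check (sizes assumed):

import numpy as np

batch, n_vis, n_hid = 8, 5, 3
v1 = np.random.rand(batch, n_vis)
h1 = np.random.rand(batch, n_hid)

# Contracting axis 0 of both arguments equals v1.T.dot(h1), the summed
# per-sample outer product used for the CD-1 correlation statistics.
assert np.allclose(np.tensordot(v1, h1, [[0], [0]]), v1.T.dot(h1))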


Example 14: function

    def function(self, xs, h_prevs, c_prevs):
        biases = T.shape_padright(T.ones_like(xs[:,0]))
        input_vector = T.concatenate((xs, h_prevs, biases), axis=1)

        forget_gate = T.nnet.sigmoid(T.tensordot(input_vector, self.W_forget_theano, axes=[[1],[1]]))
        input_gate = T.nnet.sigmoid(T.tensordot(input_vector, self.W_input_theano, axes=[[1],[1]]))
        candidate_vector = T.tanh(T.tensordot(input_vector, self.W_candidate_theano, axes=[[1],[1]]))
        cell_state = forget_gate*c_prevs + input_gate * candidate_vector

        output = T.nnet.sigmoid(T.tensordot(input_vector, self.W_output_theano, axes=[[1],[1]]))
        h = output * T.tanh(cell_state)
        return h, cell_state
Author: MichSchli, Project: Tensor-LSTM, Lines: 12, Source: network_ops.py


Example 15: get_output_for

    def get_output_for(self, inputs, **kwargs):
        """

        :param inputs: inputs: list of theano.TensorType
            `inputs[0]` should always be the symbolic input variable.  When
            this layer has a mask input (i.e. was instantiated with
            `mask_input != None`, indicating that the lengths of sequences in
            each batch vary), `inputs` should have length 2, where `inputs[1]`
            is the `mask`.  The `mask` should be supplied as a Theano variable
            denoting whether each time step in each sequence in the batch is
            part of the sequence or not.  `mask` should be a matrix of shape
            ``(n_batch, n_time_steps)`` where ``mask[i, j] = 1`` when ``j <=
            (length of sequence i)`` and ``mask[i, j] = 0`` when ``j > (length
            of sequence i)``.
        :return: theano.TensorType
            Symbolic output variable.
        """
        input = inputs[0]
        mask = None
        if self.mask_incoming_index > 0:
            mask = inputs[self.mask_incoming_index]

        # compute the bi-affine part
        # first via tensor dot ([batch, length, dim] * [dim, dim, num_label])
        # output shape = [batch, length, dim, num_label]
        out = T.tensordot(input, self.U, axes=[[2], [0]])
        # second via tensor dot ([batch, length, dim, num_label] * [batch, dim, length)
        # output shape = [batch, length, length, num_label]
        out = T.batched_tensordot(out, input.dimshuffle(0, 2, 1), axes=([2], [1]))
        out = out.dimshuffle(0, 1, 3, 2)

        # compute head bias part by tensor dot ([batch, length, dim] * [dim, num_label])
        # the shape of s_h should be [batch, length, num_label]
        if self.W_h is not None:
            s_h = T.tensordot(input, self.W_h, axes=[[2], [0]])
            out = out + s_h.dimshuffle(0, 1, 'x', 2)

        # compute child part by tensor dot ([batch, length, dim] * [dim, num_label]
        # the shape of s_c should be [batch, length, num_label]
        if self.W_c is not None:
            s_c = T.tensordot(input, self.W_c, axes=[[2], [0]])
            out = out + s_c.dimshuffle(0, 'x', 1, 2)

        # add bias part.
        if self.b is not None:
            out = out + self.b.dimshuffle('x', 'x', 'x', 0)

        if mask is not None:
            mask_shuffled = mask.dimshuffle(0, 1, 'x', 'x')
            out = out * mask_shuffled
            mask_shuffled = mask.dimshuffle(0, 'x', 1, 'x')
            out = out * mask_shuffled
        return out
Author: XuezheMax, Project: NeuroNLP, Lines: 53, Source: crf.py
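
A numpy/einsum sketch of the bi-affine term in Example 15 (shape names assumed: B=batch, L=length, D=dim, K=num_label): the tensordot followed by the batched tensordot computes s[b, i, j, k] = input[b, i] . U[:, :, k] . input[b, j].

import numpy as np

B, L, D, K = 2, 4, 3, 5
inp = np.random.rand(B, L, D)
U = np.random.rand(D, D, K)

step1 = np.tensordot(inp, U, axes=[[2], [0]])  # (B, L, D, K)
out = np.einsum('biek,bje->bijk', step1, inp)  # the batched contraction
ref = np.einsum('bid,dek,bje->bijk', inp, U, inp)

assert np.allclose(out, ref)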


Example 16: __init__

    def __init__(self, word_context, char_context, V, K, word_context_sz, char_context_sz, rng):
        """
        Initialize the parameters of the language model
        """
        # word training contexts
        self.word_context = word_context
        # character training contexts
        self.char_context = char_context

        # initialize context word embedding matrix Rw of shape (V, K)
        Rw_values = np.asarray(rng.uniform(-0.01, 0.01, size=(V, K)), 
                              dtype=theano.config.floatX)
        self.Rw = theano.shared(value=Rw_values, name='Rw', borrow=True)
        # initialize context character embedding matrix Rc of shape (V, K)
        Rc_values = np.asarray(rng.uniform(-0.01, 0.01, size=(V, K)), 
                              dtype=theano.config.floatX)
        self.Rc = theano.shared(value=Rc_values, name='Rc', borrow=True)

        # initialize target word embedding matrix Q of shape (V, K)
        Q_values = np.asarray(rng.uniform(-0.01, 0.01, size=(V, K)), 
                              dtype=theano.config.floatX)
        self.Q = theano.shared(value=Q_values, name='Q', borrow=True)
        # initialize word weight tensor Cw of shape (word_context_sz, K, K)
        Cw_values = np.asarray(rng.normal(0, math.sqrt(0.1), 
                                          size=(word_context_sz, K, K)), 
                              dtype=theano.config.floatX)
        self.Cw = theano.shared(value=Cw_values, name='Cw', borrow=True)
        # initialize character weight tensor Cc of shape (char_context_sz, K, K)
        Cc_values = np.asarray(rng.normal(0, math.sqrt(0.1), 
                                          size=(char_context_sz, K, K)), 
                               dtype=theano.config.floatX)
        self.Cc = theano.shared(value=Cc_values, name='Cc', borrow=True)
        # initialize bias vector 
        b_values = np.asarray(rng.normal(0, math.sqrt(0.1), size=(V,)), 
                              dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, name='b', borrow=True)
        # context word representations
        self.r_w = self.Rw[word_context]
        # context character representations
        self.r_c = self.Rc[char_context]
        # predicted word representation for target word by word context
        self.qw_hat = T.tensordot(self.Cw, self.r_w, axes=[[0,1], [1,2]])
        # predicted word representation for target word by character context
        self.qc_hat = T.tensordot(self.Cc, self.r_c, axes=[[0,1], [1,2]])
        # combine word and character predictions
        self.q_hat = self.qw_hat + self.qc_hat
        # similarity score between predicted word and all target words
        self.s = T.transpose(T.dot(self.Q, self.q_hat) + T.reshape(self.b, (V,1)))
        # softmax activation function
        self.p_w_given_h = T.nnet.softmax(self.s)
        # parameters of the model
        self.params = [self.Rw, self.Rc, self.Q, self.Cw, self.Cc, self.b]
Author: ddahlmeier, Project: neural_lm, Lines: 52, Source: lbl_hybrid.py
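
A shape sketch of the double contraction in Example 16 (numpy analogue; context and batch sizes assumed): contracting the weight tensor's (context, K) axes with the matching axes of the embedded context yields one predicted K-vector per batch column.

import numpy as np

context_sz, K, batch = 3, 6, 2
Cw = np.random.rand(context_sz, K, K)
r_w = np.random.rand(batch, context_sz, K)  # embedded context words

qw_hat = np.tensordot(Cw, r_w, axes=[[0, 1], [1, 2]])  # (K, batch)

assert qw_hat.shape == (K, batch)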


Example 17: get_output

 def get_output(self, train=False):
     [X_w, X_t] = self.get_input(train)
     t_w = self.W_t[X_w[:,:, 0]] # doc_l, n_tags*n_samples, n_dim
     w_w = self.W_w[X_w[:,:, 1]]
     dot_tw = T.sum(w_w * t_w, axis=2)
     inter_1 = T.tensordot(w_w, self.S, axes = [[2],[2]])
     inter_2 = T.tensordot(t_w, self.P, axes = [[2],[2]]) # doc_l, n_tags*n_samples, 2,5
     inter = T.sum(inter_1 * inter_2, axis = 3)
     sim_tw = T.tensordot(inter + T.shape_padleft(self.B, 2), self.U, axes=[[2],[0]]) 
     sim_tw = T.reshape(sim_tw, (X_w.shape[0], X_w.shape[1]))
     dot_sum_w = T.sum(dot_tw * T.nnet.sigmoid(sim_tw), axis = 0)/(X_w.shape[0])
     dot_w = theano.tensor.reshape(dot_sum_w, (X_w.shape[1], 1))
     return self.activation(dot_w)
Author: ktsaurabh, Project: recursive_WSABIE, Lines: 14, Source: embeddings.py


Example 18: __init__

    def __init__(self, model, glm, latent):
        """ Initialize the filtered stim model
        """
        self.model = model
        self.bkgd_model = model["bkgd"]
        self.n = glm.n
        self.tuningcurves = latent[self.bkgd_model["tuningcurves"]]
        self.spatial_basis = self.tuningcurves.spatial_basis
        self.tc_spatial_shape = self.tuningcurves.spatial_shape
        self.tc_spatial_ndim = self.tuningcurves.spatial_ndim
        self.temporal_basis = self.tuningcurves.temporal_basis
        self.Bx = self.tuningcurves.Bx
        self.Bt = self.tuningcurves.Bt
        self.w_x = self.tuningcurves.w_x[:, self.tuningcurves.Y[self.n]]
        self.w_t = self.tuningcurves.w_t[:, self.tuningcurves.Y[self.n]]

        # Create a shared variable for the filtered stimulus. This is a 4D
        # tensor with dimensions:
        #   - time
        #   - location (pixel)
        #   - spatial basis
        #   - temporal basis
        # To get a stimulus current we need to select a location and take a
        # weighted sum along both the spatial and temporal axes.
        self.filtered_stim = theano.shared(name="stim", value=np.ones((1, 1, 1, 1)))

        self.locations = latent[self.bkgd_model["locations"]]
        self.L = self.locations.Lmatrix[self.n, :]
        self.loc_index = self.locations.location_prior.ravel_index(self.L)

        # Expose outputs to the Glm class

        # It matters that we do the dot products in order of outermost
        # to innermost dimension. This improves memory efficiency.
        # First contract the temporal basis axis of the stimulus (w_t)
        # Result is T x L x B_x
        self.I_stim_t = T.tensordot(self.filtered_stim, self.w_t, axes=[[3], [0]])
        self.I_stim_t.name = "I_stim_t"

        # Then take the dot product with the spatial basis coefficients (w_x)
        # Result is T x L (where L is the number of locations)
        self.I_stim_xt = T.tensordot(self.I_stim_t, self.w_x, axes=[[2], [0]])
        self.I_stim_xt.name = "I_stim_xt"

        self.I_stim = self.I_stim_xt[:, self.loc_index]
        self.I_stim.name = "I_stim"

        # There are no latent variables in this class. They all belong
        # to global latent variables.
        self.log_p = T.constant(0.0)
Author: remtcs, Project: theano_pyglm, Lines: 50, Source: bkgd.py
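
A numpy shape sketch of the two-stage filtering in Example 18 (basis sizes assumed): the tensordots contract the temporal and then the spatial basis axes of the (T, L, B_x, B_t) filtered stimulus, leaving one stimulus current per time step and location.

import numpy as np

T_, L_, Bx, Bt = 10, 4, 3, 2
filtered_stim = np.random.rand(T_, L_, Bx, Bt)
w_t = np.random.rand(Bt)  # temporal basis coefficients
w_x = np.random.rand(Bx)  # spatial basis coefficients

I_stim_t = np.tensordot(filtered_stim, w_t, axes=[[3], [0]])  # (T, L, B_x)
I_stim_xt = np.tensordot(I_stim_t, w_x, axes=[[2], [0]])      # (T, L)

assert I_stim_xt.shape == (T_, L_)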


Example 19: learningstep

 def learningstep(self, Y, L, W, epsilon, threshold):
     s = self._activation(Y,L,W,threshold)
     s.name = 's_%d.%d[t]'%(self._nmultilayer,self._nlayer)
     W_new = W + epsilon*(T.tensordot(s,Y,axes=[0,0]) -
                          T.sum(s,axis=0)[:,np.newaxis]*W)
     W_new.name = 'W_%d.%d[t]'%(self._nmultilayer,self._nlayer)
     return s, W_new
Author: dennisforster, Project: NeSi, Lines: 7, Source: mixturemodel_ssl_theano_scan.py


Example 20: get_output_for

    def get_output_for(self, input, init=False, **kwargs):
        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)
        
        activation = T.tensordot(input, self.W, [[1], [0]])
        abs_dif = (T.sum(abs(activation.dimshuffle(0,1,2,'x') - activation.dimshuffle('x',1,2,0)),axis=2)
                    + 1e6 * T.eye(input.shape[0]).dimshuffle(0,'x',1))

        if init:
            mean_min_abs_dif = 0.5 * T.mean(T.min(abs_dif, axis=2),axis=0)
            abs_dif /= mean_min_abs_dif.dimshuffle('x',0,'x')
            self.init_updates = [(self.log_weight_scale, self.log_weight_scale-T.log(mean_min_abs_dif).dimshuffle(0,'x'))]
        
        f = T.sum(T.exp(-abs_dif),axis=2)

        if init:
            mf = T.mean(f,axis=0)
            f -= mf.dimshuffle('x',0)
            self.init_updates.append((self.b, -mf))
        else:
            f += self.b.dimshuffle('x',0)

        return T.concatenate([input, f], axis=1)
Author: 255BITS, Project: improved-gan, Lines: 25, Source: nn.py



Note: the theano.tensor.tensordot examples in this article were compiled from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors, so consult each project's license before redistributing or reusing the code. Do not reproduce this compilation without permission.

