Python tensor.lmatrix Function Code Examples


This article collects typical usage examples of the theano.tensor.lmatrix function in Python. If you have been wondering what exactly lmatrix does and how to use it, the curated code examples below should help.



Below are 20 code examples of the lmatrix function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
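Before diving in, a quick refresher on the function itself: T.lmatrix(name) declares a symbolic two-dimensional variable of dtype int64 (the "l" prefix stands for long integer in Theano's type-naming scheme); it carries no data until a compiled function is called with concrete arrays. A minimal sketch, with illustrative variable names not taken from any example below:

import numpy as np
import theano
import theano.tensor as T

x = T.lmatrix('x')            # symbolic 2-D int64 matrix; holds no data yet
y = x * 2                     # build a symbolic expression graph
f = theano.function([x], y)   # compile the graph into a callable
print(f(np.array([[1, 2], [3, 4]], dtype='int64')))  # -> [[2 4] [6 8]]

Every example that follows uses the same pattern: declare lmatrix (often alongside sibling constructors such as ivector, lvector, or ftensor3) as a symbolic input, wire it into a graph, and compile with theano.function.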

Example 1: arch_memnet_selfsup

    def arch_memnet_selfsup(self):
        '''
        memory net with self supervision.
        '''
        contexts = T.ltensor3('contexts')
        querys = T.lmatrix('querys')
        yvs = T.lmatrix('yvs')

        params = []
        question_layer = Embed(self.vocab_size, self.hidden_dim)
        q = T.reshape(question_layer(querys.flatten()),
                      (self.batchsize, self.sen_maxlen, self.hidden_dim)
                      )
        if self.kwargs.get('position_encoding'):
            lmat = position_encoding(self.sen_maxlen, self.hidden_dim).dimshuffle('x', 0, 1)
            print('[memory network] use PE')
            q = q * lmat
        u = mean(q, axis=1)
        params.extend(question_layer.params)

        mem_layer = MemoryLayer(self.batchsize, self.mem_size, self.unit_size, self.vocab_size, self.hidden_dim,
                                **self.kwargs)
        probs = mem_layer.get_probs(contexts, u).dimshuffle(0, 2)

        inputs = {
            'contexts': contexts,
            'querys': querys,
            'yvs': yvs,
            'cvs': T.lmatrix('cvs')
        }
        return (probs, inputs, params)
Developer: BinbinBian, Project: cbt-model, Lines: 31, Source: embedding.py


Example 2: test_maxpool_layer_forward_pass

def test_maxpool_layer_forward_pass():
    W_emb = [[0, 0, 0, 0, 1],
             [0, 0, 0, 1, 0],
             [0, 0, 1, 0, 0],
             [0, 1, 0, 0, 0]]
    W_emb = np.array(W_emb)

    W_dense = [[0, 0, 0, 0, 1, 0, 0, 0, 0, 1],
               [0, 0, 0, 1, 0, 0, 0, 0,-0.5, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
    W_dense = np.array(W_dense, dtype=float).T

    bounds = T.lmatrix('bounds')
    X = T.lmatrix('X')

    l_in1 = InputLayer((None, 2), input_var=bounds)
    l_in2 = InputLayer((None, 2), input_var=X)
        
    h1 = lasagne.layers.EmbeddingLayer(l_in2, input_size=4, output_size=5, W=W_emb)
    h2 = lasagne.layers.FlattenLayer(h1)

    h3 = lasagne.layers.DenseLayer(h2, num_units=5, nonlinearity=rectify, W=W_dense)

    l_pool = MaxpoolLayer([l_in1, h3])

    predictions = get_output(l_pool)

    pred_func = theano.function([bounds, X], predictions, allow_input_downcast=True, on_unused_input='warn')

    test_bounds = np.array([[0, 4]])
    test_X = np.array([[0, 1], [0, 0], [1, 1], [3, 3]])

    print(pred_func(test_bounds, test_X))
Developer: honzas83, Project: kitchen, Lines: 35, Source: test_text.py


Example 3: multMatVect

def multMatVect(v, A, m1, B, m2):
    # TODO : need description for parameter and return
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C-code.

    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix('A')
        s_sym = tensor.ivector('s')
        m_sym = tensor.iscalar('m')
        A2_sym = tensor.lmatrix('A2')
        s2_sym = tensor.ivector('s2')
        m2_sym = tensor.iscalar('m2')
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r
Developer: bbudescu, Project: Theano, Lines: 35, Source: rng_mrg.py


Example 4: jointModelOutput

def jointModelOutput(num_sub_activities, num_affordances, num_sub_activities_anticipation, 
		num_affordances_anticipation, inputJointFeatures, inputHumanFeatures, inputObjectFeatures):

	shared_input_layer = TemporalInputFeatures(inputJointFeatures)
	shared_hidden_layer = LSTM('tanh','sigmoid','orthogonal',4,128)
	#shared_hidden_layer = simpleRNN('tanh','orthogonal',4,128)
	shared_layers = [shared_input_layer,shared_hidden_layer]
	human_layers = [ConcatenateFeatures(inputHumanFeatures),LSTM('tanh','sigmoid','orthogonal',4,256)]
	object_layers = [ConcatenateFeatures(inputObjectFeatures),LSTM('tanh','sigmoid','orthogonal',4,256)]

	human_anticipation = [softmax(num_sub_activities_anticipation)]
	human_detection = [softmax(num_sub_activities)]

	object_anticipation = [softmax(num_affordances_anticipation)]
	object_detection = [softmax(num_affordances)]

	trY_1_detection = T.lmatrix()
	trY_2_detection = T.lmatrix()
	trY_1_anticipation = T.lmatrix()
	trY_2_anticipation = T.lmatrix()
	sharedrnn = SharedRNNOutput(
				shared_layers, human_layers, object_layers, 
				human_detection, human_anticipation, object_detection,
				object_anticipation, softmax_loss, trY_1_detection, 
				trY_2_detection,trY_1_anticipation,trY_2_anticipation,1e-3
				)
	return sharedrnn
Developer: AndrewChiyz, Project: RNNexp, Lines: 27, Source: activity-rnn-full-model.py


Example 5: test_blocksparse_grad_merge

def test_blocksparse_grad_merge():
    b = tensor.fmatrix()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    W_val, h_val, iIdx_val, b_val, oIdx_val = blocksparse_data()
    W = float32_shared_constructor(W_val)

    o = sparse_block_gemv_ss(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    gW = theano.grad(o.sum(), W)

    lr = numpy.asarray(0.05, dtype='float32')

    upd = W - lr * gW

    f1 = theano.function([h, iIdx, b, oIdx], updates=[(W, upd)],
                         mode=mode_with_gpu)
    # not running with mode=gpu ensures that the elemwise is not merged in
    mode = None
    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.mode.get_mode('FAST_RUN')

    f2 = theano.function([h, iIdx, b, oIdx], updates=[(W, upd)], mode=mode)

    f2(h_val, iIdx_val, b_val, oIdx_val)
    W_ref = W.get_value()

    # reset the var
    W.set_value(W_val)
    f1(h_val, iIdx_val, b_val, oIdx_val)
    W_opt = W.get_value()

    utt.assert_allclose(W_ref, W_opt)
Developer: 317070, Project: Theano, Lines: 34, Source: test_blocksparse.py


Example 6: multMatVect

def multMatVect(v, A, m1, B, m2):
    """
    multiply the first half of v by A with a modulo of m1
    and the second half by B with a modulo of m2

    Note: The parameters of dot_modulo are passed implicitly because passing
    them explicitly takes more time than running the function's C-code.
    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix("A")
        s_sym = tensor.ivector("s")
        m_sym = tensor.iscalar("m")
        A2_sym = tensor.lmatrix("A2")
        s2_sym = tensor.ivector("s2")
        m2_sym = tensor.iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r
Developer: Tanjay94, Project: Theano, Lines: 30, Source: rng_mrg.py


Example 7: test_multMatVect

def test_multMatVect():
    A1 = tensor.lmatrix('A1')
    s1 = tensor.ivector('s1')
    m1 = tensor.iscalar('m1')
    A2 = tensor.lmatrix('A2')
    s2 = tensor.ivector('s2')
    m2 = tensor.iscalar('m2')

    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = theano.function([A1, s1, m1, A2, s2, m2], g0)

    i32max = numpy.iinfo(numpy.int32).max

    A1 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s1 = numpy.random.randint(0, i32max, 3).astype('int32')
    m1 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
    A2 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s2 = numpy.random.randint(0, i32max, 3).astype('int32')
    m2 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")

    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2

    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
    f0.fn()
    r_b = f0.output_storage[0].value

    assert numpy.allclose(r_a1, r_b[:3])
    assert numpy.allclose(r_a2, r_b[3:])
Developer: gyenney, Project: Tools, Lines: 34, Source: test_rng_mrg.py


Example 8: train_minibatch_fn

    def train_minibatch_fn(self, evaluate=False):
        """
        Initialize this Theano function once
        """
        X = T.lmatrix('X_train')
        L_x = T.lvector('L_X_train')

        Y = T.lmatrix('Y_train')
        L_y = T.lvector('L_y_train')

        learning_rate = T.dscalar('learning_rate')
        momentum = T.dscalar('momentum')
        weight_decay = T.dscalar('weight_decay')

        loss, accuracy = self.loss(X, L_x, Y, L_y, weight_decay)
        updates = self.get_sgd_updates(loss, learning_rate, momentum)

        outputs = [loss, accuracy]

        if evaluate:
            precision, recall = self.evaluate(X, L_x, Y, L_y)
            outputs = outputs + [precision, recall]

        return theano.function(
            inputs=[X, L_x, Y, L_y, learning_rate, momentum, weight_decay],
            outputs=outputs,
            updates=updates
        )
Developer: tivaro, Project: ULL-P2, Lines: 28, Source: end_to_end_model.py


Example 9: DRAmodelnoedge

def DRAmodelnoedge(nodeList,edgeList,edgeListComplete,edgeFeatures,nodeFeatures,nodeToEdgeConnections,clipnorm=25.0,train_for='joint'):
	edgeRNNs = {}
	edgeTypes = edgeList
	lstm_init = 'orthogonal'
	softmax_init = 'uniform'
	
	rng = np.random.RandomState(1234567890)

	for et in edgeTypes:
		inputJointFeatures = edgeFeatures[et]
		print(inputJointFeatures)
		edgeRNNs[et] = [TemporalInputFeatures(inputJointFeatures)] #128

	nodeRNNs = {}
	nodeTypes = nodeList.keys()
	nodeLabels = {}
	outputLayer = {}
	for nt in nodeTypes:
		num_classes = nodeList[nt]
		#nodeRNNs[nt] = [LSTM('tanh','sigmoid',lstm_init,truncate_gradient=4,size=256,rng=rng),softmax(num_classes,softmax_init,rng=rng)] #256
		nodeRNNs[nt] = [LSTM('tanh','sigmoid',lstm_init,truncate_gradient=4,size=args.nodeRNN_size,rng=rng)] #256
		if train_for=='joint':
			nodeLabels[nt] = {}
			nodeLabels[nt]['detection'] = T.lmatrix()
			nodeLabels[nt]['anticipation'] = T.lmatrix()
			outputLayer[nt] = [softmax(num_classes,softmax_init,rng=rng),softmax(num_classes+1,softmax_init,rng=rng)]
		else:
			nodeLabels[nt] = T.lmatrix()
			outputLayer[nt] = [softmax(num_classes,softmax_init,rng=rng)]
		et = nt+'_input'
		edgeRNNs[et] = [TemporalInputFeatures(nodeFeatures[nt])]
	learning_rate = T.fscalar()
	dra = DRAanticipation(edgeRNNs,nodeRNNs,outputLayer,nodeToEdgeConnections,edgeListComplete,softmax_loss,nodeLabels,learning_rate,clipnorm,train_for=train_for)
	return dra
Developer: AndrewChiyz, Project: RNNexp, Lines: 34, Source: activity-dra.py


Example 10: test_blocksparse_gpu_gemv_opt

def test_blocksparse_gpu_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)

    assert isinstance(f.maker.fgraph.toposort()[-2].op, GpuSparseBlockGemv)
Developer: ChihebTrabelsi, Project: Theano, Lines: 12, Source: test_opt.py


Example 11: test_correct_solution

    def test_correct_solution(self):
        x = tensor.lmatrix()
        y = tensor.lmatrix()
        z = tensor.lscalar()
        b = theano.tensor.nlinalg.lstsq()(x, y, z)
        f = function([x, y, z], b)
        TestMatrix1 = np.asarray([[2, 1], [3, 4]])
        TestMatrix2 = np.asarray([[17, 20], [43, 50]])
        TestScalar = np.asarray(1)
        m = f(TestMatrix1, TestMatrix2, TestScalar)
        self.assertTrue(np.allclose(TestMatrix2, np.dot(TestMatrix1, m[0])))
Developer: EugenePY, Project: Theano, Lines: 12, Source: test_nlinalg.py


Example 12: test_blocksparse_gpu_gemv_opt

def test_blocksparse_gpu_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o, mode=mode_with_gpu)

    assert sum(1 for n in f.maker.fgraph.apply_nodes
               if isinstance(n.op, GpuSparseBlockGemv)) == 1
Developer: Abioy, Project: Theano, Lines: 13, Source: test_opt.py


Example 13: test7

def test7():
    A = T.lmatrix("A")
    A_start = T.lvector("A_start")
    f = T.lmatrix("f")
    tgt = T.ivector("tgt")
    v = Viterbi(A, A_start, f, tgt)
    decode = v.decode()
    ff = theano.function([A, A_start, f, tgt], outputs=v.apply())
    ff2 = theano.function([A, A_start, f, tgt], decode)
    print(ff2([[1, 3, 1], [1, 2, 2], [2, 1, 3]],
              [1, 2, 1],
              [[1, 2, 3], [2, 2, 1], [3, 3, 2], [1, 1, 2]],
              [1, 2, 1, 2]))
Developer: gumaojie, Project: cws_theano, Lines: 13, Source: test.py


Example 14: test_blocksparse_gpu_outer_opt

def test_blocksparse_gpu_outer_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], [o, tensor.grad(o.sum(),
                                                               wrt=W)],
                        mode=mode_with_gpu)

    assert isinstance(f.maker.fgraph.toposort()[-2].op, GpuSparseBlockOuter)
Developer: ChihebTrabelsi, Project: Theano, Lines: 14, Source: test_opt.py


Example 15: test_blocksparse_inplace_gemv_opt

def test_blocksparse_inplace_gemv_opt():
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.lmatrix()
    oIdx = tensor.lmatrix()

    o = sparse_block_dot(W, h, iIdx, b, oIdx)

    f = theano.function([W, h, iIdx, b, oIdx], o)

    if theano.config.mode == "FAST_COMPILE":
        assert not f.maker.fgraph.toposort()[-1].op.inplace
    else:
        assert f.maker.fgraph.toposort()[-1].op.inplace
Developer: aalmah, Project: Theano, Lines: 15, Source: test_opt.py


Example 16: getAlignment

    def getAlignment(self):
        unk_idx = self.config['unk_id']
        source_sentence = tensor.lmatrix('source')
        target_sentence = tensor.lmatrix('target')

        ftrans = open('/Users/lqy/Documents/transout.txt','w',0)

        falign = gzip.open('/Users/lqy/Documents/alignmentout','w',0)

        sampling_representation = encoder.apply(source_sentence, tensor.ones(source_sentence.shape))

        for i, line in enumerate(self.data_stream.get_epoch_iterator()):
            seq = self._oov_to_unk(line[0], self.config['src_vocab_size'], unk_idx)
            input_ = numpy.tile(seq, (config['beam_size'], 1))
            print "input_: ",input_
Developer: dery-hit, Project: blocks_exercise, Lines: 15, Source: test_zhu.py


Example 17: test_ctc_targets

def test_ctc_targets():
    LENGTH = 20
    BATCHES = 4
    CLASSES = 2
    y_hat = T.tensor3('features')
    input_mask = T.matrix('features_mask')
    y_hat_mask = input_mask
    y = T.lmatrix('phonemes')
    y_mask = T.matrix('phonemes_mask')
    ctc_target = ctc_cost.get_targets(y, T.log(y_hat), y_mask, y_hat_mask)
    Y_hat = np.zeros((LENGTH, BATCHES, CLASSES + 1), dtype=floatX)
    Y_hat[:, :, 0] = .7
    Y_hat[:, :, 1] = .2
    Y_hat[:, :, 2] = .1
    Y_hat[3, :, 0] = .3
    Y_hat[3, :, 1] = .4
    Y_hat[3, :, 2] = .3
    Y = np.zeros((2, BATCHES), dtype='int64')
    Y_hat_mask = np.ones((LENGTH, BATCHES), dtype=floatX)
    Y_hat_mask[-5:] = 0
    # default blank symbol is the highest class index (2 in this case)
    Y_mask = np.asarray(np.ones_like(Y), dtype=floatX)
    target = ctc_target.eval({y_hat: Y_hat, y: Y,
                              y_hat_mask: Y_hat_mask, y_mask: Y_mask})
    # Note that this part is the same as the cross entropy gradient
    grad = -target / Y_hat
    test_grad = finite_diff(Y, Y_hat, Y_mask, Y_hat_mask, eps=1e-2, n_steps=5)
    testing.assert_almost_equal(grad.flatten()[:5],
                                test_grad.flatten()[:5], decimal=3)
Developer: daweileng, Project: CTC-Connectionist-Temporal-Classification, Lines: 29, Source: test_ctc.py


Example 18: test_ctc_pseudo_cost

def test_ctc_pseudo_cost():
    LENGTH = 500
    BATCHES = 40
    CLASSES = 2
    N_LABELS = 45
    y_hat = T.tensor3('features')
    input_mask = T.matrix('features_mask')
    y_hat_mask = input_mask
    y = T.lmatrix('phonemes')
    y_mask = T.matrix('phonemes_mask')
    pseudo_cost = ctc_cost.pseudo_cost(y, y_hat, y_mask, y_hat_mask)

    Y_hat = np.zeros((LENGTH, BATCHES, CLASSES + 1), dtype=floatX)
    Y_hat[:, :, 0] = .75
    Y_hat[:, :, 1] = .2
    Y_hat[:, :, 2] = .05
    Y_hat[3, 0, 0] = .3
    Y_hat[3, 0, 1] = .4
    Y_hat[3, 0, 2] = .3
    Y = np.zeros((N_LABELS, BATCHES), dtype='int64')
    Y[25:, :] = 1
    Y_hat_mask = np.ones((LENGTH, BATCHES), dtype=floatX)
    Y_hat_mask[-5:] = 0
    # default blank symbol is the highest class index (2 in this case)
    Y_mask = np.asarray(np.ones_like(Y), dtype=floatX)
    Y_mask[30:] = 0
    cost = pseudo_cost.eval({y_hat: Y_hat, y: Y,
                             y_hat_mask: Y_hat_mask, y_mask: Y_mask})
    pseudo_grad = T.grad(ctc_cost.pseudo_cost(y, y_hat,
                                              y_mask, y_hat_mask).sum(),
                         y_hat)
    #test_grad2 = pseudo_grad.eval({y_hat: Y_hat, y: Y,
    #                               y_hat_mask: Y_hat_mask, y_mask: Y_mask})
    # TODO: write some more meaningful asserts here
    assert cost.sum() > 0
Developer: daweileng, Project: CTC-Connectionist-Temporal-Classification, Lines: 35, Source: test_ctc.py


Example 19: test10

def test10():
    src = T.ltensor3("src")
    tgt = T.lmatrix("tgt")
    mask = T.matrix("mask")
    prd = T.matrix("prd")
    n_hids, vocab_size = 3, 60
    hs = HierarchicalSoftmax(src, n_hids, vocab_size)
    #prd = hs.test()
    res = hs.cost(tgt, mask)
    x = [
            [[1,1,1],[2,2,2],[3,3,3],[4,4,4]],
            [[3,3,3],[4,4,4],[5,5,5],[6,6,6]]
        ]
    y = [
            [1,1,1,1],
            [1,1,1,1]
        ]
    m = [
            [1,1,0,0],
            [1,1,0,0]
        ]
    fn3 = theano.function(inputs=[src,tgt,mask], outputs=[res], on_unused_input='ignore')
    res = fn3(x,y,m)
    print(res, res[0].shape)
    x_a = np.array(x)
    print(x_a.shape, x_a[y])
Developer: gumaojie, Project: morphlm, Lines: 26, Source: test.py


Example 20: __init__

    def __init__(self, R, k, E, U, EU, embedding_size):
        self.k = k # Slices count
        self.R = R
        self.embedding_size = embedding_size

        init_range = 0.07
        init_range_W = 0.001
        # Setup params
        #Tensor matrix
        W = np.random.uniform(low=-init_range_W, high=init_range_W, size=(self.embedding_size, self.embedding_size, k))
        #Neural matrix
        V = np.random.uniform(low=-init_range, high=init_range, size=(2*self.embedding_size, k))
        #Bias
        b = np.random.uniform(low=-init_range, high=init_range, size=(k,))
        #Concatenation
        u = np.random.uniform(low=-init_range, high=init_range, size=(k, ))

        self.embedding_size_t = theano.shared(self.embedding_size)
        self.W = theano.shared(np.asarray(W, dtype=theano.config.floatX), name="W")

        self.E, self.U, self.EU = E, U, EU # Shared among networks

        self.V, self.b, self.u = theano.shared(np.asarray(V, dtype=theano.config.floatX), name="V"+str(R)), \
                                 theano.shared(np.asarray(b, dtype=theano.config.floatX), name="b"+str(R)), \
                                 theano.shared(np.asarray(u, dtype=theano.config.floatX), name="u"+str(R))

        self.params = [self.W, self.U, self.V, self.b, self.u]


        self.input = T.lmatrix()

        self.inputs = [self.input] # For trainer
Developer: mapleyustat, Project: learning-wordnet, Lines: 32, Source: model.py



Note: The theano.tensor.lmatrix examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors. Refer to each project's license before redistributing or using the code; do not reproduce without permission.

