
Python tensor.ivector Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.ivector. If you are wondering what ivector does, how to call it, or how it is used in practice, the curated code examples below should help.



20 code examples of the ivector function are shown below, ordered roughly by popularity.
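
Before the project examples, here is a minimal, self-contained sketch (not taken from any project below) of what T.ivector declares: a symbolic one-dimensional tensor of int32 values. Integer vectors of this kind most often carry class labels or row indices into an embedding or lookup table, which is exactly how the examples below use them:

import numpy as np
import theano
import theano.tensor as T

idx = T.ivector('idx')                        # symbolic 1-D int32 vector
table = theano.shared(
    np.arange(20, dtype=theano.config.floatX).reshape(5, 4))
rows = table[idx]                             # typical use: indexing rows of a shared matrix
f = theano.function([idx], rows)
print(f(np.array([0, 2, 4], dtype='int32')))  # rows 0, 2 and 4 of the table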

Example 1: predict_next_batch

 def predict_next_batch(self, session_ids, input_item_ids, predict_for_item_ids=None, batch=100):
     '''
     Gives prediction scores for a selected set of items. Can be used in batch mode to predict for multiple independent events (i.e. events of different sessions) at once and thus speed up evaluation.
     
     If the session ID at a given coordinate of the session_ids parameter remains the same during subsequent calls of the function, the corresponding hidden state of the network will be kept intact (i.e. that's how one can predict an item to a session).
     If it changes, the hidden state of the network is reset to zeros.
             
     Parameters
     --------
     session_ids : 1D array
         Contains the session IDs of the events of the batch. Its length must equal the prediction batch size (batch param).
     input_item_ids : 1D array
         Contains the item IDs of the events of the batch. Every item ID must be in the training data of the network. Its length must equal the prediction batch size (batch param).
     predict_for_item_ids : 1D array (optional)
         IDs of items for which the network should give prediction scores. Every ID must be in the training set. The default value is None, which means that the network gives prediction on its every output (i.e. for all items in the training set).
     batch : int
         Prediction batch size.
         
     Returns
     --------
     out : pandas.DataFrame
         Prediction scores for selected items for every event of the batch. 
         Columns: events of the batch; rows: items. Rows are indexed by the item IDs.
     
     '''
     if self.error_during_train: raise Exception
     if self.predict is None or self.predict_batch!=batch:
         X = T.ivector()
         Y = T.ivector()
         for i in range(len(self.layers)):
             self.H[i].set_value(np.zeros((batch,self.layers[i]), dtype=theano.config.floatX), borrow=True)
         if predict_for_item_ids is not None:
             H_new, yhat, _ = self.model(X, self.H, Y, 0)
         else:
             H_new, yhat = self.model_test(X, self.H)
         updatesH = OrderedDict()
         for i in range(len(self.H)):
             updatesH[self.H[i]] = H_new[i]
         if predict_for_item_ids is not None:
             self.predict = function(inputs=[X, Y], outputs=yhat, updates=updatesH, allow_input_downcast=True)
         else:
             self.predict = function(inputs=[X], outputs=yhat, updates=updatesH, allow_input_downcast=True)
         self.current_session = np.ones(batch) * -1
         self.predict_batch = batch
     session_change = np.arange(batch)[session_ids != self.current_session]
     if len(session_change) > 0:
         for i in range(len(self.H)):
             tmp = self.H[i].get_value(borrow=True)
             tmp[session_change] = 0
             self.H[i].set_value(tmp, borrow=True)
         self.current_session=session_ids.copy()
     in_idxs = self.itemidmap[input_item_ids]
     if predict_for_item_ids is not None:
         iIdxs = self.itemidmap[predict_for_item_ids]
         preds = np.asarray(self.predict(in_idxs, iIdxs)).T
         return pd.DataFrame(data=preds, index=predict_for_item_ids)
     else:
         in_idxs.values[np.isnan(in_idxs.values)] = 0 
         preds = np.asarray(self.predict(in_idxs)).T
         return pd.DataFrame(data=preds, index=self.itemidmap.index)
Developer: royee17, Project: DS_Workshop, Lines: 60, Source: gru4rec.py
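
A hypothetical usage sketch (the instance name gru and all IDs below are illustrative, not from the project): assuming a trained instance of the class above, and that every item ID appears in its training data, scoring three parallel sessions looks like this:

import numpy as np

session_ids = np.array([1, 1, 2])         # one entry per concurrent session
input_items = np.array([101, 205, 99])    # the item just consumed in each session
scores = gru.predict_next_batch(session_ids, input_items, batch=3)
# scores is a pandas.DataFrame: rows are item IDs, columns are the batch events
print(scores[0].nlargest(5))              # top-5 predicted next items for event 0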


Example 2: test_multMatVect

def test_multMatVect():
    A1 = tensor.lmatrix('A1')
    s1 = tensor.ivector('s1')
    m1 = tensor.iscalar('m1')
    A2 = tensor.lmatrix('A2')
    s2 = tensor.ivector('s2')
    m2 = tensor.iscalar('m2')

    g0 = rng_mrg.DotModulo()(A1, s1, m1, A2, s2, m2)
    f0 = theano.function([A1, s1, m1, A2, s2, m2], g0)

    i32max = numpy.iinfo(numpy.int32).max

    A1 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s1 = numpy.random.randint(0, i32max, 3).astype('int32')
    m1 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")
    A2 = numpy.random.randint(0, i32max, (3, 3)).astype('int64')
    s2 = numpy.random.randint(0, i32max, 3).astype('int32')
    m2 = numpy.asarray(numpy.random.randint(i32max), dtype="int32")

    f0.input_storage[0].storage[0] = A1
    f0.input_storage[1].storage[0] = s1
    f0.input_storage[2].storage[0] = m1
    f0.input_storage[3].storage[0] = A2
    f0.input_storage[4].storage[0] = s2
    f0.input_storage[5].storage[0] = m2

    r_a1 = rng_mrg.matVecModM(A1, s1, m1)
    r_a2 = rng_mrg.matVecModM(A2, s2, m2)
    f0.fn()
    r_b = f0.output_storage[0].value

    assert numpy.allclose(r_a1, r_b[:3])
    assert numpy.allclose(r_a2, r_b[3:])
Developer: gyenney, Project: Tools, Lines: 34, Source: test_rng_mrg.py


Example 3: bsgd1

def bsgd1(nn, data, name='sgd', lr=0.022, alpha=0.3, batch_size=500, epochs = 10):
	train_set_x, train_set_y = data[0]
	valid_set_x, valid_set_y = data[1]
	test_set_x, test_set_y = data[2]

	# valid_y_numpy = y_numpy[0]
	# test_y_numpy = y_numpy[1]
	test_y_numpy = map_48_to_39(test_y_numpy)
	valid_y_numpy = map_48_to_39(valid_y_numpy)
	print test_y_numpy

	num_samples = train_set_x.get_value(borrow=True).shape[0] 
	num_batches = num_samples / batch_size 

	layers = nn.layers
	x = T.matrix('x')
	y = T.ivector('y')
	y_eval = T.ivector('y_eval')

	cost = nn.cost(x, y)
	accuracy = nn.calcAccuracy(x, y)
	params = nn.params
	delta_params = nn.delta_params

	print theano.pp(cost)
	# theano.pp(accuracy)

	p_grads = [T.grad(cost=cost, wrt = p) for p in params]  
	# implementing gradient descent with momentum 
	print p_grads
	updates = OrderedDict()
	for dp, gp in zip(delta_params, p_grads):
		updates[dp] = dp*alpha - gp*lr
	for p, dp in zip(params, delta_params):
		updates[p] = p + updates[dp]

	# updates = [(p, p - lr*gp) for p, gp in zip(params, p_grads)]
	index = T.ivector('index')
	batch_sgd_train = theano.function(inputs=[index], outputs=[cost, accuracy], updates=updates, givens={x: train_set_x[index], y:train_set_y[index]})

	batch_sgd_valid = theano.function(inputs=[], outputs=[nn.calcAccuracy(x, y), nn.calcAccuracyTimit(x,y)], givens={x: valid_set_x, y:valid_set_y})

	batch_sgd_test = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y), givens={x: test_set_x, y:test_set_y})

	indices = np.arange(num_samples,  dtype=np.dtype('int32'))
	np.random.shuffle(indices)

	for n in xrange(epochs):
		np.random.shuffle(indices)
		for i in xrange(num_batches):
			batch = indices[i*batch_size: (i+1)*batch_size]
			batch_sgd_train(batch)

		# y_np = y.get_value()
		# print y.eval()

		print "epoch:", n,  "	validation accuracy:",  batch_sgd_valid()


	print batch_sgd_test()
Developer: adhaka, Project: kthasrdnn, Lines: 60, Source: sgd.py
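
The two update loops above implement classical momentum. In update-rule form, using the code's names (alpha is the momentum coefficient, lr the learning rate):

delta_p_new = alpha * delta_p - lr * grad_p    # velocity update (updates[dp])
p_new       = p + delta_p_new                  # parameter update (updates[p])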


Example 4: __init__

    def __init__(self, dnodex,inputdim,dim):
        X=T.ivector()
        Y=T.ivector()
        Z=T.lscalar()
        eta = T.scalar()
        temperature=T.scalar()
        self.dnodex=dnodex
        num_input = inputdim
        dnodex.umatrix=theano.shared(floatX(np.random.randn(*(self.dnodex.nuser,inputdim, inputdim))))
        dnodex.pmatrix=theano.shared(floatX(np.random.randn(*(self.dnodex.npoi,inputdim))))
        dnodex.p_l2_norm=(dnodex.pmatrix**2).sum()
        dnodex.u_l2_norm=(dnodex.umatrix**2).sum()
        num_hidden = dim
        num_output = inputdim
        inputs = InputPLayer(dnodex.pmatrix[X,:], dnodex.umatrix[Z,:,:], name="inputs")
        lstm1 = LSTMLayer(num_input, num_hidden, input_layer=inputs, name="lstm1")
        lstm2 = LSTMLayer(num_hidden, num_hidden, input_layer=lstm1, name="lstm2")
        lstm3 = LSTMLayer(num_hidden, num_hidden, input_layer=lstm2, name="lstm3")
        softmax = SoftmaxPLayer(num_hidden, num_output, dnodex.umatrix[Z,:,:], input_layer=lstm3, name="yhat", temperature=temperature)

        Y_hat = softmax.output()

        self.layers = inputs, lstm1,lstm2,lstm3,softmax
        params = get_params(self.layers)
        #caches = make_caches(params)

        cost = T.mean(T.nnet.categorical_crossentropy(Y_hat, T.dot(dnodex.pmatrix[Y,:],dnodex.umatrix[Z,:,:])))+eta*dnodex.p_l2_norm+eta*dnodex.u_l2_norm
        updates = PerSGD(cost,params,eta,X,Z,dnodex)#momentum(cost, params, caches, eta)

        self.train = theano.function([X,Y,Z, eta, temperature], cost, updates=updates, allow_input_downcast=True)

        predict_updates = one_step_updates(self.layers)
        self.predict_char = theano.function([X, Z, temperature], Y_hat, updates=predict_updates, allow_input_downcast=True)
Developer: tonytongzhao, Project: PyRNN, Lines: 33, Source: personalize_rnn.py


Example 5: multMatVect

def multMatVect(v, A, m1, B, m2):
    """
    multiply the first half of v by A with a modulo of m1
    and the second half by B with a modulo of m2

    Note: The parameters of dot_modulo are passed implicitly because passing
    them explicitly takes more time than running the function's C code.
    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix("A")
        s_sym = tensor.ivector("s")
        m_sym = tensor.iscalar("m")
        A2_sym = tensor.lmatrix("A2")
        s2_sym = tensor.ivector("s2")
        m2_sym = tensor.iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]

    return r
Developer: Tanjay94, Project: Theano, Lines: 30, Source: rng_mrg.py
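
The input_storage/fn() calls bypass the Python-level argument checking and conversion that a normal Theano function call performs. The straightforward equivalent (a sketch; identical result, just slower per call) would be:

# Equivalent direct call: Theano validates and converts the inputs every time.
r = multMatVect.dot_modulo(A, v[:3], m1, B, v[3:], m2)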


Example 6: __theano_build__

 def __theano_build__(self):
   U, V, W = self.U, self.V, self.W
   x = T.ivector('x')
   y = T.ivector('y')
   def forward_prop_step(x_t, s_t_prev, U, V, W):
     s_t = T.tanh(U[:,x_t] + W.dot(s_t_prev))
     o_t = T.nnet.softmax(V.dot(s_t))
     return [o_t[0], s_t]
   [o,s], updates = theano.scan(
     forward_prop_step,
     sequences=x,
     outputs_info=[None, dict(initial=T.zeros(self.hidden_dim))],
     non_sequences=[U, V, W],
     truncate_gradient=self.bptt_truncate,
     strict=True)
   prediction = T.argmax(o, axis=1)
   o_error = T.sum(T.nnet.categorical_crossentropy(o, y))
   # Gradients
   dU = T.grad(o_error, U)
   dV = T.grad(o_error, V)
   dW = T.grad(o_error, W)      
   # Assign functions
   self.forward_propagation = theano.function([x], o)
   self.predict = theano.function([x], prediction)
   self.ce_error = theano.function([x, y], o_error)
   self.bptt = theano.function([x, y], [dU, dV, dW])
   # SGD
   learning_rate = T.scalar('learning_rate')
   self.sgd_step = theano.function([x,y,learning_rate], [], 
                   updates=[(self.U, self.U - learning_rate * dU),
                            (self.V, self.V - learning_rate * dV),
                            (self.W, self.W - learning_rate * dW)])
Developer: binxuankong, Project: poetry-generator, Lines: 32, Source: RNNTheano.py
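
The inner forward_prop_step implements the vanilla-RNN recurrence that theano.scan unrolls over the input sequence; in the code's notation:

s_t = tanh(U[:, x_t] + W · s_{t-1})    # hidden state; U[:, x_t] acts as an embedding lookup
o_t = softmax(V · s_t)                 # output distribution over the vocabulary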


Example 7: main

def main(num_epochs=NUM_EPOCHS):
    print("Building network ...")
    # First, we build the network, starting with an input layer
    # Recurrent layers expect input of shape
    # (batch size, SEQ_LENGTH, num_inputs)
    
    #The network model
    
    l_in            = lasagne.layers.InputLayer(shape=(BATCH_SIZE, SEQ_LENGTH, num_inputs))
    l_forward_1     = lasagne.layers.LSTMLayer(l_in, N_HIDDEN, grad_clipping=GRAD_CLIP,nonlinearity=lasagne.nonlinearities.tanh)
    l_forward_2     = lasagne.layers.LSTMLayer(l_forward_1, N_HIDDEN, grad_clipping=GRAD_CLIP,nonlinearity=lasagne.nonlinearities.tanh)
    l_shp           = lasagne.layers.ReshapeLayer(l_forward_2, (-1, N_HIDDEN))
    l_dense         = lasagne.layers.DenseLayer(l_shp, num_units=num_inputs, nonlinearity=lasagne.nonlinearities.linear)
    l_out           = lasagne.layers.ReshapeLayer(l_dense, (-1, SEQ_LENGTH, num_inputs))
    
    # symbolic input and target variables (declared before they are used in the cost)
    input_values    = T.ivector('input_values')
    target_values   = T.ivector('target_values')

    # create output out of input in order to save memory?
    network_output  = lasagne.layers.get_output(l_out)
    cost            = lasagne.objectives.squared_error(network_output,target_values).mean()
    all_params      = lasagne.layers.get_all_params(l_out,trainable=True)
    updates         = lasagne.updates.adagrad(cost, all_params, LEARNING_RATE)

    # Theano functions for training and computing cost
    print("Compiling functions ...")
    train           = theano.function([l_in.input_var, target_values], cost, updates=updates, allow_input_downcast=True)
    compute_cost    = theano.function([l_in.input_var, target_values], cost, allow_input_downcast=True)
Developer: AlexNguyen124, Project: ift6266h16, Lines: 29, Source: lstm_audio_2.py


Example 8: initialize

    def initialize(self):
        users = T.ivector()
        items = T.ivector()
        ratings = T.vector()

        self.U = theano.shared(
            np.array(
                np.random.normal(scale=0.001, size=(self.n_users, self.n_factors)),
                dtype=theano.config.floatX
            )
        )
        self.I = theano.shared(
            np.array(
                np.random.normal(scale=0.001, size=(self.n_items, self.n_factors)),
                dtype=theano.config.floatX
            )
        )

        predictions = (self.U[users] * self.I[items]).sum(axis=1)

        train_error = (
            ((predictions - ratings) ** 2).mean() +
            self.regularization * (
                T.sum(self.U ** 2) +
                T.sum(self.I ** 2)
            )
        )
        test_error = ((predictions - ratings) ** 2).mean()

        params = [self.U, self.I]
        learning_rate = theano.shared(np.array(self.learning_rate, dtype=theano.config.floatX))
        updates = self.optimizer(train_error, params, learning_rate=learning_rate)
        self.train_theano = theano.function([users, items, ratings], train_error, updates=updates)
        self.test_theano = theano.function([users, items, ratings], test_error)
        self.predict_theano = theano.function([users, items], predictions)
Developer: rocknrollnerd, Project: adversarial_mf, Lines: 35, Source: mf.py
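
A hypothetical usage sketch (the instance name mf and the data are illustrative): assuming an object of the class above with n_users, n_items, n_factors, regularization, learning_rate and optimizer already set, one training step on a mini-batch of observed ratings looks like:

import numpy as np

users   = np.array([0, 1, 2], dtype='int32')
items   = np.array([5, 3, 5], dtype='int32')
ratings = np.array([4.0, 3.5, 5.0], dtype='float32')

mf.initialize()
train_err = mf.train_theano(users, items, ratings)   # one optimizer step, returns the regularized error
preds     = mf.predict_theano(users, items)          # dot(U[u], I[i]) for each (user, item) pair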


Example 9: evaluate_ready

	def evaluate_ready(self, ispro = True):
		var_x = T.ivector()
		var_y = T.ivector()

		print "adopt   mention  level  evaluate ????????????????????       "+str(self.ismention)
		if self.model_type == "softmax" or self.model_type == "softmax_reg":

			if self.istransition:
				output = self.structure1(var_x, ispro = False)
				self.evafunc = theano.function([var_x], output)

			else:
				output = self.structure1(var_x, ispro)
				self.evafunc = theano.function([var_x], output)

		
		elif self.model_type == "maxneg":
			out1, out2 = self.structure2(var_x,ispro)
			self.evafunc = theano.function([var_x], [out1,out2])

		elif self.model_type == "maxout":

			out1, out2 = self.structure2(var_x,False)
			self.evafunc = theano.function([var_x], [out1,out2])
		else: raise Exception
Developer: mswellhao, Project: active_NER, Lines: 25, Source: token_model.py


Example 10: __init__

  def __init__(self, vocabulary_size, hidden_size, output_size):
    X = tensor.ivector()
    Y = tensor.ivector()
    keep_prob = tensor.fscalar()
    learning_rate = tensor.fscalar()

    emb_layer = Embedding(vocabulary_size, hidden_size)
    lstm_layer = BiLSTM(hidden_size, hidden_size)
    dropout_layer = Dropout(keep_prob)
    fc_layer = FullConnect(2*hidden_size, output_size)
    crf = CRF(output_size)
    # graph definition
    X_emb = emb_layer(X)
    scores = fc_layer(tensor.tanh(lstm_layer(dropout_layer(X_emb))))
    
    loss, predict = crf(scores, Y, isTraining=True)
    # loss, predict and accuracy
    accuracy = tensor.sum(tensor.eq(predict, Y)) * 1.0 / Y.shape[0]

    params = emb_layer.params + lstm_layer.params + fc_layer.params + crf.params
    updates = MomentumSGD(loss, params, lr=learning_rate)

    print("Compiling train function: ")
    train = theano.function(inputs=[X, Y, keep_prob, learning_rate], outputs=[predict, accuracy, loss], 
      updates=updates, allow_input_downcast=True)

    print("Compiling evaluate function: ")
    evaluate = theano.function(inputs=[X_emb, Y, keep_prob], outputs=[predict, accuracy, loss], 
      allow_input_downcast=True)

    self.embedding_tensor = emb_layer.params[0]
    self.train = train
    self.evaluate = evaluate
    self.params = params
Developer: dongx-duan, Project: crf, Lines: 34, Source: train_emb.py


Example 11: set_model

def set_model(args, init_w_emb, w_emb_dim, vocab_word, vocab_char, vocab_tag):
    print '\nBuilding a neural model: %s\n' % args.model

    """ neural architecture parameters """
    c_emb_dim = args.c_emb_dim
    w_hidden_dim = args.w_hidden_dim
    c_hidden_dim = args.c_hidden_dim
    output_dim = vocab_tag.size()
    window = args.window
    opt = args.opt

    """ symbol definition """
    x = T.ivector()
    c = T.ivector()
    b = T.ivector()
    y = T.ivector()
    lr = T.fscalar('lr')

    if args.model == 'char':
        return nn_char.Model(name=args.model, w=x, c=c, b=b, y=y, lr=lr,
                             init_w_emb=init_w_emb, vocab_w_size=vocab_word.size(), vocab_c_size=vocab_char.size(),
                             w_emb_dim=w_emb_dim, c_emb_dim=c_emb_dim, w_hidden_dim=w_hidden_dim,
                             c_hidden_dim=c_hidden_dim, output_dim=output_dim,
                             window=window, opt=opt)
    else:
        return nn_word.Model(name=args.model, x=x, y=y, lr=lr,
                             init_emb=init_w_emb, vocab_size=vocab_word.size(),
                             emb_dim=w_emb_dim, hidden_dim=w_hidden_dim, output_dim=output_dim,
                             window=window, opt=opt)
Developer: hiroki13, Project: neural-pos-tagger, Lines: 29, Source: train.py


Example 12: create_iter_funcs_train

def create_iter_funcs_train(l_out, lr, mntm, wd):
    X = T.tensor4('X')
    y = T.ivector('y')
    X_batch = T.tensor4('X_batch')
    y_batch = T.ivector('y_batch')

    y_hat = layers.get_output(l_out, X, deterministic=False)

    # softmax loss
    train_loss = T.mean(
        T.nnet.categorical_crossentropy(y_hat, y))

    # L2 regularization
    train_loss += wd * regularize_network_params(l_out, l2)

    train_acc = T.mean(
        T.eq(y_hat.argmax(axis=1), y))

    all_params = layers.get_all_params(l_out, trainable=True)
    updates = lasagne.updates.nesterov_momentum(
        train_loss, all_params, lr, mntm)

    train_iter = theano.function(
        inputs=[theano.Param(X_batch), theano.Param(y_batch)],
        outputs=[train_loss, train_acc],
        updates=updates,
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    return train_iter
Developer: hjweide, Project: cifar-10-uncertainty, Lines: 33, Source: iter_funcs.py
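
The training objective assembled above combines the two terms (wd is the weight-decay coefficient, and the sum runs over all regularizable network parameters):

train_loss = mean(categorical_crossentropy(y_hat, y)) + wd * sum_W ||W||_2^2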


Example 13: create_iter_funcs_valid

def create_iter_funcs_valid(l_out, bs=None, N=50, mc_dropout=False):
    X = T.tensor4('X')
    y = T.ivector('y')
    X_batch = T.tensor4('X_batch')
    y_batch = T.ivector('y_batch')

    if not mc_dropout:
        y_hat = layers.get_output(l_out, X, deterministic=True)
    else:
        if bs is None:
            raise ValueError('a fixed batch size is required for mc dropout')
        X_repeat = T.extra_ops.repeat(X, N, axis=0)
        y_sample = layers.get_output(
            l_out, X_repeat, deterministic=False)

        sizes = [X_repeat.shape[0] / X.shape[0]] * bs
        y_sample_split = T.as_tensor_variable(
            T.split(y_sample, sizes, bs, axis=0))
        y_hat = T.mean(y_sample_split, axis=1)

    valid_loss = T.mean(
        T.nnet.categorical_crossentropy(y_hat, y))
    valid_acc = T.mean(
        T.eq(y_hat.argmax(axis=1), y))

    valid_iter = theano.function(
        inputs=[theano.Param(X_batch), theano.Param(y_batch)],
        outputs=[valid_loss, valid_acc],
        givens={
            X: X_batch,
            y: y_batch,
        },
    )

    return valid_iter
Developer: hjweide, Project: cifar-10-uncertainty, Lines: 35, Source: iter_funcs.py
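
The mc_dropout branch is a Monte Carlo dropout estimate of the predictive distribution: each input is repeated N times, passed through the network with dropout still active (deterministic=False), and the N stochastic outputs are averaged:

y_hat(x) ≈ (1/N) * Σ_{n=1..N} f(x; dropout mask n)    # the T.mean over axis=1 above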


Example 14: __init__

 def __init__(self, input_params=None):
     rng = numpy.random.RandomState(23455)
     self._corpusWithEmbeddings = T.matrix("wordIndeices")
     self._dialogSentenceCount = T.ivector("dialogSentenceCount")
     self._sentenceWordCount = T.ivector("sentenceWordCount")
     
     # for list-type data
     self._layer0 = SentenceEmbeddingNN(self._corpusWithEmbeddings, self._dialogSentenceCount, self._sentenceWordCount, rng, wordEmbeddingDim=200, \
                                                      sentenceLayerNodesNum=1000, \
                                                      sentenceLayerNodesSize=[5, 200])
     
     self._average_layer  = sentenceEmbeddingAverage(self._corpusWithEmbeddings, self._dialogSentenceCount, self._sentenceWordCount, rng, wordEmbeddingDim=200)
     
     # Get sentence layer W
     semanicTransformW = theano.shared(
         numpy.asarray(
             rng.uniform(low=-0.2, high=0.2, size=(self._layer0.outputDimension, 200)),
             dtype=config.globalFloatType()
         ),
         borrow=True
     )
     self._nextSentence = T.dot(self._layer0.output, semanicTransformW)
         
     # construct the parameter array.
     self._params = [semanicTransformW] + self._layer0.params
     self._setParameters(input_params)
Developer: ivysoftware, Project: DeepEmbedding, Lines: 26, Source: dialogEmbeddingSentenceDirectAverage.py


Example 15: test_unwrapper

def test_unwrapper():
    emb_size = 5
    y_time = tt.ivector()
    y_seq_id = tt.ivector()
    x = tt.tensor3()

    emb = IdentityInput(x, size=5)

    sequn = SeqUnwrapper(20)
    sequn.connect(emb, y_time, y_seq_id)

    rng = np.random.RandomState(23455)
    conv = LeNetConvPoolLayer()
    conv.connect(sequn, rng, (3, 1, 5, emb_size), (1, 1, ))
    #prev_layer = conv

    f = theano.function([x, y_time, y_seq_id], conv.output())

    xx = np.random.randn(20, 4, emb_size)
    y_time = [3, 7, 10, 12]
    y_seq_id = [0, 0, 0, 0]
    res = f(xx, y_time, y_seq_id)
    print res.shape
    print res
    import ipdb; ipdb.set_trace()
Developer: ishalyminov, Project: xtrack2, Lines: 25, Source: layers_test.py


Example 16: compile

    def compile(self):
        '''
        Forward pass and Gradients
        '''
        # Get nicer names for parameters
        W1, W2, W3 = [self.W1] + self.params

        # FORWARD PASS
        # Embedding layer subspace
        self.z0    = T.ivector()                    # tweet in one hot

        # Use an intermediate sigmoid
        z1         = W1[:, self.z0]                 # embedding
        z2         = T.nnet.sigmoid(T.dot(W2, z1))  # subspace
        # Hidden layer
        z3         = T.dot(W3, z2)
        z4         = T.sum(z3, 1)                   # Bag of words
        self.hat_y = T.nnet.softmax(z4.T).T
        self.fwd   = theano.function([self.z0], self.hat_y)
        
        # TRAINING COST AND GRADIENTS
        # Train cost minus log probability
        self.y = T.ivector()                          # reference out
        self.F = -T.mean(T.log(self.hat_y)[self.y])   # For softmax out 
        # Update only last three parameters
        self.nablas = [] # Symbolic gradients
        self.grads  = [] # gradients
        for W in self.params:
            self.nablas.append(T.grad(self.F, W))
            self.grads.append(theano.function([self.z0, self.y], T.grad(self.F, W)))
        self.cost = theano.function([self.z0, self.y], self.F)
Developer: samiroid, Project: NLSE, Lines: 31, Source: nlse.py
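
In equation form, the forward pass above computes, for a tweet given as a vector z0 of token indices:

z1 = W1[:, z0]             # embedding lookup, one column per token
z2 = sigmoid(W2 · z1)      # projection into the task-specific subspace
z4 = Σ_j (W3 · z2)_{:,j}   # bag-of-words sum over token positions
ŷ  = softmax(z4)           # output distribution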


Example 17: __init__

 def __init__(self, input_params=None, sentenceLayerNodesNum=[150, 120], sentenceLayerNodesSize=[(2, 200), (3, 1)], negativeLambda=1, poolingSize=[(2, 1)], mode="max"):
     """
     mode is in {'max', 'average_inc_pad', 'average_exc_pad', 'sum'}
     """
     rng = numpy.random.RandomState(23455)
     self._corpusWithEmbeddings = T.matrix("wordIndeices")
     self._dialogSentenceCount = T.ivector("dialogSentenceCount")
     self._sentenceWordCount = T.ivector("sentenceWordCount")
     
     # for list-type data
     self._layer0 = layer0 = SentenceEmbeddingMultiNN(self._corpusWithEmbeddings, self._dialogSentenceCount, self._sentenceWordCount, rng, wordEmbeddingDim=200, \
                                                      sentenceLayerNodesNum=sentenceLayerNodesNum, \
                                                      sentenceLayerNodesSize=sentenceLayerNodesSize,
                                                      poolingSize=poolingSize,
                                                      mode=mode)
     
     layer1 = HiddenLayer(
         rng,
         input=layer0.output,
         n_in=layer0.outputDimension,
         n_out=layer0.outputDimension,
         activation=T.tanh
     )
     self._nextSentence = layer1.output
     self._params = layer1.params + layer0.params
     self._setParameters(input_params)
     self.negativeLambda = negativeLambda
     
     zero_count = 1
     for sentence, pooling in zip(sentenceLayerNodesSize[-1::-1], [(1, 1)] + poolingSize[-1::-1]): 
         zero_count *= pooling[0]
         zero_count += sentence[0] - 1 
     self.zero_count = zero_count - 1
Developer: ivysoftware, Project: DeepEmbedding, Lines: 33, Source: dialogEmbeddingSentenceMulticonvHiddenNegativeSampling.py


Example 18: build

    def build(self):
        x=T.ivector('x')
        y=T.ivector('y')
        lr=T.scalar('learning_rate')

        def _recurrence(x_t,s_tm1):
            s_t=T.tanh(self.U[:,x_t]+T.dot(s_tm1,self.W))
            o_t=T.nnet.softmax(T.dot(s_t,self.V))
            return [o_t[0],s_t]

        [o,s],updates=theano.scan(fn=_recurrence,
                                  sequences=x,
                                  outputs_info=[None,dict(initial=T.zeros(self.hidden_dim))],
                                  truncate_gradient=self.bptt_truncate,
                                  strict=True)
        prediction=T.argmax(o,axis=1)
        o_error=T.sum(T.nnet.categorical_crossentropy(o,y))

        # Gradients
        gparams=T.grad(o_error,self.params)
        updates=[(param,param-lr*gparam) for param,gparam in zip(self.params,gparams)]


        # Assign functions
        self.forward_propagation=theano.function([x],o)
        self.predict=theano.function([x],prediction)
        self.train=theano.function(inputs=[x,y,lr],
                                   outputs=o_error,
                                   updates=updates)
Developer: jiangnanHugo, Project: RNN-LSTM, Lines: 29, Source: rnn_theano.py


Example 19: __theano_build__

    def __theano_build__(self):
        params = self.params
        param_names = self.param_names
        hidden_dim = self.hidden_dim

        x1  = T.imatrix('x1')    # first sentence
        x2  = T.imatrix('x2')    # second sentence
        x1_mask = T.fmatrix('x1_mask')    #mask
        x2_mask = T.fmatrix('x2_mask')
        y   = T.ivector('y')     # label
        y_c = T.ivector('y_c')   # class weights 
        
        # Embdding words
        _E1 = params["E"].dot(params["W"][0]) + params["B"][0]
        _E2 = params["E"].dot(params["W"][1]) + params["B"][1]
        statex1 = _E1[x1.flatten(), :].reshape([x1.shape[0], x1.shape[1], hidden_dim])
        statex2 = _E2[x2.flatten(), :].reshape([x2.shape[0], x2.shape[1], hidden_dim])
        
        def rnn_cell(x, mx, ph, Wh):
            h = T.tanh(ph.dot(Wh) + x)
            h = mx[:, None] * h + (1-mx[:, None]) * ph
            return [h] 
            
        [h1], updates = theano.scan(
            fn=rnn_cell,
            sequences=[statex1, x1_mask],
            truncate_gradient=self.truncate,
            outputs_info=[dict(initial=T.zeros([self.batch_size, self.hidden_dim]))],
            non_sequences=params["W"][2])
        
        [h2], updates = theano.scan(
            fn=rnn_cell,
            sequences=[statex2, x2_mask],
            truncate_gradient=self.truncate,
            outputs_info=[dict(initial=h1[-1])],
            non_sequences=params["W"][3])
       
        #predict
        _s = T.nnet.softmax(h1[-1].dot(params["lrW"][0]) + h2[-1].dot(params["lrW"][1]) + params["lrb"])
        _p = T.argmax(_s, axis=1)
        _c = T.nnet.categorical_crossentropy(_s, y)
        _c = T.sum(_c * y_c)
        _l = T.sum(params["lrW"]**2)
        _cost = _c + 0.01 * _l
        
        # SGD parameters
        learning_rate = T.scalar('learning_rate')
        decay = T.scalar('decay')
        
        # Gradients and updates
        _grads, _updates = rms_prop(_cost, param_names, params, learning_rate, decay)
        
        # Assign functions
        self.bptt = theano.function([x1, x2, x1_mask, x2_mask, y, y_c], _grads)
        self.loss = theano.function([x1, x2, x1_mask, x2_mask, y, y_c], _c)
        self.weights = theano.function([x1, x2, x1_mask, x2_mask], _s)
        self.predictions = theano.function([x1, x2, x1_mask, x2_mask], _p)
        self.sgd_step = theano.function(
            [x1, x2, x1_mask, x2_mask, y, y_c, learning_rate, decay],
            updates=_updates)
Developer: wangxggc, Project: rnn-theano, Lines: 60, Source: rnn.py


Example 20: train

	def train(self, word_emb):
		X_local = T.ivector(name="X_local")
		X = T.iscalar(name="X")
		X_neg = T.ivector(name="X_neg")
		X_g = T.dvector(name="X_g")
		
		[o_error], updates = theano.scan(self.target_function, sequences=X_neg,\
										non_sequences=[word_emb, X_local, X, X_g])
		
		error_sum = T.sum(o_error)
		self.c_error = theano.function([X_local, X, X_neg, X_g], error_sum)
		
		d_word_emb = T.grad(error_sum, word_emb)
		d_W1 = T.grad(error_sum, self.W1)
		d_b1 = T.grad(error_sum, self.b1)
		d_W2 = T.grad(error_sum, self.W2)
		d_b2 = T.grad(error_sum, self.b2)
		d_Wg1 = T.grad(error_sum, self.Wg1)
		d_bg1 = T.grad(error_sum, self.bg1)
		d_Wg2 = T.grad(error_sum, self.Wg2)
		d_bg2 = T.grad(error_sum, self.bg2)
		
		# Theano updates must be (shared_variable, new_expression) pairs; the
		# original listed bare expressions and dropped a comma, so each entry
		# is rewritten as an explicit pair here.
		self.train_step = theano.function([X_local, X, X_neg, X_g], [], \
										updates=[(word_emb, word_emb-d_word_emb),
												(self.W1, self.W1-d_W1),
												(self.b1, self.b1-d_b1),
												(self.W2, self.W2-d_W2),
												(self.b2, self.b2-d_b2),
												(self.Wg1, self.Wg1-d_Wg1),
												(self.bg1, self.bg1-d_bg1),
												(self.Wg2, self.Wg2-d_Wg2),
												(self.bg2, self.bg2-d_bg2)])
Developer: zerkh, Project: BWE, Lines: 32, Source: GCWE.py
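
One detail worth flagging: the updates above move each parameter by its raw gradient, i.e. an implicit learning rate of 1.0. A hypothetical variant with an explicit step size eta would use pairs of the form (self.W1, self.W1 - eta * d_W1) instead.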



Note: The theano.tensor.ivector examples in this article were compiled by 纯净天空 from GitHub/MSDocs and similar source-code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and any reuse or redistribution should follow the corresponding project's license. Do not repost without permission.

