
Python tensor.eq Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.eq function in Python. If you have been asking yourself how exactly to use eq, what it does, or what real calling code looks like, the curated examples below should help.



Twenty code examples of the eq function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
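
Before the collected examples, here is a minimal self-contained sketch (illustrative code, not drawn from any of the projects below) of what T.eq computes: an elementwise equality test that returns a tensor of 0s and 1s (int8), very often averaged with T.mean to obtain an accuracy.

import theano
import theano.tensor as T

y_true = T.ivector('y_true')   # illustrative names
y_pred = T.ivector('y_pred')

matches = T.eq(y_pred, y_true)   # int8 tensor of 0s and 1s
accuracy = T.mean(matches)       # fraction of equal elements

f = theano.function([y_pred, y_true], [matches, accuracy],
                    allow_input_downcast=True)

m, a = f([1, 2, 3], [1, 0, 3])
print(m)  # [1 0 1]
print(a)  # 0.666...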

Example 1: compile

	def compile(self, optimizer, loss, class_mode='categorical'):
		self.optimizer = optimizer
		self.loss = objectives.get(loss)

		self.X_train = self.get_input() # symbolic variable
		self.y_train = self.get_output() # symbolic variable

		self.y = T.zeros_like(self.y_train) # symbolic variable

		train_loss = self.loss(self.y, self.y_train)

		if class_mode == 'categorical':
			train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
		elif class_mode == 'binary':
			train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
		else:
			raise Exception("Invalid class mode: " + str(class_mode))
		self.class_mode = class_mode

		#updates = self.optimizer.get_updates(train_loss, self.params)
		self.grad = T.grad(cost=train_loss, wrt=self.params, disconnected_inputs='raise')
		updates = []
		for p, g in zip(self.params, self.grad):
			# NOTE: the original source perturbs each parameter randomly instead of
			# applying the gradient g (the optimizer update above is commented out);
			# this also requires `import random` at module level.
			updates.append((p, p - random.uniform(-0.3, 1)))

		if type(self.X_train) == list:
			train_ins = self.X_train + [self.y]
		else:
			train_ins = [self.X_train, self.y]

		self._train = theano.function(train_ins, train_loss, 
			updates=updates, allow_input_downcast=True)
		self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
			updates=updates, allow_input_downcast=True)
Developer: punitshah11 | Project: diabetic_retinopathy | Lines: 34 | Source: core.py
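
The categorical branch above uses the common Theano accuracy idiom T.mean(T.eq(T.argmax(...), T.argmax(...))). A toy check of that idiom (illustrative data, not from the project above):

import numpy as np
import theano
import theano.tensor as T

y = T.fmatrix('y')          # one-hot targets
y_hat = T.fmatrix('y_hat')  # predicted class probabilities

acc = T.mean(T.eq(T.argmax(y, axis=-1), T.argmax(y_hat, axis=-1)))
f = theano.function([y, y_hat], acc)

targets = np.array([[0, 1], [1, 0]], dtype='float32')
probs = np.array([[0.2, 0.8], [0.3, 0.7]], dtype='float32')
print(f(targets, probs))  # 0.5: only the first row's argmax matches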


Example 2: AdaMaxAvg2

def AdaMaxAvg2(ws, objective, alpha=.01, beta1=.1, beta2=.001, beta3=0.01, n_accum=1):
    if n_accum == 1:
        return AdaMaxAvg(ws, objective, alpha, beta1, beta2, beta3)
    print('AdaMax_Avg2', 'alpha:', alpha, 'beta1:', beta1, 'beta2:', beta2, 'beta3:', beta3, 'n_accum:', n_accum)
    
    gs = G.ndict.T_grad(objective.sum(), ws, disconnected_inputs='raise')

    new = OrderedDict()
    
    from theano.ifelse import ifelse
    it = G.sharedf(0.)
    new[it] = it + 1
    reset = T.eq(T.mod(it,n_accum), 0)
    update = T.eq(T.mod(it,n_accum), n_accum-1)
    
    ws_avg = []
    for j in range(len(ws)):
        w_avg = {}
        for i in ws[j]:
            _w = ws[j][i]
            _g = gs[j][i]
            #_g = T.switch(T.isnan(_g),T.zeros_like(_g),_g) #remove NaN's
            mom1 = G.sharedf(_w.get_value() * 0.)
            _max = G.sharedf(_w.get_value() * 0.)
            w_avg[i] = G.sharedf(_w.get_value())
            g_sum = G.sharedf(_w.get_value() * 0.)
        
            new[g_sum] = ifelse(reset, _g, g_sum + _g)
            new[mom1] = ifelse(update, (1-beta1) * mom1 + beta1 * new[g_sum], mom1)
            new[_max] = ifelse(update, T.maximum((1-beta2)*_max, abs(new[g_sum]) + 1e-8), _max)
            new[_w] = ifelse(update, _w + alpha *  new[mom1] / new[_max], _w)
            new[w_avg[i]] = ifelse(update, beta3 * new[_w] + (1.-beta3) * w_avg[i], w_avg[i])
        ws_avg += [w_avg]   
    return new, ws_avg
Developer: gburt | Project: iaf | Lines: 34 | Source: optim.py
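
The T.eq(T.mod(it, n_accum), ...) lines above turn a step counter into periodic reset/update flags. This stripped-down sketch (illustrative names and values, assuming nothing beyond Theano itself) isolates that pattern with ifelse:

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

n_accum = 3
it = theano.shared(np.float32(0.), name='it')
acc = theano.shared(np.float32(0.), name='acc')

g = T.fscalar('g')                   # stand-in for a gradient value
reset = T.eq(T.mod(it, n_accum), 0)  # 1 on steps 0, 3, 6, ...
new_acc = ifelse(reset, g, acc + g)  # restart or keep accumulating

step = theano.function([g], new_acc,
                       updates=[(it, it + 1), (acc, new_acc)])

for v in [1., 1., 1., 5.]:
    print(step(np.float32(v)))       # 1.0, 2.0, 3.0, then reset to 5.0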


Example 3: compile

    def compile(self, optimizer, loss, class_mode="categorical", theano_mode=None):
        self.optimizer = optimizers.get(optimizer)

        self.loss = objectives.get(loss)
        weighted_loss = weighted_objective(objectives.get(loss))

        # input of model
        self.X_train = self.get_input(train=True)
        self.X_test = self.get_input(train=False)

        self.y_train = self.get_output(train=True)
        self.y_test = self.get_output(train=False)

        # target of model
        self.y = T.zeros_like(self.y_train)

        self.weights = T.ones_like(self.y_train)

        train_loss = weighted_loss(self.y, self.y_train, self.weights)
        test_loss = weighted_loss(self.y, self.y_test, self.weights)

        train_loss.name = 'train_loss'
        test_loss.name = 'test_loss'
        self.y.name = 'y'

        if class_mode == "categorical":
            train_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_train, axis=-1)))
            test_accuracy = T.mean(T.eq(T.argmax(self.y, axis=-1), T.argmax(self.y_test, axis=-1)))

        elif class_mode == "binary":
            train_accuracy = T.mean(T.eq(self.y, T.round(self.y_train)))
            test_accuracy = T.mean(T.eq(self.y, T.round(self.y_test)))
        else:
            raise Exception("Invalid class mode:" + str(class_mode))
        self.class_mode = class_mode
        self.theano_mode = theano_mode

        for r in self.regularizers:
            train_loss = r(train_loss)
        updates = self.optimizer.get_updates(self.params, self.constraints, train_loss)

        if type(self.X_train) == list:
            train_ins = self.X_train + [self.y, self.weights]
            test_ins = self.X_test + [self.y, self.weights]
            predict_ins = self.X_test
        else:
            train_ins = [self.X_train, self.y, self.weights]
            test_ins = [self.X_test, self.y, self.weights]
            predict_ins = [self.X_test]

        self._train = theano.function(train_ins, train_loss,
            updates=updates, allow_input_downcast=True, mode=theano_mode)
        self._train_with_acc = theano.function(train_ins, [train_loss, train_accuracy],
            updates=updates, allow_input_downcast=True, mode=theano_mode)
        self._predict = theano.function(predict_ins, self.y_test,
            allow_input_downcast=True, mode=theano_mode)
        self._test = theano.function(test_ins, test_loss,
            allow_input_downcast=True, mode=theano_mode)
        self._test_with_acc = theano.function(test_ins, [test_loss, test_accuracy],
            allow_input_downcast=True, mode=theano_mode)
Developer: 0xa-saline | Project: CAPTCHA-breaking | Lines: 60 | Source: models.py


Example 4: get_action_results

    def get_action_results(self,last_states,actions,time_i):
        
        #state is a boolean vector: whether or not i-th action
        #was tried already during this session
        #last output[:,end_code] always remains 1 after first being triggered
        
        
        last_state = check_list(last_states)[0]
        action = check_list(actions)[0]
        
        batch_range = T.arange(action.shape[0])

        session_active = T.eq(last_state[:,self.end_action_id],0)
        
        state_after_action = T.set_subtensor(last_state[batch_range,action],1)
        
        new_state = T.switch(
            session_active.reshape([-1,1]),
            state_after_action,
            last_state
        )
        
        session_terminated = T.eq(new_state[:,self.end_action_id],1)
        
        observation = T.concatenate([
                self.joint_data[batch_range,action,None],#uint8[batch,1]
                session_terminated.reshape([-1,1]), #whether session has been terminated by now
                T.extra_ops.to_one_hot(action,self.joint_data.shape[1]),
            ],axis=1)
        
        return new_state, observation
Developer: amoliu | Project: AgentNet | Lines: 31 | Source: __init__.py


Example 5: getRpRnTpTnForTrain0OrVal1

    def getRpRnTpTnForTrain0OrVal1(self, y, training0OrValidation1):
        # The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        # Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        # param y: y = T.itensor4('y'). Dimensions [batchSize, r, c, z]
        
        yPredToUse = self.y_pred_train if  training0OrValidation1 == 0 else self.y_pred_val
        checkDimsOfYpredAndYEqual(y, yPredToUse, "training" if training0OrValidation1 == 0 else "validation")
        
        returnedListWithNumberOfRpRnTpTnForEachClass = []
        
        for class_i in xrange(0, self._numberOfOutputClasses) :
            #Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            tensorOneAtRealPos = T.eq(y, class_i)
            tensorOneAtRealNeg = T.neq(y, class_i)

            tensorOneAtPredictedPos = T.eq(yPredToUse, class_i)
            tensorOneAtPredictedNeg = T.neq(yPredToUse, class_i)
            tensorOneAtTruePos = T.and_(tensorOneAtRealPos,tensorOneAtPredictedPos)
            tensorOneAtTrueNeg = T.and_(tensorOneAtRealNeg,tensorOneAtPredictedNeg)
                    
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtRealPos) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtRealNeg) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtTruePos) )
            returnedListWithNumberOfRpRnTpTnForEachClass.append( T.sum(tensorOneAtTrueNeg) )
            
        return returnedListWithNumberOfRpRnTpTnForEachClass
Developer: alonshmilo | Project: MedicalData_jce | Lines: 26 | Source: cnnLayerTypes.py
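
The per-class counting idiom above composes T.eq, T.neq, T.and_ and T.sum. A toy sketch (illustrative labels, not from the project above) makes the arithmetic concrete:

import theano
import theano.tensor as T

y = T.ivector('y')            # ground-truth labels
y_pred = T.ivector('y_pred')  # predicted labels
class_i = 1

real_pos = T.eq(y, class_i)            # 1 where the true label is class_i
pred_pos = T.eq(y_pred, class_i)       # 1 where the prediction is class_i
true_pos = T.and_(real_pos, pred_pos)  # 1 where both agree on class_i

f = theano.function([y, y_pred],
                    [T.sum(real_pos), T.sum(pred_pos), T.sum(true_pos)],
                    allow_input_downcast=True)

print(f([1, 1, 0, 2], [1, 0, 0, 1]))  # real-pos=2, predicted-pos=2, true-pos=1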


Example 6: __call__

    def __call__(self, input_):
        m = input_.mean()
        v = input_.std()

        new_m = T.switch(T.eq(self.m, 0.),
                         m,
                         (np.float32(1.) - self.rate) * self.m + self.rate * m)
        new_var = T.switch(T.eq(self.var, 0.),
                           v,
                           (np.float32(1.) - self.rate) * self.var + self.rate * v)

        updates = [(self.m, new_m), (self.var, new_var)]

        input_centered = (
            (input_ - new_m) / T.maximum(1., T.sqrt(new_var)))

        input_ = T.zeros_like(input_) + input_

        outs = OrderedDict(
            x=input_,
            x_centered=input_centered,
            m=new_m,
            var=new_var
        )
        return outs, updates
Developer: Jeremy-E-Johnson | Project: cortex | Lines: 25 | Source: layers.py


Example 7: test_tt

    def test_tt(self):
        sample, updates = rejection_sample(
            [self.fair_coin],
            tensor.eq(tensor.sum(tensor.eq(self.coin, self.data)), 5))
        sampler = theano.function([], sample, updates=updates)

        # TODO: this is super-slow, how can bher do this fast?
        for i in range(100):
            print(sampler())
Developer: gwtaylor | Project: MonteTheano | Lines: 7 | Source: test_distributions.py


Example 8: functions

def functions(network):
    # Symbolic variables
    X = T.tensor4()
    Y = T.ivector()

    # Non-deterministic training
    parameters = nn.layers.get_all_params(layer=network, trainable=True)   
    output = nn.layers.get_output(layer_or_layers=network, inputs=X,
        deterministic=False)
    prediction = output.argmax(-1)
    loss = T.mean(nn.objectives.categorical_crossentropy(
        predictions=output, targets=Y))
    accuracy = T.mean(T.eq(prediction, Y))
    gradient = T.grad(cost=loss, wrt=parameters)
    update = nn.updates.nesterov_momentum(loss_or_grads=gradient, 
        params=parameters, learning_rate=0.001, momentum=0.9)
    training_function = theano.function(
        inputs=[X, Y], outputs=[loss, accuracy], updates=update)

    # Non-deterministic testing
    test_function = theano.function(
        inputs=[X], outputs=prediction)

    # Deterministic validation
    det_output = nn.layers.get_output(layer_or_layers=network, inputs=X,
        deterministic=True)
    det_prediction = det_output.argmax(-1)
    det_loss = T.mean(nn.objectives.categorical_crossentropy(
        predictions=det_output, targets=Y))
    det_accuracy = T.mean(T.eq(det_prediction, Y))  
    validation_function = theano.function(
        inputs=[X, Y], outputs=[det_loss, det_accuracy])

    return training_function, validation_function, test_function
Developer: mollymr305 | Project: mnist-mc-dropout | Lines: 34 | Source: mnist_mc_dropout.py


Example 9: custom_svrg1

def custom_svrg1(loss, params, m=100, learning_rate=0.01):
    
    grads = theano.grad(loss, params)

    updates = OrderedDict()
    
    it_num = theano.shared(np.cast['int16'](0.))
    it = it_num + 1

    for param, grad in zip(params, grads):
        value = param.get_value(borrow=True)

        mu = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)

        grad_w_tilde = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)
        new_grad_w_tilde = theano.ifelse.ifelse(T.eq(it, m), grad, grad_w_tilde)

        mu_acc = theano.shared(np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable)

        updates[param] = param - learning_rate * (grad - grad_w_tilde + mu)
        updates[grad_w_tilde] = new_grad_w_tilde

        updates[mu] = theano.ifelse.ifelse(T.eq(T.mod(it, m), 0), mu_acc, mu)
        updates[mu_acc] = theano.ifelse.ifelse(T.eq(T.mod(it, m), 0), 0*mu_acc, mu_acc + grad)

    updates[it_num] = theano.ifelse.ifelse(T.eq(it, m), np.cast['int16'](1), np.cast['int16'](m))

    return updates
Developer: justanothercoder | Project: NaturalGradient | Lines: 28 | Source: custom_updates.py


Example 10: multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1

    def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
        """
        The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
        Order in the list is the natural order of the classes (ie class-0 RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
        """
        returnedListWithNumberOfRpRnPpPnForEachClass = []

        for class_i in xrange(0, self.numberOfOutputClasses):
            # Number of Real Positives, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
            vectorOneAtRealPositives = T.eq(y, class_i)
            vectorOneAtRealNegatives = T.neq(y, class_i)

            if training0OrValidation1 == 0:  # training
                yPredToUse = self.y_pred
            else:  # validation
                yPredToUse = self.y_pred_inference

            vectorOneAtPredictedPositives = T.eq(yPredToUse, class_i)
            vectorOneAtPredictedNegatives = T.neq(yPredToUse, class_i)
            vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives, vectorOneAtPredictedPositives)
            vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives, vectorOneAtPredictedNegatives)

            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtRealPositives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtRealNegatives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtTruePredictedPositives))
            returnedListWithNumberOfRpRnPpPnForEachClass.append(T.sum(vectorOneAtTruePredictedNegatives))

        return returnedListWithNumberOfRpRnPpPnForEachClass
Developer: pliu007 | Project: deepmedic | Lines: 28 | Source: cnnLayerTypes.py


Example 11: each_loss

        def each_loss(outpt, inpt):
            # y is the label sequence after blank-padding
            blank = 26
            y_nblank = T.neq(inpt, blank)
            n = T.dot(y_nblank, y_nblank)  # true number of characters
            N = 2 * n + 1  # padded label length, excluding extra trailing padding
            labels = inpt[:N]
            labels2 = T.concatenate((labels, [blank, blank]))
            sec_diag = T.neq(labels2[:-2], labels2[2:]) * T.eq(labels2[1:-1], blank)
            recurrence_relation = \
                T.eye(N) + \
                T.eye(N, k=1) + \
                T.eye(N, k=2) * sec_diag.dimshuffle((0, 'x'))

            pred_y = outpt[:, labels]

            fwd_pbblts, _ = theano.scan(
                lambda curr, accum: T.switch(T.eq(curr*T.dot(accum, recurrence_relation), 0.0),
                                             T.dot(accum, recurrence_relation),
                                             curr*T.dot(accum, recurrence_relation)),
                sequences=[pred_y],
                outputs_info=[T.eye(N)[0]]
            )
            #return fwd_pbblts
            #likelihood = fwd_pbblts[0, 0]
            likelihood = fwd_pbblts[-1, -1] + fwd_pbblts[-1, -2]
            #likelihood = T.switch(T.lt(likelihood, 1e-35), 1e-35, likelihood)
            #loss = -T.log(T.cast(likelihood, "float32"))
            #loss = 10 * (likelihood - 1) * (likelihood - 100)
            loss = (T.le(likelihood, 1.0) * (10 * (likelihood - 1) * (likelihood - 100))) \
                 + (T.gt(likelihood, 1.0) * (-T.log(T.cast(likelihood, "float32"))))
            return loss
Developer: nightinwhite | Project: Theano-NN_Starter | Lines: 31 | Source: Layer.py


Example 12: chi2_test_statistic

def chi2_test_statistic(M, Obs, K, num_M, num_Obs):
    #Getting frequencies from observations
    Ns = T.dot(Obs,T.ones((K,1)))
    p = Obs/Ns
        
    #Find the zeros so we can deal with them later
    pZEROs = T.eq(p, 0)
    mZEROs = T.eq(M, 0)
    
    #log probabilities, with -INF as log(0)
    lnM = T.log(M + mZEROs) - INF*mZEROs
    lnp = T.log(p + pZEROs) - INF*pZEROs


    #Using kroneker products so every row of M hits every row of P in the difference klnM - kln
    O_ones = T.ones((num_Obs,1))
    M_ones = T.ones((num_M,1))
    klnM = kron(lnM,O_ones)
    klnP = kron(M_ones, lnp)
    klnP_M = klnP - klnM
    kObs = kron(M_ones, Obs)
    
    G = 2.0*T.dot(klnP_M ,kObs.T)
    
    G = G*T.identity_like(G)
    G = T.dot(G,T.ones((num_M*num_Obs,1)))   
    G = T.reshape(G,(num_M,num_Obs))
    
    #The following quotient improves the convergence to chi^2 by an order of magnitude
    #source: http://en.wikipedia.org/wiki/Multinomial_test
    
    #numerator = T.dot(- 1.0/(M + 0.01),T.ones((K,1))) - T.ones((num_M,1))    
    #q1 = T.ones((num_M,num_Obs)) + T.dot(numerator,1.0/Ns.T/6.0)/(K-1.0)
        
    return G  # /q1
Developer: Underfit | Project: underfit | Lines: 35 | Source: chi2pvalue.py


Example 13: compute_cost_log_in_parallel

def compute_cost_log_in_parallel(original_rnn_outputs, labels, func, x_ends, y_ends):
	mask = T.log(1 - T.or_(T.eq(labels, T.zeros_like(labels)), T.eq(labels, shift_matrix(labels, 2))))

	initial_state = T.log(T.zeros_like(labels))
	initial_state = T.set_subtensor(initial_state[:,0], 0)

	def select_probabilities(rnn_outputs, label):
		return rnn_outputs[:,label]	

	rnn_outputs, _ = theano.map(select_probabilities, [original_rnn_outputs, labels])
	rnn_outputs = T.log(rnn_outputs.dimshuffle((1,0,2)))

	def forward_step(probabilities, last_probabilities):
		all_forward_probabilities = T.stack(
			last_probabilities + probabilities,
			log_shift_matrix(last_probabilities, 1) + probabilities,
			log_shift_matrix(last_probabilities, 2) + probabilities + mask,
		)

		result = func(all_forward_probabilities, 0)
		return result

	forward_probabilities, _ = theano.scan(fn = forward_step, sequences = rnn_outputs, outputs_info = initial_state)
	forward_probabilities = forward_probabilities.dimshuffle((1,0,2))

	def compute_cost(forward_probabilities, x_end, y_end):
		return -func(forward_probabilities[x_end-1,y_end-2:y_end])

	return theano.map(compute_cost, [forward_probabilities, x_ends, y_ends])[0]
Developer: choko | Project: ctc | Lines: 29 | Source: ctc.py


Example 14: form_dataset

def form_dataset(doc, n_in):
    """
    Given a document and the number of input units, return the vector form  of the document segmented into units of
    length (n_in + 1)
    :param doc: String : Location of doc.
    :param n_in: Number of input units of the TreeLSTM
    :return: return the vector form of the document segmented into units of length(n_in + 1)
    """
    print('Calling form_dataset()..')
    doc_obj = open(doc)
    data = tokenize(doc_obj.read().lower())
    data = data[:int(len(data)/(n_in+1)) * (n_in+1)]
    n_sen = len(data)/(n_in+1)
    data_x, data_y = np.asarray(data).reshape((n_sen, (n_in+1)))[:, :n_in], \
                     np.asarray(data).reshape((n_sen, (n_in+1)))[:, -1]
    data_x_vec = np.asarray([sentence_vec(data_x[i], word_vecs) for i in range(len(data_x))], dtype=theano.config.floatX)
    shared_x = theano.shared(np.concatenate(data_x_vec, axis=1), name='vec_data_x', borrow=True)
    shared_x_ = assert_op(shared_x, T.eq(shared_x.get_value().shape[0], vec_dims),
                          T.eq(shared_x.get_value().shape[1], n_sen*n_in))
    shared_y = theano.shared(np.asarray(sentence_vec(data_y, word_vecs),
                               dtype=theano.config.floatX), name='vec_data_y', borrow=True)
    shared_y_ = assert_op(shared_y, T.eq(shared_y.get_value().shape[0], vec_dims),
                          T.eq(shared_y.get_value().shape[1], n_sen))
    doc_obj.close()
    # Shape(vec_data_y) reshaped from Number of sentences * Vector Dimensions * 1 to Number of sentences * Vector Dims
    return shared_x_, shared_y_
Developer: Azrael1 | Project: Seq-Gen | Lines: 26 | Source: treelstmupdated.py


Example 15: pp_errors

    def pp_errors(self, y, prob , ioi):
        """Return a float representing the number of errors in the minibatch
        over the total number of examples of the minibatch ; zero one
        loss over the size of the minibatch

        :type y: theano.tensor.TensorType
        :param y: corresponds to a vector that gives for each example the
                  correct label
        ioi: the index that you are interested in.
        prob: the prob, which is p_y_given_x
        """
        #prob = 0.5
        #ioi = 1
        # check if y has the same dimension as y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError('y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type))
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            #return T.mean(T.neq(self.y_pred, y))
            inprob=self.p_y_given_x[:,ioi]
            pt1 = T.gt(inprob, prob)
            pt2 = T.eq(self.y_pred,ioi)
            pt3 = T.eq(y,ioi)
            ppn = T.sum(pt1 & pt2 & pt3)
            predn = T.sum(pt1 & pt2)
            #return (predn,ppn)
            #return T.sum(T.eq(self.y_pred, y))
            return (ppn,predn)
        else:
            raise NotImplementedError()
Developer: xukaiyi | Project: apk_analyze | Lines: 33 | Source: logistic_sgd.py


Example 16: build_model

 def build_model(self):
   print('\n... building the model with unroll=%d, backroll=%d'
         % (self.source.unroll, self.source.backroll))
   x = T.imatrix('x')
   y = T.imatrix('y')
   reset = T.scalar('reset')
   hiddens = [h['init'] for h in self.hiddens.values()]
   outputs_info = [None] * 3 + hiddens
   [losses, probs, errors, hids], updates = \
     theano.scan(self.step, sequences=[x, y], outputs_info=outputs_info)
   loss = losses.sum()
   error = errors.sum() / T.cast((T.neq(y, 255).sum()), floatX)
   hidden_updates_train = []
   hidden_updates_test = []
   for h in self.hiddens.values():
     h_train = ifelse(T.eq(reset, 0), \
       hids[-1-self.source.backroll, :], T.ones_like(h['init']))
     h_test = ifelse(T.eq(reset, 0), \
       hids[-1, :], T.ones_like(h['init']))
     hidden_updates_train.append((h['init'], h_train))
     hidden_updates_test.append((h['init'], h_test))
   updates = self.source.get_updates(loss, self.sgd_params)
   updates += hidden_updates_train
   rets = [loss, probs[-1, :], error]
   mode = theano.Mode(linker='cvm')
   train_model = theano.function([x, y, reset, self.lr], rets, \
     updates=updates, mode=mode)
   test_model = theano.function([x, y, reset], rets, \
     updates=hidden_updates_test, mode=mode)
   return train_model, test_model
Developer: ivanhe | Project: rnn | Lines: 30 | Source: model.py


Example 17: get_monitoring_channels

    def get_monitoring_channels(self, model, data, **kwargs):

        X_pure,Y_pure = data
        X_pure.tag.test_value = numpy.random.random(size=[5,784]).astype('float32')
        Y_pure.tag.test_value = numpy.random.randint(10,size=[5,1]).astype('int64')
        rval = OrderedDict()

        g = model.compressor
        d = model.discriminator

        yhat_pure = T.argmax(d.fprop(X_pure),axis=1).dimshuffle(0,'x')
        yhat_reconstructed = T.argmax(d.fprop(g.reconstruct(X_pure)),axis=1).dimshuffle(0,'x')

        rval['conviction_pure'] = T.cast(T.eq(yhat_pure,10).mean(), 'float32')
        rval['accuracy_pure'] = T.cast(T.eq(yhat_pure,Y_pure).mean(), 'float32')
        rval['inaccuracy_pure'] = 1 - rval['conviction_pure']-rval['accuracy_pure']

        rval['conviction_fake'] = T.cast(T.eq(yhat_reconstructed,10).mean(), 'float32')
        rval['accuracy_fake'] = T.cast(T.eq(yhat_reconstructed,Y_pure).mean(), 'float32')
        rval['inaccuracy_fake'] = 1 - rval['conviction_fake']-rval['accuracy_fake']

        rval['discernment_pure'] = rval['accuracy_pure']+rval['inaccuracy_pure']
        rval['discernment_fake'] = rval['conviction_fake']
        rval['discernment'] = 0.5*(rval['discernment_pure']+rval['discernment_fake'])

        # y = T.alloc(0., m, 1)  
        d_obj, g_obj = self.get_objectives(model, data)
        rval['objective_d'] = d_obj
        rval['objective_g'] = g_obj

        #monitor probability of true
        # rval['now_train_compressor'] = self.now_train_compressor
        return rval       
Developer: vinmisra | Project: adversary-compress | Lines: 33 | Source: CAN.py


Example 18: build_model

def build_model(shared_params, options, other_params):
    """
    Build the complete neural network model and return the symbolic variables
    """
    # symbolic variables
    x = tensor.matrix(name="x", dtype=floatX)
    y1 = tensor.iscalar(name="y1")
    y2 = tensor.iscalar(name="y2")

    # lstm cell
    (ht, ct) = lstm_cell(x, shared_params, options, other_params)  # gets the ht, ct
    # softmax 1 i.e. frame type prediction
    activation = tensor.dot(shared_params['softmax1_W'], ht).transpose() + shared_params['softmax1_b']
    frame_pred = tensor.nnet.softmax(activation) # .transpose()

    # softmax 2 i.e. gesture class prediction
    #

    # predicted probability for frame type
    f_pred_prob = theano.function([x], frame_pred, name="f_pred_prob")
    # predicted frame type
    f_pred = theano.function([x], frame_pred.argmax(), name="f_pred")

    # cost
    cost = ifelse(tensor.eq(y1, 1), -tensor.log(frame_pred[0, 0] + options['log_offset'])
                  * other_params['begin_cost_factor'],
                  ifelse(tensor.eq(y1, 2), -tensor.log(frame_pred[0, 1] + options['log_offset'])
                         * other_params['end_cost_factor'],
                         ifelse(tensor.eq(y1, 3), -tensor.log(frame_pred[0, 2] + options['log_offset']),
                                tensor.abs_(tensor.log(y1)))), name='ifelse_cost')

    # function for output of the currect lstm cell and softmax prediction
    f_model_cell_output = theano.function([x], (ht, ct, frame_pred), name="f_model_cell_output")
    # return the model symbolic variables and theano functions
    return x, y1, y2, f_pred_prob, f_pred, cost, f_model_cell_output
Developer: inblueswithu | Project: Theano_Trail | Lines: 35 | Source: lstm_model_3b.py


Example 19: unet_crossentropy_loss_sampled

    def unet_crossentropy_loss_sampled(y_true, y_pred):
        epsilon = 1.0e-4
        y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
        y_true = T.flatten(y_true)
        # this seems to work
        # it is super ugly though and I am sure there is a better way to do it
        # but I am struggling with theano to cooperate
        # filter the right indices
        classPos = 1
        classNeg = 0
        indPos   = T.eq(y_true, classPos).nonzero()[0]
        indNeg   = T.eq(y_true, classNeg).nonzero()[0]
        #pos      = y_true[ indPos ]
        #neg      = y_true[ indNeg ]

        # shuffle
        n = indPos.shape[0]
        indPos = indPos[UNET.srng.permutation(n=n)]
        n = indNeg.shape[0]
        indNeg = indNeg[UNET.srng.permutation(n=n)]
        # take equal number of samples depending on which class has less
        n_samples = T.cast(T.min([ indPos.shape[0], indNeg.shape[0]]), dtype='int64')
        #n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')

        indPos = indPos[:n_samples]
        indNeg = indNeg[:n_samples]
        #loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
        loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(y_pred_clipped[indNeg]))
        loss_vector = T.clip(loss_vector, epsilon, 1.0-epsilon)
        average_loss = T.mean(loss_vector)
        # NOTE: a Python `if` cannot branch on a symbolic tensor at graph-
        # construction time; T.switch expresses the NaN fallback symbolically.
        average_loss = T.switch(T.isnan(average_loss),
                                T.mean(y_pred_clipped[indPos]),
                                average_loss)
        return average_loss
Developer: Rhoana | Project: icon | Lines: 33 | Source: unet.py


Example 20: NLL

    def NLL(self, y, class_weights=None, example_weights=None, label_prop_thresh=None):
        """
        Returns the symbolic mean and instance-wise negative log-likelihood of the prediction
        of this model under a given target distribution.

        y: theano.tensor.TensorType
          corresponds to a vector that gives for each example the correct label. Labels < 0 are ignored (e.g. can
          be used for label propagation)

        class_weights: theano.tensor.TensorType
          weight vector of float32 of length  ``n_lab``. Values: ``1.0`` (default), ``w < 1.0`` (less important),
          ``w > 1.0`` (more important class)

        label_prop_thresh: float (0.5,1)
          This threshold allows unsupervised label propagation (only for examples with negative/ignore labels).
          If the predictive probability of the most likely class exceeds the threshold, this class is assumed to
          be the correct label and the training is pushed in this direction.
          Should only be used with pre-trained networks, and values <= 0.5 are disabled.
        """

        # NOTE: This whole function has an ugly problem with NaN. They arise for pred values close to 0 or 1
        # (i.e. for NNs that make very confident and usually also correct predictions) because initially the log of
        # all the whole pred tensor is taken. Later we want to use only some indices of the tensor (mask) but
        # that is not so easy done. There are two ways:
        # 1. advanced indexing: T.log(pred)[mask.nonzero()] --> fails if mask is all zero, cannot be fixed
        # 2. multiplying with the 0-1 mask: T.log(pred) * mask --> but NaN * 0 = NaN, whereas we require 0!
        # For the second option, in principle, the NaNs could be replaced by 0 using T.switch, but then the gradient
        # fails because the replaced value is disconnected from the parameters and gives NaN (mathematically
        # the gradient should correctly be 0 then; there is a Theano ticket open to request a fix).
        # So finally the best practice is to add a stabilisation to the log: T.log(pred) --> T.log(pred+eps)
        # This looks ugly, but does the task and the introduced error is completely negligible
        eps = 1e-6
        pred = self.class_probabilities  # predictive (bs, cl)
        y = y.dimshuffle(0, 'x')  # the labels (bs, 1)
        cls = T.arange(self.class_probabilities.shape[1]).dimshuffle('x', 0)  # available classes
        label_selection = T.eq(cls, y)  # selects correct labels

        if class_weights is None:
            class_weights = T.ones_like(pred)
        else:
            class_weights = class_weights.dimshuffle('x', 0)

            # Up vote block
        nll_inst_up = -T.log(pred + eps) * label_selection * class_weights
        N_up = T.sum(label_selection)  # number of labelled examples

        if label_prop_thresh is not None:  # Label propagation block
            above_thresh = pred > label_prop_thresh  # this is 1 for the class with the highest prob
            prop_mask = above_thresh * (1 - label_selection.sum(axis=1))  # don't do where training labels are available
            nll_inst_up_prop = -T.log(pred + eps) * prop_mask * class_weights
            N_up_prop = prop_mask.sum()

            nll_inst_up += nll_inst_up_prop
            N_up += N_up_prop

        nll_inst = nll_inst_up
        N_up = T.switch(T.eq(N_up, 0), 1, N_up)  # patch N to be not 0, when this is the case the sum is 0 anyway!
        nll = nll_inst.sum() / N_up

        return nll, nll_inst
Developer: ELEKTRONN | Project: ELEKTRONN | Lines: 60 | Source: perceptronlayer.py
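
The comments in this example describe two guards that T.eq helps implement: the eps-stabilised log and the zero-count patch via T.switch(T.eq(N_up, 0), 1, N_up). A small sketch (illustrative shapes and values) of both together:

import numpy as np
import theano
import theano.tensor as T

eps = 1e-6
pred = T.fmatrix('pred')  # predictive probabilities (batch, classes)
mask = T.fmatrix('mask')  # 0/1 label-selection mask

nll_inst = -T.log(pred + eps) * mask  # finite even where pred == 0
n = T.sum(mask)
n_safe = T.switch(T.eq(n, 0), 1, n)   # avoid 0/0; the sum is 0 anyway
nll = nll_inst.sum() / n_safe

f = theano.function([pred, mask], nll)
p = np.array([[0.0, 1.0]], dtype='float32')  # a zero-probability entry...
m = np.array([[0.0, 1.0]], dtype='float32')  # ...that the mask ignores
print(f(p, m))  # ~0, i.e. -log(1 + eps); crucially not NaN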



Note: The theano.tensor.eq examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.

