
Python tensor.gt Function Code Examples


This article collects and summarizes typical usage examples of the theano.tensor.gt function in Python. If you have been wondering how exactly to use gt, what it does, or what real calls to it look like, the curated examples below should help.



The following presents 20 code examples of the gt function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
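
For orientation before the examples: T.gt(a, b) builds a symbolic, elementwise "greater than" comparison whose result is an int8 tensor of 0/1 values. A minimal, self-contained usage sketch (not drawn from any of the projects below):

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
y = T.dvector('y')
mask = T.gt(x, y)                    # symbolic elementwise x > y
f = theano.function([x, y], mask)    # compile the comparison
print(f(np.array([1.0, 3.0]), np.array([2.0, 0.0])))   # -> [0 1], dtype int8

Because the result is symbolic, it is normally fed into T.switch or used as a multiplicative 0/1 mask, as the examples below show; a Python-level if on it does not branch elementwise.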

Example 1: _backward_negative_z

def _backward_negative_z(inputs, weights, normed_relevances, bias=None):
    inputs_plus = inputs * T.gt(inputs, 0)
    weights_plus = weights * T.gt(weights, 0)
    inputs_minus = inputs * T.lt(inputs, 0)
    weights_minus = weights * T.lt(weights, 0)
    # Compute weights+ * inputs- and weights- * inputs+
    negative_part_a = conv2d(
        normed_relevances, weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
    )
    negative_part_a *= inputs_minus
    negative_part_b = conv2d(
        normed_relevances, weights_minus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
    )
    negative_part_b *= inputs_plus

    together = negative_part_a + negative_part_b
    if bias is not None:
        bias_negative = bias * T.lt(bias, 0)
        bias_relevance = bias_negative.dimshuffle("x", 0, "x", "x") * normed_relevances
        # Divide bias by weight size before convolving back
        # mean across channel, 0, 1 dims (hope this is correct?)
        fraction_bias = bias_relevance / T.prod(weights.shape[1:]).astype(theano.config.floatX)
        bias_rel_in = conv2d(
            fraction_bias, T.ones_like(weights).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
        )
        together += bias_rel_in
    return together
Author: robintibor, Project: braindecode, Lines: 27, Source: heatmap.py


Example 2: calcColNormalizer

def calcColNormalizer(inMatrix):
    # Theano function for calculating logSum, i.e., computing ln(X + Y) from ln(X) and ln(Y).
    maxExp = -4950.0 
    x, y = T.fscalars(2)
    
    yMinusx = y - x  # used for the case x > y
    xMinusy = x - y  # used for the case x < y
    bigger = T.switch(T.gt(x, y), x, y)
    YSubtractX = T.switch(T.gt(x,y), yMinusx, xMinusy)       
    x_prime =  T.log(1 + T.exp(YSubtractX)) + bigger
    calcSum = T.switch(T.lt(YSubtractX, maxExp), bigger, x_prime)
    logSum = function([x, y], calcSum, allow_input_downcast=True)
    ####### end of logSum  ###############
    
    # now calculate the column-wise sum of log joints as the normalizer
    if len(inMatrix.shape) < 2:
        raise Exception("calcColNormalizer expects a 2D matrix")
    nRows, nCols = inMatrix.shape        
    columnAccumLogSum = np.zeros(nCols)        
    for col in range(nCols):
        currLogSum = np.NINF
        for j in range(nRows):
            if inMatrix[j,col] == np.NINF:
                continue
            currLogSum = logSum(currLogSum, inMatrix[j,col])             
        columnAccumLogSum[col] = currLogSum
        
    return columnAccumLogSum
Author: kevilu9116, Project: DriverMutProject, Lines: 28, Source: utilTCI.py
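
As a quick numeric check of the log-sum trick used above, the identity ln(X + Y) = m + ln(1 + exp(n - m)), where m = max(ln X, ln Y) and n = min(ln X, ln Y), can be verified in plain NumPy (illustrative values only):

import numpy as np

a, b = 1.0, 2.0                       # ln(X) and ln(Y)
bigger, smaller = max(a, b), min(a, b)
stable = bigger + np.log1p(np.exp(smaller - bigger))
naive = np.log(np.exp(a) + np.exp(b))
print(stable, naive)                  # both ~2.3133

The stable form never exponentiates a large number, which is why calcColNormalizer falls back to just the larger operand once the difference drops below maxExp.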


Example 3: T_subspacel1_slow_shrinkage

def T_subspacel1_slow_shrinkage(a,L,lam_sparse,lam_slow,small_value=.001):
    amp = T.sqrt(a[::2,:]**2 + a[1::2,:]**2 + small_value)
    #damp = amp[:,1:] - amp[:,:-1]

    # compose slow shrinkage with subspace l1 shrinkage

    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[:,1:] - amp[:,:-1]
    d2 = d1[:,1:] - d1[:,:-1]
    div = T.set_subtensor(div[:,1:-1],-d2)
    div = T.set_subtensor(div[:,0], -d1[:,0])
    div = T.set_subtensor(div[:,-1], d1[:,-1])
    slow_amp_shrinkage = 1 - (lam_slow/L)*(div/amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage,0),slow_amp_shrinkage,0)
    slow_shrinkage_prox_a = slow_amp_value*a[::2,:]
    slow_shrinkage_prox_b = slow_amp_value*a[1::2,:]

    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a**2 + slow_shrinkage_prox_b**2)
    #amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse/L)/amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage,0.),amp_shrinkage,0.)
    subspacel1_prox = T.zeros_like(a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[ ::2,:],amp_value*slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[1::2,:],amp_value*slow_shrinkage_prox_b)
    return subspacel1_prox
Author: baylabs, Project: hdl, Lines: 27, Source: theano_methods.py


Example 4: momentum_normscaled

def momentum_normscaled(loss, all_params, lr, mom, batch_size, max_norm=np.inf, weight_decay=0.0,verbose=False):
    updates = []
    #all_grads = [theano.grad(loss, param) for param in all_params]
    all_grads = theano.grad(gradient_clipper(loss),all_params)

    grad_lst = [ T.sum( (  grad / float(batch_size) )**2  ) for grad in all_grads ]
    grad_norm = T.sqrt( T.sum( grad_lst ))
    if verbose:
        grad_norm = theano.printing.Print('MOMENTUM GRAD NORM1:')(grad_norm)

    all_grads = ifelse(T.gt(grad_norm, max_norm),
                       [grads*(max_norm / grad_norm) for grads in all_grads],
                       all_grads)


    if verbose:
        grad_lst = [ T.sum( (  grad / float(batch_size) )**2  ) for grad in all_grads ]
        grad_norm = T.sqrt( T.sum( grad_lst ))
        grad_norm = theano.printing.Print('MOMENTUM GRAD NORM2:')(grad_norm)
        all_grads = ifelse(T.gt(grad_norm, np.inf),
                           [grads*(max_norm / grad_norm) for grads in all_grads],
                           all_grads)

    for param_i, grad_i in zip(all_params, all_grads):
        mparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX))
        v = mom * mparam_i - lr*(weight_decay*param_i + grad_i)

        updates.append( (mparam_i, v) )
        updates.append( (param_i, param_i + v) )

    return updates
Author: benathi, Project: nntools, Lines: 31, Source: LSTMTrainingFunctions.py


Example 5: __init__

 def __init__(self, x, lower, upper, *args, **kwargs):
     super(Uniform, self).__init__(*args, **kwargs)
     self._logp = T.log(T.switch(T.gt(x, upper), 0, T.switch(T.lt(x, lower), 0, 1/(upper - lower))))
     self._cdf = T.switch(T.gt(x, upper), 1, T.switch(T.lt(x, lower), 0, (x - lower)/(upper - lower)))
     self._add_expr('x', x)
     self._add_expr('lower', lower)
     self._add_expr('upper', upper)
Author: giangzuzana, Project: python-mle, Lines: 7, Source: __init__.py


Example 6: call

 def call(self, X):
     if type(X) is not list or len(X) != 2:
         raise Exception("SquareAttention must be called on a list of two tensors. Got: " + str(X))
         
     frame, position  = X[0], X[1]
     
     # Reshaping the input to exclude the time dimension
     frameShape = K.shape(frame)
     positionShape = K.shape(position)
     (chans, height, width) = frameShape[-3:]
     targetDim = positionShape[-1]
     frame = K.reshape(frame, (-1, chans, height, width))
     position = K.reshape(position, (-1, ) + (targetDim, ))
     
     # Applying the attention
     hw = THT.abs_(position[:, 2] - position[:, 0]) * self.scale / 2.0
     hh = THT.abs_(position[:, 3] - position[:, 1]) * self.scale / 2.0
     position = THT.maximum(THT.set_subtensor(position[:, 0], position[:, 0] - hw), -1.0)
     position = THT.minimum(THT.set_subtensor(position[:, 2], position[:, 2] + hw), 1.0)
     position = THT.maximum(THT.set_subtensor(position[:, 1], position[:, 1] - hh), -1.0)
     position = THT.minimum(THT.set_subtensor(position[:, 3], position[:, 3] + hh), 1.0)
     rX = Data.linspace(-1.0, 1.0, width)
     rY = Data.linspace(-1.0, 1.0, height)
     FX = THT.gt(rX, position[:,0].dimshuffle(0,'x')) * THT.le(rX, position[:,2].dimshuffle(0,'x'))
     FY = THT.gt(rY, position[:,1].dimshuffle(0,'x')) * THT.le(rY, position[:,3].dimshuffle(0,'x'))
     m = FY.dimshuffle(0, 1, 'x') * FX.dimshuffle(0, 'x', 1)
     m = m + self.alpha - THT.gt(m, 0.) * self.alpha
     frame = frame * m.dimshuffle(0, 'x', 1, 2)
     
     # Reshaping the frame to include time dimension
     output = K.reshape(frame, frameShape)
     
     return output
Author: fhdiaze, Project: DeepTracking, Lines: 33, Source: SquareAttention.py


Example 7: T_subspacel1_slow_shrinkage_conv

def T_subspacel1_slow_shrinkage_conv(a, L, lam_sparse, lam_slow, imshp,kshp,featshp,stride=(1,1),small_value=.001):
    featshp = (imshp[0],kshp[0],featshp[2],featshp[3]) # num images, features, szy, szx
    features = T.reshape(T.transpose(a),featshp,ndim=4)

    amp = T.sqrt(features[:,::2,:,:]**2 + features[:,1::2,:,:]**2 + small_value)
    #damp = amp[:,1:] - amp[:,:-1]

    # compose slow shrinkage with subspace l1 shrinkage

    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[1:,:,:,:] - amp[:-1,:,:,:]
    d2 = d1[1:,:,:,:] - d1[:-1,:,:,:]
    div = T.set_subtensor(div[1:-1,:,:,:], -d2)
    div = T.set_subtensor(div[0,:,:,:], -d1[0,:,:,:])
    div = T.set_subtensor(div[-1,:,:,:], d1[-1,:,:,:])
    slow_amp_shrinkage = 1 - (lam_slow / L) * (div / amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage, 0), slow_amp_shrinkage, 0)
    slow_shrinkage_prox_a = slow_amp_value * features[:, ::2, :,:]
    slow_shrinkage_prox_b = slow_amp_value * features[:,1::2, :,:]

    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a ** 2 + slow_shrinkage_prox_b ** 2)
    #amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse / L) / amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage, 0.), amp_shrinkage, 0.)
    subspacel1_prox = T.zeros_like(features)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:, ::2, :,:], amp_value * slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[:,1::2, :,:], amp_value * slow_shrinkage_prox_b)

    reshape_subspacel1_prox = T.transpose(T.reshape(subspacel1_prox,(featshp[0],featshp[1]*featshp[2]*featshp[3]),ndim=2))
    return reshape_subspacel1_prox
Author: baylabs, Project: hdl, Lines: 32, Source: theano_methods.py


Example 8: norm_col

def norm_col(w, h):
    """normalize the column vector w (Theano function).
    Apply the invert normalization on h such that w.h does not change

    Parameters
    ----------
    w: Theano vector
        vector to be normalised
    h: Ttheano vector
        vector to be normalised by the invert normalistation

    Returns
    -------
    w : Theano vector with the same shape as w
        normalised vector (w/norm)
    h : Theano vector with the same shape as h
        h*norm
    """
    norm = w.norm(2, 0)
    eps = 1e-12
    size_norm = (T.ones_like(w)).norm(2, 0)
    w = ifelse(T.gt(norm, eps),
               w/norm,
               (w+eps)/(eps*size_norm).astype(theano.config.floatX))
    h = ifelse(T.gt(norm, eps),
               h*norm,
               (h*eps*size_norm).astype(theano.config.floatX))
    return w, h
Author: rserizel, Project: groupNMF, Lines: 28, Source: base.py


Example 9: decay

 def decay(self):
     updates = []
     new_batch = ifelse(T.gt(self.batch, self.decay_batch), sharedX(0), self.batch+1)
     new_lr = ifelse(T.gt(self.batch, self.decay_batch), self.lr*self.lr_decay_factor, self.lr)
     updates.append((self.batch, new_batch))
     updates.append((self.lr, new_lr))
     return updates
Author: ColaWithIce, Project: Mozi, Lines: 7, Source: learning_method.py


Example 10: irprop_minus_updates

def irprop_minus_updates(params, grads):

    # iRprop- hyperparameters
    positiveStep = 1.2
    negativeStep = 0.5
    maxStep = 50.
    minStep = math.exp(-6)

    updates = []
    for param, gparam in zip(params, grads):
        # per-parameter state: previous gradient and current step size
        shape = param.get_value().shape
        delta = theano.shared(0.1 * numpy.ones(shape, dtype=theano.config.floatX))
        last_gparam = theano.shared(numpy.zeros(shape, dtype=theano.config.floatX))

        # sign of the change: > 0 means same direction, < 0 means a sign flip
        change = T.sgn(gparam * last_gparam)

        # T.gt/T.lt are symbolic, so select the new step size with T.switch
        # (a Python-level `if` cannot branch on a symbolic condition)
        new_delta = T.clip(
            T.switch(T.gt(change, 0),
                     delta * positiveStep,
                     T.switch(T.lt(change, 0), delta * negativeStep, delta)),
            minStep, maxStep)

        # after a sign flip, zero the stored gradient so the next step is neutral
        new_last_gparam = T.switch(T.lt(change, 0), T.zeros_like(gparam), gparam)

        # update the weights and store the state
        updates.append((param, param - T.sgn(gparam) * new_delta))
        updates.append((delta, new_delta))
        updates.append((last_gparam, new_last_gparam))

    return updates
Author: andersjo, Project: vector-semantics, Lines: 34, Source: rprop.py


Example 11: __init__

 def __init__(self, alpha, m, *args, **kwargs):
     super(Pareto, self).__init__(*args, **kwargs)
     self.alpha = alpha
     self.m = m
     self.mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf)
     self.median = m * 2.0 ** (1.0 / alpha)
     self.variance = tt.switch(tt.gt(alpha, 2), (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), np.inf)
Author: Riashat, Project: pymc3, Lines: 7, Source: continuous.py


Example 12: errors

	def errors(self, y, print_output=False):
		# check if y has same dimension of y_pred
		if y.ndim != self.y_pred.ndim:
			raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type))

		# check if y is of the correct datatype
		if y.dtype.startswith('int'):
			num_positive = T.cast(T.sum(T.eq(y,1)),'float64')
			num_predicted_positive = T.cast(T.sum(T.eq(self.y_pred,1)),'float64')
			num_correctly_predicted = T.cast(T.sum(T.eq(self.y_pred*y,1)),'float64')

			# T.gt builds a symbolic condition, so guard each division with T.switch
			# precision = True positives / (True positives + False positives)
			P = T.switch(T.gt(num_predicted_positive, 0.0),
					T.cast(num_correctly_predicted / num_predicted_positive, 'float64'),
					T.cast(0.0, 'float64'))

			# recall = True positives / (True positives + False negatives)
			R = T.switch(T.gt(num_positive, 0.0),
					T.cast(num_correctly_predicted / num_positive, 'float64'),
					T.cast(0.0, 'float64'))

			# F1 score
			F1 = T.switch(T.gt(P + R, 0.0), 2.0 * P * R / (P + R), T.cast(0.0, 'float64'))

			if (print_output):
				print("      num positive = {0}".format( num_positive ) )
				print("      num predicted positive = {0}".format( num_predicted_positive ) )
				print("      num correctly predicted = {0}".format( num_correctly_predicted ) )
				print("      precision = {0}".format(P))
				print("      recall = {0}".format(R))
				print("      F1 score = {0}".format(F1))
			return [T.mean(T.neq(self.y_pred, y)), P, R, F1]

		else:
			raise NotImplementedError()
		return
Author: vassilikitsios, Project: data_mining_tools, Lines: 35, Source: logistic_regression.py


Example 13: multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1

    def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
	"""
	The returned list has (numberOfClasses)x4 integers: >numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives< for each class (incl background).
	For class_i == 0 (backgr), what is reported is the WHOLE rp,rn,tpp,tpn. ie, as calculated considering background VS all other classes.
	Order in the list is the natural order of the classes (ie class-0-WHOLE RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN ...)
	"""
	returnedListWithNumberOfRpRnPpPnForEachClass = []

	for class_i in xrange(0, self.numberOfOutputClasses) :
		#Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
		vectorOneAtRealPositives = T.gt(y, 0) if class_i == 0 else T.eq(y, class_i)
		vectorOneAtRealNegatives = T.eq(y, 0) if class_i == 0 else T.neq(y, class_i)

		if training0OrValidation1 == 0 : #training:
			yPredToUse = self.y_pred
		else: #validation
			yPredToUse = self.y_pred_inference

		vectorOneAtPredictedPositives = T.gt(yPredToUse, 0) if class_i == 0 else T.eq(yPredToUse, class_i)
		vectorOneAtPredictedNegatives = T.eq(yPredToUse, 0) if class_i == 0 else T.neq(yPredToUse, class_i)
		vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives,vectorOneAtPredictedPositives)
		vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives,vectorOneAtPredictedNegatives)
		    
		returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtRealPositives) )
		returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtRealNegatives) )
		returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtTruePredictedPositives) )
		returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtTruePredictedNegatives) )

	return returnedListWithNumberOfRpRnPpPnForEachClass
Author: TobiasMR, Project: deepmedic, Lines: 29, Source: cnnLayerTypes.py


Example 14: group_div

def group_div(X, W, H, beta, params):
    """Compute beta divergence D(X|WH), intra-class distance
    and intra-session distance for a particular
    (class, session) couple [1]_.


    Parameters
    ----------
    X : Theano tensor
        data
    W : Theano tensor
        Bases
    H : Theano tensor
        activation matrix
    beta : Theano scalar
    params : Theano tensor
        Matrix of parameter related to class/session.
            :params[0][0]: index for the (class, session) couple
            :params[1][0]: number of vector basis related to class
            :params[1][1]: number of vector basis related to session
            :params[2]: weight on the class/session similarity constraints
            :params[3]: sessions in which class c appears
            :params[4]: classes present in session s



    Returns
    -------
    cost : Theano scalar
        total cost
    div : Theano scalar
        beta divergence D(X|WH)
    sum_cls : Theano scalar
        intra-class distance
    sum_ses : Theano scalar
        intra-session distance"""
    ind = params[0][0]
    k_cls = params[1][0]
    k_ses = params[1][1]
    lambdas = params[2]
    Sc = params[3]
    Cs = params[4]
    res_ses, up = theano.scan(
        fn=lambda Cs, prior_result: prior_result
        + eucl_dist(W[ind, :, k_cls : k_cls + k_ses], W[Cs, :, k_cls : k_cls + k_ses]),
        outputs_info=T.zeros_like(beta),
        sequences=Cs,
    )
    sum_ses = ifelse(T.gt(Cs[0], 0), res_ses[-1], T.zeros_like(beta))
    res_cls, up = theano.scan(
        fn=lambda Sc, prior_result: prior_result + eucl_dist(W[ind, :, 0:k_cls], W[Sc, :, 0:k_cls]),
        outputs_info=T.zeros_like(beta),
        sequences=Sc,
    )
    sum_cls = ifelse(T.gt(Sc[0], 0), res_cls[-1], T.zeros_like(beta))
    betaDiv = beta_div(X, W[ind].T, H, beta)

    cost = lambdas[0] * sum_cls + lambdas[1] * sum_ses + betaDiv
    return cost, betaDiv, sum_cls, sum_ses
Author: rserizel, Project: groupNMF, Lines: 59, Source: costs.py


Example 15: symGivens2

def symGivens2(a, b):
    """
    Stable Symmetric Givens rotation plus reflection

    Parameters

        a: (theano scalar) first element of a two-vector  [a; b]
        b: (theano scalar) second element of a two-vector [a; b]
    Returns

        c  cosine(theta), where theta is the implicit angle of
           rotation (counter-clockwise) in a plane-rotation
        s  sine(theta)
        d  two-norm of [a; b]

    Description:
        This method gives c and s such that
            [ c  s ][a] = [d],
            [ s -c ][b]   [0]
      where d = two norm of vector [a, b],
            c = a / sqrt(a^2 + b^2) = a / d,
            s = b / sqrt(a^2 + b^2) = b / d.
      The implementation guards against overflow in computing
         sqrt(a^2 + b^2).

      SEE ALSO:
         (1) Algorithm 4.9, stable *unsymmetric* Givens
         rotations in Golub and van Loan's book Matrix
         Computations, 3rd edition.
         (2) MATLAB's function PLANEROT.

      Observations:
          Implementing this function as a single op in C might improve speed
          considerably ..
    """
    c_branch1 = T.switch(T.eq(a, constantX(0)), constantX(1), T.sgn(a))
    c_branch21 = (a / b) * T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2)
    c_branch22 = T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2)

    c_branch2 = T.switch(T.eq(a, constantX(0)), constantX(0), T.switch(T.gt(abs(b), abs(a)), c_branch21, c_branch22))
    c = T.switch(T.eq(b, constantX(0)), c_branch1, c_branch2)

    s_branch1 = T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2)
    s_branch2 = (b / a) * T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2)
    s = T.switch(
        T.eq(b, constantX(0)),
        constantX(0),
        T.switch(T.eq(a, constantX(0)), T.sgn(b), T.switch(T.gt(abs(b), abs(a)), s_branch1, s_branch2)),
    )

    d_branch1 = b / (T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2))
    d_branch2 = a / (T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2))
    d = T.switch(
        T.eq(b, constantX(0)),
        abs(a),
        T.switch(T.eq(a, constantX(0)), abs(b), T.switch(T.gt(abs(b), abs(a)), d_branch1, d_branch2)),
    )
    return c, s, d
Author: scyoyo, Project: pylearn, Lines: 58, Source: basic.py
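
The rotation identity stated in the docstring can be sanity-checked in plain NumPy, independently of Theano (illustrative values only):

import numpy as np

a, b = 3.0, 4.0
d = np.hypot(a, b)        # two-norm of [a; b] = 5.0
c, s = a / d, b / d
print(c * a + s * b)      # 5.0 -> reproduces d
print(s * a - c * b)      # 0.0 -> second row annihilates b

The branches in symGivens2 exist only to compute the same c, s, d without ever forming a**2 + b**2 directly, which avoids overflow when a or b is very large.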


Example 16: new_range

def new_range(overflow, overflow_1, max_overflow):
    
    # the goal is to update the range of the vector 
    # we know the overflow rates associated with range (overflow) 
    # and range-1 (overflow_1)
    # if (overflow > max_overflow): increment range
    # else if (overflow_1 < max_overflow): decrement range
    return T.switch(T.gt(overflow, max_overflow), 1, 
        T.switch(T.gt(overflow_1, max_overflow), 0, - 1))
Author: MatthieuCourbariaux, Project: deep-learning-multipliers, Lines: 9, Source: format.py
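
A small usage sketch of new_range, assuming the function above is in scope; the compiled function returns +1 (grow the range), 0 (keep it), or -1 (shrink it):

import theano
import theano.tensor as T

overflow, overflow_1, max_overflow = T.fscalars(3)
step = theano.function([overflow, overflow_1, max_overflow],
                       new_range(overflow, overflow_1, max_overflow),
                       allow_input_downcast=True)

print(step(0.30, 0.20, 0.25))   #  1: too many overflows, increment the range
print(step(0.20, 0.30, 0.25))   #  0: current range is the smallest safe one
print(step(0.10, 0.20, 0.25))   # -1: even range-1 stays under the limit, decrement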


Example 17: __call__

    def __call__(self, input): 
        mean = input.mean(self.axes, keepdims=True) 
        std = input.std(self.axes, keepdims=True) + self.epsilon 

        # Don't batch-normalise a single data point
        mean = ifelse(T.gt(input.shape[0], 1), mean, T.zeros(mean.shape, dtype=mean.dtype))
        std  = ifelse(T.gt(input.shape[0], 1), std, T.ones(std.shape, dtype=std.dtype))

        return (input - mean) * T.addbroadcast((self.gamma / std) + self.beta, *self.axes)
Author: Brimborough, Project: deep-motion-analysis, Lines: 9, Source: BatchNormLayer.py


Example 18: rprop

def rprop(param,learning_rate,gparam,mask,updates,current_cost,previous_cost,
          eta_plus=1.2,eta_minus=0.5,max_delta=50, min_delta=10e-6):
    previous_grad = sharedX(numpy.ones(param.shape.eval()),borrow=True)
    delta = sharedX(learning_rate * numpy.ones(param.shape.eval()),borrow=True)
    previous_inc = sharedX(numpy.zeros(param.shape.eval()),borrow=True)
    zero = T.zeros_like(param)
    one = T.ones_like(param)
    change = previous_grad * gparam

    new_delta = T.clip(
            T.switch(
                T.eq(gparam,0.),
                delta,
                T.switch(
                    T.gt(change,0.),
                    delta*eta_plus,
                    T.switch(
                        T.lt(change,0.),
                        delta*eta_minus,
                        delta
                    )
                )
            ),
            min_delta,
            max_delta
    )
    new_previous_grad = T.switch(
            T.eq(mask * gparam,0.),
            previous_grad,
            T.switch(
                T.gt(change,0.),
                gparam,
                T.switch(
                    T.lt(change,0.),
                    zero,
                    gparam
                )
            )
    )
    inc = T.switch(
            T.eq(mask * gparam,0.),
            zero,
            T.switch(
                T.gt(change,0.),
                - T.sgn(gparam) * new_delta,
                T.switch(
                    T.lt(change,0.),
                    zero,
                    - T.sgn(gparam) * new_delta
                )
            )
    )

    updates.append((previous_grad,new_previous_grad))
    updates.append((delta,new_delta))
    updates.append((previous_inc,inc))
    return param + inc * mask
Author: nitbix, Project: ensemble-testing, Lines: 57, Source: mlp-old.py


Example 19: objective

def objective(y_true, y_pred, P, Q, alpha=0., beta=0.15, dbeta=0., gamma=0.01, gamma1=-1., poos=0.23, eps=1e-6):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''

    beta = np.float32(beta)
    dbeta = np.float32(dbeta)
    gamma = np.float32(gamma)
    poos = np.float32(poos)
    eps = np.float32(eps)

    # scale preds so that the class probas of each sample sum to 1
    y_pred += eps
    y_pred /= y_pred.sum(axis=-1, keepdims=True)

    y_true = T.cast(y_true.flatten(), 'int64')
    y1 = T.and_(T.gt(y_true, 0), T.le(y_true, Q))  # in-set
    y0 = T.or_(T.eq(y_true, 0), T.gt(y_true, Q))  # out-of-set or unlabeled
    y0sum = y0.sum() + eps  # number of oos
    y1sum = y1.sum() + eps  # number of in-set
    # we want to reduce the cross-entropy of the labeled data
    # convert all oos/unlabeled to label=0
    cost0 = T.nnet.categorical_crossentropy(y_pred, T.switch(y_true <= Q, y_true, 0))
    cost0 = T.dot(y1, cost0) / y1sum  # average cost per labeled example

    if alpha:
        cost1 = T.nnet.categorical_crossentropy(y_pred, y_pred)
        cost1 = T.dot(y0, cost1) / y0sum  # average entropy per out-of-set example
        cost0 += alpha*cost1

    # we want to increase the average entropy in each batch
    # average over the batch
    if beta:
        y_pred_avg0 = T.dot(y0, y_pred) / y0sum
        y_pred_avg0 = T.clip(y_pred_avg0, eps, np.float32(1) - eps)
        y_pred_avg0 /= y_pred_avg0.sum(axis=-1, keepdims=True)
        cost2 = T.nnet.categorical_crossentropy(y_pred_avg0.reshape((1,-1)), P-dbeta)[0] # [None,:]
        cost2 = T.switch(y0sum > 0.5, cost2, 0.)  # ignore cost2 if no samples
        cost0 += beta*cost2

    # binary classifier score
    if gamma:
        y_pred0 = T.clip(y_pred[:,0], eps, np.float32(1) - eps)
        if gamma1 < 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot(np.float32(1)-poos*y0.T,T.log(np.float32(1)-y_pred0))
            cost3 /= y_pred.shape[0]
            cost0 += gamma*cost3
        elif gamma1 > 0.:
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0,T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost31 =  - T.dot(y1,T.log(np.float32(1)-y_pred0))
            cost31 /= y1sum
            cost0 += gamma*cost3 + gamma1*cost31
        else:  # gamma1 == 0.
            cost3 = - T.dot(poos*y0,T.log(y_pred0)) - T.dot((np.float32(1)-poos)*y0, T.log(np.float32(1)-y_pred0))
            cost3 /= y0sum
            cost0 += gamma*cost3
    return cost0
Author: fulldecent, Project: LRE, Lines: 57, Source: ladder.py


Example 20: tukey_biweight

def tukey_biweight(predictions, targets, c=4.685, s=1.4826):
    """
    Tukey's biweight robust cost function, expressed in Theano.
    :param predictions: Prediction tensor
    :param targets: Target tensor
    :param c: Tukey tuning constant
    :param s: Consistency scale parameter
    :return: Cost function
    """
    # Flatten input to make calc easier
    pred = predictions.flatten(2)
    target = targets.flatten(2)
    # Compute mask
    mask = T.gt(target, 0)
    # Compute n of valid pixels
    n_valid = T.sum(mask, axis=1)
    # Apply the mask
    m_pred = pred * mask
    m_t = T.switch(mask, target, 0)

    def median(tensor):
        """
        MAD tensor from https://groups.google.com/forum/#!topic/theano-users/I4eHjbAetEQ
        :param tensor: Input tensor
        :return: Median expression
        """
        tensor = tensor.flatten(1)
        return T.switch(T.eq((tensor.shape[0] % 2), 0),
                        # if even vector
                        T.mean(T.sort(tensor)[((tensor.shape[0] // 2) - 1): ((tensor.shape[0] // 2) + 1)]),
                        # if odd vector
                        T.sort(tensor)[tensor.shape[0] // 2])

    def mad(tensor):
        """
        Median absolute deviation
        :param tensor: Input tensor
        :return: MAD
        """
        med = median(tensor=tensor)
        return median(T.abs_(tensor - med))

    # Residual
    r_i = (m_pred - m_t)
    # r_i = r_i / (s * mad(r_i))
    r_i = r_i / r_i.std()
    # Compute the masking vectors
    tukey_mask = T.gt(T.abs_(r_i), c)

    # Cost
    cost = (c ** 2 / 6) * (1-(1 - (r_i / c) ** 2) ** 3)
    # Aggregate
    return T.sum(T.sum(T.switch(tukey_mask, (c ** 2) / 6., cost), axis=1)) / T.maximum((T.sum(n_valid)), 1)
Author: sebastian-schlecht, Project: im2vol, Lines: 53, Source: losses.py



Note: The theano.tensor.gt examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Please do not repost without permission.

