
Python tensor.var Function Code Examples


This article collects typical usage examples of Python's theano.tensor.var function. If you are wondering how var is used in practice, or are looking for concrete working examples, the curated snippets below should help.



Twenty code examples of the var function are shown below, ordered by popularity.
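
Before the collected examples, here is a minimal, self-contained sketch of the function itself (it assumes only that Theano and NumPy are installed; the variable names are my own): T.var builds a symbolic variance expression, optionally along an axis, and theano.function compiles it into a callable.

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    total_var = T.var(x)            # variance over all elements
    row_var = T.var(x, axis=1)      # variance of each row
    f = theano.function([x], [total_var, row_var])

    data = np.array([[1., 2., 3.],
                     [4., 6., 8.]], dtype=theano.config.floatX)
    total, per_row = f(data)
    print(per_row)                  # -> [0.6667 2.6667] (population variance, like numpy's ddof=0 default)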

Example 1: computeA

    def computeA(self, symmetric_double_encoder, params):

        regularization = 0
        if self._layer == -1:
            for layer in symmetric_double_encoder:
                hidden_x = layer.output_forward_x
                hidden_y = layer.output_forward_y

                cov_x = Tensor.dot(hidden_x, hidden_x.T)
                cov_y = Tensor.dot(hidden_y, hidden_y.T)

                regularization += Tensor.mean(Tensor.sum(abs(cov_x), axis=1, dtype=Tensor.config.floatX)) + Tensor.mean(
                    Tensor.sum(abs(cov_y), axis=1, dtype=Tensor.config.floatX))

        elif self._layer < len(symmetric_double_encoder):
            hidden_x = symmetric_double_encoder[self._layer].output_forward_x
            hidden_y = symmetric_double_encoder[self._layer].output_forward_y

            var_x = Tensor.var(hidden_x, axis=1)
            var_y = Tensor.var(hidden_y, axis=1)

            norm_x = Tensor.mean(Tensor.sum(hidden_x ** 2, axis=1, dtype=Tensor.config.floatX))
            norm_y = Tensor.mean(Tensor.sum(hidden_y ** 2, axis=1, dtype=Tensor.config.floatX))

            regularization -= norm_x
            regularization -= norm_y

            #
            # cov_x = Tensor.dot(hidden_x.T, hidden_x)
            # cov_y = Tensor.dot(hidden_y.T, hidden_y)
            #
            # regularization -= ((Tensor.sum(abs(cov_x))) + (Tensor.sum(abs(cov_y))))

        return self.weight * regularization
Author: aviveise | Project: double_encoder | Lines: 34 | Source: variance_regularization.py


Example 2: build

 def build(self, output, tparams=None, BNparams=None):
     if self.BN_mode:
         self.BN_eps = npt(self.BN_eps)
         if not hasattr(self, 'BN_mean'):
             self.BN_mean = T.mean(output)
         if not hasattr(self, 'BN_std'):
             m2 = (1 + 1 / (T.prod(output.shape) - 1)).astype(floatX)
             self.BN_std = T.sqrt(m2 * T.var(output) + self.BN_eps)
         if self.BN_mode == 2:
             t_mean = T.mean(output, axis=[0, 2, 3], keepdims=True)
             t_var = T.var(output, axis=[0, 2, 3], keepdims=True)
             BN_mean = BNparams[p_(self.prefix, 'mean')].dimshuffle(
                 'x', 0, 'x', 'x')
             BN_std = BNparams[p_(self.prefix, 'std')].dimshuffle(
                 'x', 0, 'x', 'x')
             output = ifelse(
                 self.training,
                 (output - t_mean) / T.sqrt(t_var + self.BN_eps),
                 (output - BN_mean) / BN_std)
             output *= tparams[p_(self.prefix, 'BN_scale')].dimshuffle(
                 'x', 0, 'x', 'x')
             output += tparams[p_(self.prefix, 'BN_shift')].dimshuffle(
                 'x', 0, 'x', 'x')
         elif self.BN_mode == 1:
             t_mean = T.mean(output)
             t_var = T.var(output)
             output = ifelse(
                 self.training,
                 (output - t_mean) / T.sqrt(t_var + self.BN_eps),
                 ((output - BNparams[p_(self.prefix, 'mean')])
                  / BNparams[p_(self.prefix, 'std')]))
             output *= tparams[p_(self.prefix, 'BN_scale')]
             output += tparams[p_(self.prefix, 'BN_shift')]
     self.output = self.activation(output)
Author: wufangjie | Project: dnn | Lines: 34 | Source: layers.py


Example 3: __init__

    def __init__(self, network):
        self.network = network
        self.parameters = network.parameters

        num_trials = self.parameters.num_trials
        n_layers = network.n_layers
        self.channels = {}

        for channel in self.training_values:
            self.channels[channel] = np.zeros((n_layers, num_trials))
        for channel in self.training_mean_std:
            self.channels[channel] = np.zeros((n_layers, num_trials, 2))

        outputs = []

        for layer in range(n_layers):
            if layer == 0:
                X = self.network.X
            else:
                X = self.network.Y[layer-1]
            Y = self.network.Y[layer]
            Q = self.network.Q[layer]
            W = self.network.W[layer]
            theta = self.network.theta[layer]
            y_bar = Y.mean()
            Cyy_bar = (Y.T.dot(Y)/network.parameters.batch_size).mean()
            outputs.extend([y_bar, Cyy_bar])

            X_rec = Y.dot(Q.T)
            X_rec_norm = T.sqrt(T.sum(T.sqr(X_rec), axis=1, keepdims=True))
            X_norm = T.sqrt(T.sum(T.sqr(X), axis=1, keepdims=True))
            X_rec_bar = X_rec_norm.mean()
            X_rec_std = X_rec_norm.std()
            outputs.extend([X_rec_bar, X_rec_std])

            X_bar = X_norm.mean()
            X_std = X_norm.std()
            outputs.extend([X_bar, X_std])

            SNR_Norm = T.mean(T.var(X,axis=0))/T.mean(T.var(X-X_rec*X_norm/X_rec_norm,axis=0))
            SNR = T.mean(T.var(X,axis=0))/T.mean(T.var(X-X_rec_norm,axis=0))
            outputs.extend([SNR, SNR_Norm])
            
            Q_norm = T.sqrt(T.sum(T.sqr(Q), axis=0))
            Q_bar = Q_norm.mean()
            Q_std = Q_norm.std()
            outputs.extend([Q_bar, Q_std])

            W_bar = W.mean()
            W_std = W.std()
            outputs.extend([W_bar, W_std])

            theta_bar = theta.mean()
            theta_std = theta.std()
            outputs.extend([theta_bar, theta_std])

        self.f = theano.function([], outputs)
Author: JesseLivezey | Project: SAILNet_STDP | Lines: 57 | Source: monitor.py


Example 4: instance

    def instance(self, train_x, infer_x, dropout=None, epsilon=1e-8, **kwargs):
        """Returns (train_output, inference_output, statistics_updates, train_reconstruction, infer_reconstruction)"""

        # dropout
        dropout = dropout or 0.
        mask = self.srng.binomial(n=1, p=1 - dropout, size=train_x.shape)
        # cast because int * float32 = float64 which does not run on GPU
        train_x = train_x * T.cast(mask, theano.config.floatX)

        # outputs with batch-specific normalization
        train_lin_output = T.dot(train_x, self.t_W) + self.t_b
        train_lin_output.name = self.subname("trainLinOutput")
        batch_mean = T.mean(train_lin_output, axis=0)
        offset_output = train_lin_output - batch_mean
        batch_var = T.var(offset_output, axis=0)
        batch_sd = T.sqrt(batch_var + epsilon)
        normalized_lin_output = offset_output / batch_sd
        train_output = self.activation_fn(self.gamma * normalized_lin_output + self.beta)
        train_output.name = self.subname("trainOutput")

        # reconstruct batch-specific output
        W_T = self.t_W.T
        W_T.name = self.subname("W_T")
        recon_lin_output = T.dot(train_output, W_T) + self.t_decode_b
        recon_lin_output.name = self.subname("reconLinOutput")
        decode_batch_mean = T.mean(recon_lin_output, axis=0)
        recon_offset_output = recon_lin_output - decode_batch_mean
        decode_batch_var = T.var(recon_offset_output, axis=0)
        decode_batch_sd = T.sqrt(decode_batch_var + epsilon)
        normalized_recon_lin_output = recon_offset_output / decode_batch_sd
        reconstructed_output = self.activation_fn(self.decode_gamma * normalized_recon_lin_output + self.decode_beta)

        # outputs with rolling-average normalization
        infer_lin_output = T.dot(infer_x, self.t_W) + self.t_b
        infer_lin_output.name = self.subname("inferLinOutput")
        sd = T.sqrt(self.variance + epsilon)
        normalized_infer_lin_output = infer_lin_output - self.mean
        inference_output = self.activation_fn(self.gamma / sd * normalized_infer_lin_output + self.beta)
        inference_output.name = self.subname("inferenceOutput")

        # reconstruct inference output
        recon_infer_lin_output = T.dot(inference_output, W_T) + self.t_decode_b
        recon_infer_lin_output.name = self.subname("reconInferLinOutput")
        decode_sd = T.sqrt(self.decode_variance + epsilon)
        normalized_recon_infer_lin_output = recon_infer_lin_output - self.decode_mean
        recon_infer_output = self.activation_fn(self.decode_gamma / decode_sd * normalized_recon_infer_lin_output + self.decode_beta)

        # save exponential moving average for batch mean/variance
        statistics_updates = [
            (self.mean, self.alpha * self.mean + (1.0 - self.alpha) * batch_mean),
            (self.variance, self.alpha * self.variance + (1.0 - self.alpha) * batch_var),
            (self.decode_mean, self.alpha * self.decode_mean + (1.0 - self.alpha) * decode_batch_mean),
            (self.decode_variance, self.alpha * self.decode_variance + (1.0 - self.alpha) * decode_batch_var),
        ]

        return train_output, inference_output, statistics_updates, reconstructed_output, recon_infer_output
Author: yueranyuan | Project: vector_edu | Lines: 56 | Source: net.py


Example 5: f_prop

 def f_prop(self, x):
     if x.ndim == 2:
         mean = T.mean(x, axis=0, keepdims=True)
         std = T.sqrt(T.var(x, axis=0, keepdims=True)+self.epsilon)
     elif x.ndim == 4:
         mean = T.mean(x, axis=(0,2,3), keepdims=True)
         std = T.sqrt(T.var(x, axis=(0,2,3), keepdims=True)+self.epsilon)
     
     normalized_x = (x-mean)/std
     self.z = self.gamma*normalized_x+self.beta
     return self.z
Author: rarilurelo | Project: ail | Lines: 11 | Source: cnn_cifar.py
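
Stripped of bookkeeping, the batch- and layer-normalization snippets in this collection all build the same transform from T.mean and T.var:

    y = \gamma \, \frac{x - \mu}{\sqrt{\sigma^{2} + \epsilon}} + \beta

where \mu and \sigma^2 are the mean and (population) variance taken over the normalization axes, \epsilon keeps the division numerically safe, and \gamma, \beta are the learned scale and shift. The variants differ only in which axes are reduced and in how running statistics are tracked for inference.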


Example 6: get_stats

def get_stats(input, stat=None):
    """
    Returns a dictionary mapping the name of the statistic to the result on the input.
    Currently gets mean, var, std, min, max, l1, l2.

    Parameters
    ----------
    input : tensor
        Theano tensor to grab stats for.
    stat : str or list(str), optional
        Name(s) of the statistics to keep; if None, all of them are returned.

    Returns
    -------
    dict
        Dictionary of all the statistics expressions {string_name: theano expression}
    """
    stats = {
        'mean': T.mean(input),
        'var': T.var(input),
        'std': T.std(input),
        'min': T.min(input),
        'max': T.max(input),
        'l1': input.norm(L=1),
        'l2': input.norm(L=2),
        #'num_nonzero': T.sum(T.nonzero(input)),
    }
    stat_list = raise_to_list(stat)
    compiled_stats = {}
    if stat_list is None:
        return stats

    for stat in stat_list:
        if isinstance(stat, string_types) and stat in stats:
            compiled_stats.update({stat: stats[stat]})
    return compiled_stats
Author: EqualInformation | Project: OpenDeep | Lines: 34 | Source: statistics.py
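
A usage sketch for get_stats (data and names are hypothetical; it assumes the snippet's helpers raise_to_list and string_types are importable alongside it):

    import numpy as np
    import theano
    import theano.tensor as T

    X = T.matrix('X')
    exprs = get_stats(X, stat=['mean', 'var'])   # keep only the requested statistics
    f = theano.function([X], [exprs['mean'], exprs['var']])

    data = np.random.randn(10, 5).astype(theano.config.floatX)
    mean_val, var_val = f(data)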


Example 7: layer_normalization

def layer_normalization(x, bias=None, scale=None, eps=1e-5):
  """
  Layer Normalization, https://arxiv.org/abs/1607.06450
  x is mean and variance normalized along its feature dimension.
  After that, we allow a bias and a rescale. This is supposed to be trainable.
  :param x: 3d tensor (time,batch,dim) (or any ndim, last dim is expected to be dim)
  :param bias: 1d tensor (dim) or None
  :param scale: 1d tensor (dim) or None
  """
  mean = T.mean(x, axis=x.ndim - 1, keepdims=True)
  std = T.sqrt(T.var(x, axis=x.ndim - 1, keepdims=True) + numpy.float32(eps))
  assert mean.ndim == std.ndim == x.ndim
  output = (x - mean) / std
  assert output.ndim == x.ndim
  if scale is not None:
    assert scale.ndim == 1
    scale = scale.dimshuffle(*(('x',) * (x.ndim - 1) + (0,)))
    assert scale.ndim == x.ndim
    output = output * scale
  if bias is not None:
    assert bias.ndim == 1
    bias = bias.dimshuffle(*(('x',) * (x.ndim - 1) + (0,)))
    assert bias.ndim == x.ndim
    output = output + bias
  return output
Author: atuxhe | Project: returnn | Lines: 25 | Source: TheanoUtil.py
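
A minimal usage sketch (shapes and names are hypothetical; scale and bias must be 1-d and match the trailing feature dimension):

    import numpy
    import theano
    import theano.tensor as T

    dim = 4
    x = T.tensor3('x')   # (time, batch, dim)
    scale = theano.shared(numpy.ones(dim, dtype=theano.config.floatX))
    bias = theano.shared(numpy.zeros(dim, dtype=theano.config.floatX))
    f = theano.function([x], layer_normalization(x, bias=bias, scale=scale))

    out = f(numpy.random.randn(7, 2, dim).astype(theano.config.floatX))   # normalized over the last axis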


Example 8: process

    def process(self, input, tparams, BNparams):
        mode = 'full' if self.border_mode == 'same' else self.border_mode
        output = conv.conv2d(
            input=input,
            filters=tparams[p_(self.prefix, 'W')],
            image_shape=[self.batch_size, self.n_in[0]] + self.image_shape,
            filter_shape=[self.n_out] + self.n_in,
            border_mode=mode,
            subsample=self.stride)

        if self.border_mode == 'same':
            a1 = (self.filter_size[0] - 1) // 2
            b1 = (self.filter_size[1] - 1) // 2
            a2 = self.filter_size[0] - a1
            b2 = self.filter_size[1] - b1
            if a2 == 1:
                if b2 == 1:
                    output = output[:, :, a1:, b1:]
                else:
                    output = output[:, :, a1:, b1:-b2+1]
            else:
                if b2 == 1:
                    output = output[:, :, a1:-a2+1, b1:]
                else:
                    output = output[:, :, a1:-a2+1, b1:-b2+1]

        if self.with_bias:
            output += tparams[p_(self.prefix, 'b')].dimshuffle('x', 0, 'x', 'x')

        self.BN_mean = T.mean(output, axis=[0, 2, 3])
        m2 = (1 + 1 / (T.prod(output.shape) / self.n_out - 1)).astype(floatX)
        self.BN_std = T.sqrt(m2 * T.var(output, axis=[0, 2, 3])
                             + npt(self.BN_eps))
        return output
Author: wufangjie | Project: dnn | Lines: 34 | Source: layers.py


Example 9: batch_norm

        def batch_norm(X, gamma, beta, m_shared, v_shared, test, add_updates):
            if X.ndim > 2:
                output_shape = X.shape
                X = X.flatten(2)
 
            if test is False:
                m = T.mean(X, axis=0, keepdims=True)
                v = T.sqrt(T.var(X, axis=0, keepdims=True) + self.epsilon)
                
                mulfac = 1.0/1000
                if m_shared in add_updates:
                    add_updates[m_shared] = (1.0-mulfac)*add_updates[m_shared] + mulfac*m
                    add_updates[v_shared] = (1.0-mulfac)*add_updates[v_shared] + mulfac*v
                else:
                    add_updates[m_shared] = (1.0-mulfac)*m_shared + mulfac*m
                    add_updates[v_shared] = (1.0-mulfac)*v_shared + mulfac*v
            else:
                m = m_shared
                v = v_shared
            
            X_hat = (X - m) / v
            y = gamma*X_hat + beta
 
            if X.ndim > 2:
                y = T.reshape(y, output_shape)
            return y
Author: Carps | Project: Theano-Lights | Lines: 26 | Source: lm_lstm_bn.py


Example 10: ZCA

def ZCA(data, n_component=2):
    '''
    m is the number of data points
    n is the dimension of the data

    :param data: <numpy matrix, (m,n)> input data
    :param n_component: <int> number of dimension to be extracted
    :return:
    '''

    # data standardization
    x = T.matrix('x')
    eps = T.scalar('eps')
    y = (x - T.mean(x, axis=0)) / T.sqrt(T.var(x) + eps)
    standardize = th.function([x, eps], y)

    # zca whitening
    x_n = T.matrix('x_n')  # normalized input
    eps2 = T.scalar('eps2')  # small esp to prevent div by zero
    x_cov = T.dot(x_n.T, x_n) / x_n.shape[0]  # covariance of input
    u, s, v = T.nlinalg.svd(x_cov)

    z = T.dot(T.dot(u, T.nlinalg.diag(1. / T.sqrt(s + eps2))), u.T)
    x_zca = T.dot(x_n, z.T[:, :n_component])
    zca_whiten = th.function([x_n, eps2], x_zca)
    return  zca_whiten(standardize(data, 0.1), 0.01)
Author: ticksonW | Project: Test | Lines: 26 | Source: kmeans.py
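
A quick interface check on random data (sizes are hypothetical):

    import numpy
    import theano

    data = numpy.random.randn(100, 10).astype(theano.config.floatX)
    whitened = ZCA(data, n_component=2)   # standardize, then ZCA-whiten down to 2 components
    print(whitened.shape)                 # -> (100, 2)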


Example 11: layer_var

 def layer_var(self):
     # square of L2 norm ; one regularization option is to enforce
     # square of L2 norm to be small
     var = []
     for layer in self.layers:
         var.append(T.var(layer.W))
     return var
Author: sunits | Project: Kaldi_stuff | Lines: 7 | Source: MLP.py


Example 12: add_param

 def add_param(self, param, name="", constraints=True,
               custom_update=None, custom_update_normalized=False, custom_update_exp_average=0,
               custom_update_condition=None, custom_update_accumulate_batches=None):
   """
   :type param: theano.SharedVariable
   :type name: str
   :rtype: theano.SharedVariable
   """
   param = super(Layer, self).add_param(param, name)
   if custom_update:
     # Handled in Device and Updater.
     param.custom_update = custom_update
     param.custom_update_normalized = custom_update_normalized
     param.custom_update_exp_average = custom_update_exp_average
     param.custom_update_condition = custom_update_condition
     param.custom_update_accumulate_batches = custom_update_accumulate_batches
   if constraints:
     if 'L1' in self.attrs and self.attrs['L1'] > 0:
       self.constraints += T.constant(self.attrs['L1'], name="L1", dtype='floatX') * abs(param).sum()
     if 'L2' in self.attrs and self.attrs['L2'] > 0:
       self.constraints += T.constant(self.attrs['L2'], name="L2", dtype='floatX') * (param**2).sum()
     if self.attrs.get('L2_eye', 0) > 0:
       L2_eye = T.constant(self.attrs['L2_eye'], name="L2_eye", dtype='floatX')
       if param.ndim == 2:
         eye = tiled_eye(param.shape[0], param.shape[1], dtype=param.dtype)
         self.constraints += L2_eye * ((param - eye)**2).sum()
       else:  # standard L2
         self.constraints += L2_eye * (param**2).sum()
     if 'varreg' in self.attrs and self.attrs['varreg'] > 0:
       self.constraints += self.attrs['varreg'] * (1.0 * T.sqrt(T.var(param)) - 1.0 / numpy.sum(param.get_value().shape))**2
   return param
Author: rwth-i6 | Project: returnn | Lines: 31 | Source: NetworkBaseLayer.py


Example 13: _normalize_input

 def _normalize_input(self):
     X = T.matrix('X')
     results, updates = theano.scan(
         lambda x_i: (x_i - T.mean(x_i)) / T.sqrt(T.var(x_i) + 10),
         sequences=[X]
     )
     return theano.function(inputs=[X], outputs=results)
Author: bachard | Project: 2015-DL-practicalcourse | Lines: 7 | Source: kmeans.py


Example 14: decorate

	def decorate(self, layer) :
		if not hasattr(layer, "batchnorm_W") or not hasattr(layer, "batchnorm_b") :
			self.paramShape = layer.getOutputShape()#(layer.nbOutputs, )
			self.WInitialization.initialize(self)
			self.bInitialization.initialize(self)

			layer.batchnorm_W = self.W
			layer.batchnorm_b = self.b

			mu = tt.mean(layer.outputs)
			sigma = tt.sqrt( tt.var(layer.outputs) + self.epsilon )
			layer.outputs = layer.batchnorm_W * ( (layer.outputs - mu) / sigma ) + layer.batchnorm_b

			mu = tt.mean(layer.testOutputs)
			sigma = tt.sqrt( tt.var(layer.testOutputs) + self.epsilon )
			layer.testOutputs = layer.batchnorm_W * ( (layer.testOutputs - mu) / sigma ) + layer.batchnorm_b
Author: JonathanSeguin | Project: Mariana | Lines: 16 | Source: decorators.py


Example 15: activations

    def activations(self, dataset):
        prev_activations = self._prev_layer.activations(dataset)

        if prev_activations.ndim == 2:
            # flat dataset: (example, vector)
            mean = T.mean(prev_activations, axis=0)
            variance = T.var(prev_activations, axis=0)
        elif prev_activations.ndim == 3:
            # sequence dataset: (seq num, example, vector)
            mean = T.mean(prev_activations, axis=1).dimshuffle(0,'x',1)
            variance = T.var(prev_activations, axis=1).dimshuffle(0,'x',1)

        normalized = (prev_activations - mean) / T.sqrt(variance + self.EPSILON)
        scaled_and_shifted = (normalized * self._scale) + self._shift

        return scaled_and_shifted
Author: igul222 | Project: Marmot | Lines: 16 | Source: batch_normalize.py


Example 16: __init__

    def __init__(self,inputData,image_shape):
        self.input=inputData
        num_out=image_shape[1]
        epsilon=0.01
        self.image_shape=image_shape

        gamma_values = numpy.ones((num_out,), dtype=theano.config.floatX)
        self.gamma_vals = theano.shared(value=gamma_values, borrow=True)

        beta_values = numpy.zeros((num_out,), dtype=theano.config.floatX)
        self.beta_vals = theano.shared(value=beta_values, borrow=True)

        batch_mean=T.mean(self.input,keepdims=True,axis=(0,2,3))
        batch_var=T.var(self.input,keepdims=True,axis=(0,2,3))+epsilon

        self.batch_mean=self.adjustVals(batch_mean)
        batch_var=self.adjustVals(batch_var)
        self.batch_var=T.pow(batch_var,0.5)

        # note: self.batch_var already holds the standard deviation, so divide by it directly;
        # the original source applied T.pow(..., 0.5) a second time, normalizing by var**0.25
        batch_normalize=(inputData-self.batch_mean)/self.batch_var

        self.beta = self.beta_vals.dimshuffle('x', 0, 'x', 'x')
        self.gamma = self.gamma_vals.dimshuffle('x', 0, 'x', 'x')

        self.output=batch_normalize*self.gamma+self.beta
        #self.output=inputData-self.batch_mean

        self.params=[self.gamma_vals,self.beta_vals]
Author: RedHenLab | Project: Gesture | Lines: 28 | Source: layers.py


Example 17: make_consensus

 def make_consensus(self, networks, axis=2):
   cns = self.attrs['consensus']
   if cns == 'max':
     return T.max(networks, axis=axis)
   elif cns == 'min':
     return T.min(networks, axis=axis)
   elif cns == 'mean':
     return T.mean(networks, axis=axis)
   elif cns == 'flat':
     if self.depth == 1:
       return networks
     if axis == 2:
       return networks.flatten(ndim=3)
       #return T.reshape(networks, (networks.shape[0], networks.shape[1], T.prod(networks.shape[2:]) ))
     else:
       return networks.flatten(ndim=2) # T.reshape(networks, (networks.shape[0], T.prod(networks.shape[1:]) ))
   elif cns == 'sum':
     return T.sum(networks, axis=axis, acc_dtype=theano.config.floatX)
   elif cns == 'prod':
     return T.prod(networks, axis=axis)
   elif cns == 'var':
     return T.var(networks, axis=axis)
   elif cns == 'project':
     p = self.add_param(self.create_random_uniform_weights(self.attrs['n_out'], 1, self.attrs['n_out'] + self.depth + 1))
     return T.tensordot(p, networks, [[1], [axis]])
   elif cns == 'random':
     idx = self.rng.random_integers(size=(1,), low=0, high=self.depth)
     if axis == 0: return networks[idx]
     if axis == 1: return networks[:,idx]
     if axis == 2: return networks[:,:,idx]
     if axis == 3: return networks[:,:,:,idx]
     assert False, "axis too large"
   else:
     assert False, "consensus method unknown: " + cns
Author: chagge | Project: returnn | Lines: 34 | Source: NetworkBaseLayer.py


Example 18: get_output_for

    def get_output_for(self, input, deterministic=False, **kwargs):
        beta   = self.beta
        gamma  = self.gamma
        means  = self.means
        stdevs = self.stdevs

        output_shape = input.shape

        if input.ndim > 2:
            # if the input has more than two dimensions, flatten it into a
            # batch of feature vectors.
            input = input.flatten(2)

        if deterministic == False:
            m = T.mean(input, axis=0, keepdims=False)
            s = T.sqrt(T.var(input, axis=0, keepdims=False) + self.eta)

            means.default_update = self.alpha * means + (1-self.alpha) * m
            Es = self.alpha * stdevs + (1-self.alpha) * s
            u  = self.batch_size / (self.batch_size - 1)
            stdevs.default_update = u * Es

        else:
            m = means
            s = stdevs

        output = input - m
        output /= s

        # transform normalized outputs based on learned shift and scale
        if self.learn_transform is True:
            output = gamma * output + beta
        output = output.reshape(output_shape)
        return self.nonlinearity(output)
开发者ID:eglxiang,项目名称:xnn,代码行数:34,代码来源:normalization.py


Example 19: normalize_samples

    def normalize_samples(self, x, gamma, beta):
        OutputLog().write('Normalizing Samples')
        mean = Tensor.mean(x, axis=1, keepdims=True)
        var = Tensor.var(x, axis=1, keepdims=True)

        normalized_output = (x - mean) / Tensor.sqrt(var + self.epsilon)
        # note: conventional batch norm multiplies by gamma; the division here follows the original source
        return normalized_output / gamma + beta
Author: aviveise | Project: double_encoder | Lines: 7 | Source: symmetric_hidden_layer.py


Example 20: get_output_for

    def get_output_for(self, input, moving_avg_hooks=None,
                       deterministic=False, *args, **kwargs):
            
        reshape = False
        if input.ndim > 2:
            output_shape = input.shape
            reshape = True
            input = input.flatten(2)

        if deterministic is False:
            m  = T.mean(input, axis=0, keepdims=True)
            v = T.sqrt(T.var(input, axis=0, keepdims=True)+self.epsilon)
            m.name = "tensor:mean-" + self.name
            v.name = "tensor:variance-" + self.name

            key = "BatchNormalizationLayer:movingavg"
            if key not in moving_avg_hooks:
#                moving_avg_hooks[key] = {}
                moving_avg_hooks[key] = []
#            moving_avg_hooks[key][self.name] = [[m,v], [self.mean_inference, self.variance_inference]]
            moving_avg_hooks[key].append([[m,v], [self.mean_inference, self.variance_inference]])
        else:
            m = self.mean_inference
            v = self.variance_inference

        input_hat = (input - m) / v            # normalize
        y = self.gamma*input_hat + self.beta        # scale and shift

        if reshape:  # input.ndim > 2
            y = T.reshape(y, output_shape)
        return self.nonlinearity(y)
Author: DediGadot | Project: PatchBatch | Lines: 31 | Source: learnedactivations.py



Note: the theano.tensor.var examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and similar source-code and documentation platforms. The snippets come from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's license before distributing or reusing the code. Do not repost without permission.

