
Python tensor.max Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.max. If you are struggling with questions like "what exactly does Python's max function do?", "how is max used?", or "what does calling max look like in practice?", then the curated code examples below may help.



Twenty code examples of the max function are shown below, sorted by popularity by default.
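Before the collected examples, here is a minimal, illustrative sketch of calling theano.tensor.max directly (the variable names and data are made up for demonstration): build a symbolic matrix, reduce it along an axis, and compile a function.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                      # symbolic 2-D input
col_max = T.max(x, axis=0)             # maximum of each column
f = theano.function([x], col_max)      # compile the graph

data = np.array([[1., 5., 3.],
                 [4., 2., 6.]], dtype=theano.config.floatX)
print(f(data))                         # -> [ 4.  5.  6.]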

Example 1: norm

from theano import tensor
from theano.tensor import as_tensor_variable


def norm(x, ord):
    x = as_tensor_variable(x)
    ndim = x.ndim
    if ndim == 0:
        raise ValueError("'axis' entry is out of bounds.")
    elif ndim == 1:
        if ord is None:
            return tensor.sum(x**2)**0.5
        elif ord == 'inf':
            return tensor.max(abs(x))
        elif ord == '-inf':
            return tensor.min(abs(x))
        elif ord == 0:
            return x[x.nonzero()].shape[0]
        else:
            try:
                z = tensor.sum(abs(x**ord))**(1./ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            return z
    elif ndim == 2:
        if ord is None or ord == 'fro':
            return tensor.sum(abs(x**2))**(0.5)
        elif ord == 'inf':
            return tensor.max(tensor.sum(abs(x), 1))
        elif ord == '-inf':
            return tensor.min(tensor.sum(abs(x), 1))
        elif ord == 1:
            return tensor.max(tensor.sum(abs(x), 0))
        elif ord == -1:
            return tensor.min(tensor.sum(abs(x), 0))
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif ndim > 2:
        raise NotImplementedError("We don't support norm with ndim > 2")
Developer: MLevinson-OR | Project: Theano | Lines: 35 | Source file: ops.py
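A quick, illustrative check of the vector cases, reusing the imports from the opening sketch (the input values here are made up):

v = T.vector('v')
l2 = theano.function([v], norm(v, None))     # Euclidean norm
linf = theano.function([v], norm(v, 'inf'))  # max-abs norm
vec = np.array([3., -4.], dtype=theano.config.floatX)
print(l2(vec))    # -> 5.0
print(linf(vec))  # -> 4.0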


Example 2: plotUpdate

    def plotUpdate(self,updates):
        '''
        >>>get update info of each layer
        >>>type updates: dict
        >>>para updates: update dictionary
        '''
        maxdict=T.zeros(shape=(self.deep*2+1,))
        mindict=T.zeros(shape=(self.deep*2+1,))
        meandict=T.zeros(shape=(self.deep*2+1,))
        
        for i in xrange(self.deep):
            updw=updates[self.layers[i].w]-self.layers[i].w
            maxdict=T.set_subtensor(maxdict[2*i],T.max(updw))
            mindict=T.set_subtensor(mindict[2*i],T.min(updw))
            meandict=T.set_subtensor(meandict[2*i],T.mean(updw))
            updb=updates[self.layers[i].b]-self.layers[i].b
            maxdict=T.set_subtensor(maxdict[2*i+1],T.max(updb))
            mindict=T.set_subtensor(mindict[2*i+1],T.min(updb))
            meandict=T.set_subtensor(meandict[2*i+1],T.mean(updb))

        updw=updates[self.classifier.w]-self.classifier.w
        maxdict=T.set_subtensor(maxdict[self.deep*2],T.max(updw))
        mindict=T.set_subtensor(mindict[self.deep*2],T.min(updw))
        meandict=T.set_subtensor(meandict[self.deep*2],T.mean(updw))
        return [maxdict,mindict,meandict]
Developer: wolfhu | Project: RCNNSentence | Lines: 25 | Source file: dcnnModel.py


Example 3: test_max

 def test_max(self):
     # If we call max directly, it returns a CAReduce object,
     # which doesn't have R_op implemented!
     # self.check_mat_rop_lop(tensor.max(self.mx, axis=[0,1])[0],
     #                       ())
     self.check_mat_rop_lop(tensor.max(self.mx, axis=0), (self.mat_in_shape[1],))
     self.check_mat_rop_lop(tensor.max(self.mx, axis=1), (self.mat_in_shape[0],))
Developer: cfsmile | Project: Theano | Lines: 7 | Source file: test_rop.py


Example 4: _activation

    def _activation(self, Y, L, M, W):
        """Returns the activation for a given input.

        Derived from the generative model formulation of hierarchical
        Poisson mixtures, the formular for the activation in the network
        reads as follows:
        I_c =
         \sum_d \log(W_{cd})y_d + \log(M_{lc})        for labeled data
         \sum_d \log(W_{cd})y_d + \log(\sum_k M_{kc}) for unlabeled data
        s_c = softmax(I_c)
        """
        # first: complete inference to find label
        # Input integration:
        I = T.tensordot(Y,T.log(W),axes=[1,1])
        # recurrent term:
        vM = M[L]
        L_index = T.eq(L,-1).nonzero()
        vM = T.set_subtensor(vM[L_index], T.sum(M, axis=0))
        # numeric trick to prevent overflow in the exp-function
        max_exponent = 86. - T.ceil(T.log(I.shape[1].astype('float32')))
        scale = T.switch(
            T.gt(T.max(I, axis=1, keepdims=True), max_exponent),
            T.max(I, axis=1, keepdims=True) - max_exponent,
            0.)
        # numeric approximation to prevent underflow in the exp-function:
        # map too low values of I to a fixed minimum value
        min_exponent = -87. + T.ceil(T.log(I.shape[1].astype('float32')))
        I = T.switch(
            T.lt(I-scale, min_exponent),
            scale+min_exponent,
            I)
        # activation: recurrent softmax with overflow protection
        s = vM*T.exp(I-scale)/T.sum(vM*T.exp(I-scale), axis=1, keepdims=True)
        return s
Developer: smajida | Project: NeSi | Lines: 34 | Source file: poisson_theano_scan.py


Example 5: predict

 def predict(self, new_data, batch_size, pool_size):
     """
     predict for new data
     """
     img_shape = (batch_size, 1, self.image_shape[2], self.image_shape[3])
     conv_out = conv.conv2d(input=new_data, filters=self.W, filter_shape=self.filter_shape, image_shape=img_shape)
     pool_list = []
     if self.non_linear == "tanh":
         conv_out_tanh = T.tanh(conv_out + self.b.dimshuffle("x", 0, "x", "x"))
         # pad_len = int(self.max_window_len/2)
         # right_pad_len = int(self.filter_shape[2]/2)
         # index_shift = pad_len-right_pad_len
         index_shift = int(self.filter_shape[2] / 2)
         for i in xrange(batch_size):
             # partition sentence via pool size
             e1pos = pool_size[i, 0] + index_shift
             e2pos = pool_size[i, 1] + index_shift
             # if T.gt(e1pos, 0):
             #     p1 = conv_out_tanh[i, :, :e1pos, :]
             # else:
             #     p1 = conv_out_tanh[i, :, 0, :]
             p1 = conv_out_tanh[i, :, :e1pos, :]
             p2 = conv_out_tanh[i, :, e1pos:e2pos, :]
             p3 = conv_out_tanh[i, :, e2pos:, :]
             p1_pool_out = T.max(p1, axis=1)
             p2_pool_out = T.max(p2, axis=1)
             p3_pool_out = T.max(p3, axis=1)
             temp = T.concatenate([p1_pool_out, p2_pool_out, p3_pool_out], axis=1)
             pool_list.append(temp.dimshuffle("x", 0, 1))
     else:
         pass
     output = T.concatenate(pool_list, axis=0)
     return output
Developer: Xls1994 | Project: DeepLearning | Lines: 33 | Source file: convLayer.py


Example 6: define_network

    def define_network(self, layers_info=None):
        """
        Builds Theano graph of the network.
        """
        self.hidden_layers = [None]*self.n_hidden.size

        self.params = []
        for i, h in enumerate(self.n_hidden):
            if i == 0:
                self.hidden_layers[i] = LBNHiddenLayer(self.rng, self.trng, self.x, self.n_in,
                                        h, self.det_activation[i],
                                        self.stoch_n_hidden, self.stoch_activation,
                                        det_activation_name=self.det_activation_names[i],
                                        stoch_activation_names=self.stoch_activation_names,
                                        m=self.m,
                                        det_W=None if layers_info is None else
                                        np.array(
                                        layers_info['hidden_layers'][i]['LBNlayer']['detLayer']\
                                                                                            ['W']),
                                        det_b=None if layers_info is None else
                                        np.array(layers_info['hidden_layers'][i]\
                                                                    ['LBNlayer']['detLayer']['b']),
                                        stoch_mlp_info=None if layers_info is None else
                                        layers_info['hidden_layers'][i]['LBNlayer']['stochLayer'])
            else:
                self.hidden_layers[i] = LBNHiddenLayer(self.rng, self.trng,
                                        self.hidden_layers[i-1].output,
                                        self.n_hidden[i-1], h, self.det_activation[i],
                                        self.stoch_n_hidden, self.stoch_activation,
                                        det_activation_name=self.det_activation_names[i],
                                        stoch_activation_names=self.stoch_activation_names, 
                                        det_W=None if layers_info is None else
                                        np.array(layers_info['hidden_layers'][i]['LBNlayer']\
                                                                                ['detLayer']['W']),
                                        det_b=None if layers_info is None else
                                        np.array(layers_info['hidden_layers'][i]['LBNlayer']\
                                                                                ['detLayer']['b']),
                                        stoch_mlp_info=None if layers_info is None else
                                        layers_info['hidden_layers'][i]['LBNlayer']['stochLayer'])

            self.params.append(self.hidden_layers[i].params)

        self.output_layer = OutputLayer(self.rng, self.hidden_layers[-1].output, self.n_hidden[-1], 
                                                            self.n_out, self.det_activation[-1],
                                                            self.det_activation_names[-1],
                                                            V_values=None 
                                                            if layers_info is None else np.array(
                                                            layers_info['output_layer']['W']))

        self.params.append(self.output_layer.params)
        self.output = self.output_layer.output
        exp_value = -0.5*T.sum((self.output - self.y.dimshuffle('x',0,1))**2, axis=2)
        max_exp_value = theano.ifelse.ifelse(T.lt(T.max(exp_value), -1*T.min(exp_value)),
                                                                T.max(exp_value), T.min(exp_value))
 
        self.log_likelihood = T.sum(T.log(T.sum(T.exp(exp_value - max_exp_value), axis=0)) +
                                                                                    max_exp_value)-\
                                self.y.shape[0]*(T.log(self.m)+self.y.shape[1]/2.*T.log(2*np.pi))

        self.predict = theano.function(inputs=[self.x, self.m], outputs=self.output)
Developer: pabaldonedo | Project: stochastic_fnn | Lines: 60 | Source file: lbn.py


Example 7: filterbank_matrices

    def filterbank_matrices(self, center_y, center_x, delta, sigma):
        """
        Create a Fy and a Fx

        Parameters
        ----------
        center_y : T.vector (shape: batch_size)
        center_x : T.vector (shape: batch_size)
            Y and X center coordinates for the attention window
        delta : T.vector (shape: batch_size)
        sigma : T.vector (shape: batch_size)

        Returns
        -------
            FY, FX
        """
        tol = 1e-4
        # construct x and y coordinates for the grid points
        obj_x = center_x.dimshuffle(0, 'x') + \
                (delta.dimshuffle(0, 'x') * self.obj_x)
        obj_y = center_y.dimshuffle(0, 'x') + \
                (delta.dimshuffle(0, 'x') * self.obj_y)

        # construct unnormalized attention weights for each grid point
        FX = T.exp( -(self.img_x - obj_x.dimshuffle(0,1,'x'))**2. / \
                   (2. * sigma.dimshuffle(0,'x','x')**2.) )
        FY = T.exp( -(self.img_y - obj_y.dimshuffle([0,1,'x']))**2. / \
                   (2. * sigma.dimshuffle(0,'x','x')**2.) )

        # normalize the attention weights
        #FX = FX / (FX.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
        #FY = FY / (FY.sum(axis=-1).dimshuffle(0, 1, 'x') + tol)
        FX = FX / (T.max(FX.sum(axis=-1)) + tol)
        FY = FY / (T.max(FY.sum(axis=-1)) + tol)
        return FY, FX
Developer: Philip-Bachman | Project: Sequential-Generation | Lines: 35 | Source file: MotionRenderers.py


Example 8: pool_function

    def pool_function(input, axis):
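        # NOTE: num_in_sum, num_in_max, max_strength and floatX are free
        # variables captured from the enclosing scope of this nested function.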

        input_shape = tuple(input.shape)
        num_feature_maps_out = input_shape[axis - 1]
        pool_size = input_shape[axis]

        pool_shape = (input_shape[:axis] + (num_in_sum,
                                            num_in_max) + input_shape[axis + 1:])
        # print("make_ghh_pool_conv2d: pool_shape is {}".format(pool_shape))
        input_reshaped = input.reshape(pool_shape)

        # raise NotImplementedError('TODO: use a soft max instead of T.max')
        # res_after_max = T.max(input_reshaped,axis=axis+1)

        # Soft max with strength of max_strength
        res_after_max = np.cast[floatX](1.0) / np.cast[floatX](max_strength) \
            * T.log(T.mean(T.exp(max_strength * (input_reshaped - T.max(input_reshaped, axis=axis + 1, keepdims=True))), axis=axis + 1)) \
            + T.max(input_reshaped, axis=axis + 1)

        # Get deltas
        delta = np.cast[floatX](1.0) - np.cast[floatX](2.0) * \
            (T.arange(num_in_sum, dtype=floatX) % np.cast[floatX](2))
        target_dimshuffle = ('x',) * axis + (0,) + ('x',) * \
            (len(input_shape) - 1 - axis)
        # print("make_ghh_pool_conv2d: target_dimshuffle is {}".format(target_dimshuffle))
        delta = delta.flatten().dimshuffle(*target_dimshuffle)

        res_after_sum = T.sum(res_after_max * delta, axis=axis)

        return res_after_sum
Developer: cvlab-epfl | Project: LIFT | Lines: 30 | Source file: ghh_pool.py


Example 9: forward_init

    def forward_init(self):
        obs_ = self.obs_.reshape([self.obs_.shape[0]*self.obs_.shape[1], self.obs_.shape[-1]])

        h = eval(self.activ)(tensor.dot(obs_, self.params['W']) + self.params['b'][None,None,:])

        self.pi = []
        for oi in xrange(self.n_out):
            pi = tensor.dot(h, self.params['U%d'%oi]) + self.params['c%d'%oi][None,:]
            pi = tensor.exp(pi - tensor.max(pi,-1,keepdims=True))
            self.pi.append(pi / (pi.sum(-1, keepdims=True)))

        prev = tensor.matrix('prev', dtype='float32')
        #obs = tensor.matrix('obs', dtype='float32')
        obs_ = self.obs_.reshape([self.obs_.shape[0]*self.obs_.shape[1], 
                                  self.obs_.shape[-1]])
        obs_ = obs_[0]

        self.h_init = lambda x: numpy.float32(0.)

        h = eval(self.activ)(tensor.dot(obs_, self.params['W']) + self.params['b'][None,:])

        pi = []
        for oi in xrange(self.n_out):
            pi_ = tensor.dot(h, self.params['U%d'%oi]) + self.params['c%d'%oi][None,:]
            pi_ = tensor.exp(pi_ - tensor.max(pi_,-1,keepdims=True))
            pi.append(pi_ / (pi_.sum(-1, keepdims=True)))

        self.forward = theano.function([self.obs, prev], [h] + pi, name='forward', on_unused_input='ignore')
Developer: kyunghyuncho | Project: gym | Lines: 28 | Source file: policy_ff.py


Example 10: maxout

            def maxout(z = None):
                #g = theano.shared(numpy.zeros((hidden_layers_sizes[i],)),name='g',borrow=True)
                g = T.max(z[0:5])
                g = T.stack(g,T.max(z[5:10]))
                for index in xrange(hidden_layers_sizes[i]-10):
                    g = T.concatenate([g,[T.max(z[5*(index+2):5*(index+3)])]])
                return g
Developer: TarvosEpilepsy | Project: StockPredict | Lines: 7 | Source file: DBN.py
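The element-by-element loop above can be written more compactly as a single reshape followed by a max over the group axis. This is an illustrative sketch, not the original author's code; it assumes the layer width is an exact multiple of the pool size:

def maxout_reshape(z, pool_size=5):
    # view the vector as (n_groups, pool_size) and take the max of each group
    return T.max(z.reshape((-1, pool_size)), axis=1)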


Example 11: compile_gpu_func

def compile_gpu_func(nan_is_error, inf_is_error, big_is_error):
    """ compile utility function used by contains_nan and contains_inf
    """
    global f_gpumin, f_gpumax, f_gpuabsmax
    if not cuda.cuda_available:
        return
    guard_input = cuda.fvector("nan_guard")
    cuda_compile_failed = False
    if (nan_is_error or inf_is_error) and f_gpumin is None:
        try:
            f_gpumin = theano.function([guard_input], T.min(guard_input), mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if inf_is_error and not cuda_compile_failed and f_gpumax is None:
        try:
            f_gpumax = theano.function([guard_input], T.max(guard_input), mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
    if big_is_error and not cuda_compile_failed and f_gpuabsmax is None:
        try:
            f_gpuabsmax = theano.function([guard_input], T.max(T.abs_(guard_input)), mode="FAST_RUN")
        except RuntimeError:
            # This can happen if cuda is available, but the
            # device is in exclusive mode and used by another
            # process.
            cuda_compile_failed = True
Developer: ChinaQuants | Project: Theano | Lines: 32 | Source file: nanguardmode.py


Example 12: __theano__softmax

    def __theano__softmax(self, inp, dim=None, predict=False, issequence=False):

        if dim is None:
            assert issequence, "Data dimensionality could not be parsed."
            dim = 2

        # FFD for dimensions 1 and 2
        if dim == 1 or dim == 2:
            # Using the numerically stable implementation (along the channel axis):
            ex = T.exp(inp - T.max(inp, axis=1, keepdims=True))
            y = ex / T.sum(ex, axis=1, keepdims=True)

            # One hot encoding for prediction
            if predict:
                y = T.argmax(y, axis=1)

        elif dim == 3:
            # Stable implementation again, this time along axis = 2 (channel axis)
            ex = T.exp(inp - T.max(inp, axis=2, keepdims=True))
            y = ex / T.sum(ex, axis=2, keepdims=True)

            # One hot encoding for prediction
            if predict:
                y = T.argmax(y, axis=2)

        else:
            raise NotImplementedError("Softmax is implemented in 2D, 3D and 1D.")

        return y
Developer: abailoni | Project: greedy_CNN | Lines: 29 | Source file: backend.py
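The max-subtraction used in this example (and in examples 4, 9 and 16) is the standard numerically stable softmax: since softmax(x) == softmax(x - c) for any constant c, subtracting max(x) keeps exp from overflowing. A tiny NumPy illustration with made-up values:

import numpy as np

x = np.array([1000., 1001., 1002.])
# np.exp(x) alone would overflow to inf for every entry
e = np.exp(x - x.max())
print(e / e.sum())   # -> [0.09003057 0.24472847 0.66524096]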


Example 13: test_optimization_max

    def test_optimization_max(self):
        data = numpy.asarray(numpy.random.rand(2,3),dtype=config.floatX)
        n = tensor.matrix()

        f = function([n],tensor.max(n,0), mode=self.mode)
        topo = f.maker.env.toposort()
        assert len(topo)==1
        assert isinstance(topo[0].op,CAReduce)
        f(data)


        f = function([n],tensor.max(-n,0), mode=self.mode)
        topo = f.maker.env.toposort()
        assert len(topo)==2
        assert isinstance(topo[0].op, Elemwise)
        assert isinstance(topo[0].op.scalar_op, scalar.Neg)
        assert isinstance(topo[1].op,CAReduce)
        f(data)

        f = function([n],-tensor.max(n,0), mode=self.mode)
        topo = f.maker.env.toposort()
        assert len(topo)==2
        assert isinstance(topo[0].op,CAReduce)
        assert isinstance(topo[1].op, Elemwise)
        assert isinstance(topo[1].op.scalar_op, scalar.Neg)
        f(data)

        f = function([n],-tensor.max(-n,0), mode=self.mode)
        topo = f.maker.env.toposort()
        assert len(topo)==1
        assert isinstance(topo[0].op,CAReduce)#min
        f(data)
Developer: glorotxa | Project: Theano | Lines: 32 | Source file: test_opt_uncanonicalize.py


Example 14: _test_layer_stats

 def _test_layer_stats(self, layer_output):
     """
     DESCRIPTION:
         This method is called every batch: the examples from the test or valid set
         are passed through, and the final result is the mean of the results over
         all the batches in an epoch of the test or valid set.
     PARAM:
         layer_output: the output from the layer
     RETURN:
         A list of tuples of [('name_a', var_a), ('name_b', var_b)], where each var is a scalar.
     """
     
     w_len = T.sqrt((self.W ** 2).sum(axis=0))
     max_length = T.max(w_len)
     mean_length = T.mean(w_len)
     min_length = T.min(w_len)
     
     return [('max_col_length', max_length),
             ('mean_col_length', mean_length),
             ('min_col_length', min_length), 
             ('output_max', T.max(layer_output)),
             ('output_mean', T.mean(layer_output)), 
             ('output_min', T.min(layer_output)),
             ('max_W', T.max(self.W)),
             ('mean_W', T.mean(self.W)),
             ('min_W', T.min(self.W)),
             ('max_b', T.max(self.b)),
             ('mean_b', T.mean(self.b)),
             ('min_b', T.min(self.b))]
Developer: hycis | Project: smartNN | Lines: 29 | Source file: layer.py


Example 15: update_log_p

def update_log_p(skip_idxs,zeros,active,log_p_curr,log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ),
        log_p_curr.shape[0]
    ), 'int32')

    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(
        _p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor

    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
Developer: SigmaQuan | Project: theano-ctc | Lines: 27 | Source file: ctc.py


Example 16: decoder

def decoder(localt, stm1, cstm1, hmat,
            Wbeta, Ubeta, vbeta,
            Wzide, Wzfde, Wzcde, Wzode,
            Ede, Wxide, Wside, bide, Wxfde, Wsfde, bfde, 
            Wxcde, Wscde, bcde, Wxode, Wsode, bode,
            L0, Ls, Lz):
    xt = theano.dot(localt, Ede)
    # get z from hmat (sentlen * nen), stm1
    beta = \
    theano.dot( act( theano.dot(hmat,Ubeta) + theano.dot(stm1,Wbeta) ) , vbeta )
    alpha = T.exp(beta-T.max(beta)) / T.sum(T.exp(beta-T.max(beta)) )
    zt = theano.dot(alpha, hmat)
    #
    it = sigma(theano.dot(xt,Wxide) + theano.dot(stm1,Wside) + theano.dot(zt,Wzide) + bide )
    ft = sigma(theano.dot(xt,Wxfde) + theano.dot(stm1,Wsfde) + theano.dot(zt,Wzfde) + bfde )
    cst = ft * cstm1 + it*act(theano.dot(xt,Wxcde)+theano.dot(stm1,Wscde)+ theano.dot(zt,Wzcde) +bcde )
    ot = sigma(theano.dot(xt,Wxode) + theano.dot(stm1,Wsode) + theano.dot(zt,Wzode) +bode )
    st = ot * act(cst)
    #
    winst = getwins()
    stfory = st * winst
    #
    yt0 = T.dot( (xt + T.dot(stfory, Ls) + T.dot(zt, Lz) ) , L0)
    #yt0 = theano.dot(st,Wsyde)
    yt0max = T.max(yt0)
    #yt0maxvec = T.maximum(yt0, yt0max)
    yt = T.exp(yt0-yt0max) / T.sum(T.exp(yt0-yt0max))
    logyt = yt0-yt0max-T.log(T.sum(T.exp(yt0-yt0max)))
    #yt = T.exp(yt0-yt0maxvec) / T.sum(T.exp(yt0-yt0maxvec))
    #logyt = yt0-yt0maxvec-T.log(T.sum(T.exp(yt0-yt0maxvec)))
#    yt = T.concatenate([addzero,tempyt],axis=0)
    return st, cst, yt, logyt
Developer: lizuyao2010 | Project: lstm_theano | Lines: 32 | Source file: walkmanweightdecay.py


Example 17: init_opt

    def init_opt(self):
        is_recurrent = int(self.policy.recurrent)

        obs_var = self.env.observation_space.new_tensor_variable(
            'obs',
            extra_dims=1 + is_recurrent,
        )
        action_var = self.env.action_space.new_tensor_variable(
            'action',
            extra_dims=1 + is_recurrent,
        )
        advantage_var = ext.new_tensor(
            'advantage',
            ndim=1 + is_recurrent,
            dtype=theano.config.floatX
        )
        dist = self.policy.distribution
        old_dist_info_vars = {
            k: ext.new_tensor(
                'old_%s' % k,
                ndim=2 + is_recurrent,
                dtype=theano.config.floatX
            ) for k in dist.dist_info_keys
            }
        old_dist_info_vars_list = [old_dist_info_vars[k] for k in dist.dist_info_keys]

        if is_recurrent:
            valid_var = TT.matrix('valid')
        else:
            valid_var = None

        dist_info_vars = self.policy.dist_info_sym(obs_var, action_var)
        logli = dist.log_likelihood_sym(action_var, dist_info_vars)
        kl = dist.kl_sym(old_dist_info_vars, dist_info_vars)

        # formulate as a minimization problem
        # The gradient of the surrogate objective is the policy gradient
        if is_recurrent:
            surr_obj = - TT.sum(logli * advantage_var * valid_var) / TT.sum(valid_var)
            mean_kl = TT.sum(kl * valid_var) / TT.sum(valid_var)
            max_kl = TT.max(kl * valid_var)
        else:
            surr_obj = - TT.mean(logli * advantage_var)
            mean_kl = TT.mean(kl)
            max_kl = TT.max(kl)

        input_list = [obs_var, action_var, advantage_var]
        if is_recurrent:
            input_list.append(valid_var)

        self.optimizer.update_opt(surr_obj, target=self.policy, inputs=input_list)

        f_kl = ext.compile_function(
            inputs=input_list + old_dist_info_vars_list,
            outputs=[mean_kl, max_kl],
        )
        self.opt_info = dict(
            f_kl=f_kl,
        )
Developer: jpdoyle | Project: vime | Lines: 59 | Source file: vpg_expl.py


Example 18: Max_pooling

def Max_pooling(inp):
    """
    Max across rows (axis 0); inp is a 1-D vector or a 2-D matrix.
    """
    if inp.ndim == 1:
        return T.max(inp)
    else:
        return T.max(inp, axis=0)
Developer: singhalprerana | Project: Nucleus | Lines: 8 | Source file: neural_net_classes.py


Example 19: rmax

def rmax(x):
    # mask marking, per row, the positions that hold the row maximum
    xmax  = T.ge(x, T.max(x, axis=1).reshape((x.shape[0], 1)))
    # zero out the maxima so the next reduction finds the second-largest value
    shift = (T.ones_like(x) - xmax) * x
    max2  = T.max(shift, axis=1).reshape((x.shape[0], 1))
    # keep only the entries that exceed the second-largest value in their row
    out = T.nnet.relu(x - max2)
    return out
Developer: hongyuanzhu | Project: keras | Lines: 8 | Source file: activations.py


Example 20: __call__

 def __call__(self, x):
     if x.ndim == 2:
         x = T.max([x[:, n::self.n_pool] for n in range(self.n_pool)], axis=0)
     elif x.ndim == 4:
         x = T.max([x[:, n::self.n_pool, :, :] for n in range(self.n_pool)], axis=0)
     elif x.ndim == 3:
         x = T.max([x[:, :, n::self.n_pool] for n in range(self.n_pool)], axis=0)
     return x
Developer: IndicoDataSolutions | Project: Foxhound | Lines: 8 | Source file: activations.py



Note: the theano.tensor.max examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce this compilation without permission.

