
Python tensor.shape Function Code Examples


This article collects typical usage examples of the theano.tensor.shape function in Python. If you have been wondering what the shape function does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.



Twenty code examples of the shape function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
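Before the examples, here is a minimal, self-contained sketch (the variable names are ours) of the basic contract of theano.tensor.shape: it returns a symbolic integer vector describing a tensor's shape, which only becomes concrete numbers once the graph is compiled and evaluated.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                     # symbolic 2-D tensor, shape unknown until runtime
shape_of_x = T.shape(x)               # symbolic int vector, e.g. [rows, cols]

f = theano.function([x], shape_of_x)
print(f(np.zeros((3, 4), dtype=theano.config.floatX)))   # -> [3 4]

# Unlike a NumPy shape tuple, T.shape(x) can itself be used in further
# symbolic expressions, e.g. to size a reshape that depends on the input:
y = T.reshape(x, (T.shape(x)[0] * T.shape(x)[1],))
g = theano.function([x], T.shape(y))
print(g(np.zeros((3, 4), dtype=theano.config.floatX)))   # -> [12]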

Example 1: get_sensi_speci

def get_sensi_speci(y_hat, y):
    # y_hat = T.concatenate(T.sum(input=y_hat[:, 0:2], axis=1), T.sum(input=y_hat[:, 2:], axis=1))
    y_hat = T.stacklists([y_hat[:, 0] + y_hat[:, 1], y_hat[:, 2] + y_hat[:, 3] + y_hat[:, 4]]).T
    y_hat = T.argmax(y_hat, axis=1)  # predicted class per sample

    tag = 10 * y_hat + y
    tneg = T.cast((T.shape(tag[(T.eq(tag, 0.)).nonzero()]))[0], config.floatX)
    fneg = T.cast((T.shape(tag[(T.eq(tag, 1.)).nonzero()]))[0], config.floatX)
    fpos = T.cast((T.shape(tag[(T.eq(tag, 10.)).nonzero()]))[0], config.floatX)
    tpos = T.cast((T.shape(tag[(T.eq(tag, 11.)).nonzero()]))[0], config.floatX)

    # assert tneg + fneg + fpos + tpos == 1380
    # tneg.astype(config.floatX)
    # fneg.astype(config.floatX)
    # fpos.astype(config.floatX)
    # tpos.astype(config.floatX)

    speci = ifelse(T.eq((tneg + fpos), 0), np.float64(float('inf')), tneg / (tneg + fpos))
    sensi = ifelse(T.eq((tpos + fneg), 0), np.float64(float('inf')), tpos / (tpos + fneg))

    # gotcha!!! (a plain Python if/else cannot branch on symbolic tensors; use ifelse as above)
    # if T.eq((tneg + fpos), 0):
    #     speci = float('inf')
    # else:
    #     speci = tneg // (tneg + fpos)
    # if T.eq((tpos + fneg), 0.):
    #     sensi = float('inf')
    # else:
    #     sensi = tpos // (tpos + fneg)

    # speci.astype(config.floatX)
    # sensi.astype(config.floatX)
    return [sensi, speci]
Author: jackal092927 | Project: pylearn2_med | Lines: 35 | Source: test0.py
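As a hypothetical driver for the snippet above (the symbolic inputs below are our own names, and get_sensi_speci plus its imports — theano, T, np, config, ifelse — are assumed to be in scope), the metrics would be compiled roughly like this:

y_hat = T.matrix('y_hat')   # (n_samples, 5) class scores, as the column slicing above assumes
y = T.ivector('y')          # 0/1 ground-truth label per sample
sensi, speci = get_sensi_speci(y_hat, y)
metrics = theano.function([y_hat, y], [sensi, speci])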


Example 2: compileActivation

    def compileActivation(self, net, layerNum):
        variable = net.x if layerNum == 0 else net.varArrayA[layerNum - 1]

        #Calc shapes for reshape function on-the-fly. Assume we have square images as input.
        sX = T.cast(T.sqrt(T.shape(variable)[0] / self.kernel_shape[1]), 'int16')

        #Converts input from 2 to 4 dimensions
        Xr = T.reshape(variable.T, (T.shape(variable)[1], self.kernel_shape[1], sX, sX))

        if self.optimized:
            out_size = T.cast(
                T.ceil((T.shape(Xr)[-1] - T.shape(net.varWeights[layerNum]['w'])[-1] + 1) / np.float32(self.stride)),
                'int32')

            conv_op = FilterActs(stride=self.stride)
            input_shuffled = Xr.dimshuffle(1, 2, 3, 0)  # bc01 to c01b
            filters_shuffled = net.varWeights[layerNum]['w'].dimshuffle(1, 2, 3, 0)  # bc01 to c01b
            filters_flipped = filters_shuffled[:, ::-1, ::-1, :] # flip rows and columns
            contiguous_input = gpu_contiguous(input_shuffled)
            contiguous_filters = gpu_contiguous(filters_flipped *
                                                (net.dropOutVectors[layerNum].dimshuffle('x', 0, 1, 'x') if self.dropout else 1.0))
            a = conv_op(contiguous_input, contiguous_filters)
            a = a[:, :out_size, :out_size, :]
            #Add bias
            a = a + net.varWeights[layerNum]['b'].dimshuffle(0, 'x', 'x', 'x')
        else:
            a = T.nnet.conv2d(Xr, net.varWeights[layerNum]['w'] *
                              (net.dropOutVectors[layerNum].dimshuffle('x', 'x', 0, 1) if self.dropout else 1.0),
                              border_mode='valid',
                              subsample=(self.stride, self.stride))
            #Add bias
            a = a + net.varWeights[layerNum]['b'].dimshuffle('x', 0, 'x', 'x')

        if self.pooling:
            if self.optimized:
                #Pooling
                # ds - side of square pool window
                # stride - Defines the stride size between successive pooling squares.
                # Setting this parameter smaller than sizeX produces overlapping pools.
                # Setting it equal to sizeX gives the usual, non-overlapping pools. Values greater than sizeX are not allowed.
                pool_op = MaxPool(ds=self.pooling_shape, stride=self.pooling_shape)

                contiguous_input = gpu_contiguous(a)
                a = pool_op(contiguous_input)
                a = a.dimshuffle(3, 0, 1, 2)       # c01b to bc01
            else:
                #a = downsample.max_pool_2d(a, (self.pooling_shape, self.pooling_shape), ignore_border=False)
                a = pool.max_pool2D(a, (self.pooling_shape, self.pooling_shape), ignore_border=False)
        else:
            if self.optimized:
                a = a.dimshuffle(3, 0, 1, 2)       # c01b to bc01

        a = T.flatten(a, outdim=2).T

        #Sigmoid
        a = self.activation(a, self.pool_size)

        net.varArrayA.append(a)
Author: SoftServeSAG | Project: DRL | Lines: 58 | Source: fTheanoNNclassCORE.py


Example 3: infer_shape

    def infer_shape(self, node, in_shapes):
        data_shape = T.shape(node.inputs[0])
        rois_shape = T.shape(node.inputs[1])
        batch_size = rois_shape[0]
        num_maps = data_shape[1]
        h = self.pooled_h
        w = self.pooled_w
        out_shape = [batch_size, num_maps, h, w]
        return [out_shape, out_shape]
Author: hongyuanzhu | Project: theano-roi-pooling | Lines: 9 | Source: roi_pooling.py
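The point of infer_shape is that Theano's shape optimizer can answer shape queries without executing the op itself. Here is a small sketch of that behaviour using a built-in op (our own toy example, not the ROI-pooling op above):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
w = T.tensor4('w')
conv = T.nnet.conv2d(x, w, border_mode='valid')
# Because conv2d implements infer_shape, a compiled function that only asks
# for the shape should not need to run the convolution at all:
shape_fn = theano.function([x, w], T.shape(conv))
print(shape_fn(np.zeros((1, 3, 8, 8), dtype=theano.config.floatX),
               np.zeros((2, 3, 3, 3), dtype=theano.config.floatX)))   # -> [1 2 6 6]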


Example 4: __init__

    def __init__(self, p, *args, **kwargs):
        super(Categorical, self).__init__(*args, **kwargs)
        try:
            self.k = tt.shape(p)[-1].tag.test_value
        except AttributeError:
            self.k = tt.shape(p)[-1]
        self.p = p = tt.as_tensor_variable(p)
        self.p = (p.T / tt.sum(p, -1)).T
        self.mode = tt.argmax(p)
Author: bballamudi | Project: pymc3 | Lines: 9 | Source: discrete.py


Example 5: activation

    def activation(self, z):
        y = T.reshape(z, (T.shape(z)[0], self.n_units // self.n_pieces, self.n_pieces))
        y = T.max(y, axis=2)
        y = T.reshape(y, (T.shape(z)[0], self.n_units // self.n_pieces))
        return y
Author: subercui | Project: BinaryConnect | Lines: 9 | Source: layer.py
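A quick standalone check of the reshape-and-max trick above (n_units and n_pieces are hard-coded here, and the class attributes are replaced by plain variables):

import numpy as np
import theano
import theano.tensor as T

n_units, n_pieces = 6, 3
z = T.matrix('z')                      # (batch, n_units)
y = T.reshape(z, (T.shape(z)[0], n_units // n_pieces, n_pieces))
y = T.max(y, axis=2)                   # maxout: keep the largest of each group of 3 units
f = theano.function([z], y)
print(f(np.arange(12.0).reshape(2, 6).astype(theano.config.floatX)))
# -> [[ 2.  5.] [ 8. 11.]]: the max of each consecutive triple of units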


Example 6: get_train

def get_train(U_Ot, U_R, lenW, n_facts):
    def phi_x1(x_t, L):
        return T.concatenate([L[x_t].reshape((-1,)), zeros((2*lenW,)), zeros((3,))], axis=0)
    def phi_x2(x_t, L):
        return T.concatenate([zeros((lenW,)), L[x_t].reshape((-1,)), zeros((lenW,)), zeros((3,))], axis=0)
    def phi_y(x_t, L):
        return T.concatenate([zeros((2*lenW,)), L[x_t].reshape((-1,)), zeros((3,))], axis=0)
    def phi_t(x_t, y_t, yp_t, L):
        return T.concatenate([zeros(3*lenW,), T.stack(T.switch(T.lt(x_t,y_t), 1, 0), T.switch(T.lt(x_t,yp_t), 1, 0), T.switch(T.lt(y_t,yp_t), 1, 0))], axis=0)
    def s_Ot(xs, y_t, yp_t, L):
        result, updates = theano.scan(
            lambda x_t, t: T.dot(T.dot(T.switch(T.eq(t, 0), phi_x1(x_t, L).reshape((1,-1)), phi_x2(x_t, L).reshape((1,-1))), U_Ot.T),
                           T.dot(U_Ot, (phi_y(y_t, L) - phi_y(yp_t, L) + phi_t(x_t, y_t, yp_t, L)))),
            sequences=[xs, T.arange(T.shape(xs)[0])])
        return result.sum()
    def sR(xs, y_t, L, V):
        result, updates = theano.scan(
            lambda x_t, t: T.dot(T.dot(T.switch(T.eq(t, 0), phi_x1(x_t, L).reshape((1,-1)), phi_x2(x_t, L).reshape((1,-1))), U_R.T),
                                 T.dot(U_R, phi_y(y_t, V))),
            sequences=[xs, T.arange(T.shape(xs)[0])])
        return result.sum()

    x_t = T.iscalar('x_t')
    m = [x_t] + [T.iscalar('m_o%d' % i) for i in xrange(n_facts)]
    f = [T.iscalar('f%d_t' % i) for i in xrange(n_facts)]
    r_t = T.iscalar('r_t')
    gamma = T.scalar('gamma')
    L = T.fmatrix('L') # list of messages
    V = T.fmatrix('V') # vocab
    r_args = T.stack(*m)

    cost_arr = [0] * 2 * (len(m)-1)
    updates_arr = [0] * 2 * (len(m)-1)
    for i in xrange(len(m)-1):
        cost_arr[2*i], updates_arr[2*i] = theano.scan(
            lambda f_bar, t: T.switch(T.or_(T.eq(t, f[i]), T.eq(t, T.shape(L)[0] - 1)), 0,
                                      T.largest(gamma - s_Ot(T.stack(*m[:i+1]), f[i], t, L), 0)),
            sequences=[L, T.arange(T.shape(L)[0])])
        cost_arr[2*i+1], updates_arr[2*i+1] = theano.scan(
            lambda f_bar, t: T.switch(T.or_(T.eq(t, f[i]), T.eq(t, T.shape(L)[0] - 1)), 0,
                                      T.largest(gamma + s_Ot(T.stack(*m[:i+1]), t, f[i], L), 0)),
            sequences=[L, T.arange(T.shape(L)[0])])

    cost1, u1 = theano.scan(
        lambda r_bar, t: T.switch(T.eq(r_t, t), 0, T.largest(gamma - sR(r_args, r_t, L, V) + sR(r_args, t, L, V), 0)),
        sequences=[V, T.arange(T.shape(V)[0])])

    cost = cost1.sum()
    for c in cost_arr:
        cost += c.sum()

    g_uo, g_ur = T.grad(cost, [U_Ot, U_R])

    train = theano.function(
        inputs=[r_t, gamma, L, V] + m + f,
        outputs=[cost],
        updates=[(U_Ot, U_Ot-alpha*g_uo), (U_R, U_R-alpha*g_ur)])
    return train
Author: amiltonwong | Project: memnn | Lines: 56 | Source: main.py


Example 7: __init__

 def __init__(self, input1, input2):
     x1_sub = input1[:, :, 2:-2, 2:-2]
     x1_flatten = T.flatten(x1_sub)
     x1 = T.extra_ops.repeat(x1_flatten, 25)
     x1 = T.reshape(x1, [T.shape(x1_flatten)[0], 25])
     x2 = neighbours.images2neibs(input2, neib_shape=(5, 5), neib_step=(1, 1))
     diff = x1 - x2
     new_shape = T.shape(x1_sub)*[1, 1, 5, 5]
     diff_img = neighbours.neibs2images(diff, neib_shape=(5, 5), original_shape=[1, 25, 25*5, 5*5])
     self.output = T.nnet.relu(diff_img)
Author: yangli625 | Project: ReId_theano | Lines: 10 | Source: Layer.py
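The images2neibs call above turns every sliding window of an image into one row of a matrix; here is a minimal sketch of just that primitive (toy sizes, our own variable names):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import neighbours

imgs = T.tensor4('imgs')
# every (2, 2) patch (stride 2) becomes one flattened row of the result
neibs = neighbours.images2neibs(imgs, neib_shape=(2, 2), neib_step=(2, 2))
f = theano.function([imgs], T.shape(neibs))
print(f(np.arange(16.0).reshape(1, 1, 4, 4).astype(theano.config.floatX)))
# -> [4 4]: four 2x2 patches, each flattened to a row of length 4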


Example 8: conv2D_keep_shape

def conv2D_keep_shape(x, w, image_shape, filter_shape, subsample=(1, 1)):
    # crop the 'full' convolution output back to the same size as the input
    fs = T.shape(w)[2] - 1   # filter size minus 1
    ims = T.shape(x)[2]      # image size
    # return theano.sandbox.cuda.dnn.dnn_conv(img=x, kerns=w,
    return theano.tensor.nnet.conv2d(x, w,
                                     image_shape=image_shape, filter_shape=filter_shape,
                                     border_mode='full',
                                     subsample=subsample,
                                     )[:, :, fs // 2:ims + fs // 2, fs // 2:ims + fs // 2]
Author: jxwuyi | Project: WebNav | Lines: 10 | Source: theano_utils.py
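A quick shape sanity check of the helper above (assuming it is in scope, and assuming a Theano version where conv2d still accepts the image_shape keyword, as the helper itself does):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')
w = T.tensor4('w')
out = conv2D_keep_shape(x, w, image_shape=None, filter_shape=None)
f = theano.function([x, w], T.shape(out))
print(f(np.zeros((1, 1, 7, 7), dtype=theano.config.floatX),
        np.zeros((1, 1, 3, 3), dtype=theano.config.floatX)))   # -> [1 1 7 7]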


Example 9: down_sampleT

    def down_sampleT(self, x, y, _sample_rate):
        length = tensor.cast(tensor.shape(y)[0] * _sample_rate, 'int32')
        id_max = tensor.cast(tensor.shape(y)[0] - 1, 'int32')

        def get_sub(i, x, y):
            idd = self.srng.random_integers(low=0, high=id_max)
            return [x[idd], y[idd]]

        ([dx, dy], updates) = theano.scan(fn=get_sub,
                                          outputs_info=None,
                                          sequences=tensor.arange(length),
                                          non_sequences=[x, y])
        return dx, dy, length
Author: lucktroy | Project: sugar | Lines: 11 | Source: SUGAR.py


Example 10: get_output_for

    def get_output_for(self, inputs, **kwargs):
        # For each ROI R = [batch_index x1 y1 x2 y2]: max pool over R
        input = inputs[0]
        boxes = inputs[1]
        batch = T.shape(input)[0]
        channels = T.shape(input)[1]
        height = T.shape(input)[2]
        width = T.shape(input)[3]
        num_boxes = T.shape(boxes)[0]
        # output = T.zeros((batch * num_boxes, channels, self.num_features))
        op = ROIPoolingOp(pooled_h=self.pool_dims, pooled_w=self.pool_dims, spatial_scale=self.sp_scale)
        output = op(input, boxes)
        return output[0]
Author: marcopede | Project: lasagneRCNN | Lines: 13 | Source: SPP.py


Example 11: __init__

    def __init__(self, p, *args, **kwargs):
        super().__init__(*args, **kwargs)
        try:
            self.k = tt.shape(p)[-1].tag.test_value
        except AttributeError:
            self.k = tt.shape(p)[-1]
        p = tt.as_tensor_variable(floatX(p))

        # From #2082, it may be dangerous to automatically rescale p at this
        # point without checking for positiveness
        self.p = p
        self.mode = tt.argmax(p, axis=-1)
        if self.mode.ndim == 1:
            self.mode = tt.squeeze(self.mode)
Author: aloctavodia | Project: pymc3 | Lines: 14 | Source: discrete.py


Example 12: grad

    def grad(self, inputs, cost_grad):
        """
        Notes:
        1. The gradient is computed under the assumption that perturbations
        of the input array respect triangularity, i.e. partial derivatives wrt
        triangular region are zero.
        2. In contrast with the usual mathematical presentation, in order to
        apply theano's 'reshape' function which implements row-order (i.e. C
        order), the differential expressions below have been derived based on
        the row-vectorizations of inputs 'a' and 'b'.

        See The Matrix Reference Manual,
        Copyright 1998-2011 Mike Brookes, Imperial College, London, UK
        """

        a, b = inputs
        ingrad = cost_grad
        ingrad = tensor.as_tensor_variable(ingrad)
        shp_a = (tensor.shape(inputs[0])[1],
                 tensor.shape(inputs[0])[1])
        I_M = tensor.eye(*shp_a)
        if self.lower:
            inv_a = solve_triangular(a, I_M, lower=True)
            tri_M = tril(tensor.ones(shp_a))
        else:
            inv_a = solve_triangular(a, I_M, lower=False)
            tri_M = triu(tensor.ones(shp_a))
        if b.ndim == 1:
            prod_a_b = tensor.tensordot(-b.T, inv_a.T, axes=1)
            prod_a_b = tensor.shape_padleft(prod_a_b)
            jac_veca = kron(inv_a, prod_a_b)
            jac_b = inv_a
            outgrad_veca = tensor.tensordot(ingrad, jac_veca, axes=1)
            outgrad_a = tensor.reshape(outgrad_veca,
                        (inputs[0].shape[0], inputs[0].shape[0])) * tri_M
            outgrad_b = tensor.tensordot(ingrad, jac_b, axes=1).flatten(ndim=1)
        else:
            ingrad_vec = ingrad.flatten(ndim=1)
            prod_a_b = tensor.tensordot(-b.T, inv_a.T, axes=1)
            jac_veca = kron(inv_a, prod_a_b)
            I_N = tensor.eye(tensor.shape(inputs[1])[1],
                             tensor.shape(inputs[1])[1])
            jac_vecb = kron(inv_a, I_N)
            outgrad_veca = tensor.tensordot(ingrad_vec, jac_veca, axes=1)
            outgrad_a = tensor.reshape(outgrad_veca,
                        (inputs[0].shape[0], inputs[0].shape[0])) * tri_M
            outgrad_vecb = tensor.tensordot(ingrad_vec, jac_vecb, axes=1)
            outgrad_b = tensor.reshape(outgrad_vecb,
                        (inputs[1].shape[0], inputs[1].shape[1]))
        return [outgrad_a, outgrad_b]
Author: c0g | Project: Theano | Lines: 50 | Source: spatial.py


Example 13: dropout_fprop

    def dropout_fprop(self, input):
        # we reduce the precision of the parameters for the computations
        self.fixed_W = apply_format(self.format, self.W, self.comp_precision, self.w_range)
        self.fixed_b = apply_format(self.format, self.b, self.comp_precision, self.b_range)

        # create the dropout mask
        # the cast is important because int * float32 = float64,
        # which pulls things off the GPU
        srng = T.shared_randomstreams.RandomStreams(self.rng.randint(999999))
        self.mask = T.cast(srng.binomial(n=1, p=self.p, size=T.shape(input)), theano.config.floatX)

        # apply the mask
        self.fixed_x = input * self.mask

        # weighted sum
        self.z = T.dot(self.fixed_x, self.fixed_W) + self.fixed_b
        self.fixed_z = apply_format(self.format, self.z, self.comp_precision, self.z_range)

        # activation
        self.y = self.activation(self.fixed_z)
        self.fixed_y = apply_format(self.format, self.y, self.comp_precision, self.y_range)

        # return the output
        return self.fixed_y
Author: MatthieuCourbariaux | Project: deep-learning-multipliers | Lines: 25 | Source: layer.py
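The key tensor.shape usage above is sizing the dropout mask from the symbolic shape of the input. A stripped-down sketch of just that piece (our own names, fixed drop probability):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

x = T.matrix('x')
srng = RandomStreams(seed=999999)
# the mask follows whatever shape the input turns out to have at runtime;
# the cast keeps int * float32 from being promoted to float64
mask = T.cast(srng.binomial(n=1, p=0.5, size=T.shape(x)), theano.config.floatX)
dropout = theano.function([x], x * mask)
print(dropout(np.ones((2, 4), dtype=theano.config.floatX)))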


Example 14: bbprop

    def bbprop(self):
        self.lin_bbprop = self.p_y_given_x - self.p_y_given_x * self.p_y_given_x
        self.lin_bbprop /= T.shape(self.p_y_given_x)[0]
        self.dict_bbprop = {}
        self.dict_bbprop.update({self.b_upmask: T.sum(self.lin_bbprop, 0)})
        self.dict_bbprop.update({self.W_upmask: T.dot(T.transpose(self.inp * self.inp), self.lin_bbprop)})
        return T.dot(self.lin_bbprop, T.transpose(self.W * self.W)), self.dict_bbprop
Author: sqxiang | Project: DeepANN | Lines: 7 | Source: Logistic_regression.py


Example 15: timestep

    def timestep(predictions, label, len_example, total_len_example):
        label_binary = T.gt(label[0:len_example - 1], 0)
        oov_count = T.shape(label_binary)[0] - T.sum(label_binary)

        a = total_len_example
        return T.sum(T.log(1. / predictions[T.arange(len_example - 1), label[0:len_example - 1]]) * label_binary), oov_count
Author: darongliu | Project: Lstm_Turing_LM | Lines: 7 | Source: lm_v4.py


Example 16: predict

    def predict(self, input):   # input is an array of vectors (2-D np.array)
        self.input = input
        padw = int(self.window / 2)
        if padw > 0:
            padding = np.asarray([np.zeros((self.dim_in,), dtype=theano.config.floatX)] * padw)
            inp = T.concatenate((padding, input, padding), axis=0)
        else:
            inp = self.input
        seq = T.arange(T.shape(inp)[0] - self.window + 1)
        self.input, _ = theano.scan(lambda v: inp[v:v + self.window].flatten(), sequences=seq)

        # initialize the gates
        out = theano.shared(np.zeros((self.dim_out,), dtype=theano.config.floatX))

        # gate computations
        def rnn_step(x, h_prev):
            if self.use_bias:
                out = T.nnet.sigmoid(T.dot(x, self.Wx) + T.dot(h_prev, self.Wh) + self.b)
            else:
                out = T.nnet.sigmoid(T.dot(x, self.Wx) + T.dot(h_prev, self.Wh))
            return out

        self.output, _ = theano.scan(fn=rnn_step,
                                     sequences=dict(input=self.input, taps=[0]),
                                     outputs_info=[out])
        if self.use_last_output:
            self.output = self.output[-1]
        if self.pooling is not None:
            self.output = self.pooling(self.output)
        return self.output
Author: singhalprerana | Project: Nucleus | Lines: 30 | Source: neural_net_classes.py
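The windowing trick at the top of predict (a scan over T.arange(T.shape(inp)[0] - window + 1)) is worth isolating; here is a minimal sketch with toy sizes and our own names:

import numpy as np
import theano
import theano.tensor as T

inp = T.matrix('inp')                  # (timesteps, dim)
window = 3
seq = T.arange(T.shape(inp)[0] - window + 1)
# each output row is `window` consecutive input rows, flattened
windows, _ = theano.scan(lambda v: inp[v:v + window].flatten(), sequences=seq)
f = theano.function([inp], T.shape(windows))
print(f(np.zeros((10, 4), dtype=theano.config.floatX)))   # -> [ 8 12]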


Example 17: hessian

def hessian(objective, argument):
    """
    Compute the directional derivative of the gradient
    (which is equal to the hessian multiplied by direction).
    """
    g = T.grad(objective, argument)

    # Create a new tensor A, which has the same type (i.e. same dimensionality)
    # as argument.
    A = argument.type()

    try:
        # First attempt efficient 'R-op', this directly calculates the
        # directional derivative of the gradient, rather than explicitly
        # calculating the hessian and then multiplying.
        R = T.Rop(g, argument, A)
    except NotImplementedError:
        shp = T.shape(argument)
        H = T.jacobian(g.flatten(), argument).reshape(
                                        T.concatenate([shp, shp]), 2*A.ndim)
        R = T.tensordot(H, A, A.ndim)

    try:
        hess = theano.function([argument, A], R, on_unused_input='raise')
    except theano.compile.UnusedInputError:
        warn('Theano detected unused input - suggests hessian may be zero or '
             'constant.')
        hess = theano.function([argument, A], R, on_unused_input='ignore')
    return hess
Author: gitter-badger | Project: pymanopt | Lines: 29 | Source: _theano.py
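A small usage sketch for the helper above (assuming hessian and its imports are in scope): for cost = sum(x**2) the Hessian is 2I, so the directional derivative of the gradient along direction d is simply 2d:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
cost = T.sum(x ** 2)
hess = hessian(cost, x)
xv = np.array([1.0, 2.0], dtype=theano.config.floatX)
d = np.array([1.0, 0.0], dtype=theano.config.floatX)
print(hess(xv, d))   # -> [ 2.  0.]
# (for this quadratic the R-op result no longer references x, so the helper
# falls back to on_unused_input='ignore' and emits its warning)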


Example 18: cross_entropy_cost

def cross_entropy_cost(target, output, output_act, in_sided, out_sided, in_bounded, out_bounded, act):
    assert in_bounded
    #assert out_bounded
    scale_bb = 1.
    if in_bounded != 1.:
        target = target / in_bounded
    #if out_bounded != 1.:
    #    output = output / out_bounded
    #    scale_bb = 1. / out_bounded
    if not in_sided:
        target = (target+1)/(2.0)
    if not out_sided:
        output= (output+1)/(2.0)
        scale_bb = scale_bb / 2.
    ddXE = target * scale_bb * 1./(output * output) + (1 - target) * scale_bb * 1./((1 - output) * (1-output))
    ddXE /= T.shape(ddXE)[0]
    ddXE = T.cast(ddXE,dtype=theano.config.floatX)
    if act in ['sigmoid','tanh','tanhnorm','abstanh','abstanhnorm']:
        if act == 'sigmoid':
            return sigmoid_cross_entropy(target, output_act,ddXE)
        if act == 'tanh':
            return tanh_cross_entropy(target, output_act,ddXE)
        if act == 'tanhnorm':
            return tanhnorm_cross_entropy(target, output_act,ddXE)
        if act == 'abstanh':
            return abstanh_cross_entropy(target, output_act,ddXE)
        if act == 'abstanhnorm':
            return abstanhnorm_cross_entropy(target, output_act,ddXE)
    else:
        XE = target * T.log(output) + (1 - target) * T.log(1 - output)
        return [[-T.mean(T.sum(XE, axis=1),axis=0)] , ddXE]
Author: glorotxa | Project: DeepANN | Lines: 31 | Source: Reconstruction_cost.py


Example 19: get_cost_updates

    def get_cost_updates(self, corruption_level, learning_rate,cost_function_name):
        """
            This function computes the cost and the updates for one trainng
            step of the dA
        """
        #print str(self.activation)
        tilde_x = self.get_corrupted_input(self.x, corruption_level)
        #print self.activation
        y = self.get_hidden_values(tilde_x)
        z = self.get_reconstructed_input(y)
        cost=None
        
        if cost_function_name=='cross_entropy':
            #print 'cross_entropy..'
            L = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1-z), axis=1)
            cost = T.mean(L)  # sum over all units, then average over the batch
        if cost_function_name=='sqr_error':
            L = (T.sum(T.square(T.abs_(self.x - z)) / 2., axis=0)) / T.cast(T.shape(self.x)[0], 'float32')
            #theano.printing.debugprint(obj=cost,print_type=True)
            #printdebug.debugprint(cost)
            cost=T.mean(L)
            
        cost = T.cast(cost, 'float32')  # cast returns a new variable, so assign the result
        #print cost
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters
        gparams = T.grad(cost, self.params)
        # generate the list of updates
        updates = [
            (param, param - learning_rate * gparam)
            for param, gparam in zip(self.params, gparams)
        ]

        return (cost, updates)
Author: zjh-nudger | Project: BioNLP-ST2016 | Lines: 34 | Source: dA.py


Example 20: MaxOut

def MaxOut(z, *args):
    # z = T.dot(W, X) + B.dimshuffle(0, 'x')
    d = T.shape(z)
    n_elem = args[0]
    z = z.reshape((d[0] // n_elem, n_elem, d[1]))
    a = T.max(z, axis=1)
    return a
Author: SoftServeSAG | Project: DRL | Lines: 7 | Source: fTheanoNNclassCORE.py
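Treating MaxOut above as a free function, here is a quick numeric check (rows are grouped in pairs along the first axis and reduced with an elementwise max):

import numpy as np
import theano
import theano.tensor as T

z = T.matrix('z')
f = theano.function([z], MaxOut(z, 2))   # 2 linear pieces per unit
print(f(np.arange(12.0).reshape(4, 3).astype(theano.config.floatX)))
# -> [[ 3.  4.  5.] [ 9. 10. 11.]]: the elementwise max of rows 0/1 and of rows 2/3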



Note: The theano.tensor.shape examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects contributed by various programmers; copyright belongs to the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.

