Python extra_ops.repeat function code examples


This article collects typical usage examples of the Python function theano.tensor.extra_ops.repeat. If you have been wondering how repeat is used in practice, how to call it, or what real-world uses look like, the hand-picked code examples below should help.



The following presents 20 code examples of the repeat function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
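Before diving into the examples, a minimal sketch of the function itself may be useful. extra_ops.repeat mirrors numpy.repeat: it repeats the elements of a symbolic tensor along a given axis, and the repeat count can be a Python integer, a symbolic scalar, or a vector giving a per-element count. The following is only an illustrative sketch (it assumes a working Theano installation; the variable names are chosen for this sketch):

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.extra_ops import repeat

x = T.matrix('x')        # symbolic 2-D input
r = T.iscalar('r')       # symbolic scalar repeat count

# Repeat every row r times along axis 0, exactly like np.repeat.
f = theano.function([x, r], repeat(x, r, axis=0))

a = np.arange(6, dtype=theano.config.floatX).reshape(2, 3)
assert np.allclose(f(a, 2), np.repeat(a, 2, axis=0))

# A vector of counts repeats each element a different number of times.
counts = T.ivector('counts')
g = theano.function([x, counts], repeat(x, counts, axis=1))
assert np.allclose(g(a, np.array([1, 2, 3], dtype='int32')),
                   np.repeat(a, [1, 2, 3], axis=1))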

Example 1: reverseConv

    def reverseConv(self, activations, img_shape, flipped_filter, dim2=1):

        # Reverse the max-pooling step first
        self.zp = activations.reshape((self.output.shape[0] * self.output.shape[1] *
                                       self.output.shape[2], self.output.shape[3]))
        lengthen = repeat(activations, self.poolsize[0], axis=2)
        self.lengthen = repeat(lengthen, self.poolsize[1], axis=3)
        self.w_shape = self.W.shape
        self.changed_W = self.W.dimshuffle(1, 0, 2, 3)

        # Reverse the convolutional step
        rev_conv_out = conv.conv2d(input=self.lengthen,
                                   filters=self.changed_W[:, :, ::-1, ::-1],
                                   filter_shape=flipped_filter,
                                   image_shape=img_shape,
                                   border_mode='full')

        # Convert from 'full' to 'same'
        s1 = numpy.floor((self.filter_shape[2] - 1) / 2.0).astype(int)
        e1 = numpy.ceil((self.filter_shape[2] - 1) / 2.0).astype(int)

        # The time axis stays 'same' in both directions; the frequency axis
        # is 'valid' going forward and 'full' going backward.
        if dim2:  # convert from 'full' to 'valid'
            s2 = numpy.floor((self.filter_shape[3] - 1) / 2.0).astype(int)
            e2 = numpy.ceil((self.filter_shape[3] - 1) / 2.0).astype(int)
            if s1 == e1:
                rev_conv_out = rev_conv_out[:, :, :, s2:-e2]
            else:
                rev_conv_out = rev_conv_out[:, :, s1:-e1, s2:-e2]
        else:
            rev_conv_out = rev_conv_out[:, :, s1:-e1, :]

        self.reverseOutput = rev_conv_out
Developer: sl3368 | Project: DeepBirdBrain | Lines: 29 | Source: layer_classes.py


Example 2: test_repeatOp

    def test_repeatOp(self):
        for ndim in range(3):
            x = T.TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.discrete_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if dtype in self.numpy_unsupported_dtypes:
                        self.assertRaises(TypeError,
                                repeat, x, r_var, axis=axis)
                    else:
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))

                        r_var = T.vector(dtype=dtype)
                        if axis is None:
                            r = np.random.randint(
                                    1, 6, size=a.size).astype(dtype)
                        else:
                            r = np.random.randint(
                                    1, 6, size=(10,)).astype(dtype)

                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))
Developer: 317070 | Project: Theano | Lines: 30 | Source: test_extra_ops.py


Example 3: output

    def output(self, input, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        ###--- Unpool + conv
        # convolve input feature maps with filters
        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=unpool_out,
                kerns=self.W,
                subsample=(1,1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type')  

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
Developer: codeaudit | Project: mmdgm | Lines: 34 | Source: UnpoolConvNon_DNN_DNN.py
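The nested call Textra.repeat(Textra.repeat(input, poolsize[0], axis=2), poolsize[1], axis=3) seen above (and again in examples 4, 9, 18, and 20) is the standard Theano idiom for unpooling: each activation of a (batch, channels, height, width) feature map is expanded into a poolsize[0] x poolsize[1] block, and multiplying by a 0/1 mask then keeps a single entry per block. Below is a small self-contained sketch of just that expansion step (names and sizes are illustrative; it assumes Theano is installed):

import numpy as np
import theano
import theano.tensor as T
import theano.tensor.extra_ops as Textra

poolsize = (2, 2)
x = T.tensor4('x')
# Repeat along height (axis 2), then width (axis 3): every activation
# becomes a poolsize[0] x poolsize[1] block.
unpooled = Textra.repeat(Textra.repeat(x, poolsize[0], axis=2),
                         poolsize[1], axis=3)
f = theano.function([x], unpooled)

a = np.arange(4, dtype=theano.config.floatX).reshape(1, 1, 2, 2)
print(f(a).shape)   # (1, 1, 4, 4): each value now fills a 2x2 block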


Example 4: drop_output

    def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        ###--- Unpool + conv
        # convolve input feature maps with filters
        if self.border_mode == 'valid':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='valid'
            )
        elif self.border_mode == 'same':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
            padding_w = theano.shared((self.filter_shape[2] - 1) // 2)
            padding_h = theano.shared((self.filter_shape[3] - 1) // 2)
            conv_out = conv_out[:, :, padding_w:-padding_w, padding_h:-padding_h]
        elif self.border_mode == 'full':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
        else:
            raise Exception('Unknown conv type')

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        output= (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
        droppedOutput = nonlinearity.dropout(rng, output, p)
        return T.switch(T.neq(drop, 0), droppedOutput, output)
Developer: codeaudit | Project: mmdgm | Lines: 59 | Source: UnpoolConvNon.py


Example 5: test_repeatOp

    def test_repeatOp(self):
        for ndim in [1, 3]:
            x = T.TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.integer_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if (dtype == 'uint64' or
                            (dtype in self.numpy_unsupported_dtypes and
                                r_var.ndim == 1)):
                        self.assertRaises(TypeError, repeat, x, r_var, axis=axis)
                    else:
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))

                        r_var = T.vector(dtype=dtype)
                        if axis is None:
                            r = np.random.randint(
                                1, 6, size=a.size).astype(dtype)
                        else:
                            r = np.random.randint(
                                1, 6, size=(10,)).astype(dtype)

                        if dtype in self.numpy_unsupported_dtypes and r_var.ndim == 1:
                            self.assertRaises(TypeError,
                                              repeat, x, r_var, axis=axis)
                        else:
                            f = theano.function([x, r_var],
                                                repeat(x, r_var, axis=axis))
                            assert np.allclose(np.repeat(a, r, axis=axis),
                                               f(a, r))

                        # check when r is a list of single integer, e.g. [3].
                        r = np.random.randint(
                            1, 11, size=()).astype(dtype) + 2
                        f = theano.function([x],
                                            repeat(x, [r], axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a))
                        assert not np.any([isinstance(n.op, RepeatOp)
                                           for n in f.maker.fgraph.toposort()])

                        # check when r is a theano tensor whose broadcastable is (True,)
                        r_var = theano.tensor.TensorType(broadcastable=(True,),
                                                         dtype=dtype)()
                        r = np.random.randint(1, 6, size=(1,)).astype(dtype)
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r[0], axis=axis),
                                           f(a, r))
                        assert not np.any([isinstance(n.op, RepeatOp)
                                           for n in f.maker.fgraph.toposort()])
Developer: Thrandis | Project: Theano | Lines: 56 | Source: test_extra_ops.py
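Incidentally, the asserts at the end of example 5 document an optimization worth knowing: when the repeat count is a compile-time constant (for instance a one-element list) or a (True,)-broadcastable vector, Theano rewrites the RepeatOp node into simpler allocation ops during compilation. A small sketch of how to verify this on your own build (assumes Theano; the exact replacement ops vary by version):

import theano
import theano.tensor as T
from theano.tensor.extra_ops import repeat, RepeatOp

x = T.matrix('x')
# A constant repeat count wrapped in a list, as in the test above.
f = theano.function([x], repeat(x, [3], axis=0))

# RepeatOp should have been optimized out of the compiled graph.
print([type(n.op).__name__ for n in f.maker.fgraph.toposort()])
assert not any(isinstance(n.op, RepeatOp)
               for n in f.maker.fgraph.toposort())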


Example 6: step

    def step(time_idx,lstm_hidden):
        M_pad = repeat(P.memory_init.dimshuffle((0,'x',1)) , lstm_hidden.shape[1] , axis=1 )
        M_curr_temp = T.concatenate([M_pad , lstm_hidden[:time_idx,:,:]] , axis=0)
        M_curr      = M_curr_temp.transpose((1,0,2))
        input_curr  = lstm_hidden[time_idx,:,:]

        weight_prev = T.zeros([input_curr.shape[0] , time_idx+1])
        weight_inter = weight_prev

        for head in heads:
            weight_inter, att_w_inter, key = build_head_curr(
                weight_inter, M_curr , head, input_curr)

        weight_curr = weight_inter
        entropy_temp = -1*(weight_curr*T.log(weight_curr))
        entropy = T.sum(entropy_temp , axis=1)

        key_normalize = T.nnet.softmax(key)
        key_entropy_temp = -1*(key_normalize*T.log(key_normalize))
        key_entropy = T.sum(key_entropy_temp , axis=1)

        att_w_curr  = att_w_inter

        att_M_curr = att_w_curr.dimshuffle(0,'x',1)*M_curr
        read_curr = build_read(att_M_curr, weight_curr)
        output = controller(input_curr, read_curr)

        return output,entropy,key_entropy
Developer: darongliu | Project: Lstm_Turing_LM | Lines: 28 | Source: model.py


Example 7: output

 def output(self, dropout_active=False):
     X = self.embedded()
     out, _ = theano.scan(self.op.step,
                          sequences=[X],
                          outputs_info=[repeat(self.op.id, X.shape[1], axis=0)]
                      )
     return out[-1]
Developer: gchrupala | Project: imaginet | Lines: 7 | Source: layers.py
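The expression repeat(self.op.id, X.shape[1], axis=0) inside outputs_info is a pattern that recurs in several later examples (11, 12, 13, 14, and 15): a single initial state of shape (1, hidden) is tiled across the batch dimension so that theano.scan carries one copy per sample. Here is a reduced sketch of that broadcast, with hypothetical sizes and a placeholder recurrence rather than the layer logic of any example above:

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.extra_ops import repeat

n_hidden = 4
h0 = theano.shared(np.zeros((1, n_hidden), dtype=theano.config.floatX))
X = T.tensor3('X')   # (time, batch, features), features == n_hidden here

def step(x_t, h_tm1):
    # Placeholder recurrence; a real layer would gate x_t and h_tm1.
    return h_tm1 + x_t

out, _ = theano.scan(step,
                     sequences=[X],
                     outputs_info=[repeat(h0, X.shape[1], axis=0)])
f = theano.function([X], out)
print(f(np.ones((3, 2, n_hidden), dtype=theano.config.floatX)).shape)
# (3, 2, 4): one hidden state per sample at every time step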


Example 8: step

    def step(time_idx,lstm_hidden):
        M_pad = repeat(P.memory_init.dimshuffle((0,'x',1)) , lstm_hidden.shape[1] , axis=1 )
        M_curr_temp = T.concatenate([M_pad , lstm_hidden[:time_idx,:,:]] , axis=0)
        M_curr      = M_curr_temp.transpose((1,0,2))
        input_curr  = lstm_hidden[time_idx,:,:]

        weight_prev = T.zeros([input_curr.shape[0] , time_idx+1])
        weight_inter = weight_prev

        for head in heads:
            weight_inter, att_w_inter = build_head_curr(
                weight_inter, M_curr , head, input_curr)

        weight_curr = weight_inter
        pad_matrix = T.zeros((input_curr.shape[0],lstm_hidden.shape[0]-weight_curr.shape[1]),dtype='float32')
        weight_pad = T.concatenate([weight_curr,pad_matrix],axis=1)
        entropy_temp = -1*(weight_curr*T.log(weight_curr))
        entropy = T.sum(entropy_temp , axis=1)
        att_w_curr  = att_w_inter

        att_M_curr = att_w_curr.dimshuffle(0,'x',1)*M_curr
        read_curr = build_read(att_M_curr, weight_curr)
        output = controller(input_curr, read_curr)

        return output,entropy,weight_pad
Developer: darongliu | Project: Lstm_Turing_LM | Lines: 25 | Source: model.py


Example 9: drop_output

    def drop_output(self, input, drop=0, rng=None, p=0.5, n_batch=None):
        ###--- Unpool

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.mask

        image_shape = list(self.image_shape)
        if n_batch is not None:
            image_shape[0] = n_batch

        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=unpool_out,
                kerns=self.W,
                subsample=(1,1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type')
        
        if self.cnorm:
            print('cnorm size %d' % (self.filter_shape[0] // 8 + 1))
            conv_out = ContrastCrossChannels.ContrastCrossChannels(
                input=conv_out, n=self.filter_shape[0] // 8 + 1)

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        output= (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
        droppedOutput = nonlinearity.dropout(rng, output, p)
        return T.switch(T.neq(drop, 0), droppedOutput, output)
Developer: codeaudit | Project: mmdgm | Lines: 38 | Source: UnpoolConvNon_GauInit_DNN.py


Example 10: step

    def step(time_idx,lstm_hidden,input_hidden,weighted_mem):#lstm_hidden is used to generate weight
        M_pad = repeat(P.memory_init.dimshuffle((0,'x',1)) , lstm_hidden.shape[1] , axis=1 )
        weighted_M_pad = repeat(P.weighted_memory_init.dimshuffle((0,'x',1)) , lstm_hidden.shape[1] , axis=1 )

        M_curr_temp = T.concatenate([M_pad , lstm_hidden[:time_idx,:,:]] , axis=0)
        weighted_M_curr_temp = T.concatenate([weighted_M_pad , weighted_mem[:time_idx,:,:]] , axis=0)

        M_curr      = M_curr_temp.transpose((1,0,2))
        weighted_M_curr      = weighted_M_curr_temp.transpose((1,0,2))
        input_curr  = input_hidden[time_idx,:,:]

        weight_prev = T.zeros([input_curr.shape[0] , time_idx+1])
        weight_inter = weight_prev

        for head in heads:
            weight_inter = build_head_curr(
                weight_inter, M_curr , head, input_curr)

        weight_curr = weight_inter

        read_curr = build_read(weighted_M_curr, weight_curr)
        output = controller(input_curr, read_curr)

        return output
Developer: darongliu | Project: Lstm_Turing_LM | Lines: 24 | Source: model.py


Example 11: output

 def output(self, dropout_active=False):
     X = self.l_in.output(dropout_active=dropout_active)
     if self.p_drop > 0. and dropout_active:
         X = dropout(X, self.p_drop)
     x_in = T.dot(X, self.w_in) + self.b_in
     out, _ = theano.scan(self.step,
         sequences=[x_in],
         outputs_info=[repeat(self.h0, x_in.shape[1], axis=0)],
         non_sequences=[self.w_rec],
         truncate_gradient=self.truncate_gradient
     )
     if self.seq_output:
         return out
     else:
         return out[-1]
Developer: gotomypc | Project: Passage | Lines: 15 | Source: layers.py


Example 12: fawn_recurrent

def fawn_recurrent(
    inpt_mean, inpt_var, weights_mean, weights_var,
    f,
    initial_mean, initial_var):

    f_transfer = lookup(f, transfer_)
    def step(inpt_mean, inpt_var, him_m1, hiv_m1, hom_m1, hov_m1):
        wm, wv = weights_mean, weights_var

        pres_mean = T.dot(inpt_mean, wm)
        pres_var = (T.dot(inpt_mean ** 2, wv)
                    + T.dot(inpt_var, wm ** 2)
                    + T.dot(inpt_var, wv)
                    )

        post_mean, post_var = f_transfer(pres_mean, pres_var)
        return pres_mean, pres_var, post_mean, post_var


    if initial_mean.ndim == 1:
        initial_mean = repeat(
            initial_mean.dimshuffle('x', 0), inpt_mean.shape[1], axis=0)
    if initial_var.ndim == 1:
        initial_var = repeat(
            initial_var.dimshuffle('x', 0), inpt_mean.shape[1], axis=0)

    (hidden_in_mean_rec, hidden_in_var_rec, hidden_mean_rec, hidden_var_rec), _ = theano.scan(
        step,
        sequences=[inpt_mean, inpt_var],
        outputs_info=[T.zeros_like(inpt_mean[0]),
                      T.zeros_like(inpt_mean[0]),
                      initial_mean,
                      initial_var])

    return (hidden_in_mean_rec, hidden_in_var_rec,
            hidden_mean_rec, hidden_var_rec)
Developer: Wiebke | Project: breze | Lines: 36 | Source: sequential.py


Example 13: recurrent_layer_stateful

def recurrent_layer_stateful(hidden_inpt, hidden_to_hidden, f, initial_hidden):
    def step(x, s_m1, hi_tm1, h_tm1):
        hi = T.dot(h_tm1, hidden_to_hidden)
        hi += x
        s, h = f(s_m1, hi)
        return s, hi, h

    initial_hidden_b = repeat(
        initial_hidden.dimshuffle('x', 0), hidden_inpt.shape[1], axis=0)

    (states, hidden_in_rec, hidden_rec), _ = theano.scan(
        step,
        sequences=hidden_inpt,
        outputs_info=[
            T.zeros_like(initial_hidden_b),
            T.zeros_like(hidden_inpt[0]),
            initial_hidden_b])

    return states, hidden_in_rec, hidden_rec
Developer: Wiebke | Project: breze | Lines: 19 | Source: rnn.py


Example 14: recurrent_layer

def recurrent_layer(hidden_inpt, hidden_to_hidden, f, initial_hidden):
    def step(x, hi_tm1):
        h_tm1 = f(hi_tm1)
        hi = T.dot(h_tm1, hidden_to_hidden) + x
        return hi

    # Modify the initial hidden state to obtain several copies of
    # it, one per sample.
    initial_hidden_b = repeat(initial_hidden, hidden_inpt.shape[1], axis=0)
    initial_hidden_b = initial_hidden_b.reshape(
        (hidden_inpt.shape[1], hidden_inpt.shape[2]))

    hidden_in_rec, _ = theano.scan(
        step,
        sequences=hidden_inpt,
        outputs_info=[initial_hidden_b])

    hidden_rec = f(hidden_in_rec)

    return hidden_in_rec, hidden_rec
Developer: ddofer | Project: breze | Lines: 20 | Source: rnn.py


Example 15: output

 def output(self, pool=True):
     X = self.input
     if self.backward:
         # flip along second axis
         X = X[:, ::-1]
         self.mask = self.mask[:, ::-1]
     # shuffle dimension so scan over axis 1
     X = X.dimshuffle(1, 0, 2)
     if self.mask is not None:
         mask = self.mask.dimshuffle(1, 0)
         seq_input = [mask, X]
         step = self.step_masked
     else:
         seq_input = [X]
         step = self.step
     out, _ = theano.scan(
         step,
         sequences=seq_input,
         outputs_info=[repeat(self.h0, X.shape[1], axis=0)],
         non_sequences=[self.u_z, self.u_r, self.u_h],
         truncate_gradient=self.truncate_gradient
     )
     # shuffle dimension back
     out = out.dimshuffle(1, 0, 2)
     if pool:
         if self.mask is not None:
             out = (out * self.mask[:, :, None]).sum(axis=1)
             out = out / self.mask.sum(axis=1)[:, None]
             return out
         return T.mean(out, axis=1)
     elif self.seq_output:
         if self.mask is not None:
             return out * self.mask[:, :, None]
         else:
             return out
     else:
         return out[-1]
Developer: csong27 | Project: NgramNeuralNetworks | Lines: 37 | Source: recurrent_layer.py


Example 16: get_interpolated_hiddens

def get_interpolated_hiddens(old_hidden,  n_timesteps,
                             n_samples, interpolation_mask,
                             number_cons_hiddens):
    '''
        old_hidden: the hidden-state matrix to be interpolated, of shape
                    number_of_hiddens * batch_size * hidden_size.
        interpolation_mask: the interpolation weights (alphas), e.g.
                    alphas = [1, 0.8, 0.6, 0.4, 0.2], passed in as a
                    function parameter.
        For example, given hiddens h1, h2, h3, ..., h_{n-1} you form the
        pairs [h1, h2], [h2, h3], ..., [h_{n-2}, h_{n-1}]: n-1 pairs in all.
        number_cons_hiddens: the number of interpolations to perform,
        i.e. the relative clock times.
    '''
    alpha = interpolation_mask
    hidden_size = 1024
    batch_size = 32


    num_cons_hiddens = number_cons_hiddens
    num_reduced_hiddens = num_cons_hiddens + 1
    number_interp = len(interpolation_mask)

    X  = old_hidden.dimshuffle(1, 0, 2)
    new_matrix2 = repeat(X, 2, axis=1)
    new_matrix2 = tensor.roll(new_matrix2, -1, axis=1)
    new_matrix2 = new_matrix2[:, 0:2*num_reduced_hiddens-2, :]
    new_matrix2 = new_matrix2.reshape([n_samples, num_cons_hiddens, 2, hidden_size])

    def _step_slice(m_, interp_mask):
        interp_ret = []
        for i in range(number_interp):
            interp_ret.append(interp_mask[i] * m_[0] + (1-interp_mask[i])* m_[1])
        return interp_ret

    _step = _step_slice

    def step_batch(m_, alpha):
        seqs = m_
        rval, updates = theano.scan(_step,
                                    sequences=seqs,
                                    non_sequences=[alpha])
        return rval

    _batch_step = step_batch
    seqs = new_matrix2
    rval, updates = theano.scan(_batch_step,
                                sequences=seqs,
                                non_sequences=[alpha])
    out=[]
    out_batch =[]
    for batch_index in range(batch_size):
        for i in range(num_cons_hiddens):
            something =  [rval[j][batch_index][i] for j in range(number_interp)]
            if i==0:
                out = something
            if i >=1:
                out  = tensor.concatenate([out, something], axis=0)
        if batch_index == 0:
            out_batch = out
        if batch_index == 1:
            out_batch = tensor.stacklists([out_batch, out])
        if batch_index > 1:
            out = tensor.reshape(out,[1, n_timesteps-2, hidden_size])
            out_batch = tensor.concatenate([out_batch, out])

    zero_pad = tensor.zeros([out_batch.shape[0], number_interp , out_batch.shape[2]])
    out_batch = tensor.concatenate([zero_pad, out_batch], axis=1)
    return out_batch
Developer: anirudh9119 | Project: mscale | Lines: 69 | Source: lm.py


Example 17: make_train

def make_train(image_size , word_size , first_hidden_size , proj_size , reg_lambda) :
    #initialize model
    P = Parameters()
    image_projecting = image_project.build(P, image_size, proj_size)
    batched_triplet_encoding , vector_triplet_encoding = triplet_encoding.build(P , word_size , first_hidden_size , proj_size)   

    image_vector = T.vector()

    #training
    correct_triplet =  [T.vector(dtype='float32') , T.vector(dtype='float32') , T.vector(dtype='float32')] #[E,R,E]
    negative_triplet = [T.matrix(dtype='float32') , T.matrix(dtype='float32') , T.matrix(dtype='float32')]

    image_projection_vector = image_projecting(image_vector)
    image_projection_matrix = repeat(image_projection_vector.dimshuffle(('x',0)) , negative_triplet[0].shape[0] , axis=0)
    correct_triplet_encoding_vector = vector_triplet_encoding(correct_triplet[0] , correct_triplet[1] , correct_triplet[2])
    negative_triplet_encoding_matrix = batched_triplet_encoding(negative_triplet[0] , negative_triplet[1] , negative_triplet[2])

    correct_cross_dot_scalar = T.dot(image_projection_vector , correct_triplet_encoding_vector)
    negative_cross_dot_vector = T.batched_dot(image_projection_matrix , negative_triplet_encoding_matrix)

    #margin cost
    zero_cost = T.zeros_like(negative_cross_dot_vector)
    margin_cost = 1 - correct_cross_dot_scalar + negative_cross_dot_vector
    cost_vector = T.switch(T.gt(zero_cost , margin_cost) , zero_cost , margin_cost)

    #regularizer cost
    params = P.values()
    l2 = T.sum(0)
    for p in params:
        l2 = l2 + (p ** 2).sum()        
    cost = T.sum(cost_vector)/T.shape(negative_triplet[0])[0] + reg_lambda * l2 #assume word vector has been put into P #unsolved
    grads = [T.clip(g, -100, 100) for g in T.grad(cost, wrt=params)]

    lr = T.scalar(name='learning rate',dtype='float32')
    train = theano.function(
        inputs=[image_vector, correct_triplet[0], correct_triplet[1], correct_triplet[2], negative_triplet[0], negative_triplet[1], negative_triplet[2], lr],
        outputs=cost,
        updates=updates.rmsprop(params, grads, learning_rate=lr),
        allow_input_downcast=True
    )

    #valid
    valid = theano.function(
        inputs=[image_vector, correct_triplet[0], correct_triplet[1], correct_triplet[2], negative_triplet[0], negative_triplet[1], negative_triplet[2]],
        outputs=cost,
        allow_input_downcast=True

    )
    #visualize
    image_project_fun = theano.function(
        inputs=[image_vector],
        outputs=image_projection_vector,
        allow_input_downcast=True
    )
    #testing
    all_triplet = [T.matrix(dtype='float32') , T.matrix(dtype='float32') , T.matrix(dtype='float32')]
    image_projection_matrix_test = repeat(image_projection_vector.dimshuffle(('x',0)) , all_triplet[0].shape[0] , axis=0)
    all_triplet_encoding_matrix = batched_triplet_encoding(all_triplet[0] , all_triplet[1] , all_triplet[2])
    all_cross_dot_vector = T.batched_dot(image_projection_matrix_test , all_triplet_encoding_matrix)

    test = theano.function(
        inputs=[image_vector, all_triplet[0], all_triplet[1], all_triplet[2]],
        outputs=all_cross_dot_vector,
        allow_input_downcast=True

    )

    return P , train , valid , image_project_fun , test
Developer: darongliu | Project: Cross_Modal_Projection | Lines: 68 | Source: train.py


Example 18: __init__

    def __init__(self, rng, input, filter_shape, image_shape, poolsize=(2, 2), border_mode='same', activation=None, mask=None):
        """
        Allocate a LeNetConvPoolLayer with shared variable internal parameters.

        :type rng: numpy.random.RandomState
        :param rng: a random number generator used to initialize weights

        :type input: theano.tensor.dtensor4
        :param input: symbolic image tensor, of shape image_shape

        :type filter_shape: tuple or list of length 4
        :param filter_shape: (number of filters, num input feature maps,
                              filter height, filter width)

        :type image_shape: tuple or list of length 4
        :param image_shape: (batch size, num input feature maps,
                             image height, image width)

        :type poolsize: tuple or list of length 2
        :param poolsize: the downsampling (pooling) factor (#rows, #cols)
        """

        assert image_shape[1] == filter_shape[1]
        self.input = input

        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = numpy.prod(filter_shape[1:])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        #   pooling size

        ###--- Change / to *

        fan_out = (filter_shape[0] * numpy.prod(filter_shape[2:]) *
                   numpy.prod(poolsize))
        # initialize weights with random weights
        W_bound = numpy.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(
            numpy.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX
            ),
            borrow=True
        )

        # the bias is a 1D tensor -- one bias per output feature map
        b_values = numpy.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)

        ###--- Unpool

        if poolsize[0] == 1 and poolsize[1] == 1:
            self.unpool_out = input
        else:
            if mask is None:
                window = np.zeros(poolsize, dtype=np.float32)
                window[0, 0] = 1
                mask = theano.shared(
                    np.tile(window.reshape([1, 1] + list(poolsize)), image_shape))

            self.unpool_out = Textra.repeat(
                Textra.repeat(input, poolsize[0], axis=2), poolsize[1], axis=3) * mask

        relu_output = (
            self.unpool_out if activation is None
            else activation(self.unpool_out)
        )

        ###--- Unpool + conv
        # convolve input feature maps with filters
        if border_mode == 'valid':
            conv_out = conv.conv2d(
                input=relu_output,
                filters=self.W,
                filter_shape=filter_shape,
                image_shape=image_shape,
                border_mode='valid'
            )
        elif border_mode == 'same':
            conv_out = conv.conv2d(
                input=relu_output,
                filters=self.W,
                filter_shape=filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
            padding_w = theano.shared((filter_shape[2] - 1) // 2)
            padding_h = theano.shared((filter_shape[3] - 1) // 2)
            conv_out = conv_out[:, :, padding_w:-padding_w, padding_h:-padding_h]
        elif border_mode == 'full':
            conv_out = conv.conv2d(
                input=relu_output,
                filters=self.W,
                filter_shape=filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
        else:
            raise Exception('Unknown conv type')

        # downsample each feature map individually, using maxpooling
#......... some code omitted here .........
Developer: codeaudit | Project: mmdgm | Lines: 101 | Source: UnpoolNonConv.py


Example 19: cnn_creator

    def cnn_creator(kernel):
        if kernel.shape[0] != 8:
            raise Exception('Expected cnn kernel with 8 subkernels.'
                            '\nReceived kernel has {0} '
                            'subkernel(s).'.format(kernel.shape[0]))

        src_data = T.tensor4(name="source_data")
        grt_data = T.tensor4(name="ground_truth_data")

        # ***********************************************************
        w1 = kernel[0]
        b1 = kernel[1]
        w2 = kernel[2]
        b2 = kernel[3]
        w3 = kernel[4]
        b3 = kernel[5]
        w4 = kernel[6]
        b4 = kernel[7]

        w1_shape = kernel[0].eval().shape
        b1_shape = kernel[1].eval().shape
        w2_shape = kernel[2].eval().shape
        b2_shape = kernel[3].eval().shape
        w3_shape = kernel[4].eval().shape
        b3_shape = kernel[5].eval().shape
        w4_shape = kernel[6].eval().shape
        b4_shape = kernel[7].eval().shape

        # ***********************************************************

        def relu(value, alpha=0.05):
            return T.switch(value > 0, value, alpha * value)

        def softmax4d(value):
            e_x = theano.tensor.exp(value - value.max(axis=1,
                                                      keepdims=True))
            return e_x / e_x.sum(axis=1, keepdims=True)

        def create_param(w_shape):
            param_values = numpy.zeros(w_shape)
            shared = theano.shared(
                numpy.asarray(param_values,
                              dtype=theano.config.floatX),
                borrow=True)
            return shared

        # ***********************************************************

        conv_1 = nnet.conv2d(input=src_data, filters=w1, ) + \
                 b1.dimshuffle('x', 0, 'x', 'x')
        pool_1 = downsample.max_pool_2d(conv_1, (2, 2))
        l1_out = relu(pool_1)

        conv_2 = nnet.conv2d(input=l1_out, filters=w2) + \
                 b2.dimshuffle('x', 0, 'x', 'x')
        pool_2 = downsample.max_pool_2d(conv_2, (2, 2))
        l2_out = relu(pool_2)

        conv_3 = nnet.conv2d(input=l2_out, filters=w3) + \
                 b3.dimshuffle('x', 0, 'x', 'x')
        pool_3 = downsample.max_pool_2d(conv_3, (2, 2))
        l3_out = relu(pool_3)

        conv_4 = nnet.conv2d(input=l3_out, filters=w4) + \
                 b4.dimshuffle('x', 0, 'x', 'x')
        pool_4 = downsample.max_pool_2d(conv_4, (2, 2))
        l4_out = relu(pool_4)

        scaled_up_y = ops.repeat(l4_out, 16, axis=2)
        scaled_up_y_x = ops.repeat(scaled_up_y, 16, axis=3)

        softmax = softmax4d(scaled_up_y_x)
        eps = 1e-7
        clipped_softmax = softmax.clip(eps, 1 - eps)

        # ***********************************************************

        max_val = clipped_softmax.argmax(axis=1, keepdims=True)

        # ***********************************************************

        ds_softmax = clipped_softmax.dimshuffle(0, 2, 3, 1)
        rs_softmax = ds_softmax.reshape((-1, 3))

        ds_grt_data = grt_data.dimshuffle(0, 2, 3, 1)
        rs_grt_data = ds_grt_data.reshape((-1, 3))

        cross = T.nnet.categorical_crossentropy(rs_softmax,
                                                rs_grt_data)

        cost = T.mean(cross)

        # ***********************************************************

        params = [w1, w2, w3, w4, b1, b2, b3, b4]
        gparams = [T.grad(cost, param) for param in params]

        # ***********************************************************

        prev_eg2_w1 = create_param(w1_shape)
#......... some code omitted here .........
Developer: TsuReX | Project: img_seg | Lines: 101 | Source: cnn_img_seg.py


Example 20: output_random_generation

    def output_random_generation(self, input, n_batch=144):
        ###--- Unpool

        image_shape = list(self.image_shape)
        image_shape[0] = n_batch
        #print '---', image_shape
        if self.random_mask is None:
            image_shape[2] //= self.poolsize[0]
            image_shape[3] //= self.poolsize[1]
            window = np.zeros(self.poolsize, dtype=np.float32)
            window[0, 0] = 1
            self.random_mask = theano.shared(
                np.tile(window.reshape([1, 1] + list(self.poolsize)), image_shape))
            image_shape[2] *= self.poolsize[0]
            image_shape[3] *= self.poolsize[1]
        #print '----', image_shape

        if self.poolsize[0] == 1 and self.poolsize[1] == 1:
            unpool_out = input
        else:
            unpool_out = Textra.repeat(Textra.repeat(input, self.poolsize[0], axis=2), self.poolsize[1], axis=3) * self.random_mask
        
        ###--- Unpool + conv
        # convolve input feature maps with filters

        if self.border_mode == 'same':
            conv_out = dnn.dnn_conv(
                img=unpool_out,
                kerns=self.W,
                subsample=(1,1),
                border_mode=self.border,
                #conv_mode='cross'
            )
        else:
            raise Exception('Unknown conv type')

        '''
        if self.border_mode == 'valid':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='valid'
            )
        elif self.border_mode == 'same':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
            padding_w = theano.shared((self.filter_shape[2] - 1) / 2)
            padding_h = theano.shared((self.filter_shape[3] - 1) / 2)
            conv_out = conv_out[:,:,padding_w:-padding_w,padding_h:-padding_h]
        elif self.border_mode == 'full':
            conv_out = conv.conv2d(
                input=unpool_out,
                filters=self.W,
                filter_shape=self.filter_shape,
                image_shape=image_shape,
                border_mode='full'
            )
        else:
            raise Exception('Unknown conv type')
        '''

        # add the bias term. Since the bias is a vector (1D array), we first
        # reshape it to a tensor of shape (1, n_filters, 1, 1). Each bias will
        # thus be broadcasted across mini-batches and feature map
        # width & height
        lin_output = conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        return (
            lin_output if self.activation is None
            else self.activation(lin_output)
        )
Developer: codeaudit | Project: mmdgm | Lines: 76 | Source: UnpoolConvNon_DNN_DNN.py



Note: The theano.tensor.extra_ops.repeat examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.

