Python tensor.ones Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.ones, drawn from real open-source projects. If you are unsure how to call ones, what arguments it takes, or what it looks like in practice, the hand-picked examples below should help.



Below are 20 code examples of the ones function, ordered by popularity by default.
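Before the examples, here is a minimal sketch of the two common call patterns: a fixed shape, and a shape borrowed from another symbolic variable at run time. Variable names are illustrative.

import theano
import theano.tensor as T

# Fixed shape, known when the graph is built.
fixed = T.ones((2, 3))
print(fixed.eval())             # [[1. 1. 1.] [1. 1. 1.]]

# Symbolic shape, taken from another variable.
x = T.matrix('x')
dynamic = T.ones(x.shape[:-1])  # one entry per row of x
f = theano.function([x], dynamic)
print(f([[0., 0.], [0., 0.]]))  # [1. 1.]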

Example 1: step_fun

 def step_fun(self):
     if self._step_fun is None:
         inputs = T.matrix('inputs')
         states_tm1 = [T.matrix('state_%d_%d_tm1' % (layer, state))
                       for layer in range(self.n_layers)
                       for state in range(self.gate0.n_states)]
         if self.gates[-1].use_attention:
             raise NotImplementedError('Stacked RNN with attention')
             # NOTE: the lines below are unreachable until the attention
             # case is implemented; kept as scaffolding from the original.
             attended = T.tensor3('attended')
             attended_dot_u = T.tensor3('attended_dot_u')
             attention_mask = T.matrix('attention_mask')
             self._step_fun = function(
                     [inputs] + states_tm1 + [
                         attended, attended_dot_u, attention_mask],
                     self.step(*([inputs, T.ones(inputs.shape[:-1])] +
                                 states_tm1 + [T.ones_like(states_tm1[0]),
                                 attended, attended_dot_u,
                                 attention_mask])),
                     name='%s_step_fun'%self.name)
         else:
             self._step_fun = function(
                     [inputs] + states_tm1,
                     self.step(*([inputs, T.ones(inputs.shape[:-1])] +
                               states_tm1 + [T.ones_like(states_tm1[0])])),
                     name='%s_step_fun'%self.name)
     return self._step_fun
Author: robertostling | Project: bnas | Lines: 26 | Source: model.py


Example 2: __init__

    def __init__(self,model,
                 dis_updater = updates.Adam(lr=sharedX(0.0002), b1=0.5, regularizer=updates.Regularizer(l2=1e-5)),
                 gen_updater = updates.Adam(lr=sharedX(0.0002), b1=0.5, regularizer=updates.Regularizer(l2=1e-5))):

        X = model.X
        Z = model.Z
        targets = T.matrix()

        genX = model.genX

        disX = model.disX
        disgenX = model.disgenX

        disX_loss = bce(disX, T.ones(disX.shape)).mean()
        disgenX_loss = bce(disgenX, T.zeros(disgenX.shape)).mean()
        genX_loss = bce(disgenX, T.ones(disgenX.shape)).mean()

        dis_loss = disX_loss + disgenX_loss
        gen_loss = genX_loss

        trainable_discrim_params = model.trainable_discrim_params
        trainable_gen_params = model.trainable_gen_params

        dis_updates = dis_updater(trainable_discrim_params, dis_loss) + model.other_discrim_updates
        gen_updates = gen_updater(trainable_gen_params, gen_loss) + model.other_gen_updates

        print('COMPILING')
        t = time()
        self._train_gen = theano.function([Z], gen_loss, updates=gen_updates)
        self._train_dis = theano.function([X, Z], dis_loss, updates=dis_updates)
        self._gen = theano.function([Z], genX)
        print('%.2f seconds to compile theano functions' % (time() - t))
Author: nPellejero | Project: deepNet | Lines: 32 | Source: gan.py
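The ones/zeros calls above implement the standard GAN target trick: labels are never stored, they are synthesized at the same shape as the discriminator output. A minimal sketch of just that idiom; d_real and d_fake are hypothetical names standing in for discriminator outputs:

import theano.tensor as T

d_real = T.matrix('d_real')  # discriminator output on real data, values in (0, 1)
d_fake = T.matrix('d_fake')  # discriminator output on generated data

bce = T.nnet.binary_crossentropy
d_loss = (bce(d_real, T.ones(d_real.shape)).mean()     # push real towards 1
          + bce(d_fake, T.zeros(d_fake.shape)).mean()) # push fake towards 0
g_loss = bce(d_fake, T.ones(d_fake.shape)).mean()      # generator wants fake -> 1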


Example 3: pos_phase_updates

    def pos_phase_updates(self, v, init_state=None, n_steps=1, mean_field=False):
        """
        Implements the positive phase sampling, which performs block Gibbs
        sampling in order to sample from p(g,h,x,y|v).
        :param v: fixed training set
        :param init_state: dictionary of initial values, or None to sample from scratch
        :param n_steps: scalar, number of Gibbs steps to perform
        :param mean_field: if True, use mean-field updates instead of sampling
        """
        if init_state is None:
            assert n_steps
            # start sampler from scratch
            init_state = OrderedDict()
            init_state['g'] = T.ones((self.batch_size,self.n_g)) * T.nnet.sigmoid(self.gbias)
            init_state['s'] = T.ones((self.batch_size,self.n_g)) * self.mu
            init_state['h'] = T.ones((self.batch_size,self.n_h)) * T.nnet.sigmoid(self.hbias)
            init_state['t'] = T.ones((self.batch_size,self.n_h)) * self.eta

        [new_g, new_s, new_h, new_t] = self.pos_phase(v,
                init_state = init_state,
                n_steps = n_steps,
                mean_field = mean_field)

        pos_states = OrderedDict()
        pos_states['g'] = new_g
        pos_states['s'] = new_s
        pos_states['h'] = new_h
        pos_states['t'] = new_t

        # update running average of positive phase activations
        pos_updates = OrderedDict()
        return pos_states, pos_updates
Author: gdesjardins | Project: hossrbm_public | Lines: 32 | Source: hossrbm_gsht.py
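All four init lines rely on the same broadcast: T.ones((batch_size, n)) times a length-n vector tiles that vector into every row. The same construction in isolation, with illustrative sizes:

import numpy as np
import theano
import theano.tensor as T

gbias = theano.shared(np.array([0., 1., -1.]))   # a length-3 bias vector
init_g = T.ones((4, 3)) * T.nnet.sigmoid(gbias)  # broadcast the row over 4 rows
print(init_g.eval())                             # 4 identical rows of sigmoid(gbias)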


Example 4: compute

 def compute(self, minibatch=1, steps=5, lrate=0.01):
     G = Generator(self.num_vis, self.num_hid)
     D = Discriminator(self.num_vis)
     for i in range(steps):
         # Sample m noise examples from Generator
         noise_samples = G.get_noise()
         # Sample m examples from data distribution
         data_examples = self._sample(minibatch)
         # Get real examples
         realX = D.output(data_examples)
         # Get generated examples
         genX = D.output(noise_samples)
         drealcost = T.mean(T.nnet.binary_crossentropy(realX, T.ones(realX.shape)))
         dgencost = T.mean(T.nnet.binary_crossentropy(genX, T.zeros(genX.shape)))  # compare the discriminator output, not the raw noise, against zeros
         gencost = T.mean(T.nnet.binary_crossentropy(genX, T.ones(genX.shape)))
         cost = drealcost + dgencost
         updates = D.update(cost.mean())
         func = theano.function([], (realX, genX), updates=updates, givens={self.x: self.data})
         print("Discriminator cost {0}: ".format(func()))
     noise_samples = G.get_noise()
     allparams = []
     for param in G.params:
         allparams.append(param)
     '''for param in D.params:
         allparams.append(param)'''
     #gencost = 1 / self.num_samples * \
     #    T.sum(T.log(1 - D.output(G.output(noise_samples))))
     grads = T.grad(T.mean(gencost), allparams)
     return gencost, [(oldparam, oldparam - lrate * newparam) for (oldparam, newparam) in zip(allparams, grads)]
Author: saromanov | Project: adversarialnets | Lines: 29 | Source: adversarialnets.py


Example 5: chi2_test_statistic

def chi2_test_statistic(M, Obs, K, num_M, num_Obs):
    #Getting frequencies from observations
    Ns = T.dot(Obs,T.ones((K,1)))
    p = Obs/Ns
        
    #Find the zeros so we can deal with them later
    pZEROs = T.eq(p, 0)
    mZEROs = T.eq(M, 0)
    
    #log probabilities, with -INF as log(0)
    lnM = T.log(M + mZEROs) - INF*mZEROs
    lnp = T.log(p + pZEROs) - INF*pZEROs


    #Using kroneker products so every row of M hits every row of P in the difference klnM - kln
    O_ones = T.ones((num_Obs,1))
    M_ones = T.ones((num_M,1))
    klnM = kron(lnM,O_ones)
    klnP = kron(M_ones, lnp)
    klnP_M = klnP - klnM
    kObs = kron(M_ones, Obs)
    
    G = 2.0*T.dot(klnP_M ,kObs.T)
    
    G = G*T.identity_like(G)
    G = T.dot(G,T.ones((num_M*num_Obs,1)))   
    G = T.reshape(G,(num_M,num_Obs))
    
    #The following quotient improves the convergence to chi^2 by an order of magnitude
    #source: http://en.wikipedia.org/wiki/Multinomial_test
    
    #numerator = T.dot(- 1.0/(M + 0.01),T.ones((K,1))) - T.ones((num_M,1))    
    #q1 = T.ones((num_M,num_Obs)) + T.dot(numerator,1.0/Ns.T/6.0)/(K-1.0)
        
    return G  # /q1 disabled; see the commented quotient above
Author: Underfit | Project: underfit | Lines: 35 | Source: chi2pvalue.py
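The Kronecker products with ones-vectors above are just row replication and block tiling, arranged so that every row of M meets every row of p. The same construction on concrete numpy arrays (shapes are illustrative):

import numpy as np

lnM = np.arange(4.).reshape(2, 2)       # stands in for log M, num_M = 2
lnp = 10 + np.arange(6.).reshape(3, 2)  # stands in for log p, num_Obs = 3

print(np.kron(lnM, np.ones((3, 1))))    # each row of lnM repeated 3 times
print(np.kron(np.ones((2, 1)), lnp))    # the whole lnp block stacked twice
# Subtracting the two therefore aligns every (M-row, Obs-row) pair,
# which is exactly what klnP - klnM does above.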


Example 6: _meshgrid

def _meshgrid(height, width, depth):
    # This function is the grid generator from eq. (1) in reference [1].
    # It is equivalent to the following numpy code:
    #  x_t, y_t,z_t = np.meshgrid(np.linspace(-1, 1, width),
    #                         np.linspace(-1, 1, height))
    #  ones = np.ones(np.prod(x_t.shape))
    #  grid = np.vstack([x_t.flatten(), y_t.flatten(), ones])
    # It is implemented in Theano instead to support symbolic grid sizes.
    # Note: If the image size is known at layer construction time, we could
    # compute the meshgrid offline in numpy instead of doing it dynamically
    # in Theano. However, it hardly affected performance when we tried.
    x_t = T.dot(
        T.reshape(T.dot(
            _linspace(-1.0, 1.0, height).dimshuffle(0, 'x'),
            T.ones((1, width))), (height, width, 1)),
        T.ones((1, 1, depth))
    )
    y_t = T.dot(
        T.reshape(T.dot(
            T.ones((height, 1)),
            _linspace(-1.0, 1.0, width).dimshuffle('x', 0)), (height, width, 1)),
        T.ones((1, 1, depth))
    )
    z_t = T.dot(T.ones((height, width, 1)), T.reshape(_linspace(-1.0, 1.0, depth), (1, 1, -1)))

    x_t_flat = x_t.reshape((1, -1))
    y_t_flat = y_t.reshape((1, -1))
    z_t_flat = z_t.reshape((1, -1))
    ones = T.ones_like(x_t_flat)
    grid = T.concatenate([x_t_flat, y_t_flat, z_t_flat, ones], axis=0)
    return grid
Author: marianocabezas | Project: cnn | Lines: 31 | Source: layers.py
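The comment inside _meshgrid gives the 2-D numpy equivalent; for reference, a sketch of the 3-D analogue with concrete sizes. This is only usable when the grid size is known in advance, which is the limitation the Theano version avoids, and whether it matches the Theano flattening order element-for-element is not checked here:

import numpy as np

height, width, depth = 2, 3, 4
x_t, y_t, z_t = np.meshgrid(np.linspace(-1, 1, width),
                            np.linspace(-1, 1, height),
                            np.linspace(-1, 1, depth))
ones = np.ones(np.prod(x_t.shape))
grid = np.vstack([x_t.flatten(), y_t.flatten(), z_t.flatten(), ones])
print(grid.shape)   # (4, height * width * depth)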


Example 7: _initial_part_matrix

 def _initial_part_matrix(self, part, size, deterministic):
     if size is None:
         size = 1
     length, dist_name, dist_map = self._choose_alternative(
         part,
         (self.local_size, self.initial_dist_local_name, self.initial_dist_local_map),
         (self.global_size, self.initial_dist_global_name, self.initial_dist_global_map)
     )
     dtype = self.symbolic_initial_global_matrix.dtype
     if length == 0:  # in this case theano fails to compute sample of correct size
         return tt.ones((size, 0), dtype)
     length = tt.as_tensor(length)
     size = tt.as_tensor(size)
     shape = tt.stack((size, length))
     # apply optimizations if possible
     if not isinstance(deterministic, tt.Variable):
         if deterministic:
             return tt.ones(shape, dtype) * dist_map
         else:
             return getattr(self._rng, dist_name)(shape)
     else:
         sample = getattr(self._rng, dist_name)(shape)
         initial = tt.switch(
             deterministic,
             tt.ones(shape, dtype) * dist_map,
             sample
         )
         return initial
Author: aasensio | Project: pymc3 | Lines: 28 | Source: opvi.py
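The length == 0 branch sidesteps a corner case where the random generator cannot produce a sample of size zero; tt.ones itself handles empty shapes fine, as this small check shows:

import theano.tensor as tt

empty = tt.ones((5, 0), 'float64')  # a perfectly valid (5, 0) tensor of ones
print(empty.eval().shape)           # (5, 0); nothing to sample for length 0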


Example 8: instantiate

    def instantiate(self, shape=None):
        # Parse shape
        shape = [None, ] * self.ndim if shape is None else shape
        initshape = tuple([shape[n] if givenshape is None else givenshape for n, givenshape in enumerate(self.shape)])
        assert all([ishp is not None for ishp in initshape]), "Given shape information not sufficient to instantiate " \
                                                              "from ghost state."

        # Initialize. If shape is a tensor variable, initialize a tensor variable and return.
        if isinstance(shape, T.vector().__class__) or not self.shared:
            # Make variable
            var = T.zeros(shape=initshape, dtype='floatX') \
                if self.value == 0. else self.value(initshape) * T.ones(shape=initshape, dtype='floatX') \
                if callable(self.value) else self.value * T.ones(shape=initshape, dtype='floatX')
            # Safety cast
            var = T.cast(var, dtype='floatX')
            var.name = self.name
            # Warn if a shared variable is requested
            if self.shared:
                warn("Provided shape variable is a theano tensor variable, it cannot be used to initialize a shared "
                     "variable.")
            # Return
            return var
        else:
            # Make variable
            var = th.shared((getattr(np, th.config.floatX)(self.value)
                             if not callable(self.value) and not np.isscalar(self.value) else
                             getattr(np, th.config.floatX)(self.value(initshape)) if callable(self.value) else
                             self.value * np.ones(shape=initshape, dtype=th.config.floatX)))

            var.name = self.name
            # Safety cast and return
            return var
Author: abailoni | Project: greedy_CNN | Lines: 32 | Source: netutils.py


Example 9: sample_h_given_v_2wise

def sample_h_given_v_2wise(v, W, Wh, bh, nh):
	phi = T.dot(v, W) + bh
	ephi = T.exp(phi)

	adder = np.zeros((nh/2, nh), dtype=theano.config.floatX)
	for i in range(len(adder)):
		adder[i, 2*i] = 1
		adder[i, 2*i+1] = 1
	adder = theano.shared(adder)
	# wobble = 1 + exp(phi_2i) + exp(phi_{2i+1}) + exp(phi_2i + phi_{2i+1} + Wh_i)
	# p(h_2i = 1 | v) = (exp(phi_2i) + exp(phi_2i + phi_{2i+1} + Wh_i)) / wobble
	# p(h_{2i+1} = 1 | v) = (exp(phi_{2i+1}) + exp(phi_2i + phi_{2i+1} + Wh_i)) / wobble
	# The second term (the pair term) is the same in both, but it must be
	# broadcast to full length (hence the kron below).
	# Dotting by adder returns a half-length vector of sums of adjacent pairs.

	pairsum = T.dot(ephi, adder.T)
	first = ephi.T[T.arange(0, nh, 2)].T
	pairprod = pairsum*first - first**2
	pairterm = pairprod*T.exp(Wh)

	wobble = 1 + pairsum + pairterm

	pairterm_broadcast = kron(pairterm.dimshuffle(0, 'x'), T.ones(2))
	wobble_broadcast = kron(wobble.dimshuffle(0, 'x'), T.ones(2))

	prop_up = (ephi + pairterm_broadcast) / wobble_broadcast

	h = theano_rng.binomial(n=1, p = prop_up, dtype=theano.config.floatX, size=(nh,), ndim=1)

	return h
Author: ebuchman | Project: minimum_probability_flow | Lines: 30 | Source: rbm.py
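The kron with T.ones(2) near the end duplicates each half-length entry twice, so the pair terms line up with the full hidden vector. The same trick on a concrete numpy array (sizes illustrative):

import numpy as np

pairterm = np.array([[10., 20.]])     # one value per hidden pair (nh/2 = 2)
print(np.kron(pairterm, np.ones(2)))  # [[10. 10. 20. 20.]]; full length nh = 4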


Example 10: pos_phase_updates

    def pos_phase_updates(self, v, init_state=None, n_steps=1):
        """
        Implements the positive phase sampling, which performs block Gibbs
        sampling in order to sample from p(g,h,x,y|v).
        :param v: fixed training set
        :param init_state: dictionary of initial values, or None to sample from scratch
        :param n_steps: scalar, number of Gibbs steps to perform
        """
        if init_state is None:
            assert n_steps
            # start sampler from scratch
            init_state = OrderedDict()
            init_state['g'] = T.ones((v.shape[0], self.n_g)) * T.nnet.sigmoid(self.gbias)
            init_state['h'] = T.ones((v.shape[0], self.n_h)) * T.nnet.sigmoid(self.hbias)

        [new_g, new_h, new_s1, new_s0, crap_v, pos_counter] = self.pos_phase(
                v, init_state=init_state, n_steps=n_steps)

        # update running average of positive phase activations
        pos_updates = OrderedDict()
        pos_updates[self.pos_counter] = pos_counter
        pos_updates[self.odd_even] = (self.odd_even + 1) % 2
        pos_updates[self.pos_g] = new_g
        pos_updates[self.pos_h] = new_h
        pos_updates[self.pos_s1] = new_s1
        pos_updates[self.pos_s0] = new_s0
        pos_updates[self.pos_s]  = self.s_hat(new_h, new_s1, new_s0)
        if self.flags['pos_phase_ch']:
            pos_updates[self.ch] = T.cast(0.999 * self.ch + 0.001 * new_h.mean(axis=0), floatX)
        return pos_updates
Author: gdesjardins | Project: hossrbm | Lines: 31 | Source: implicit_hossrbm_v05_2.py


Example 11: pos_phase_updates

    def pos_phase_updates(self, v, l=None, init_state=None, n_steps=1, mean_field=False):
        """
        Implements the positive phase sampling, which performs block Gibbs
        sampling in order to sample from p(g,h,x,y|v).
        :param v: fixed training set
        :param l: if None, sample l; otherwise clamp l to the given value
        :param init_state: dictionary of initial values, or None to sample from scratch
        :param n_steps: scalar, number of Gibbs steps to perform
        :param mean_field: if True, use mean-field updates instead of sampling
        """
        if init_state is None:
            assert n_steps
            # start sampler from scratch
            init_state = OrderedDict()
            init_state['g'] = T.ones((self.batch_size,self.n_g)) * T.nnet.sigmoid(self.gbias)
            init_state['h'] = T.ones((self.batch_size,self.n_h)) * T.nnet.sigmoid(self.hbias)
            init_state['l'] = T.ones((self.batch_size,self.n_l)) * T.nnet.softmax(self.lbias)

        outputs = self.pos_phase(v, l=l,
                init_state=init_state,
                n_steps=n_steps,
                mean_field=mean_field)

        pos_states = OrderedDict()
        pos_states['g'] = outputs[0]
        pos_states['h'] = outputs[1]
        pos_states['l'] = outputs[2] if l is None else self.input_labels

        # update running average of positive phase activations
        pos_updates = OrderedDict()
        pos_updates[self.pos_counter] = outputs[-1]
        pos_updates[self.odd_even] = (self.odd_even + 1) % 2
        return pos_states, pos_updates
Author: gdesjardins | Project: hossrbm | Lines: 33 | Source: bin_hossrbm_labels.py


Example 12: get_output

    def get_output(self, train=False):
        X = self.get_input(train=train)
        c0 = self.c0[None,:] * T.ones((X.shape[0], self.context_dim))
        cn = self.cn[None,:] * T.ones((X.shape[0], self.context_dim))
        X = T.concatenate(
            [
                T.shape_padleft(self.e0,2) * T.ones((X.shape[0], 1, X.shape[2])),
                X,
                T.shape_padleft(self.en,2) * T.ones((X.shape[0], 1, X.shape[2])),
            ],
            axis = 1
        )
        X = X.dimshuffle(1,0,2) # move timestep to the first dimension
        # Cast the int32 mask to float32; otherwise mask_t[:, None] * cl_t is
        # upcast to float64 inside scan.
        mask = T.cast(self.get_output_mask(train=train), T.config.floatX)
        mask = mask.dimshuffle(1,0) # move timestep to the first dimension
        #theano.printing.debugprint([mask], print_type=True)
        def _forward_step(e_t, e_tm1, mask_t, cl_tm1):
            #print 'e_t:', e_t.type.ndim
            #print 'cl_t:', cl_tm1.type.ndim
            cl_t = T.nnet.sigmoid(
                T.dot(cl_tm1, self.Wl) + T.dot(e_tm1, self.Wsl)
            )
            cl_t = mask_t[:, None] * cl_t + (1. - mask_t[:, None]) * cl_tm1 # masked steps inherit the previous context
            #theano.printing.debugprint([mask_t], print_type=True)
            #theano.printing.debugprint([cl_t], print_type=True)
            return cl_t
        def _backward_step(e_t, e_tp1, mask_t, cr_tp1):
            cr_t = T.nnet.sigmoid(
            T.dot(cr_tp1, self.Wr) + T.dot(e_tp1, self.Wsr))
            cr_t = mask_t[:, None] * cr_t + (1. - mask_t[:, None]) * cr_tp1 # masked steps inherit the previous context
            return cr_t
        Cl, _ = theano.scan(_forward_step,
                        sequences=[dict(input=X, taps=[0, -1]), mask],
                        outputs_info=[
                            dict(initial=c0, taps=[-1]) # initial left-to-right context
                        ],

        )
        Cr, _ = theano.scan(_backward_step,
                            sequences=[dict(input=X, taps=[0, -1]), mask],
                            outputs_info=[
                                dict(initial=cn, taps=[-1])
                            ],
                            go_backwards=True,
        )
        Cr = Cr[::-1] # flip Cr back to forward time order
        def _concatenate_activation_step(e_t, mask_t, cl_t, cr_t):
            #print theano.printing.debugprint(cr_t, print_type=True)
            h_t = T.tanh( T.dot(T.concatenate([e_t, cl_t, cr_t], axis=1), self.W2)
                       + self.b2)
            h_t = mask_t[:, None] * h_t + (1. - mask_t[:, None]) * (-10000000000.) # set masked positions to a very negative value
            return h_t

        Y, _ = theano.scan(_concatenate_activation_step,
                    sequences=[X, mask, Cl, Cr],
                    outputs_info=None,
        )
        return Y.dimshuffle(1,0,2) # move samples back to the first dimension
Author: psy2013GitHub | Project: theano_prototype | Lines: 59 | Source: BiContextLayer.py
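The mask_t[:, None] blend used in both scan steps is worth seeing on concrete numbers: masked samples simply carry their previous context forward (a numpy sketch, values illustrative):

import numpy as np

mask_t = np.array([1., 0.])              # sample 0 is real, sample 1 is padding
cl_t   = np.array([[.5, .5], [.9, .9]])  # freshly computed contexts
cl_tm1 = np.array([[.1, .1], [.2, .2]])  # contexts from the previous step
print(mask_t[:, None] * cl_t + (1. - mask_t[:, None]) * cl_tm1)
# [[0.5 0.5]   row 0 takes the new context
#  [0.2 0.2]]  row 1 keeps the old one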


Example 13: apply_log_domain

    def apply_log_domain(self, l, probs, l_len=None, probs_mask=None):
        # Does the same computation as apply, but alpha is in the log domain
        # This avoids numerical underflow issues that were not corrected in the previous version.

        def _log(a):
            return tensor.log(tensor.clip(a, 1e-12, 1e12))

        def _log_add(a, b):
            maximum = tensor.maximum(a, b)
            return (maximum + tensor.log1p(tensor.exp(a + b - 2 * maximum)))

        def _log_mul(a, b):
            return a + b

        # See comments above
        B = probs.shape[1]
        C = probs.shape[2]-1
        L = l.shape[0]
        S = 2*L+1
        
        l_blk = C * tensor.ones((S, B), dtype='int32')
        l_blk = tensor.set_subtensor(l_blk[1::2,:], l)
        l_blk = l_blk.T     # now l_blk is B x S

        alpha0 = tensor.concatenate([   tensor.ones((B, 1)),
                                        tensor.zeros((B, S-1))
                                    ], axis=1)
        alpha0 = _log(alpha0)

        l_blk_2 = tensor.concatenate([-tensor.ones((B,2)), l_blk[:,:-2]], axis=1)
        l_case2 = tensor.neq(l_blk, C) * tensor.neq(l_blk, l_blk_2)

        def recursion(p, p_mask, prev_alpha):
            prev_alpha_1 = tensor.concatenate([tensor.zeros((B,1)),prev_alpha[:,:-1]], axis=1)
            prev_alpha_2 = tensor.concatenate([tensor.zeros((B,2)),prev_alpha[:,:-2]], axis=1)

            alpha_bar1 = tensor.set_subtensor(prev_alpha[:,1:], _log_add(prev_alpha[:,1:],prev_alpha[:,:-1]))
            alpha_bar2 = tensor.set_subtensor(alpha_bar1[:,2:], _log_add(alpha_bar1[:,2:],prev_alpha[:,:-2]))

            alpha_bar = tensor.switch(l_case2, alpha_bar2, alpha_bar1)

            probs = _log(p[tensor.arange(B)[:,None].repeat(S,axis=1).flatten(), l_blk.flatten()].reshape((B,S)))
            next_alpha = _log_mul(alpha_bar, probs)
            next_alpha = tensor.switch(p_mask[:,None], next_alpha, prev_alpha)
            
            return next_alpha

        alpha, _ = scan(fn=recursion,
                             sequences=[probs, probs_mask],
                             outputs_info=[alpha0])

        last_alpha = alpha[-1]
        # last_alpha = theano.printing.Print('a-1')(last_alpha)

        prob = _log_add(last_alpha[tensor.arange(B), 2*l_len.astype('int32')-1],
                        last_alpha[tensor.arange(B), 2*l_len.astype('int32')])

        # return the negative log probability of the labellings
        return -prob
Author: CityU-HAN | Project: CTC-LSTM | Lines: 59 | Source: ctc.py
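The identity behind _log_add is log(e^a + e^b) = max(a,b) + log1p(exp(a + b - 2*max(a,b))), which stays finite where a naive exp would underflow. A quick numeric check against numpy's reference implementation:

import numpy as np

a, b = -1000.0, -1001.0                     # exp(a) and exp(b) both underflow to 0
m = max(a, b)
print(m + np.log1p(np.exp(a + b - 2 * m)))  # -999.6867...
print(np.logaddexp(a, b))                   # same value, computed by numpy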


Example 14: result

    def result(theano, TT):
        def fn(s1, s2):
            return s1 + s2

        outputs, _ = theano.scan(
            fn,
            sequences=[TT.ones(10), 2 * TT.ones(10)])
        return theano.function([], outputs)()
Author: MLDL | Project: tensorfuse | Lines: 8 | Source: test_scan.py


Example 15: scanr

 def scanr(self, x, y0=None, c0=None, mask=None, **kwargs):
     if y0 is None:
         #y0 = self.cact(self.y0)
         y0 = th.ones((x.shape[1],1))*self.y0
     if c0 is None:
         c0 = th.ones((x.shape[1],1))*self.c0
     return scanr(self.ws, y0, c0, x, mask=mask, iact=self.iact, fact=self.fact, oact=self.oact
                  , gact=self.gact, cact=self.cact, **kwargs)
Author: tbepler | Project: rnn | Lines: 8 | Source: lstm.py


Example 16: gen_img

def gen_img(shape_params, rotation_matrix, width, height, nsteps, res):
    raster_space = gen_fragcoords(width, height)
    rd, ro = make_ro(rotation_matrix, raster_space, width, height)
    a = 0 - ro # c = 0
    b = 1 - ro # c = 1
    nmatrices = rotation_matrix.shape[0]
    tn = T.reshape(a, (nmatrices, 1, 1, 3))/rd
    tf = T.reshape(b, (nmatrices, 1, 1, 3))/rd
    tn_true = T.minimum(tn,tf)
    tf_true = T.maximum(tn,tf)
    # do X
    tn_x = tn_true[:,:,:,0]
    tf_x = tf_true[:,:,:,0]
    tmin = 0.0
    tmax = 10.0
    t0 = tmin
    t1 = tmax
    t02 = T.switch(tn_x > t0, tn_x, t0)
    t12 = T.switch(tf_x < t1, tf_x, t1)
    # y
    tn_x = tn_true[:,:,:,1]
    tf_x = tf_true[:,:,:,1]
    t03 = T.switch(tn_x > t02, tn_x, t02)
    t13 = T.switch(tf_x < t12, tf_x, t12)
    #z
    tn_x = tn_true[:,:,:,2]
    tf_x = tf_true[:,:,:,2]
    t04 = T.switch(tn_x > t03, tn_x, t03)
    t14 = T.switch(tf_x < t13, tf_x, t13)

    # Shift a little bit to avoid numerical inaccuracies
    t04 = t04*1.001
    t14 = t14*0.999

    nvoxgrids = shape_params.shape[0]
    left_over = T.ones((nvoxgrids, nmatrices * width * height,))
    step_size = (t14 - t04)/nsteps
    orig = T.reshape(ro, (nmatrices, 1, 1, 3)) + rd * T.reshape(t04,(nmatrices, width, height, 1))
    xres = yres = zres = res

    orig = T.reshape(orig, (nmatrices * width * height, 3))
    rd = T.reshape(rd, (nmatrices * width * height, 3))
    step_sz = T.reshape(step_size, (nmatrices * width * height,1))

    for i in range(nsteps):
        # print "step", i
        pos = orig + rd*step_sz*i
        voxel_indices = T.floor(pos*res)
        pruned = T.clip(voxel_indices,0,res-1)
        p_int =  T.cast(pruned, 'int32')
        indices = T.reshape(p_int, (nmatrices*width*height,3))
        attenuation = shape_params[:, indices[:,0],indices[:,1],indices[:,2]]
        left_over = left_over*T.exp(-attenuation*T.flatten(step_sz))

    img = left_over
    pixels = T.reshape(img, (nvoxgrids, nmatrices, width, height))
    mask = t14>t04
    return T.switch(t14>t04, pixels, T.ones_like(pixels)), rd, ro, tn_x, T.ones((nvoxgrids, nmatrices * width * height,)), orig, shape_params
Author: zenna | Project: ig | Lines: 58 | Source: cold2.py


Example 17: f1_score

 def f1_score(self, y):
     n_total = y.shape[0]
     n_relevant_documents_predicted = T.sum(T.eq(T.ones(self.y_pred.shape), self.y_pred))
     two_vector = T.add(T.ones(self.y_pred.shape), T.ones(self.y_pred.shape))
     n_relevant_predicted_correctly = T.sum(T.eq(T.add(self.y_pred, y), two_vector))
     precision = T.true_div(n_relevant_predicted_correctly, n_relevant_documents_predicted)
     recall = T.true_div(n_relevant_predicted_correctly, n_total)
     f1_score =  T.mul(2.0, T.true_div(T.mul(precision, recall), T.add(precision, recall)))
     return [f1_score, precision, recall]
Author: ericrincon | Project: Deep-Learning-NLP | Lines: 9 | Source: LogisticRegression.py
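The F1 code counts predictions by comparing against tensors of ones and twos rather than by boolean indexing. The idiom reduces to this (numpy sketch, labels illustrative):

import numpy as np

y_pred = np.array([1, 0, 1, 1])
y_true = np.array([1, 1, 1, 0])
n_predicted = np.sum(np.equal(np.ones(y_pred.shape), y_pred))               # 3 predicted relevant
n_correct = np.sum(np.equal(y_pred + y_true, 2 * np.ones(y_pred.shape)))    # 2 true positives
print(n_predicted, n_correct)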


Example 18: backward

 def backward(self, y):
     Km1 = y.shape[0]
     k = tt.arange(Km1)[(slice(None),) + (None,) * (y.ndim - 1)]
     eq_share = -tt.log(Km1 - k)  # logit(1./(Km1 + 1 - k))
     z = inverse_logit(y + eq_share)
     yl = tt.concatenate([z, tt.ones(y[:1].shape)])
     yu = tt.concatenate([tt.ones(y[:1].shape), 1 - z])
     S = tt.extra_ops.cumprod(yu, 0)
     x = S * yl
     return x
Author: Riashat | Project: pymc3 | Lines: 10 | Source: transforms.py


Example 19: new_attention_step

 def new_attention_step(self, ct, prev_g, mem, q_q):
     cWq = T.dot(T.ones((1, self.batch_size), dtype=floatX), T.dot(T.dot(ct.T, self.W_b), q_q) * T.eye(n=self.batch_size, m=self.batch_size, dtype=floatX))
     cWm = T.dot(T.ones((1, self.batch_size), dtype=floatX), T.dot(T.dot(ct.T, self.W_b), mem) * T.eye(n=self.batch_size, m=self.batch_size, dtype=floatX))
     z = T.concatenate([ct, mem, q_q, ct * q_q, ct * mem, T.abs_(ct - q_q), T.abs_(ct - mem), cWq, cWm], axis=0)
     
     l_1 = T.dot(self.W_1, z) + self.b_1.dimshuffle(0, 'x')
     l_1 = T.tanh(l_1)
     l_2 = T.dot(self.W_2, l_1) + self.b_2.dimshuffle(0, 'x')
     G = T.nnet.sigmoid(l_2)[0]
     return G
Author: vazgenh | Project: Dynamic-memory-networks-in-Theano | Lines: 10 | Source: dmn_batch.py


Example 20: gradient

 def gradient(self, observed, at_risk):
     prediction = self.output
     risk = T.exp(prediction)
     product = self.input * (risk * T.ones((1, self.input.shape[0])))
     numerator = Te.cumsum(product[::-1])[::-1][at_risk]
     denominator = Te.cumsum(risk[::-1])[::-1][at_risk] * T.ones((1, self.input.shape[0]))
     numerator = numerator.flatten()
     denominator = denominator.flatten()
     gradient = T.dot(observed, self.input - (numerator / denominator))
     return gradient
Author: fatemeh91 | Project: SurvivalNet | Lines: 10 | Source: RiskLayer.py



Note: The theano.tensor.ones examples above were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before redistributing or reusing the code. Do not repost without permission.

