
Python tensor.zeros_like Function Code Examples


This article collects typical usage examples of the theano.tensor.zeros_like function in Python. If you are wondering what exactly zeros_like does, or how to use it, the hand-picked code examples below should help.



The following 20 zeros_like code examples are sorted by popularity by default; examples that readers found most useful rank higher.
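Before the examples, a minimal self-contained sketch of the basic behavior (the variable names here are illustrative, not taken from the examples below): zeros_like(x) returns a symbolic tensor with the same shape and dtype as x, filled with zeros, and the shape is resolved only when the compiled function is called.

import numpy
import theano
import theano.tensor as T

# A symbolic matrix; its concrete shape is only known at call time.
x = T.matrix('x')

# zeros_like builds a symbolic tensor with the same shape and dtype as x,
# filled with zeros.
z = T.zeros_like(x)

f = theano.function([x], z)
print(f(numpy.ones((2, 3), dtype=theano.config.floatX)))
# prints a (2, 3) array of zeros

This call-time shape matching is why zeros_like appears so often below: as an all-zero initial state for theano.scan, as a dummy input of the right shape, or as a zero gradient placeholder.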

Example 1: lstm

def lstm(mask, state_in, t_params, n_dim_in, n_dim_out, prefix, one_step=False, init_h=None):
    '''
    Long Short-Term Memory (LSTM) layer
    '''
    def _step(_mask, _state_in, _prev_h, _prev_c):
        _pre_act = tensor.dot(_prev_h, t_params[_concat(prefix, 'U')]) + _state_in

        _gate_i = tensor.nnet.sigmoid(_slice(_pre_act, 0, n_dim_out))
        _gate_f = tensor.nnet.sigmoid(_slice(_pre_act, 1, n_dim_out))
        _gate_o = tensor.nnet.sigmoid(_slice(_pre_act, 2, n_dim_out))

        _next_c = _gate_f * _prev_c + _gate_i * tensor.tanh(_slice(_pre_act, 3, n_dim_out))
        _next_c = _mask[:, None] * _next_c + (1. - _mask)[:, None] * _prev_c
        _next_h = _gate_o * tensor.tanh(_next_c)
        _next_h = _mask[:, None] * _next_h + (1. - _mask)[:, None] * _prev_h

        return _next_h, _next_c

    params = OrderedDict()
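    # Note (added for readability): the four concatenated blocks below line up
    # with slices 0..3 in _step: input gate, forget gate, output gate, cell candidate.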
    params[_concat(prefix, 'W')] = numpy.concatenate([ortho_weight(n_dim_in, n_dim_out), ortho_weight(n_dim_in, n_dim_out),
                                                      ortho_weight(n_dim_in, n_dim_out), ortho_weight(n_dim_in, n_dim_out)], 1)
    params[_concat(prefix, 'U')] = numpy.concatenate([ortho_weight(n_dim_out, n_dim_out), ortho_weight(n_dim_out, n_dim_out),
                                                      ortho_weight(n_dim_out, n_dim_out), ortho_weight(n_dim_out, n_dim_out)], 1)
    params[_concat(prefix, 'b')] = numpy.zeros((4 * n_dim_out,), config.floatX)
    init_t_params(params, t_params)

    state_in = (tensor.dot(state_in, t_params[_concat(prefix, 'W')]) + t_params[_concat(prefix, 'b')])
    if init_h is None:
        init_h = tensor.alloc(to_floatX(0.), state_in.shape[-2], n_dim_out)
    if one_step:
        state_out, _ = _step(mask, state_in, init_h, tensor.zeros_like(init_h))
        return state_out
    else:
        [state_out, _], _ = theano.scan(_step, [mask, state_in], [init_h, tensor.zeros_like(init_h)])
        return state_out
Author: Mourzoufle | Project: seq-to-seq-rnn | Lines: 33 | Source: layers.py


Example 2: reconstruct

def reconstruct(self, x, n_samples):
    mu, log_sigma = self.encoder(x)
    if n_samples <= 0:
        y = self.decoder(mu)
    else:
        # sample from posterior
        if self.continuous:
            # hack to find out size of variables
            (y_mu, y_log_sigma) = self.decoder(mu)
            (y_mu, y_log_sigma) = (T.zeros_like(y_mu), T.zeros_like(y_log_sigma))
        else:
            y = T.zeros(x.shape)
        for i in range(n_samples):
            z = reparam_trick(mu, log_sigma, self.srng)
            if self.continuous:
                (new_y_mu, new_y_log_sigma) = self.decoder(z)
                y_mu = y_mu + new_y_mu
                y_log_sigma = y_log_sigma + new_y_log_sigma
            else:
                y = y + self.decoder(z)
        if self.continuous:
            y_mu = y_mu / n_samples
            y_log_sigma = y_log_sigma / n_samples
            y = (y_mu, y_log_sigma)
        else:
            y = (y / n_samples)
    if self.continuous:
        (y_mu, y_log_sigma) = y
        I = T.eye(y_mu.shape[0])
        cov = (T.pow(T.exp(y_log_sigma), 2)) * I
        y = np.random.multivariate_normal(y_mu.eval(), cov.eval())
    else:
        y = y.eval()
    return y
Author: budzianowski | Project: VAEB | Lines: 34 | Source: VAEB.py


Example 3: compute_cost_log_in_parallel

def compute_cost_log_in_parallel(original_rnn_outputs, labels, func, x_ends, y_ends):
	mask = T.log(1 - T.or_(T.eq(labels, T.zeros_like(labels)), T.eq(labels, shift_matrix(labels, 2))))

	initial_state = T.log(T.zeros_like(labels))
	initial_state = T.set_subtensor(initial_state[:,0], 0)

	def select_probabilities(rnn_outputs, label):
		return rnn_outputs[:,label]	

	rnn_outputs, _ = theano.map(select_probabilities, [original_rnn_outputs, labels])
	rnn_outputs = T.log(rnn_outputs.dimshuffle((1,0,2)))

	def forward_step(probabilities, last_probabilities):
		all_forward_probabilities = T.stack(
			last_probabilities + probabilities,
			log_shift_matrix(last_probabilities, 1) + probabilities,
			log_shift_matrix(last_probabilities, 2) + probabilities + mask,
		)

		result = func(all_forward_probabilities, 0)
		return result

	forward_probabilities, _ = theano.scan(fn = forward_step, sequences = rnn_outputs, outputs_info = initial_state)
	forward_probabilities = forward_probabilities.dimshuffle((1,0,2))

	def compute_cost(forward_probabilities, x_end, y_end):
		return -func(forward_probabilities[x_end-1,y_end-2:y_end])

	return theano.map(compute_cost, [forward_probabilities, x_ends, y_ends])[0]
Author: choko | Project: ctc | Lines: 29 | Source: ctc.py


Example 4: generic_compute_Lx_batches

def generic_compute_Lx_batches(samples, weights, biases, bs, cbs):
    tsamples = [x.reshape((bs//cbs, cbs, x.shape[1])) for x in samples]
    final_ws = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)),0)
                for x in weights]
    final_bs = [T.unbroadcast(T.shape_padleft(T.zeros_like(x)),0)
                for x in biases]
    n_samples = len(samples)
    n_weights = len(weights)
    n_biases = len(biases)
    def comp_step(*args):
        lsamples = args[:n_samples]
        terms1 = generic_compute_Lx_term1(lsamples, weights, biases)
        rval = []
        for (term1, acc) in zip(terms1, args[n_samples:]):
            rval += [acc + term1]
        return rval

    rvals,_ = theano.sandbox.scan.scan(
        comp_step,
        sequences=tsamples,
        states=final_ws + final_bs,
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'] )
    accs1 = [x[0]/numpy.float32(bs//cbs) for x in rvals]
    accs2 = generic_compute_Lx_term2(samples,weights,biases)
    return [x - y for x, y in zip(accs1, accs2)]
Author: gdesjardins | Project: DBM | Lines: 28 | Source: natural.py


Example 5: get_aggregator

    def get_aggregator(self):
        initialized = shared_like(0.)
        numerator_acc = shared_like(self.numerator)
        denominator_acc = shared_like(self.denominator)

        conditional_update_num = ifelse(initialized,
                                        self.numerator + numerator_acc,
                                        self.numerator)
        conditional_update_den = ifelse(initialized,
                                        self.denominator + denominator_acc,
                                        self.denominator)

        initialization_updates = [(numerator_acc,
                                   tensor.zeros_like(numerator_acc)),
                                  (denominator_acc,
                                   tensor.zeros_like(denominator_acc)),
                                  (initialized, 0.)]
        accumulation_updates = [(numerator_acc,
                                 conditional_update_num),
                                (denominator_acc,
                                 conditional_update_den),
                                (initialized, 1.)]
        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(numerator_acc /
                                                  denominator_acc))
        return aggregator
Author: Fdenpc | Project: blocks | Lines: 28 | Source: aggregation.py


Example 6: compute_Lx_batches

def compute_Lx_batches(v, g, h, xw_mat, xv_mat, xa, xb, xc, bs, cbs):
    xw = xw_mat.flatten()
    xv = xv_mat.flatten()
    tv = v.reshape((bs // cbs, cbs, v.shape[1]))
    tg = g.reshape((bs // cbs, cbs, g.shape[1]))
    th = h.reshape((bs // cbs, cbs, h.shape[1]))

    final_w1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xw_mat)),0)
    final_v1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xv_mat)),0)
    final_a1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xa)),0)
    final_b1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xb)),0)
    final_c1 = T.unbroadcast(T.shape_padleft(T.zeros_like(xc)),0)
    def comp_step(lv, lg, lh,
                  acc_w1, acc_v1, acc_a1, acc_b1, acc_c1):
        terms1 = compute_Lx_term1(lv, lg, lh, xw, xv, xa, xb, xc)
        accs1 = [acc_w1, acc_v1, acc_a1, acc_b1, acc_c1]
        rval = []

        for (term1, acc) in zip(terms1,accs1):
            rval += [acc + term1]
        return rval
    rvals,_ = theano.sandbox.scan.scan(
        comp_step,
        sequences=[tv,tg,th],
        states=[
            final_w1, final_v1, final_a1, final_b1, final_c1],
        n_steps=bs // cbs,
        profile=0,
        mode=theano.Mode(linker='cvm_nogc'),
        flags=['no_optimization'] )
    accs1 = [x[0]/numpy.float32(bs//cbs) for x in rvals]
    accs2 = compute_Lx_term2(v,g,h,xw,xv,xa,xb,xc)
    return [x - y for x, y in zip(accs1, accs2)]
Author: gdesjardins | Project: DBM | Lines: 33 | Source: natural.py


Example 7: get_aggregator

    def get_aggregator(self):
        initialized = shared_like(0.)
        total_acc = shared_like(self.variable)

        total_zeros = tensor.as_tensor(self.variable).zeros_like()

        conditional_update_num = self.variable + ifelse(initialized,
                                                         total_acc,
                                                         total_zeros)

        initialization_updates = [(total_acc,
                                   tensor.zeros_like(total_acc)),
                                  (initialized,
                                   tensor.zeros_like(initialized))]

        accumulation_updates = [(total_acc,
                                 conditional_update_num),
                                (initialized, tensor.ones_like(initialized))]

        aggregator = Aggregator(aggregation_scheme=self,
                                initialization_updates=initialization_updates,
                                accumulation_updates=accumulation_updates,
                                readout_variable=(total_acc))

        return aggregator
Author: davidbau | Project: net-intent | Lines: 25 | Source: ablation.py


Example 8: grad

    def grad(self, inputs, out_grads):
        batch_mean, rolling_mean, rolling_grad, alpha = inputs
        out_grad, = out_grads

        if self.update_averages:
            assert treeano.utils.is_shared_variable(rolling_mean)
            assert treeano.utils.is_shared_variable(rolling_grad)
            # HACK this is super hacky and won't work for certain
            # computation graphs
            # TODO make assertion again
            if (hasattr(rolling_mean, "default_update") or
                    hasattr(rolling_grad, "default_update")):
                warnings.warn("rolling mean/grad already has updates - "
                              "overwritting. this can be caused by calculating "
                              "the gradient of backprop to the future mean "
                              "multiple times")

            rolling_mean.default_update = (alpha * rolling_mean +
                                           (1 - alpha) * batch_mean)
            rolling_grad.default_update = (alpha * rolling_grad +
                                           (1 - alpha) * out_grad)
        else:
            # HACK remove default_update
            if hasattr(rolling_mean, "default_update"):
                delattr(rolling_mean, "default_update")
            if hasattr(rolling_grad, "default_update"):
                delattr(rolling_grad, "default_update")

        return [rolling_grad,
                T.zeros_like(rolling_mean),
                T.zeros_like(rolling_grad),
                T.zeros_like(alpha)]
Author: diogo149 | Project: treeano | Lines: 32 | Source: bttf_mean.py


Example 9: _construct_compute_fe_terms

def _construct_compute_fe_terms(self):
    """
    Construct theano function to compute the log-likelihood and posterior
    KL-divergence terms for the variational free-energy.
    """
    # setup some symbolic variables for theano to deal with
    Xd = T.matrix()
    Xc = T.zeros_like(Xd)
    Xm = T.zeros_like(Xd)
    # construct values to output
    if self.x_type == 'bernoulli':
        ll_term = log_prob_bernoulli(self.x, self.xg)
    else:
        ll_term = log_prob_gaussian2(self.x, self.xg, \
                log_vars=self.bounded_logvar)
    all_klds = gaussian_kld(self.q_z_given_x.output_mean, \
            self.q_z_given_x.output_logvar, \
            self.prior_mean, self.prior_logvar)
    kld_term = T.sum(all_klds, axis=1)
    # compile theano function for a one-sample free-energy estimate
    fe_term_sample = theano.function(inputs=[Xd], \
            outputs=[ll_term, kld_term], \
            givens={self.Xd: Xd, self.Xc: Xc, self.Xm: Xm})
    # construct a wrapper function for multi-sample free-energy estimate
    def fe_term_estimator(X, sample_count):
        ll_sum = np.zeros((X.shape[0],))
        kld_sum = np.zeros((X.shape[0],))
        for i in range(sample_count):
            result = fe_term_sample(X)
            ll_sum = ll_sum + result[0].ravel()
            kld_sum = kld_sum + result[1].ravel()
        mean_nll = -ll_sum / float(sample_count)
        mean_kld = kld_sum / float(sample_count)
        return [mean_nll, mean_kld]
    return fe_term_estimator
Author: Philip-Bachman | Project: ICML-2015 | Lines: 35 | Source: OneStageModel.py


Example 10: get_celerite_matrices

    def get_celerite_matrices(self, x, diag):
        x = tt.as_tensor_variable(x)
        diag = tt.as_tensor_variable(diag)
        ar, cr, ac, bc, cc, dc = self.coefficients
        a = diag + tt.sum(ar) + tt.sum(ac)
        U = tt.concatenate((
            ar[None, :] + tt.zeros_like(x)[:, None],
            ac[None, :] * tt.cos(dc[None, :] * x[:, None])
            + bc[None, :] * tt.sin(dc[None, :] * x[:, None]),
            ac[None, :] * tt.sin(dc[None, :] * x[:, None])
            - bc[None, :] * tt.cos(dc[None, :] * x[:, None]),
        ), axis=1)

        V = tt.concatenate((
            tt.zeros_like(ar)[None, :] + tt.ones_like(x)[:, None],
            tt.cos(dc[None, :] * x[:, None]),
            tt.sin(dc[None, :] * x[:, None]),
        ), axis=1)

        dx = x[1:] - x[:-1]
        P = tt.concatenate((
            tt.exp(-cr[None, :] * dx[:, None]),
            tt.exp(-cc[None, :] * dx[:, None]),
            tt.exp(-cc[None, :] * dx[:, None]),
        ), axis=1)

        return a, U, V, P
Author: dfm | Project: exoplanet | Lines: 27 | Source: terms.py


Example 11: lstm_layer

def lstm_layer(hidden_inpt, hidden_to_hidden,
               ingate_peephole, outgate_peephole, forgetgate_peephole,
               f):
    n_hidden_out = hidden_to_hidden.shape[0]

    def lstm_step(x_t, s_tm1, h_tm1):
        x_t += T.dot(h_tm1, hidden_to_hidden)

        inpt = T.tanh(x_t[:, :n_hidden_out])
        gates = x_t[:, n_hidden_out:]
        inpeep = s_tm1 * ingate_peephole
        outpeep = s_tm1 * outgate_peephole
        forgetpeep = s_tm1 * forgetgate_peephole

        ingate = f(gates[:, :n_hidden_out] + inpeep)
        forgetgate = f(
            gates[:, n_hidden_out:2 * n_hidden_out] + forgetpeep)
        outgate = f(gates[:, 2 * n_hidden_out:] + outpeep)

        s_t = inpt * ingate + s_tm1 * forgetgate
        h_t = f(s_t) * outgate
        return [s_t, h_t]

    (states, hidden_rec), _ = theano.scan(
        lstm_step,
        sequences=hidden_inpt,
        outputs_info=[T.zeros_like(hidden_inpt[0, :, 0:n_hidden_out]),
                      T.zeros_like(hidden_inpt[0, :, 0:n_hidden_out])
                      ])

    return states, hidden_rec
Author: ddofer | Project: breze | Lines: 31 | Source: rnn.py


Example 12: mf

    def mf(self, V, Y = None, return_history = False, niter = None, block_grad = None):

        drop_mask = T.zeros_like(V)

        if Y is not None:
            drop_mask_Y = T.zeros_like(Y)
        else:
            batch_size = V.shape[0]
            num_classes = self.dbm.hidden_layers[-1].n_classes
            assert isinstance(num_classes, int)
            Y = T.alloc(1., V.shape[0], num_classes)
            drop_mask_Y = T.alloc(1., V.shape[0])

        history = self.do_inpainting(X=V,
            Y=Y,
            return_history=True,
            drop_mask=drop_mask,
            drop_mask_Y=drop_mask_Y,
            noise=False,
            niter=niter,
            block_grad=block_grad)

        if return_history:
            return [elem['H_hat'] for elem in history]

        return history[-1]['H_hat']
Author: cc13ny | Project: galatea | Lines: 26 | Source: ensemble.py


Example 13: rnade_sym

def rnade_sym(self, x, W, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activation_rescaling):
    """ x is a matrix of column datapoints (VxB) V = n_visible, B = batch size """
    def density_given_previous_a_and_x(x, w, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activation_factor, p_prev, a_prev, x_prev):
        a = a_prev + T.dot(T.shape_padright(x_prev, 1), T.shape_padleft(w, 1))
        h = self.nonlinearity(a * activation_factor)  # BxH
        #x = theano.printing.Print('x')(x)
        Alpha = T.nnet.softmax(T.dot(h, V_alpha) + T.shape_padleft(b_alpha))  # BxC
        Alpha = theano.printing.Print('Alphas')(Alpha)
        Mu = T.dot(h, V_mu) + T.shape_padleft(b_mu)  # BxC
        Mu = theano.printing.Print('Mu')(Mu)
        Sigma = T.exp((T.dot(h, V_sigma) + T.shape_padleft(b_sigma)))  # BxC
        Sigma = theano.printing.Print('Sigmas')(Sigma)
        arg = -constantX(0.5) * T.sqr((Mu - T.shape_padright(x, 1)) / Sigma) - T.log(Sigma) - constantX(0.5 * numpy.log(2 * numpy.pi)) + T.log(Alpha)
        arg = theano.printing.Print('printing argument of logsumexp')(arg)
        p_var = log_sum_exp(arg)
        p_var = theano.printing.Print('p_var')(p_var)
        p = p_prev + p_var
        #p = theano.printing.Print('p')(p)
        return (p, a, x)
    # First element is different (it is predicted from the bias only)
    a0 = T.zeros_like(T.dot(x.T, W))  # BxH
    p0 = T.zeros_like(x[0])
    x0 = T.ones_like(x[0])
    ([ps, _as, _xs], updates) = theano.scan(density_given_previous_a_and_x,
                                            sequences=[x, W, V_alpha, b_alpha, V_mu, b_mu, V_sigma, b_sigma, activation_rescaling],
                                            outputs_info=[p0, a0, x0])
    return (ps[-1], updates)
Author: sidsig | Project: NIPS-2014 | Lines: 27 | Source: RNN_RNADE_slow.py


Example 14: filter_and_prob

def filter_and_prob(inpt, transition, emission,
           visible_noise_mean, visible_noise_cov,
           hidden_noise_mean, hidden_noise_cov,
           initial_hidden, initial_hidden_cov):
    step = forward_step(
        transition, emission,
        visible_noise_mean, visible_noise_cov,
        hidden_noise_mean, hidden_noise_cov)

    hidden_mean_0 = T.zeros_like(hidden_noise_mean).dimshuffle('x', 0)
    hidden_cov_0 = T.zeros_like(hidden_noise_cov).dimshuffle('x', 0, 1)
    f0, F0, ll0 = step(inpt[0], hidden_mean_0, hidden_cov_0)
    replace = {hidden_noise_mean: initial_hidden, 
               hidden_noise_cov: initial_hidden_cov}
    f0 = theano.clone(f0, replace)
    F0 = theano.clone(F0, replace)
    ll0 = theano.clone(ll0, replace)

    (f, F, ll), _ = theano.scan(
        step,
        sequences=inpt[1:],
        outputs_info=[f0, F0, None])

    ll = ll.sum(axis=0)

    f = T.concatenate([T.shape_padleft(f0), f])
    F = T.concatenate([T.shape_padleft(F0), F])
    ll += ll0

    return f, F, ll
Author: ddofer | Project: breze | Lines: 30 | Source: lds.py


Example 15: create_cost_fun

	def create_cost_fun (self):

		# create a cost function that
		# takes each prediction at every timestep
		# and guesses next timestep's value:
		what_to_predict = self.input_mat[:, 1:]
		# because some sentences are shorter, we
		# place masks where the sentences end:
		# (for_how_long is zero-indexed, e.g. an example spanning `[2,3)`
		# has this value set to 0, hence we subtract 1):
		for_how_long = self.for_how_long - 1
		# all sentences start at T=0:
		starting_when = T.zeros_like(self.for_how_long)
								 
		self.lstm_cost = masked_loss(self.lstm_predictions,
								what_to_predict,
								for_how_long,
								starting_when).sum()

		zero_entropy = T.zeros_like(self.entropy)
		real_entropy = T.switch(self.mask_matrix,self.entropy,zero_entropy)
		zero_key_entropy = T.zeros_like(self.key_entropy)
		real_key_entropy = T.switch(self.mask_matrix,self.key_entropy,zero_key_entropy)

		self.final_cost = masked_loss(self.final_predictions,
								what_to_predict,
								for_how_long,
								starting_when).sum()+self.entropy_reg*real_entropy.sum()+self.key_entropy_reg*real_key_entropy.sum()
Author: darongliu | Project: Lstm_Turing_LM | Lines: 28 | Source: lm_v4.py


Example 16: grad

  def grad(self, inputs, output_grads):
    Z_f, Z_b, V_f, V_b, c_f, c_b, i_f, i_b = inputs
    DY_f, DY_b, DH_f, DH_b, Dd_f, Dd_b = output_grads

    Z_f_raw = Z_f.owner.inputs[0].owner.inputs[0]
    Z_b_raw = Z_b.owner.inputs[0].owner.inputs[0]
    #TODO!!!
    V_f_raw = V_f.owner.inputs[0]
    V_b_raw = V_b.owner.inputs[0]
    c_f_raw = c_f.owner.inputs[0].owner.inputs[0]
    c_b_raw = c_b.owner.inputs[0].owner.inputs[0]
    i_f_raw = i_f.owner.inputs[0].owner.inputs[0]
    i_b_raw = i_b.owner.inputs[0].owner.inputs[0]
    #we have to make sure that this is only computed once!
    #for this we have to extract the raw variables before conversion to continuous gpu array
    #so that theano can merge the nodes
    Y_f, Y_b, H_f, H_b, d_f, d_b = BLSTMOpInstance(Z_f_raw, Z_b_raw, V_f_raw, V_b_raw, c_f_raw, c_b_raw, i_f_raw, i_b_raw)
    if isinstance(DY_f.type, theano.gradient.DisconnectedType):
      DY_f = T.zeros_like(Z_f)
    if isinstance(DY_b.type, theano.gradient.DisconnectedType):
      DY_b = T.zeros_like(Z_b)
    if isinstance(Dd_f.type, theano.gradient.DisconnectedType):
      Dd_f = T.zeros_like(c_f)
    if isinstance(Dd_b.type, theano.gradient.DisconnectedType):
      Dd_b = T.zeros_like(c_b)
    DZ_f, DZ_b, DV_f, DV_b, Dc_f, Dc_b = BLSTMOpGradNoInplaceInstance(V_f, V_b, c_f, c_b, i_f, i_b, Dd_f, Dd_b, DY_f, DY_b, Y_f, Y_b, H_f, H_b)
    Di_f = theano.gradient.grad_undefined(self, 5, inputs[5], 'cannot diff w.r.t. index')
    Di_b = theano.gradient.grad_undefined(self, 6, inputs[6], 'cannot diff w.r.t. index')

    return [DZ_f, DZ_b, DV_f, DV_b, Dc_f, Dc_b, Di_f, Di_b]
Author: atuxhe | Project: returnn | Lines: 30 | Source: OpBLSTM.py


Example 17: T_subspacel1_slow_shrinkage

def T_subspacel1_slow_shrinkage(a,L,lam_sparse,lam_slow,small_value=.001):
    amp = T.sqrt(a[::2,:]**2 + a[1::2,:]**2 + small_value)
    #damp = amp[:,1:] - amp[:,:-1]

    # compose slow shrinkage with subspace l1 shrinkage

    # slow shrinkage
    div = T.zeros_like(amp)
    d1 = amp[:,1:] - amp[:,:-1]
    d2 = d1[:,1:] - d1[:,:-1]
    div = T.set_subtensor(div[:,1:-1],-d2)
    div = T.set_subtensor(div[:,0], -d1[:,0])
    div = T.set_subtensor(div[:,-1], d1[:,-1])
    slow_amp_shrinkage = 1 - (lam_slow/L)*(div/amp)
    slow_amp_value = T.switch(T.gt(slow_amp_shrinkage,0),slow_amp_shrinkage,0)
    slow_shrinkage_prox_a = slow_amp_value*a[::2,:]
    slow_shrinkage_prox_b = slow_amp_value*a[1::2,:]

    # subspace l1 shrinkage
    amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a**2 + slow_shrinkage_prox_b**2)
    #amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
    amp_shrinkage = 1. - (lam_sparse/L)/amp_slow_shrinkage_prox
    amp_value = T.switch(T.gt(amp_shrinkage,0.),amp_shrinkage,0.)
    subspacel1_prox = T.zeros_like(a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[ ::2,:],amp_value*slow_shrinkage_prox_a)
    subspacel1_prox = T.set_subtensor(subspacel1_prox[1::2,:],amp_value*slow_shrinkage_prox_b)
    return subspacel1_prox
Author: baylabs | Project: hdl | Lines: 27 | Source: theano_methods.py


Example 18: castray

def castray(ro, rd, shape_params, nprims, width, height):
    tmin = 1.0
    tmax = 20.0
    precis = 0.002
    m = -1.0
    # There is a sequence of distances d1, d2, ..., dn,
    # and the accumulated distances d1, d1+d2, d1+d2+d3, ...
    # What we actually want in the output is, for each ray, the distance to
    # the surface, i.e. something like 0, 20, 25, 27, 28, 28, 28, 28, 28.

    max_num_steps = 25

    # distcolors = map(ro + rd * 0, width, height) #FIXME, reshape instead of mul by 0
    distcolors = mapedit(ro + rd * 0, shape_params, nprims, width, height)
    dists = distcolors
    steps = T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
    accum_dists = T.reshape(dists, (width, height, 1))

    for i in range(max_num_steps - 1):
        # distcolors = map(ro + rd * accum_dists, width, height) #FIXME, reshape instead of mul by 0
        distcolors = mapedit(ro + rd * accum_dists, shape_params, nprims, width, height) #FIXME, reshape instead of mul by 0
        dists = distcolors
        steps = steps + T.switch(dists < precis, T.zeros_like(dists), T.ones_like(dists))
        accum_dists = accum_dists + T.reshape(dists, (width, height, 1))

    last_depth = T.reshape(accum_dists, (width, height))
    depthmap = T.switch(last_depth < tmax, last_depth / tmax, T.zeros_like(last_depth))
    color = 1.0 - steps / float(max_num_steps)
    # Distance marched along ray and delta between last two steps
    return depthmap
Author: zenna | Project: Arrows.jl | Lines: 31 | Source: iq.py


Example 19: _construct_sample_from_prior

def _construct_sample_from_prior(self):
    """
    Construct a function for drawing independent samples from the
    distribution generated by this MultiStageModel. This function returns
    the full sequence of "partially completed" examples.
    """
    z_sym = T.matrix()
    x_sym = T.matrix()
    irs = self.ir_steps
    oputs = [self.obs_transform(self.s0)]
    oputs.extend([self.obs_transform(self.si[i]) for i in range(irs)])
    _, hi_zmuv = self._construct_zmuv_samples(x_sym, 1)
    sample_func = theano.function(inputs=[z_sym, x_sym], outputs=oputs, \
            givens={ self.z: z_sym, \
                     self.x_in: T.zeros_like(x_sym), \
                     self.x_out: T.zeros_like(x_sym), \
                     self.hi_zmuv: hi_zmuv }, \
            updates=self.scan_updates)
    def prior_sampler(samp_count):
        x_samps = to_fX( np.zeros((samp_count, self.obs_dim)) )
        old_switch = self.train_switch.get_value(borrow=False)
        # set model to generation mode
        self.set_train_switch(switch_val=0.0)
        z_samps = to_fX( npr.randn(samp_count, self.z_dim) )
        model_samps = sample_func(z_samps, x_samps)
        # set model back to either training or generation mode
        self.set_train_switch(switch_val=old_switch)
        return model_samps
    return prior_sampler
Author: Philip-Bachman | Project: NN-Python | Lines: 29 | Source: MultiStageModel.py


Example 20: __call__

    def __call__(self, input_, *xs):
        '''
        Maybe unclear: input_ is the variable to be scaled, xs are the
        actual inputs.
        '''
        updates = theano.OrderedUpdates()

        if len(xs) != len(self.dims_in):
            raise ValueError('Number of (external) inputs for baseline must'
                             ' match parameters')

        ws = []
        for i in xrange(len(xs)):
            # Maybe not the most pythonic way...
            ws.append(self.__dict__['w%d' % i])

        ids = T.sum([x.dot(W) for x, W in zip(xs, ws)], axis=0).T
        ids_c = T.zeros_like(ids) + ids
        input_scaled = input_ / ids_c
        input_ = T.zeros_like(input_) + input_

        outs = OrderedDict(
            x_c=input_,
            x_scaled=input_scaled,
            ids=ids,
            ids_c=ids_c
        )

        return outs, updates
Author: Jeremy-E-Johnson | Project: cortex | Lines: 29 | Source: layers.py



Note: The theano.tensor.zeros_like examples in this article were compiled by 纯净天空 from open-source projects on GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from projects contributed by their original developers, and copyright remains with the original authors; when distributing or reusing the code, please follow the corresponding project's license. Do not reproduce this article without permission.

