
Python tensor.squeeze Function Code Examples


This article collects typical usage examples of the Python function theano.tensor.squeeze. If you are wondering how to use squeeze in Python, or are looking for concrete examples of it in practice, the curated code examples below should help.



The 20 code examples of the squeeze function shown below are sorted by popularity by default.
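
Before the excerpts, here is a minimal, self-contained sketch of what T.squeeze does in Theano: it removes the dimensions that are marked as broadcastable (i.e. statically known to have length 1). The variable names and shapes below are illustrative assumptions only.

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.tensor3('x')                       # e.g. runtime shape (4, 1, 3)
    xb = T.addbroadcast(x, 1)                # tell Theano that axis 1 has length 1
    y = T.squeeze(xb)                        # broadcastable axes are dropped
    f = theano.function([x], y)

    print(f(np.zeros((4, 1, 3), dtype=theano.config.floatX)).shape)  # -> (4, 3)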

Example 1: getTrainingFunc2

    def getTrainingFunc2(self):
        input = T.dmatrix()
        target = T.dvector()
        learning_rate = T.scalar()
        
        y = input
        for i in xrange(0, self.n_layers-1):
            y = T.maximum(0.0, T.dot(y, self.params[i*3]) + self.params[i*3+1] )
            y = y*self.theano_rng.binomial(y.shape, 1, 0.5)
        
        y = T.maximum(0, T.dot(y, self.params[(self.n_layers-1)*3]) + self.params[(self.n_layers-1)*3+1] )
        
        y = T.squeeze(y.T)
        #y = T.dot(y, self.params[-1])
        diff = y - target
        #regulator = theano.printing.Print('norm:')(T.sum(abs(y))*alpha)
        #L = theano.printing.Print('L:')(T.sum(diff*diff) + regulator)
        L = T.sum(diff*diff) #- target*T.log(y) - (1-target)*T.log(1-y)
        
        gparam = T.grad(L, [ self.params[i] for i in xrange(len(self.params)) if i%3 != 2 ])

        updates = {}
        for i,p,g,m in zip(xrange(len(gparam)),[ self.params[i] for i in xrange(len(self.params)) if i%3 != 2 ], gparam, [ self.moments[i] for i in xrange(len(self.moments)) if i%3 != 2 ]):
            if i%2 == 0:
                updates[m] = 0.9*m - learning_rate*0.0005*p - learning_rate*g        
            else:
                updates[m] = 0.9*m - learning_rate*g
            updates[p] = p + m

        train_func = theano.function( inputs = [input, target, learning_rate], outputs=[L,y], updates= updates)
        return train_func
Author: amoliu | Project: autosub | Lines of code: 31 | Source file: parse.py


Example 2: get_output

 def get_output(self, go_backwards = False, train = False):
     self.reset_states(train.shape[0])
     inputs = train.dimshuffle((1, 0, 2))
     results, _ = theano.scan(
         self.step,
         sequences=inputs,
         outputs_info=[self.states[0],self.states[1]],
         go_backwards=go_backwards)
     '''
     # deal with Theano API inconsistency
     if type(results) is list:
         outputs = results[0]
         states = results[1:]
     else:
         outputs = results
         states = []
 
     outputs = T.squeeze(outputs)
     last_output = outputs[-1]
     '''
     
     #outputs = np.asarray(results)[:,0]
     #outputs = T.squeeze(outputs)
     #outputs = outputs.dimshuffle((1, 0, 2))
     
     #states = [T.squeeze(state[-1]) for state in states]
     #return last_output, outputs, states
     
     outputs = results[0]
     outputs = T.squeeze(outputs)
     outputs = outputs.dimshuffle((1, 0, 2))
     return outputs
Author: FudanNLP | Project: NeuralSentenceOrdering | Lines of code: 32 | Source file: LSTM.py


Example 3: _comp_modes

 def _comp_modes(self):
     try:
         return tt.as_tensor_variable(self.comp_dists.mode)
     except AttributeError:
         return tt.squeeze(tt.stack([comp_dist.mode
                                     for comp_dist in self.comp_dists],
                                    axis=-1))
Author: pymc-devs | Project: pymc3 | Lines of code: 7 | Source file: mixture.py


Example 4: get_relative_position

    def get_relative_position(self, t):
        """The planets' positions relative to the star

        Args:
            t: The times where the position should be evaluated.

        Returns:
            The components of the position vector at ``t`` in units of
            ``R_sun``.

        """
        dt = tt.mod(tt.shape_padright(t) - self._ref_time, self.period)
        dt -= self._half_period
        x = tt.squeeze(self.speed * dt)
        y = tt.squeeze(self._b_norm + tt.zeros_like(dt))
        z = -tt.ones_like(x)
        return x, y, z
Author: dfm | Project: exoplanet | Lines of code: 17 | Source file: simple.py


Example 5: squeeze

def squeeze(x, axis):
    '''Remove a 1-dimension from the tensor at index "axis".
    '''
    broadcastable = x.broadcastable[:axis] + x.broadcastable[axis+1:]
    x = T.patternbroadcast(x, [i == axis for i in range(x.type.ndim)])
    x = T.squeeze(x)
    x = T.patternbroadcast(x, broadcastable)
    return x
Author: fvisin | Project: keras | Lines of code: 8 | Source file: theano_backend.py
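
A brief usage sketch for the backend helper above (the shapes, variable names, and test values here are illustrative assumptions, not part of the Keras backend):

    import numpy as np
    import theano
    import theano.tensor as T

    a = T.tensor3('a')                       # runtime shape (2, 1, 5)
    b = squeeze(a, axis=1)                   # the helper defined in Example 5
    f = theano.function([a], b)
    print(f(np.zeros((2, 1, 5), dtype=theano.config.floatX)).shape)  # -> (2, 5)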


Example 6: _comp_logp

    def _comp_logp(self, value):
        comp_dists = self.comp_dists

        try:
            value_ = value if value.ndim > 1 else tt.shape_padright(value)

            return comp_dists.logp(value_)
        except AttributeError:
            return tt.squeeze(tt.stack([comp_dist.logp(value)
                                        for comp_dist in comp_dists],
                                       axis=1))
Author: brandonwillard | Project: pymc3 | Lines of code: 11 | Source file: mixture.py


Example 7: logsumexp

def logsumexp(x, axis=None, keepdims=False):
    max_value = T.max(x, axis=axis, keepdims=True)
    res = max_value + T.log(T.sum(T.exp(x-max_value), axis=axis, keepdims=True))
    if not keepdims:
        if axis is None:
            return T.squeeze(res)

        slices = [slice(None, None, None)]*res.ndim
        slices[axis] = 0  # Axis being merged
        return res[tuple(slices)]

    return res
Author: ppoulin91 | Project: learn2track | Lines of code: 12 | Source file: utils.py
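
As an illustrative check of the axis=None path (input values chosen arbitrarily): with keepdims=False, the (1, 1) intermediate produced by the keepdims=True reductions is collapsed to a 0-d scalar by T.squeeze.

    import numpy as np
    import theano
    import theano.tensor as T

    x = T.matrix('x')
    f = theano.function([x], logsumexp(x, axis=None, keepdims=False))
    val = f(np.zeros((2, 3), dtype=theano.config.floatX))
    print(val, val.ndim)                     # -> ~1.792 (log 6), ndim 0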


Example 8: get_radial_velocity

    def get_radial_velocity(self, t, K=None, output_units=None):
        """Get the radial velocity of the star

        .. note:: The convention in exoplanet is that positive `z` points
            *towards* the observer. However, for consistency with radial
            velocity literature this method returns values where positive
            radial velocity corresponds to a redshift as expected.

        Args:
            t: The times where the radial velocity should be evaluated.
            K (Optional): The semi-amplitudes of the orbits. If provided, the
                ``m_planet`` and ``incl`` parameters will be ignored and this
                amplitude will be used instead.
            output_units (Optional): An AstroPy velocity unit. If not given,
                the output will be evaluated in ``m/s``. This is ignored if a
                value is given for ``K``.

        Returns:
            The reflex radial velocity evaluated at ``t`` in units of
            ``output_units``. For multiple planets, this will have one row for
            each planet.

        """

        # Special case for K given: m_planet, incl, etc. is ignored
        if K is not None:
            f = self._get_true_anomaly(t)
            if self.ecc is None:
                return tt.squeeze(K * tt.cos(f))
            # cos(w + f) + e * cos(w) from Lovis & Fischer
            return tt.squeeze(
                K * (self.cos_omega*tt.cos(f) - self.sin_omega*tt.sin(f) +
                     self.ecc * self.cos_omega))

        # Compute the velocity using the full orbit solution
        if output_units is None:
            output_units = u.m / u.s
        conv = (1 * u.R_sun / u.day).to(output_units).value
        v = self.get_star_velocity(t)
        return -conv * v[2]
Author: dfm | Project: exoplanet | Lines of code: 40 | Source file: keplerian.py


Example 9: get_planet_position

    def get_planet_position(self, t):
        """The planets' positions in the barycentric frame

        Args:
            t: The times where the position should be evaluated.

        Returns:
            The components of the position vector at ``t`` in units of
            ``R_sun``.

        """
        return tuple(tt.squeeze(x)
                     for x in self._get_position(self.a_planet, t))
Author: dfm | Project: exoplanet | Lines of code: 13 | Source file: keplerian.py


Example 10: get_planet_velocity

    def get_planet_velocity(self, t):
        """Get the planets' velocity vector

        Args:
            t: The times where the velocity should be evaluated.

        Returns:
            The components of the velocity vector at ``t`` in units of
            ``M_sun/day``.

        """
        return tuple(tt.squeeze(x)
                     for x in self._get_velocity(-self.m_star, t))
Author: dfm | Project: exoplanet | Lines of code: 13 | Source file: keplerian.py


Example 11: __init__

    def __init__(self, p, *args, **kwargs):
        super().__init__(*args, **kwargs)
        try:
            self.k = tt.shape(p)[-1].tag.test_value
        except AttributeError:
            self.k = tt.shape(p)[-1]
        p = tt.as_tensor_variable(floatX(p))

        # From #2082, it may be dangerous to automatically rescale p at this
        # point without checking for positiveness
        self.p = p
        self.mode = tt.argmax(p, axis=-1)
        if self.mode.ndim == 1:
            self.mode = tt.squeeze(self.mode)
Author: aloctavodia | Project: pymc3 | Lines of code: 14 | Source file: discrete.py


Example 12: get_star_velocity

    def get_star_velocity(self, t):
        """Get the star's velocity vector

        .. note:: For a system with multiple planets, this will return one
            column per planet with the contributions from each planet. The
            total velocity can be found by summing along the last axis.

        Args:
            t: The times where the velocity should be evaluated.

        Returns:
            The components of the velocity vector at ``t`` in units of
            ``M_sun/day``.

        """
        return tuple(tt.squeeze(x)
                     for x in self._get_velocity(self.m_planet, t))
Author: dfm | Project: exoplanet | Lines of code: 17 | Source file: keplerian.py


Example 13: get_relative_velocity

    def get_relative_velocity(self, t):
        """The planets' velocity relative to the star

        .. note:: This treats each planet independently and does not take the
            other planets into account when computing the position of the
            star. This is fine as long as the planet masses are small.

        Args:
            t: The times where the velocity should be evaluated.

        Returns:
            The components of the velocity vector at ``t`` in units of
            ``R_sun/day``.

        """
        return tuple(tt.squeeze(x)
                     for x in self._get_velocity(-self.m_total, t))
Author: dfm | Project: exoplanet | Lines of code: 17 | Source file: keplerian.py


Example 14: get_star_position

    def get_star_position(self, t):
        """The star's position in the barycentric frame

        .. note:: If there are multiple planets in the system, this will
            return one column per planet with each planet's contribution to
            the motion. The star's full position can be computed by summing
            over the last axis.

        Args:
            t: The times where the position should be evaluated.

        Returns:
            The components of the position vector at ``t`` in units of
            ``R_sun``.

        """
        return tuple(tt.squeeze(x)
                     for x in self._get_position(self.a_star, t))
Author: dfm | Project: exoplanet | Lines of code: 18 | Source file: keplerian.py


Example 15: _compute_losses

    def _compute_losses(self, model_output):
        # model_output.shape : (batch_size, seq_len, K, M, target_size)
        # self.dataset.symb_targets.shape = (batch_size, seq_len+K-1, target_dims)

        # targets.shape = (batch_size, seq_len, 3)
        targets = self.dataset.symb_targets[:, : -self.model.k + 1 or None, :]

        # mask.shape : (batch_size, seq_len)
        mask = self.dataset.symb_mask

        # samples.shape : (batch_size, seq_len, 3)
        # T.squeeze(.) should remove the K=1 and M=1 dimensions
        self.samples = self.model.get_max_component_samples(T.squeeze(model_output))

        # loss_per_time_step.shape = (batch_size, seq_len)
        self.loss_per_time_step = l2distance(self.samples, targets)
        # loss_per_seq.shape = (batch_size,)
        self.loss_per_seq = T.sum(self.loss_per_time_step * mask, axis=1) / T.sum(mask, axis=1)

        return self.loss_per_seq
Author: ppoulin91 | Project: learn2track | Lines of code: 20 | Source file: gru_msp.py


Example 16: prederrrate

def prederrrate(output, target_output, mask,db='Y_PRED_ERRORS:',verbose=False):
    """
    Calculates the misclassification rate. Masks 'masked' samples
    All matrices are shape (sequences x sequence_length x nclasses)
    :param output: Output from nntools network. example: last_layer,get_output(input,deterministic=False)
    :param target_output: tensor3 with one-hot-encoded targets  (sequences x sequence_length x nclasses)
    :param mask: tensor3 binary mask indicating if output should be included as error. 1 is included, 0 is excluded
    :param verbose: if true prints the cross entropy
    :param db: verbose printing name
    :return:
    """
    true_labels = T.argmax(target_output, axis=-1).flatten()
    preds = T.argmax(output, axis=-1).flatten()
    eq = T.eq(true_labels,preds)
    n_time_steps = T.sum(mask)
    acc = T.sum(eq*T.squeeze(mask)) / n_time_steps

    if verbose:
        acc = theano.printing.Print(db+' ACC')(acc)
    error = 1.0-acc
    return error
Author: benathi | Project: nntools | Lines of code: 21 | Source file: LSTMTrainingFunctions.py


Example 17: fwd_old

  def fwd_old(self, x, V, A, L):
    """
    x : signal
    V : eigenvectors
    A : area 
    L : eigenvalues
    """
    V = V[:,:self.K]
    L = L[:self.K]

    sampleLoc = (L.dimshuffle(0,'x') - self.evalSamples.dimshuffle('x',0)) / self.dEval
    basis = self.cubicBSpline(sampleLoc)
    basis = basis.dimshuffle('x',0,1)

    rho = T.sqrt(T.sum(A))

    # weight the basis columns for each input function to generate a ghat
    # Q x K, a window for each input function
    ghat = T.squeeze(T.batched_dot(
            T.tile(basis, [self.nin, 1, 1]), 
            self.beta)[:,:,0]) # crazy stuff here, why doesn't squeeze work?
    # Q x K x N
    V_ = T.tile(V.dimshuffle('x',1,0), [self.nin, 1, 1])
    # Q x K x N
    tmp = (ghat.dimshuffle(0,'x',1) * V).dimshuffle(0,2,1)
    # Q x N x N
    transl = rho * T.batched_dot(V_.dimshuffle(0,2,1), tmp)
    transl = A.dimshuffle('x',0,'x') * transl
    # Q x K x N
    tmp = (V.dimshuffle(0,'x',1) * x.dimshuffle(0,1,'x')).dimshuffle(1,2,0)
    # Q x K x N
    desc = rho * T.batched_dot(tmp, transl)
    desc = T.abs_(desc)
    
    desc = desc.dimshuffle(2,0,'x',1) # BC01 format : N x Q x 1 x K
    return self.activation(theano.tensor.nnet.conv.conv2d(desc, self.W).flatten(2) + self.b)
Author: jonathanmasci | Project: ShapeNet | Lines of code: 36 | Source file: layers_lscnn.py


Example 18: rnn


#......... part of the code is omitted here .........
                indices = indices[::-1]

            successive_outputs = []
            successive_states = []
            states = initial_states
            for i in indices:
                output, new_states = step_function(inputs[i], states + constants)

                if len(successive_outputs) == 0:
                    prev_output = zeros_like(output)
                else:
                    prev_output = successive_outputs[-1]

                output = T.switch(mask[i], output, prev_output)
                kept_states = []
                for state, new_state in zip(states, new_states):
                    kept_states.append(T.switch(mask[i], new_state, state))
                states = kept_states

                successive_outputs.append(output)
                successive_states.append(states)

            outputs = T.stack(*successive_outputs)
            states = []
            for i in range(len(successive_states[-1])):
                states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states]))
        else:
            # build an all-zero tensor of shape (samples, output_dim)
            initial_output = step_function(inputs[0], initial_states + constants)[0] * 0
            # Theano gets confused by broadcasting patterns in the scan op
            initial_output = T.unbroadcast(initial_output, 0, 1)

            def _step(input, mask, output_tm1, *states):
                output, new_states = step_function(input, states)
                # output previous output if masked.
                output = T.switch(mask, output, output_tm1)
                return_states = []
                for state, new_state in zip(states, new_states):
                    return_states.append(T.switch(mask, new_state, state))
                return [output] + return_states

            results, _ = theano.scan(
                _step,
                sequences=[inputs, mask],
                outputs_info=[initial_output] + initial_states,
                non_sequences=constants,
                go_backwards=go_backwards)

            # deal with Theano API inconsistency
            if type(results) is list:
                outputs = results[0]
                states = results[1:]
            else:
                outputs = results
                states = []
    else:
        if unroll:
            indices = list(range(input_length))
            if go_backwards:
                indices = indices[::-1]

            successive_outputs = []
            successive_states = []
            states = initial_states
            for i in indices:
                output, states = step_function(inputs[i], states + constants)
                successive_outputs.append(output)
                successive_states.append(states)
            outputs = T.stack(*successive_outputs)
            states = []
            for i in range(len(successive_states[-1])):
                states.append(T.stack(*[states_at_step[i] for states_at_step in successive_states]))

        else:
            def _step(input, *states):
                output, new_states = step_function(input, states)
                return [output] + new_states

            results, _ = theano.scan(
                _step,
                sequences=inputs,
                outputs_info=[None] + initial_states,
                non_sequences=constants,
                go_backwards=go_backwards)

            # deal with Theano API inconsistency
            if type(results) is list:
                outputs = results[0]
                states = results[1:]
            else:
                outputs = results
                states = []

    outputs = T.squeeze(outputs)
    last_output = outputs[-1]

    axes = [1, 0] + list(range(2, outputs.ndim))
    outputs = outputs.dimshuffle(axes)
    states = [T.squeeze(state[-1]) for state in states]
    return last_output, outputs, states
Author: fvisin | Project: keras | Lines of code: 101 | Source file: theano_backend.py


Example 19: rttn


#......... part of the code is omitted here .........
            T.addbroadcast(ctx, 1)

            # horizon state is (B, HorizonSize, RNNWORD)
            branch_owners = branch_tensor[horizon_indices, T.arange(s_batch).reshape((s_batch, 1))]
            #branch_owners = branch_tensor[T.arange(s_batch), horizon_indices] # indexes into the branches
            horizon_state = T.concatenate([branch_owners, horizon_words], axis=-1) 
            
            # now create the probability tensor
            p_horizon = horizon_state * ctx  # elemwise multiplying
            p_horizon = T.sum(p_horizon, axis=-1) #then summing. 
            #this was basically a dot, but per batch row and resulting in a dim reduction
            # now, given (B,Horizon), we can get a softmax distribution per row
            p_horizon = T.nnet.softmax(p_horizon)
            # note, this means we can also sample if we want to do a dynamic oracle. 
            
            return h_vplus, branch_tensor, horizon_state, p_horizon

        output_info = initial_states + [None, None]
        
        (h_v, branch_tensor, 
         horizon_states, p_horizons), _ = theano.scan(
                                                 _step,
                                                sequences=[T.arange(inputs.shape[0]), 
                                                           inputs, 
                                                           mask, 
                                                           action_types,
                                                           tree_topology, 
                                                           horizon_words, 
                                                           horizon_indices],
                                                outputs_info=output_info,
                                                non_sequences=[context_matrix] + constants)     
        branch_tensor = branch_tensor[-1]
        
    else:

        def _step(iter_index, x_input, x_type, x_topology, 
                              horizon_words, horizon_indices, 
                              h_traverse, branch_tensor, W_ctx, *constants):
            '''Notes for this function:
               W_ctx is passed in under non sequences but is separated here from the constants
            '''
            ### topology
            batch_index = T.arange(x_topology.shape[0])
            h_parent = colgather(branch_tensor, batch_index, x_topology)
            states = (h_parent, h_traverse, x_type) + constants
            h_child, h_vplus = step_function(x_input, states)
            
            branch_tensor = T.set_subtensor(branch_tensor[iter_index], h_child)
            
            ### shape sizes
            s_batch = shape_key['batch']
            s_rnn = shape_key['rnn'] 
            s_word = shape_key['word']
            s_rnn_word = s_rnn + s_word

            # ctx is used as an attentional vector over the horizon states
            # W_ctx is (4, R, RW), h_vplus is (B, R); horizont_types is (B,)
            # horizon_types lets different tree actions be considered
            ctx = T.dot(h_vplus, W_ctx)
            ctx = T.addbroadcast(T.reshape(ctx, (s_batch, 1, s_rnn_word)), 1)

            # horizon state is (B, HorizonSize, s_rnn_word)
            branch_owners = branch_tensor[T.arange(s_batch), horizon_indices] # indexes into the branches
            horizon_state = T.concatenate([branch_owners, horizon_words], axis=-1) 
            
            # now create the probability tensor
            p_horizon = horizon_state * ctx  # elemwise multiplying
            p_horizon = T.sum(p_horizon, axis=-1) #then summing. 
            #this was basically a dot, but per batch row and resulting in a dim reduction
            # now, given (B,Horizon), we can get a softmax distribution per row
            p_horizon = T.nnet.softmax(p_horizon) # b, horizon
            #p_horizon = T.addbroadcast(T.reshape(p_horizon, (s_batch, s_horizon, 1)), 1)
            # note, this means we can also sample if we want to do a dynamic oracle. 
            #horizon_attn = T.sum(p_horizon * horizon_state, axis=1)
            
            
            return h_vplus, branch_tensor, horizon_state, p_horizon

        output_info = initial_states + [None, None]
        
        (h_v, branch_tensor, 
         horizon_states, p_horizons), _ = theano.scan(
                                                 _step,
                                                sequences=[T.arange(inputs.shape[0]), 
                                                           inputs, 
                                                           action_types,
                                                           tree_topology, 
                                                           horizon_words, 
                                                           horizon_indices],
                                                outputs_info=output_info,
                                                non_sequences=[context_matrix] + constants)     
        branch_tensor = branch_tensor[-1]
    
    unshuffle = lambda tensor: T.squeeze(tensor).dimshuffle([1, 0] + list(range(2, tensor.ndim)))
    h_v = unshuffle(h_v)
    branch_tensor = unshuffle(branch_tensor)
    horizon_states = unshuffle(horizon_states)
    p_horizons = unshuffle(p_horizons)

    return branch_tensor, h_v, horizon_states, p_horizons
Author: braingineer | Project: ikelos | Lines of code: 101 | Source file: theano_backend.py


Example 20: rnn

def rnn(step_function, inputs, initial_states,
        go_backwards=False, mask=None):
    '''Iterates over the time dimension of a tensor.

    Parameters
    ----------
    inputs: tensor of temporal data of shape (samples, time, ...)
        (at least 3D).
    step_function:
        Parameters:
            input: tensor with shape (samples, ...) (no time dimension),
                representing input for the batch of samples at a certain
                time step.
            states: list of tensors.
        Returns:
            output: tensor with shape (samples, ...) (no time dimension),
            new_states: list of tensors, same length and shapes
                as 'states'.
    initial_states: tensor with shape (samples, ...) (no time dimension),
        containing the initial values for the states used in
        the step function.
    go_backwards: boolean. If True, do the iteration over
        the time dimension in reverse order.
    mask: binary tensor with shape (samples, time, 1),
        with a zero for every element that is masked.

    Returns
    -------
    A tuple (last_output, outputs, new_states).
        last_output: the latest output of the rnn, of shape (samples, ...)
        outputs: tensor with shape (samples, time, ...) where each
            entry outputs[s, t] is the output of the step function
            at time t for sample s.
        new_states: list of tensors, latest states returned by
            the step function, of shape (samples, ...).
    '''
    ndim = inputs.ndim
    assert ndim >= 3, "Input should be at least 3D."
    axes = [1, 0] + list(range(2, ndim))
    inputs = inputs.dimshuffle(axes)
    if mask is None:
        mask = expand_dims(ones_like(T.sum(inputs, axis=-1)))
    else:
        mask = mask.dimshuffle(axes)

    def _step(input, mask, output_tm1, *states):
        output, new_states = step_function(input, states)
        # output previous output if masked.
        output = T.switch(mask, output, output_tm1)
        return_states = []
        for state, new_state in zip(states, new_states):
            return_states.append(T.switch(mask, new_state, state))
        return [output] + return_states

    # build an all-zero tensor of shape (samples, output_dim)
    initial_output = step_function(inputs[0], initial_states)[0] * 0
    # Theano gets confused by broadcasting patterns in the scan op
    initial_output = T.unbroadcast(initial_output, 0, 1)

    results, _ = theano.scan(
        _step,
        sequences=[inputs, mask],
        outputs_info=[initial_output] + initial_states,
        go_backwards=go_backwards)

    # deal with Theano API inconsistency
    if type(results) is list:
        outputs = results[0]
        states = results[1:]
    else:
        outputs = results
        states = []

    outputs = T.squeeze(outputs)
    last_output = outputs[-1]

    axes = [1, 0] + list(range(2, outputs.ndim))
    outputs = outputs.dimshuffle(axes)
    states = [T.squeeze(state[-1]) for state in states]
    return last_output, outputs, states
Author: luogongning | Project: keras | Lines of code: 80 | Source file: theano_backend.py



Note: The theano.tensor.squeeze examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.

