
Python tensorflow.sigmoid Function Code Examples


This article collects typical usage examples of the tensorflow.sigmoid function in Python. If you have been wondering how tf.sigmoid works, how to call it, or what it looks like in real code, the curated examples below should help.



Below are 20 code examples of the sigmoid function, sorted by popularity by default.
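
Before diving into the collected examples, here is a minimal, self-contained sketch of tf.sigmoid itself; it computes 1 / (1 + exp(-x)) element-wise. This snippet is not from the collection, and it assumes the TensorFlow 1.x graph-mode API that most of the examples below were written against:

import tensorflow as tf

# tf.sigmoid applies 1 / (1 + exp(-x)) element-wise to a float tensor
x = tf.constant([-1.0, 0.0, 1.0])
y = tf.sigmoid(x)

with tf.Session() as sess:
    print(sess.run(y))  # approximately [0.269, 0.5, 0.731]

The output is always in (0, 1), which is why the examples below use it for gates (LSTM/GRU) and for turning discriminator or classifier logits into scores.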

Example 1: __call__

    def __call__(self, inputs, state, scope=None):
        num_proj = self._num_units if self._num_proj is None else self._num_proj

        c_prev = tf.slice(state, [0, 0], [-1, self._num_units])
        m_prev = tf.slice(state, [0, self._num_units], [-1, num_proj])

        input_size = inputs.get_shape().with_rank(2)[1]
        if input_size.value is None:
            raise ValueError("Could not infer input size from inputs.get_shape()[-1]")

        with tf.variable_scope(type(self).__name__,
                               initializer=self._initializer):
            # i = input_gate, j = new_input, f = forget_gate, o = output_gate
            cell_inputs = tf.concat(1, [inputs, m_prev])
            lstm_matrix = tf.nn.bias_add(tf.matmul(cell_inputs, self._concat_w),
                                         self._b)
            i, j, f, o = tf.split(1, 4, lstm_matrix)

            c = tf.sigmoid(f + self._forget_bias) * c_prev + tf.sigmoid(i) * tf.sigmoid(j)  # note: the textbook LSTM uses tf.tanh(j) for the candidate input

            m = tf.sigmoid(o) * tf.tanh(c)

            if self._num_proj is not None:
                m = tf.matmul(m, self._concat_w_proj)

        new_state = tf.concat(1, [c, m])
        return m, new_state
Developer: IgorWang, Project: RNNLM, Lines: 27, Source: model_utils.py


Example 2: loss_fn

 def loss_fn(w_flat):
   w = tf.reshape(w_flat, [visible_size, hidden_size])
   x = tf.matmul(data, w)
   x = tf.sigmoid(x)
   x = tf.matmul(x, w, transpose_b=True)
   x = tf.sigmoid(x)
   return tf.reduce_mean(tf.square(x-data))
Developer: yaroslavvb, Project: stuff, Lines: 7, Source: eager_lbfgs.py


Example 3: unit

        def unit(x, hidden_memory_tm1):
            previous_hidden_state, c_prev = tf.unpack(hidden_memory_tm1)

            # Input Gate
            i = tf.sigmoid(
                tf.matmul(x, self.Wi) +
                tf.matmul(previous_hidden_state, self.Ui) + self.bi
            )

            # Forget Gate
            f = tf.sigmoid(
                tf.matmul(x, self.Wf) +
                tf.matmul(previous_hidden_state, self.Uf) + self.bf
            )

            # Output Gate
            o = tf.sigmoid(
                tf.matmul(x, self.Wog) +
                tf.matmul(previous_hidden_state, self.Uog) + self.bog
            )

            # New Memory Cell
            c_ = tf.nn.tanh(
                tf.matmul(x, self.Wc) +
                tf.matmul(previous_hidden_state, self.Uc) + self.bc
            )

            # Final Memory cell
            c = f * c_prev + i * c_

            # Current Hidden state
            current_hidden_state = o * tf.nn.tanh(c)

            return tf.pack([current_hidden_state, c])
Developer: Soledad89, Project: SeqGAN, Lines: 34, Source: model.py


Example 4: __call__

  def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):
      """JZS3, mutant 2 with n units cells."""
      with tf.variable_scope(scope or type(self).__name__):  # "JZS1Cell"
        with tf.variable_scope("Zinput"):  # Reset gate and update gate.
          # We start with bias of 1.0 to not reset and not update.
          '''equation 1'''

          z = tf.sigmoid(linear([inputs, tf.tanh(state)],
                                self._num_units, True, 1.0,
                                weight_initializer=self._weight_initializer,
                                orthogonal_scale_factor=self._orthogonal_scale_factor))

          '''equation 2'''
        with tf.variable_scope("Rinput"):
          r = tf.sigmoid(linear([inputs, state],
                                self._num_units, True, 1.0,
                                weight_initializer=self._weight_initializer,
                                orthogonal_scale_factor=self._orthogonal_scale_factor))
          '''equation 3'''
        with tf.variable_scope("Candidate"):
          component_0 = linear([state*r,inputs],
                            self._num_units, True)
          
          component_2 = (tf.tanh(component_0))*z
          component_3 = state*(1 - z)

        h_t = component_2 + component_3

      return h_t, h_t #there is only one hidden state output to keep track of. 
Developer: tonydeep, Project: tensorflow_with_latest_papers, Lines: 26, Source: rnn_cell_modern.py


Example 5: mkDiscriminator

def mkDiscriminator(input, weights):
    l1 = tf.nn.tanh(tf.matmul(input, weights['w1']) + weights['b1'])
    l2 = tf.nn.tanh(tf.matmul(l1,weights['w2']) + weights['b2'])
    l3 = tf.sigmoid(tf.matmul(l2,weights['w3']) + weights['b3'])
    return l3
    # note: the original source continued with an unreachable fourth layer:
    # l4 = tf.sigmoid(tf.matmul(l3,weights['w4']) + weights['b4'])
    # return l4
Developer: Daiver, Project: jff, Lines: 7, Source: main.py


Example 6: __call__

    def __call__(self, inputs, state, scope = None):
        with tf.variable_scope(scope or type(self).__name__):
            with tf.variable_scope("Gates"):
                reset, update = tf.split(
                    1,
                    2,
                    linear(
                        [inputs, state],  # fixed: the original referenced an undefined name "states"
                        2 * self._num_units,
                        bias = True,
                        bias_start = 1.0
                    )
                )
                reset, update = tf.sigmoid(reset), tf.sigmoid(update)

            with tf.variable_scope("Candidate"):
                candidate = linear(
                    [inputs, reset * state],
                    self._num_units,
                    bias = True
                )
                candidate = tf.tanh(candidate)

            new_state = update * state + (1 - update) * candidate

            return new_state, new_state
Developer: chetkhatri, Project: TensorFlow-Playground, Lines: 26, Source: rnn_units.py


Example 7: forward_propogation

	def forward_propogation(self):
		x = tf.placeholder("float")
		z2 = tf.add(tf.matmul(x,self.W1),self.b1)
		a2 = tf.sigmoid(z2, name="hidden_activation")  # fixed: TF op names may not contain spaces
		z3 = tf.add(tf.matmul(a2,self.W2),self.b2)
		a3 = tf.sigmoid(z3, name="output_activation")
		return a3
Developer: sjcjohnston, Project: spectral_prediction, Lines: 7, Source: autoencoder.py


Example 8: __call__

    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with tf.variable_scope(scope or "basic_lstm_cell", reuse=self._reuse):  # fixed: variable_scope takes a name, not the cell object
            # Parameters of gates are concatenated into one multiply for
            # efficiency.
            if self._state_is_tuple:
                c_prev, h_prev = state
            else:
                c_prev, h_prev = tf.split(
                    value=state, num_or_size_splits=2, axis=1)
            concat = tf.contrib.rnn._linear(
                [inputs, h_prev], 4 * self._num_units, True)

            # i = input_gate, g = new_input, f = forget_gate, o = output_gate
            i, g, f, o = tf.split(value=concat, num_or_size_splits=4, axis=1)

            c = (c_prev * tf.sigmoid(f + self._forget_bias) +
                 tf.sigmoid(i) * tf.tanh(g))
            h = tf.tanh(c) * tf.sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(c, h)
            else:
                new_state = tf.concat([c, h], 1)
            return h, new_state
Developer: seasky100, Project: tensorflow_end2end_speech_recognition, Lines: 25, Source: basic_lstm.py


Example 9: __call__

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    
    with vs.variable_scope(scope or type(self).__name__):
      if self._dropMaskInput.get_shape()[1:] != inputs.get_shape()[1:]:
        print("error: "+str(self._dropMaskInput.get_shape()[1:])+" != "+str(inputs.get_shape()[1:]))
        assert(False)
      if self._dropMaskState.get_shape()[1:] != state.get_shape()[1:]:
        print("error: "+str(self._dropMaskState.get_shape()[1:])+" != "+str(state.get_shape()[1:]))
        assert(False)
      dropin = tf.mul(self._dropMaskInput, inputs)
      dropst = tf.mul(self._dropMaskState, state)

      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        concat = rnn_cell._linear([dropin, dropst], 2 * self._num_units, True, 1.0)
        r, u = tf.split(1, 2, concat)
        r, u = tf.sigmoid(r), tf.sigmoid(u)

      with vs.variable_scope("Candidate"):
        htilda = self._activation(rnn_cell._linear([dropin, r * dropst], self._num_units, True))

      new_h = u * dropst + (1 - u) * htilda

    return new_h, new_h
Developer: jasonbunk, Project: char-rnn-tensorflow, Lines: 25, Source: dropgru.py


Example 10: build_losses

    def build_losses(self, logits_real, logits_fake):
        """D and G play two-player minimax game with value function V(G,D)

          min_G max_D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))]

        Args:
            logits_real (tf.Tensor): discrim logits from real samples
            logits_fake (tf.Tensor): discrim logits from fake samples produced by generator
        """
        with tf.name_scope("GAN_loss"):
            score_real = tf.sigmoid(logits_real)
            score_fake = tf.sigmoid(logits_fake)
            tf.summary.histogram('score-real', score_real)
            tf.summary.histogram('score-fake', score_fake)

            with tf.name_scope("discrim"):
                d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')
                d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')

                d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
                d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')

                d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
                self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')

            with tf.name_scope("gen"):
                self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')
                g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')

            add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
Developer: ahuirecome, Project: tensorpack, Lines: 33, Source: GAN.py
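
A note on Example 10: tf.sigmoid is used only for the summary histograms and accuracies, while the losses are built from raw logits via tf.nn.sigmoid_cross_entropy_with_logits, which evaluates -y*log(sigmoid(z)) - (1-y)*log(1-sigmoid(z)) in a numerically stable form. The following sketch (an illustration assuming TF 1.x, not part of the original article) checks that equivalence:

import numpy as np
import tensorflow as tf

logits = tf.constant([-2.0, 0.5, 3.0])
labels = tf.constant([0.0, 1.0, 1.0])

# stable built-in form, fed with raw logits
stable = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels)
# naive form built from tf.sigmoid; overflows for large |logits|
naive = -(labels * tf.log(tf.sigmoid(logits)) +
          (1 - labels) * tf.log(1 - tf.sigmoid(logits)))

with tf.Session() as sess:
    a, b = sess.run([stable, naive])
    print(np.allclose(a, b))  # True

This is why the example passes logits_real and logits_fake, not sigmoid scores, into the loss.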


Example 11: __call__

  def __call__(self, inputs, state, scope=None):
    with tf.device("/gpu:"+str(self._gpu_for_layer)):
      """JZS2, mutant 2 with n units cells."""
      with tf.variable_scope(scope or type(self).__name__):  # "JZS1Cell"
        with tf.variable_scope("Zinput"):  # Reset gate and update gate.
          '''equation 1'''

          z = tf.sigmoid(linear.linear([inputs, state], 
                            self._num_units, True, 1.0))

          '''equation 2 '''
        with tf.variable_scope("Rinput"):
          r = tf.sigmoid(inputs+(linear.linear([state],
                            self._num_units, True, 1.0)))
          '''equation 3'''

        with tf.variable_scope("Candidate"):

          component_0 = linear.linear([state*r,inputs],
                            self._num_units, True)
          
          component_2 = (tf.tanh(component_0))*z
          component_3 = state*(1 - z)

        h_t = component_2 + component_3

      return h_t, h_t #there is only one hidden state output to keep track of. 
Developer: ml-lab, Project: Seq2Seq_Upgrade_TensorFlow, Lines: 27, Source: rnn_cell_enhanced.py


Example 12: __call__

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = tf.split(1, 2, state)
      concat = linear.linear([inputs, h], 4 * self._num_units, True)

      fs = []

      # This can be made more efficient since we're doing more than needs to be
      # done, but for now w/e
      for child_state in child_states:
          c_k, h_k = tf.split(1, 2, child_state)
          concat = linear.linear([inputs, h_k], 4 * self._num_units, True)
          i_k, j_k, f_k, o_k = tf.split(1, 4, concat)
          fs.append(f_k)


      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      # TODO: forget gate for each child, probably need to split by number
      # of child states or something
      i, j, f, o = tf.split(1, 4, concat)

      # If no children just treat it like a regular lstm
      if not fs:
        fs.append(f)

      new_c = sum(c * tf.sigmoid(f_k + self._forget_bias) for f_k in fs) + tf.sigmoid(i) * tf.tanh(j)  # fixed: the original added a Python list to a scalar
      new_h = tf.tanh(new_c) * tf.sigmoid(o)

    return new_h, tf.concat(1, [new_c, new_h])
Developer: StevenLOL, Project: LSTMRelatedness, Lines: 31, Source: treelstm.py


Example 13: lstm_cell

 def lstm_cell(i, o, state):
   """
   Create a LSTM cell. See e.g.: http://arxiv.org/pdf/1402.1128v1.pdf
   Note that in this formulation, we omit the various connections between the
   previous state and the gates.
   """                   
   i_list = tf.pack([i, i, i, i])
   #print i_list.get_shape().as_list()
   o_list = tf.pack([o, o, o, o])
                         
   ins = tf.batch_matmul(i_list, fico_x)
   outs = tf.batch_matmul(o_list, fico_m)
   
   h_x = ins + outs + fico_b
   #print h_x.get_shape().as_list()
   
   #forget_gate = tf.sigmoid(tf.matmul(i, fx) + tf.matmul(o, fm) + fb)
   forget_gate = tf.sigmoid(h_x[0,:,:])
   
   #input_gate = tf.sigmoid(tf.matmul(i, ix) + tf.matmul(o, im) + ib)
   input_gate = tf.sigmoid(h_x[1,:,:])
   
   #update = tf.tanh(tf.matmul(i, cx) + tf.matmul(o, cm) + cb)
   update = tf.tanh(h_x[2,:,:])
   
   state = forget_gate*state + input_gate*update
   
   #output_gate = tf.sigmoid(tf.matmul(i, ox) + tf.matmul(o, om) + ob)
   output_gate = tf.sigmoid(h_x[3,:,:])
   
   h = output_gate * tf.tanh(state)
   #print 'h', h.get_shape().as_list()
   return h, state
Developer: kcbighuge, Project: tensorflow-deeplearning, Lines: 33, Source: 6_lstm.py


Example 14: _compute_loss

  def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape either [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors, num_classes]
        representing the value of the loss function.
    """
    if self._bootstrap_type == 'soft':
      bootstrap_target_tensor = self._alpha * target_tensor + (
          1.0 - self._alpha) * tf.sigmoid(prediction_tensor)
    else:
      bootstrap_target_tensor = self._alpha * target_tensor + (
          1.0 - self._alpha) * tf.cast(
              tf.sigmoid(prediction_tensor) > 0.5, tf.float32)
    per_entry_cross_ent = (tf.nn.sigmoid_cross_entropy_with_logits(
        labels=bootstrap_target_tensor, logits=prediction_tensor))
    return per_entry_cross_ent * weights
Developer: Exscotticus, Project: models, Lines: 26, Source: losses.py
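
In Example 14, the 'soft' bootstrap blends the one-hot target with the model's own sigmoid prediction, target' = alpha * t + (1 - alpha) * sigmoid(z), while the 'hard' variant thresholds the prediction at 0.5 first. A small numeric sketch of the soft target (an illustration, not from the original article; assumes TF 1.x):

import tensorflow as tf

alpha = 0.95
target = tf.constant([1.0, 0.0])
logits = tf.constant([2.0, -1.0])

# soft bootstrap: mix the label with the model's current belief
soft_target = alpha * target + (1.0 - alpha) * tf.sigmoid(logits)

with tf.Session() as sess:
    print(sess.run(soft_target))  # approximately [0.994, 0.013]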


Example 15: __call__

  def __call__(self, inputs, state, timestep = 0, scope=None):
    """Most basic RNN: output = new_state = tanh(W * input + U * state + B)."""

    current_state = state
    for highway_layer in xrange(self.num_highway_layers):
      with tf.variable_scope('highway_factor_'+str(highway_layer)):
        if self.use_inputs_on_each_layer or highway_layer == 0:
          highway_factor = tf.tanh(multiplicative_integration([inputs, current_state], self._num_units))
        else:
          highway_factor = tf.tanh(linear([current_state], self._num_units, True))

      with tf.variable_scope('gate_for_highway_factor_'+str(highway_layer)):
        if self.use_inputs_on_each_layer or highway_layer == 0:
          gate_for_highway_factor = tf.sigmoid(multiplicative_integration([inputs, current_state], self._num_units, initial_bias_value = -3.0))
        else:
          gate_for_highway_factor = tf.sigmoid(linear([current_state], self._num_units, True, -3.0))

        gate_for_hidden_factor = 1 - gate_for_highway_factor

        if self.use_recurrent_dropout and self.is_training:
          highway_factor = tf.nn.dropout(highway_factor, self.recurrent_dropout_factor)

      current_state = highway_factor * gate_for_highway_factor + current_state * gate_for_hidden_factor

    return current_state, current_state
Developer: Ahndaehwan, Project: tensorflow_with_latest_papers, Lines: 25, Source: rnn_cell_mulint_modern.py


Example 16: build_node

    def build_node(self, x_in, c_in, h_in, scope="lstm_cell"):
        #print (x_in, c_in, h_in, scope)
        #print [type(thing) for thing in (x_in, c_in, h_in, scope)]
        # print [(item.name, item.dtype) for thing in (h_in, c_in) for item in thing]
        # print (x_in.name, x_in.dtype)

        with tf.variable_scope(scope):
            # print x.shape
            # print h_in.get_shape()
            x_with_h = tf.concat(2, [x_in, h_in])

            ones_for_bias = tf.constant(np.ones([batch_size,1,1]), name="b", dtype=tf.float32)
            x_h_concat = tf.concat(2, [ones_for_bias, x_with_h])

            # forget gate layer
            # print "w_f: ", self.w_f.get_shape()
            # print "x_h_concat: ", x_h_concat.get_shape()
            f = tf.sigmoid(tf.batch_matmul(x_h_concat, self.w_f))

            # candidate values
            i = tf.sigmoid(tf.batch_matmul(x_h_concat, self.w_i))
            candidate_c = tf.tanh(tf.batch_matmul(x_h_concat, self.w_c))

            # new cell state (hidden)
            # forget old values of c
            old_c_to_keep = tf.mul(f, c_in)
            # scaled candidate values of c
            new_c_to_keep = tf.mul(i, candidate_c)
            c = tf.add(old_c_to_keep, new_c_to_keep)

            # new scaled output
            o = tf.sigmoid(tf.batch_matmul(x_h_concat, self.w_o))
            h = tf.mul(o, tf.tanh(c))
            return (c, h)
Developer: liangkai, Project: char-rnn-tf, Lines: 34, Source: rnn.py


Example 17: add_model

    def add_model(self, inputs1, inputs2, seq_len1, seq_len2):
        #self.initial_state = tf.constant(np.zeros(()), dtype=tf.float32)
        self.initial_state = tf.constant(np.zeros((self.config.batch_size,self.config.hidden_size)), dtype=tf.float32)
        rnn_outputs  = []
        rnn_outputs1 = []
        rnn_outputs2 = []
        h_curr1 = self.initial_state
        h_curr2 = self.initial_state
        with tf.variable_scope('rnn'):
            Whh = tf.get_variable('Whh', shape=(self.config.hidden_size,self.config.hidden_size), dtype=tf.float32)
            Wxh = tf.get_variable('Wxh', shape=(self.config.embed_size,self.config.hidden_size),  dtype=tf.float32)
            b1  = tf.get_variable('bhx', shape=(self.config.hidden_size,),                        dtype=tf.float32)
            print Wxh.get_shape()
            print inputs1[0].get_shape()
            print inputs2[0].get_shape()
            for i in range(self.config.max_steps):
                h_curr2 = tf.matmul(h_curr2,Whh) 
                h_curr2 += tf.matmul(inputs2[i],Wxh)
                h_curr2 += b1
                h_curr2 = tf.sigmoid(h_curr2)

                h_curr1 = tf.sigmoid(tf.matmul(h_curr1,Whh) + tf.matmul(inputs1[i],Wxh) + b1)
                rnn_outputs1.append(h_curr1)
                rnn_outputs2.append(h_curr2)
        
        rnn_states = [tf.concat(1, [rnn_outputs1[i], rnn_outputs2[i]]) for i in range(self.config.max_steps)]
        return rnn_states
Developer: anushabala, Project: deep-playlist, Lines: 29, Source: model_rnn.py


Example 18: call

  def call(self, x, h):
    channels = x.shape[self._feature_axis].value

    with tf.variable_scope('gates'):
      inputs = tf.concat([x, h], axis=self._feature_axis)
      n = channels + self._filters
      m = 2 * self._filters if self._filters > 1 else 2
      W = tf.get_variable('kernel', self._kernel + [n, m])
      y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
      if self._normalize:
        r, u = tf.split(y, 2, axis=self._feature_axis)
        r = tf.contrib.layers.layer_norm(r)
        u = tf.contrib.layers.layer_norm(u)
      else:
        y += tf.get_variable('bias', [m], initializer=tf.ones_initializer())
        r, u = tf.split(y, 2, axis=self._feature_axis)
      r, u = tf.sigmoid(r), tf.sigmoid(u)

      # TODO
      #tf.summary.histogram('reset_gate', r)
      #tf.summary.histogram('update_gate', u)

    with tf.variable_scope('candidate'):
      inputs = tf.concat([x, r * h], axis=self._feature_axis)
      n = channels + self._filters
      m = self._filters
      W = tf.get_variable('kernel', self._kernel + [n, m])
      y = tf.nn.convolution(inputs, W, 'SAME', data_format=self._data_format)
      if self._normalize:
        y = tf.contrib.layers.layer_norm(y)
      else:
        y += tf.get_variable('bias', [m], initializer=tf.zeros_initializer())
      h = u * h + (1 - u) * self._activation(y)

    return h, h
Developer: ascenoputing, Project: SemanticSegmentation_DL, Lines: 35, Source: ConvLSTM_Cell.py


Example 19: unroll

    def unroll(inp, state):
        g_i = tf.sigmoid(tf.matmul(inp, w_xi) + tf.matmul(state, w_hi) + b_i)
        g_r = tf.sigmoid(tf.matmul(inp, w_xr) + tf.matmul(state, w_hr) + b_r)
        u = tf.tanh(tf.matmul(inp, w_xu) + g_r * tf.matmul(state, w_hu) + b_u)
        state = state * (1 - g_i) + u * g_i

        return state
Developer: ziyu-zhang, Project: ins-seg-public, Lines: 7, Source: nnlib.py


Example 20: __call__

  def __call__(self, x_placeholder, h_prev, C_prev):
    with tf.variable_scope(self.scope, reuse=True):
      embedding = tf.get_variable('embedding')
      W = tf.get_variable('weight')

    x_embedding = tf.nn.embedding_lookup(embedding, x_placeholder)

    if self.is_training:
      x_embedding = tf.nn.dropout(x_embedding, self.keep_prob)

    # compute all four gate pre-activations in a single matmul
    concat_input = tf.concat(1, [h_prev, x_embedding])
    gates = tf.matmul(concat_input, W)
    m_f, m_i, m_C_update, m_o = tf.split(1, 4, gates)

    # forget gate
    f = tf.sigmoid(m_f)
    # input gate
    i = tf.sigmoid(m_i)
    # output gate
    o = tf.sigmoid(m_o)
    # Cell update
    C_update = tf.tanh(m_C_update)

    # cell after update
    # Add a dropout layer.
    C = tf.mul(f, C_prev) + tf.mul(i, C_update)

    # output
    h = tf.mul(o, tf.tanh(C))
    return h, C
Developer: wenjiesha, Project: sentiment_lstm, Lines: 31, Source: model.py



Note: the tensorflow.sigmoid examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets are selected from open-source projects, and copyright remains with their original authors; consult each project's License before using or redistributing the code. Do not reproduce without permission.

