
Python math_ops.sigmoid Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.math_ops.sigmoid. If you have been wondering how to use Python's sigmoid function, what it does, or what calling it looks like in practice, the curated code examples below should help.



The following presents 20 code examples of the sigmoid function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
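
Before diving into the examples, here is a minimal sketch of what math_ops.sigmoid computes: the element-wise logistic function σ(x) = 1 / (1 + exp(-x)), which maps any real value into (0, 1). The sketch assumes a TensorFlow 1.x graph-mode environment (matching the internal tensorflow.python.* imports used throughout the examples); the input values are purely illustrative.

from tensorflow.python.framework import ops
from tensorflow.python.client import session
from tensorflow.python.ops import math_ops

g = ops.Graph()
with g.as_default():
    # Applied element-wise: sigmoid(x) = 1 / (1 + exp(-x))
    x = ops.convert_to_tensor([-2.0, 0.0, 2.0])
    y = math_ops.sigmoid(x)

with session.Session(graph=g) as sess:
    print(sess.run(y))  # approximately [0.119, 0.5, 0.881]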

Example 1: __call__

    def __call__(self, inputs, state, scope=None):
        """Long short-term memory cell (LSTM)."""
        with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
            # Parameters of gates are concatenated into one multiply for efficiency.
            if self._state_is_tuple:
                c, h = state
            else:
                c, h = array_ops.split(1, 2, state)

            i = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="i")
            j = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="j")
            f = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="f")
            o = linear_tt([inputs, h], self._num_units, self._mat_ranks, bias=True, scope="o")
        
#             concat = _linear([inputs, h], 4 * self._num_units, True)
#             # i = input_gate, j = new_input, f = forget_gate, o = output_gate
#             i , j, f, o = array_ops.split(1, 4, concat)

            new_c = (c * sigmoid(f + self._forget_bias) + sigmoid(i) *
                     self._activation(j))
            new_h = self._activation(new_c) * sigmoid(o)

            if self._state_is_tuple:
                new_state = LSTMStateTuple(new_c, new_h)
            else:
                new_state = array_ops.concat(1, [new_c, new_h])
            return new_h, new_state
Contributor: yuqirose | Project: tensor-compress | Lines: 27 | Source file: TensorBasicLSTMCell.py
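
For reference, Example 1 (like the other basic-LSTM variants below, e.g. Examples 2 and 13) implements the standard basic-LSTM update, where sigmoid gates the input, forget, and output paths. With i, j, f, o the pre-activation gate values and φ the cell's self._activation (tanh by default):

\begin{aligned}
c_t &= c_{t-1} \odot \sigma(f + b_{\text{forget}}) + \sigma(i) \odot \phi(j) \\
h_t &= \phi(c_t) \odot \sigma(o)
\end{aligned}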


Example 2: __call__

  def __call__(self, inputs, state, scope=None):
    """LSTM cell with layer normalization and recurrent dropout."""

    with vs.variable_scope(scope or type(self).__name__) as scope:  # LayerNormBasicLSTMCell  # pylint: disable=unused-variables
      c, h = state
      args = array_ops.concat(1, [inputs, h])
      concat = self._linear(args)

      i, j, f, o = array_ops.split(1, 4, concat)
      if self._layer_norm:
        i = self._norm(i, "input")
        j = self._norm(j, "transform")
        f = self._norm(f, "forget")
        o = self._norm(o, "output")

      g = self._activation(j)
      if (not isinstance(self._keep_prob, float)) or self._keep_prob < 1:
        g = nn_ops.dropout(g, self._keep_prob, seed=self._seed)

      new_c = (c * math_ops.sigmoid(f + self._forget_bias)
               + math_ops.sigmoid(i) * g)
      if self._layer_norm:
        new_c = self._norm(new_c, "state")
      new_h = self._activation(new_c) * math_ops.sigmoid(o)

      new_state = rnn_cell.LSTMStateTuple(new_c, new_h)
      return new_h, new_state
Contributor: KalraA | Project: tensorflow | Lines: 27 | Source file: rnn_cell.py


Example 3: GetParams

 def GetParams(self):
   """Tests for scale & elementwise layers in TF-TRT."""
   input_name = "input"
   input_dims = [10, 24, 24, 20]
   output_name = "output"
   g = ops.Graph()
   with g.as_default():
     x = array_ops.placeholder(
         dtype=dtypes.float32, shape=input_dims, name=input_name)
     for weights_shape in [
         (1,),  # scale
         (24, 1, 1),  # scale
         (24, 24, 20),  # scale
         (20,),  # elementwise
         (1, 24, 1, 1),  # elementwise
         (1, 24, 24, 1),  # elementwise
         (1, 24, 24, 20),  # elementwise
         (24, 20),  # elementwise
     ]:
       a = self._ConstOp(weights_shape)
       f = x + a
       x = math_ops.sigmoid(f)
       a = self._ConstOp(weights_shape)
       f = a + x
       x = math_ops.sigmoid(f)
     gen_array_ops.reshape(x, [5, -1], name=output_name)
   return trt_test.TfTrtIntegrationTestParams(
       gdef=g.as_graph_def(),
       input_names=[input_name],
       input_dims=[input_dims],
       output_names=[output_name],
       expected_output_dims=[(5, 23040)])
Contributor: Ajaycs99 | Project: tensorflow | Lines: 32 | Source file: binary_tensor_weight_broadcast_test.py


Example 4: call

  def call(self, inputs, state):
    """
    """
    (c_prev, m_prev) = state
    self._batch_size = inputs.shape[0].value or array_ops.shape(inputs)[0]
    scope = vs.get_variable_scope()
    with vs.variable_scope(scope, initializer=self._initializer):
      x = array_ops.concat([inputs, m_prev], axis=1)
      with vs.variable_scope("first_gemm"):
        if self._linear1 is None:
          # no bias for bottleneck
          self._linear1 = _Linear(x, self._fact_size, False)
        R_fact = self._linear1(x)
      with vs.variable_scope("second_gemm"):
        if self._linear2 is None:
          self._linear2 = _Linear(R_fact, 4*self._num_units, True)
        R = self._linear2(R_fact)
      i, j, f, o = array_ops.split(R, 4, 1)

      c = (math_ops.sigmoid(f + self._forget_bias) * c_prev +
           math_ops.sigmoid(i) * math_ops.tanh(j))
      m = math_ops.sigmoid(o) * self._activation(c)

    if self._num_proj is not None:
      with vs.variable_scope("projection"):
        if self._linear3 is None:
          self._linear3 = _Linear(m, self._num_proj, False)
        m = self._linear3(m)

    new_state = rnn_cell_impl.LSTMStateTuple(c, m)
    return m, new_state
Contributor: fotwo | Project: OpenSeq2Seq | Lines: 31 | Source file: flstm.py


Example 5: LSTMCell

 def LSTMCell(cls, x, mprev, cprev, weights):
   xm = array_ops.concat([x, mprev], 1)
   i_i, i_g, f_g, o_g = array_ops.split(
       value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
   new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
       i_g) * math_ops.tanh(i_i)
   new_c = clip_ops.clip_by_value(new_c, -50.0, 50.0)
   new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
   return new_m, new_c
Contributor: AbhinavJain13 | Project: tensorflow | Lines: 9 | Source file: function_test.py


Example 6: _logits_to_prediction

 def _logits_to_prediction(self, logits=None):
   predictions = {PredictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[PredictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
   predictions[PredictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Contributor: caikehe | Project: tensorflow | Lines: 9 | Source file: head.py
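
Note that since σ(0) = 0.5, thresholding the logits at zero via math_ops.greater(logits, 0) is equivalent to thresholding the sigmoid probabilities at 0.5; Examples 7 and 10 below rely on the same identity.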


Example 7: _logits_to_prediction

 def _logits_to_prediction(self, logits=None):
   predictions = {PedictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[PedictionKey.LOGISTIC] = math_ops.sigmoid(logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[PedictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
    # Workaround for argmax dropping the second dimension.
   predictions[PedictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Contributor: MrCrumpets | Project: tensorflow | Lines: 10 | Source file: head.py


Example 8: __call__

 def __call__(self, inputs, state, scope=None):
     """Gated recurrent unit (GRU) with nunits cells."""
     with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
         with vs.variable_scope("Gates"):  # Reset gate and update gate.
             # We start with bias of 1.0 to not reset and not update.
             r, u = array_ops.split(1, 2, linear([inputs, state], 2 * self._num_units, True, 1.0))
             r, u = sigmoid(r), sigmoid(u)
         with vs.variable_scope("Candidate"):
             c = tanh(linear([inputs, r * state], self._num_units, True))
         new_h = u * state + (1 - u) * c
     return new_h, new_h
Contributor: ExploreMailbot | Project: tensorflow | Lines: 11 | Source file: rnn_cell.py
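
All of the GRU variants in this article (Examples 8, 9, 14, 15, 19, and 20) build on the same underlying update, with sigmoid producing the reset gate r and update gate u in (0, 1); Examples 15 and 19 additionally layer a time gate and an attention score on top of it:

\begin{aligned}
r, u &= \sigma\!\left(W_g\,[x_t, h_{t-1}] + b_g\right) \\
\tilde{h}_t &= \phi\!\left(W_c\,[x_t, r \odot h_{t-1}] + b_c\right) \\
h_t &= u \odot h_{t-1} + (1 - u) \odot \tilde{h}_t
\end{aligned}

Initializing the gate bias b_g to 1.0 (as the comment notes) pushes r and u toward 1 early in training, so the cell initially neither resets nor overwrites its state.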


Example 9: __call__

 def __call__(self, inputs, state, scope=None):
     """Gated recurrent unit (GRU) with nunits cells."""
     with vs.variable_scope(scope or "gru_cell"):
         with vs.variable_scope("gates"):  # Reset gate and update gate.
             # We start with bias of 1.0 to not reset and not update.
             r, u = array_ops.split(1, 2, _linear([inputs, state], 2 * self._num_units, True, 1.0, scope=scope))
             r, u = sigmoid(r), sigmoid(u)
         with vs.variable_scope("candidate"):
             c = self._activation(_linear([inputs, r * state], self._num_units, True, scope=scope))
         new_h = u * state + (1 - u) * c
     return new_h, new_h
Contributor: yuikns | Project: tensorflow | Lines: 11 | Source file: rnn_cell.py


Example 10: _logits_to_predictions

 def _logits_to_predictions(self, logits):
   """See `_MultiClassHead`."""
   predictions = {prediction_key.PredictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[prediction_key.PredictionKey.LOGISTIC] = math_ops.sigmoid(
         logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[prediction_key.PredictionKey.PROBABILITIES] = math_ops.sigmoid(
       logits)
   predictions[prediction_key.PredictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Contributor: HKUST-SING | Project: tensorflow | Lines: 12 | Source file: head.py


Example 11: __call__

  def __call__(self, inputs, state, scope=None):
    """Recurrent Highway Network cell (RHN)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicRHNCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      if self._state_is_tuple:
        y = state
      else:
        y = array_ops.split(1, 1, state)[0]
      assert self._recurrence_depth > 0 and type(self._recurrence_depth) is int
      # h_transform = [None] * self._recurrence_depth
      # t = [None] * self._recurrence_depth
      # s = [None] * self._recurrence_depth
      # concat = [None] * self._recurrence_depth
      # for i in range(self._recurrence_depth):
      #   if i == 0:
      #     concat[i] = _linear([inputs, h], 2 * self._num_units, True)
      #     # h = nonlinear transform, t = transfer gate
      #     h_transform[i], t[i] = array_ops.split(1, 2, concat[i])
      #     t[i] = sigmoid(t[i] + self._transfer_bias)
      #     s[i] = self._activation(h_transform[i]) * t[i] + \
      #         (1.0 - t[i]) * _linear([inputs], 1 * self._num_units, False)
      #   if i > 0:
      #     concat[i] = _linear([h], 2 * self._num_units, True)
      #     # h = nonlinear transform, t = transfer gate
      #     h_transform[i], t[i] = array_ops.split(1, 2, concat[i])
      #     t[i] = sigmoid(t[i] + self._transfer_bias)
      #     s[i] = self._activation(h_transform[i]) * t[i] + \
      #         (1.0 - t[i]) * s[i-1]

      # ALTERNATIVE IMPLEMENTATION:
      for i in range(self._recurrence_depth):
        if i == 0:
          concat = _linear([inputs, y], 2 * self._num_units, True)
          # h = nonlinear transform, t = transfer gate
          h, t = array_ops.split(1, 2, concat)
          t = sigmoid(t + self._transfer_bias)
          s = self._activation(h) * t + \
              (1.0 - t) * _linear([inputs], 1 * self._num_units, False)
        if i > 0:
          concat = _linear([s], 2 * self._num_units, True)
          # h = nonlinear transform, t = transfer gate
          h, t = array_ops.split(1, 2, concat)
          t = sigmoid(t + self._transfer_bias)
          s = self._activation(h) * t + \
              (1.0 - t) * s
      new_y = s

      if self._state_is_tuple:
        new_state = RHNStateTuple(new_y)
      else:
        new_state = array_ops.concat(1, new_y)
      return new_y, new_state
Contributor: julian121266 | Project: tensorflow | Lines: 52 | Source file: rnn_cell.py


Example 12: __call__

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM) with hypernetworks and layer normalization."""
    with vs.variable_scope(scope or type(self).__name__):
      # Parameters of gates are concatenated into one multiply for efficiency.
      total_h, total_c = tf.split(1, 2, state)
      h = total_h[:, 0:self._num_units]
      c = total_c[:, 0:self._num_units]

      self.hyper_state = tf.concat(1, [total_h[:, self._num_units:], total_c[:, self._num_units:]])
      hyper_input = tf.concat(1, [inputs, h])
      hyper_output, hyper_new_state = self.hyper_cell(hyper_input, self.hyper_state)
      self.hyper_output = hyper_output
      self.hyper_state = hyper_new_state

      input_below_ = rnn_cell._linear([inputs],
                                      4 * self._num_units, False, scope="out_1")
      input_below_ = self.hyper_norm(input_below_, 4 * self._num_units, scope="hyper_x")
      state_below_ = rnn_cell._linear([h],
                                      4 * self._num_units, False, scope="out_2")
      state_below_ = self.hyper_norm(state_below_, 4 * self._num_units, scope="hyper_h")

      if self.is_layer_norm:
        s1 = vs.get_variable("s1", initializer=tf.ones([4 * self._num_units]), dtype=tf.float32)
        s2 = vs.get_variable("s2", initializer=tf.ones([4 * self._num_units]), dtype=tf.float32)
        s3 = vs.get_variable("s3", initializer=tf.ones([self._num_units]), dtype=tf.float32)

        b1 = vs.get_variable("b1", initializer=tf.zeros([4 * self._num_units]), dtype=tf.float32)
        b2 = vs.get_variable("b2", initializer=tf.zeros([4 * self._num_units]), dtype=tf.float32)
        b3 = vs.get_variable("b3", initializer=tf.zeros([self._num_units]), dtype=tf.float32)

        input_below_ = ln(input_below_, s1, b1)
        state_below_ = ln(state_below_, s2, b2)

      lstm_matrix = tf.add(input_below_, state_below_)
      i, j, f, o = array_ops.split(1, 4, lstm_matrix)
      new_c = (c * sigmoid(f) + sigmoid(i) *
               self._activation(j))

      # Currently normalizing c causes lot of nan's in the model, thus commenting it out for now.
      # new_c_ = ln(new_c, s3, b3)
      new_c_ = new_c
      new_h = self._activation(new_c_) * sigmoid(o)

      hyper_h, hyper_c = tf.split(1, 2, hyper_new_state)
      new_total_h = tf.concat(1, [new_h, hyper_h])
      new_total_c = tf.concat(1, [new_c, hyper_c])
      new_total_state = tf.concat(1, [new_total_h, new_total_c])
      return new_h, new_total_state
Contributor: pbhatia243 | Project: tf-layer-norm | Lines: 51 | Source file: layers.py


Example 13: __call__

  def __call__(self, inputs, state, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
      # Parameters of gates are concatenated into one multiply for efficiency.
      c, h = array_ops.split(1, 2, state)
      concat = linear([inputs, h], 4 * self._num_units, True)

      # i = input_gate, j = new_input, f = forget_gate, o = output_gate
      i, j, f, o = array_ops.split(1, 4, concat)

      new_c = c * sigmoid(f + self._forget_bias) + sigmoid(i) * tanh(j)
      new_h = tanh(new_c) * sigmoid(o)

      return new_h, array_ops.concat(1, [new_c, new_h])
Contributor: AdvanceCodingTechnology | Project: tensorflow | Lines: 14 | Source file: rnn_cell.py


Example 14: __call__

  def __call__(self, inputs, state, scope=None):
    """Gated recurrent unit (GRU) with nunits cells."""
    dim = self._num_units
    with vs.variable_scope(scope or type(self).__name__):  # "GRUCell"
      with vs.variable_scope("Gates"):  # Reset gate and update gate.
        # We start with bias of 1.0 to not reset and not update.
        with vs.variable_scope( "Layer_Parameters"):

          s1 = vs.get_variable("s1", initializer=tf.ones([2*dim]), dtype=tf.float32)
          s2 = vs.get_variable("s2", initializer=tf.ones([2*dim]), dtype=tf.float32)
          s3 = vs.get_variable("s3", initializer=tf.ones([dim]), dtype=tf.float32)
          s4 = vs.get_variable("s4", initializer=tf.ones([dim]), dtype=tf.float32)
          b1 = vs.get_variable("b1", initializer=tf.zeros([2*dim]), dtype=tf.float32)
          b2 = vs.get_variable("b2", initializer=tf.zeros([2*dim]), dtype=tf.float32)
          b3 = vs.get_variable("b3", initializer=tf.zeros([dim]), dtype=tf.float32)
          b4 = vs.get_variable("b4", initializer=tf.zeros([dim]), dtype=tf.float32)


          # Code below initialized for all cells
          # s1 = tf.Variable(tf.ones([2 * dim]), name="s1")
          # s2 = tf.Variable(tf.ones([2 * dim]), name="s2")
          # s3 = tf.Variable(tf.ones([dim]), name="s3")
          # s4 = tf.Variable(tf.ones([dim]), name="s4")
          # b1 = tf.Variable(tf.zeros([2 * dim]), name="b1")
          # b2 = tf.Variable(tf.zeros([2 * dim]), name="b2")
          # b3 = tf.Variable(tf.zeros([dim]), name="b3")
          # b4 = tf.Variable(tf.zeros([dim]), name="b4")

        input_below_ = rnn_cell._linear([inputs],
                               2 * self._num_units, False, scope="out_1")
        input_below_ = ln(input_below_, s1, b1)
        state_below_ = rnn_cell._linear([state],
                               2 * self._num_units, False, scope="out_2")
        state_below_ = ln(state_below_, s2, b2)
        out = tf.add(input_below_, state_below_)
        r, u = array_ops.split(1, 2, out)
        r, u = sigmoid(r), sigmoid(u)

      with vs.variable_scope("Candidate"):
          input_below_x = rnn_cell._linear([inputs],
                                           self._num_units, False, scope="out_3")
          input_below_x = ln(input_below_x, s3, b3)
          state_below_x = rnn_cell._linear([state],
                                           self._num_units, False, scope="out_4")
          state_below_x = ln(state_below_x, s4, b4)
          c_pre = tf.add(input_below_x,r * state_below_x)
          c = self._activation(c_pre)
      new_h = u * state + (1 - u) * c
    return new_h, new_h
Contributor: jessemzhang | Project: deep_learning_genomics_nlp | Lines: 49 | Source file: LN_rnn_cell.py


Example 15: __call__

    def __call__(self, inputs, state, scope=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        dtype = inputs.dtype
        batch_size, feature_size = inputs.get_shape().as_list()
        if self._use_tgate:
            # Time gate
            feature_size = feature_size - 1
            tvscope = vs.get_variable_scope()
            with vs.variable_scope(tvscope, initializer=None) as unit_scope:
                with vs.variable_scope(unit_scope) as time_gate_scope:
                    w_t1 = vs.get_variable(
                        "w_t1", shape=[1, self._num_units], dtype=dtype)
                    bias_t1 = vs.get_variable(
                        "bias_t1", [self._num_units], dtype=dtype,
                        initializer=init_ops.constant_initializer(0.0, dtype=dtype))
                    w_tx1 = vs.get_variable(
                        "w_tx1", shape=[feature_size, self._num_units], dtype=dtype)
                seq = tf.slice(inputs, begin=[0, 0], size=[batch_size, feature_size])
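                # NOTE: the hard-coded offset 56 assumes feature_size == 56, i.e. the
                # inputs carry 56 feature columns followed by one time-delta column.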
                delta_t = tf.slice(inputs, begin=[0, 56], size=[batch_size, 1])


                t1_act = (self._activation(math_ops.matmul(delta_t, w_t1)) +
                          math_ops.matmul(seq, w_tx1) + bias_t1)
                t1 = sigmoid(t1_act)
                inputs = seq
        # for initial state
        (state, state_decay) = state
        with vs.variable_scope("gates"):  # Reset gate and update gate.
            # We start with bias of 1.0 to not reset and not update.
            value = sigmoid(_linear(
                [inputs, state], 2 * self._num_units, True, 1.0))
            r, u = array_ops.split(value=value,
                                   num_or_size_splits=2,
                                   axis=1)
        with vs.variable_scope("candidate"):
            c = self._activation(_linear([inputs, r * state],
                                         self._num_units, True))
        new_h = u * state + (1 - u) * c

        if self._use_tgate:
            new_h_decay = u * t1 * state_decay + (1 - u * t1) * c
            new_state = TGRUStateTuple(new_h, new_h_decay)
            new_h = tf.concat([new_h, new_h_decay], axis=1)
        else:
            new_state = TGRUStateTuple(new_h, new_h)

        return new_h, new_state
Contributor: gjjg1331jggj | Project: Attention-GRU-3M | Lines: 49 | Source file: trnn.py


Example 16: GetParams

  def GetParams(self):
    """Test for multi connection neighboring nodes wiring tests in TF-TRT."""
    dtype = dtypes.float32
    input_name = "input"
    input_dims = [2, 3, 7, 5]
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(dtype=dtype, shape=input_dims, name=input_name)
      e = constant_op.constant(
          np.random.normal(.05, .005, [3, 2, 3, 4]),
          name="weights",
          dtype=dtype)
      conv = nn.conv2d(
          input=x,
          filter=e,
          data_format="NCHW",
          strides=[1, 1, 1, 1],
          padding="VALID",
          name="conv")
      b = constant_op.constant(
          np.random.normal(2.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      t = conv + b

      b = constant_op.constant(
          np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      q = conv - b
      edge = math_ops.sigmoid(q)

      b = constant_op.constant(
          np.random.normal(5.0, 1.0, [1, 4, 1, 1]), name="bias", dtype=dtype)
      d = b + conv
      edge3 = math_ops.sigmoid(d)

      edge1 = gen_math_ops.tan(conv)
      t = t - edge1
      q = q + edge
      t = t + q
      t = t + d
      t = t - edge3
      array_ops.squeeze(t, name=self.output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        expected_engines=["my_trt_op_0", "my_trt_op_1"],
        expected_output_dims=(2, 4, 5, 4),
        allclose_atol=1.e-03,
        allclose_rtol=1.e-03)
Contributor: ZhangXinNan | Project: tensorflow | Lines: 48 | Source file: multi_connection_neighbor_engine_test.py


Example 17: predictions

  def predictions(self, examples):
    """Add operations to compute predictions by the model.

    If logistic_loss is being used, predicted probabilities are returned.
    Otherwise, (raw) linear predictions (w*x) are returned.

    Args:
      examples: Examples to compute predictions on.

    Returns:
      An Operation that computes the predictions for examples.

    Raises:
      ValueError: if examples are not well defined.
    """
    self._assertSpecified(
        ['example_weights', 'sparse_features', 'dense_features'], examples)
    self._assertList(['sparse_features', 'dense_features'], examples)

    result = self._linear_predictions(examples)
    if self._options['loss_type'] == 'logistic_loss':
      # Convert logits to probability for logistic loss predictions.
      with name_scope('sdca/logistic_prediction'):
        result = math_ops.sigmoid(result)
    return result
Contributor: billho | Project: tensorflow | Lines: 25 | Source file: sdca_ops.py


Example 18: _Model

 def _Model(x):
   w = variable_scope.get_variable(
       "w", (64, 64),
       initializer=init_ops.random_uniform_initializer(seed=312))
   b = variable_scope.get_variable(
       "b", (64), initializer=init_ops.zeros_initializer()),
   return math_ops.sigmoid(math_ops.matmul(x, w) + b)
Contributor: kadeng | Project: tensorflow | Lines: 7 | Source file: function_test.py


Example 19: call

    def call(self, inputs, state, att_score=None):
        """Gated recurrent unit (GRU) with nunits cells."""
        if self._gate_linear is None:
            bias_ones = self._bias_initializer
            if self._bias_initializer is None:
                bias_ones = init_ops.constant_initializer(
                    1.0, dtype=inputs.dtype)
            with vs.variable_scope("gates"):  # Reset gate and update gate.
                self._gate_linear = _Linear(
                    [inputs, state],
                    2 * self._num_units,
                    True,
                    bias_initializer=bias_ones,
                    kernel_initializer=self._kernel_initializer)

        value = math_ops.sigmoid(self._gate_linear([inputs, state]))
        r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)

        r_state = r * state
        if self._candidate_linear is None:
            with vs.variable_scope("candidate"):
                self._candidate_linear = _Linear(
                    [inputs, r_state],
                    self._num_units,
                    True,
                    bias_initializer=self._bias_initializer,
                    kernel_initializer=self._kernel_initializer)
        c = self._activation(self._candidate_linear([inputs, r_state]))
        u = (1.0 - att_score) * u
        new_h = u * state + (1 - u) * c
        return new_h, new_h
Contributor: q64545 | Project: x-deeplearning | Lines: 31 | Source file: utils.py


Example 20: call

 def call(self, inputs, state):
   """Gated recurrent unit (GRU) with nunits cells."""
   with vs.variable_scope("gates"):  # Reset gate and update gate.
     # We start with bias of 1.0 to not reset and not update.
     bias_ones = self._bias_initializer
     if self._bias_initializer is None:
       dtype = inputs.dtype
       bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
     # pylint: disable=protected-access
     value = math_ops.sigmoid(
         rnn_cell_impl._linear([inputs, state], 2 * self._num_units, True,
                               bias_ones, self._kernel_initializer))
     r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
     # pylint: enable=protected-access
   with vs.variable_scope("candidate"):
     # pylint: disable=protected-access
     with vs.variable_scope("input_projection"):
       hi = rnn_cell_impl._linear(inputs, self._num_units, True,
                                  self._bias_initializer,
                                  self._kernel_initializer)
     with vs.variable_scope("hidden_projection"):
       hh = r * (rnn_cell_impl._linear(state, self._num_units, True,
                                       self._bias_initializer,
                                       self._kernel_initializer))
     # pylint: enable=protected-access
     c = self._activation(hi + hh)
   new_h = u * state + (1 - u) * c
   return new_h, new_h
Contributor: Dr4KK | Project: tensorflow | Lines: 28 | Source file: cudnn_rnn_ops.py



Note: the tensorflow.python.ops.math_ops.sigmoid examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Please do not reproduce this article without permission.

