Python math_ops.greater Function Code Examples


This article collects typical usage examples of the tensorflow.python.ops.math_ops.greater function in Python. If you are wondering what greater does, how to call it, or what real-world uses look like, the hand-picked code examples below should help.



A total of 20 code examples of the greater function are shown below, sorted roughly by popularity.
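
Before the individual examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming TensorFlow 2.x with eager execution) of what math_ops.greater does: it compares two tensors element-wise and returns a boolean tensor, which is then typically consumed by select/where-style ops, exactly as most of the examples below do. In user-facing code the same op is exposed as tf.greater / tf.math.greater.

import tensorflow as tf

num = tf.constant([3.0, 0.0, 5.0])
den = tf.constant([2.0, 0.0, 4.0])

# Element-wise comparison: True wherever den > 0, i.e. [True, False, True].
is_pos = tf.greater(den, 0.0)

# A common pattern from the examples below (see _safe_div): divide only where
# the denominator is positive, substituting 1 for zero denominators so the
# division itself never produces NaN/inf, and return 0 elsewhere.
safe_div = tf.where(
    is_pos,
    num / tf.where(tf.equal(den, 0.0), tf.ones_like(den), den),
    tf.zeros_like(num))

print(is_pos.numpy())    # [ True False  True]
print(safe_div.numpy())  # [1.5  0.   1.25]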

Example 1: mode

  def mode(self, name="mode"):
    """Mode of the distribution.

    Note that the mode for the Beta distribution is only defined
    when `a > 1`, `b > 1`. This returns the mode when `a > 1` and `b > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Beta distribution.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._a, self._b, self._a_b_sum], name):
        a = self._a
        b = self._b
        a_b_sum = self._a_b_sum
        one = constant_op.constant(1, self.dtype)
        mode = (a - 1) / (a_b_sum - 2)

        if self.allow_nan_stats:
          return math_ops.select(
              math_ops.logical_and(
                  math_ops.greater(a, 1), math_ops.greater(b, 1)),
              mode,
              (constant_op.constant(float("NaN"), dtype=self.dtype) *
               array_ops.ones_like(a_b_sum, dtype=self.dtype)))
        else:
          return control_flow_ops.with_dependencies([
              check_ops.assert_less(one, a),
              check_ops.assert_less(one, b)], mode)
Contributor: 2020zyc | Project: tensorflow | Lines: 33 | Source: beta.py


Example 2: _variance

  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = array_ops.where(math_ops.greater(self.df, 2.),
                            self.df - 2.,
                            array_ops.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           math_ops.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = array_ops.where(
        self.df > array_ops.fill(self.batch_shape_tensor(), 2.),
        var,
        array_ops.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return array_ops.where(
          math_ops.greater(
              self.df,
              array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          array_ops.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              check_ops.assert_less(
                  array_ops.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
Contributor: daiwk | Project: tensorflow | Lines: 33 | Source: student_t.py


Example 3: _mode

 def _mode(self):
     mode = (self.a - 1.0) / (self.a_b_sum - 2.0)
     if self.allow_nan_stats:
         nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
         return math_ops.select(
             math_ops.logical_and(math_ops.greater(self.a, 1.0), math_ops.greater(self.b, 1.0)),
             mode,
             array_ops.fill(self.batch_shape(), nan, name="nan"),
         )
     else:
         return control_flow_ops.with_dependencies(
             [
                 check_ops.assert_less(
                     array_ops.ones((), dtype=self.dtype),
                     self.a,
                     message="Mode not defined for components of a <= 1.",
                 ),
                 check_ops.assert_less(
                     array_ops.ones((), dtype=self.dtype),
                     self.b,
                     message="Mode not defined for components of b <= 1.",
                 ),
             ],
             mode,
         )
Contributor: caisq | Project: tensorflow | Lines: 25 | Source: beta.py


Example 4: _variance

    def _variance(self):
        var = self._ones() * math_ops.square(self.sigma) * self.df / (self.df - 2)
        # When 1 < df <= 2, variance is infinite.
        inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
        result_where_defined = math_ops.select(
            math_ops.greater(self.df, array_ops.fill(self.batch_shape(), 2.0)),
            var,
            array_ops.fill(self.batch_shape(), inf, name="inf"),
        )

        if self.allow_nan_stats:
            nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
            return math_ops.select(
                math_ops.greater(self.df, self._ones()),
                result_where_defined,
                array_ops.fill(self.batch_shape(), nan, name="nan"),
            )
        else:
            return control_flow_ops.with_dependencies(
                [
                    check_ops.assert_less(
                        array_ops.ones((), dtype=self.dtype),
                        self.df,
                        message="variance not defined for components of df <= 1",
                    )
                ],
                result_where_defined,
            )
Contributor: apollos | Project: tensorflow | Lines: 28 | Source: student_t.py


Example 5: compute_lr

 def compute_lr(self, grad, var):
   scaled_lr = self._learning_rate
   if self._skip_list is None or not any(v in var.name
                                         for v in self._skip_list):
     w_norm = linalg_ops.norm(var, ord=2)
     g_norm = linalg_ops.norm(grad, ord=2)
     trust_ratio = array_ops.where(
         math_ops.greater(w_norm, 0),
         array_ops.where(
             math_ops.greater(g_norm, 0),
             (self._eeta * w_norm /
              (g_norm + self._weight_decay * w_norm + self._epsilon)), 1.0),
         1.0)
     scaled_lr = self._learning_rate * trust_ratio
   return scaled_lr
Contributor: Albert-Z-Guo | Project: tensorflow | Lines: 15 | Source: lars_optimizer.py


Example 6: _safe_div

def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if isinstance(denominator, float):
    if math_ops.equal(denominator, 0.0):
      return ops.convert_to_tensor(0.0, dtype=numerator.dtype)
    return math_ops.div(numerator, denominator)
  if context.in_eager_mode() and denominator._rank() == 0:  # pylint: disable=protected-access
    if math_ops.equal(denominator, 0.0):
      return ops.convert_to_tensor(0.0, dtype=numerator.dtype)
    return math_ops.div(numerator, denominator)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Contributor: smajida | Project: tensorflow | Lines: 31 | Source: losses_impl.py


Example 7: filter_functions

  def filter_functions():
    take_all = lambda x: constant_op.constant(True)
    is_zero = lambda x: math_ops.equal(x, 0)
    greater = lambda x: math_ops.greater(x + 5, 0)

    tests = []
    filters = [take_all, is_zero, greater]
    identity = lambda x: x
    for x, predicate_1 in enumerate(filters):
      for y, predicate_2 in enumerate(filters):
        tests.append(("Mixed{}{}".format(x, y), identity,
                      [predicate_1, predicate_2]))
        for z, predicate_3 in enumerate(filters):
          tests.append(("Mixed{}{}{}".format(x, y, z), identity,
                        [predicate_1, predicate_2, predicate_3]))

    take_all_multiple = lambda x, y: constant_op.constant(True)
    # Multi output
    tests.append(("Multi1", lambda x: (x, x),
                  [take_all_multiple, take_all_multiple]))
    tests.append(("Multi2", lambda x: (x, 2), [
        take_all_multiple,
        lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)
    ]))
    return tuple(tests)
Contributor: AnishShah | Project: tensorflow | Lines: 25 | Source: map_and_filter_fusion_test.py


Example 8: map_and_filter_functions

  def map_and_filter_functions():
    identity = lambda x: x
    increment = lambda x: x + 1
    minus_five = lambda x: x - 5

    def increment_and_square(x):
      y = x + 1
      return y * y

    take_all = lambda x: constant_op.constant(True)
    is_zero = lambda x: math_ops.equal(x, 0)
    is_odd = lambda x: math_ops.equal(x % 2, 0)
    greater = lambda x: math_ops.greater(x + 5, 0)

    functions = [identity, increment, minus_five, increment_and_square]
    filters = [take_all, is_zero, is_odd, greater]
    tests = []

    for x, fun in enumerate(functions):
      for y, predicate in enumerate(filters):
        tests.append(("Mixed{}{}".format(x, y), fun, predicate))

    # Multi output
    tests.append(("Multi1", lambda x: (x, x),
                  lambda x, y: constant_op.constant(True)))
    tests.append(
        ("Multi2", lambda x: (x, 2),
         lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
    return tuple(tests)
Contributor: AnishShah | Project: tensorflow | Lines: 29 | Source: map_and_filter_fusion_test.py


Example 9: _prune_invalid_weights

def _prune_invalid_weights(sparse_ids, sparse_weights):
  """Prune invalid weights (< 0) from the input ids and weights."""
  if sparse_weights is not None:
    is_weights_valid = math_ops.greater(sparse_weights.values, 0)
    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
  return sparse_ids, sparse_weights
Contributor: JonathanRaiman | Project: tensorflow | Lines: 7 | Source: embedding_ops.py


Example 10: gcd

def gcd(a, b, name=None):
  """Returns the greatest common divisor via Euclid's algorithm.

  Args:
    a: The dividend. A scalar integer `Tensor`.
    b: The divisor. A scalar integer `Tensor`.
    name: An optional name for the operation.

  Returns:
    A scalar `Tensor` representing the greatest common divisor between `a` and
    `b`.

  Raises:
    ValueError: If `a` or `b` are not scalar integers.
  """
  with ops.name_scope(name, 'gcd', [a, b]):
    a = ops.convert_to_tensor(a)
    b = ops.convert_to_tensor(b)

    a.shape.assert_has_rank(0)
    b.shape.assert_has_rank(0)

    if not a.dtype.is_integer:
      raise ValueError('a must be an integer type. Got: %s' % a.dtype)
    if not b.dtype.is_integer:
      raise ValueError('b must be an integer type. Got: %s' % b.dtype)

    cond = lambda _, b: math_ops.greater(b, array_ops.zeros_like(b))
    body = lambda a, b: [b, math_ops.mod(a, b)]
    a, b = control_flow_ops.while_loop(cond, body, [a, b], back_prop=False)
    return a
Contributor: 1000sprites | Project: tensorflow | Lines: 31 | Source: util_ops.py


Example 11: average_impurity

  def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big)
Contributor: AutumnQYN | Project: tensorflow | Lines: 25 | Source: tensor_forest.py


Example 12: mode

  def mode(self, name="mode"):
    """Mode of the distribution.

    Note that the mode for the Dirichlet distribution is only defined
    when `alpha > 1`. This returns the mode when `alpha > 1`,
    and NaN otherwise. If `self.allow_nan_stats` is `False`, an exception
    will be raised rather than returning `NaN`.

    Args:
      name: The name for this op.

    Returns:
      Mode of the Dirichlet distribution.
    """
    with ops.name_scope(self.name):
      with ops.op_scope([self._alpha, self._alpha_0], name):
        one = constant_op.constant(1, self.dtype)
        mode = (self._alpha - 1) / (
            array_ops.expand_dims(self._alpha_0, -1) - math_ops.cast(
                self.event_shape()[0], self.dtype))

        if self.allow_nan_stats:
          return math_ops.select(
              math_ops.greater(self._alpha, 1),
              mode,
              (constant_op.constant(float("NaN"), dtype=self.dtype) *
               array_ops.ones_like(self._alpha, dtype=self.dtype)))
        else:
          return control_flow_ops.with_dependencies([
              check_ops.assert_less(
                  one, self._alpha,
                  message="mode not defined for components of alpha <= 1")
          ], mode)
Contributor: 10imaging | Project: tensorflow | Lines: 33 | Source: dirichlet.py


Example 13: _safe_div

def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  if compat.forward_compatible(2018, 11, 1):
    return math_ops.div_no_nan(numerator, denominator, name=name)
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator,
                   array_ops.where(
                       math_ops.equal(denominator, 0),
                       array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name)
Contributor: ThunderQi | Project: tensorflow | Lines: 26 | Source: loss_ops.py


Example 14: _get_stratified_batch_from_tensors

def _get_stratified_batch_from_tensors(val, label, reject_probs, batch_size,
                                       queue_threads=3):
  """Reject examples one-at-a-time based on class."""
  # Make rejection probabilities into a tensor so they can be dynamically
  # accessed by tensors.
  reject_probs = constant_op.constant(
      reject_probs, dtype=dtypes.float32, name='rejection_probabilities')

  # Make queue that will have proper class proportions. Contains exactly one
  # batch at a time.
  val_shape = val.get_shape()
  label_shape = label.get_shape()
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=[val_shape, label_shape],
                                    dtypes=[val.dtype, label.dtype],
                                    name='batched_queue')

  # Conditionally enqueue.
  eq_tf = array_ops.reshape(math_ops.greater(
      random_ops.random_uniform([1]),
      array_ops.slice(reject_probs, [label], [1])),
                            [])
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue([val, label]),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  return final_q.dequeue_many(batch_size)
Contributor: Bala96 | Project: tensorflow | Lines: 30 | Source: sampling_ops.py


Example 15: testLargeCase

  def testLargeCase(self):
    shape = [32, 512, 256, 1]
    predictions = random_ops.random_uniform(
        shape, 0.0, 1.0, dtype=dtypes_lib.float32)
    labels = math_ops.greater(random_ops.random_uniform(shape, 0.0, 1.0), 0.5)

    result, update_op = metric_ops.precision_recall_at_equal_thresholds(
        labels=labels, predictions=predictions, num_thresholds=201)
    # Run many updates, enough to cause highly inaccurate values if the
    # code used float32 for accumulation.
    num_updates = 71

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      for _ in xrange(num_updates):
        sess.run(update_op)

      prdata = sess.run(result)

      # Since we use random values, we won't know the tp/fp/tn/fn values, but
      # tp and fp at threshold 0 should be the total number of positive and
      # negative labels, hence their sum should be total number of pixels.
      expected_value = 1.0 * np.product(shape) * num_updates
      got_value = prdata.tp[0] + prdata.fp[0]
      # They should be at least within 1.
      self.assertNear(got_value, expected_value, 1.0)
Contributor: BhaskarNallani | Project: tensorflow | Lines: 26 | Source: metric_ops_large_test.py


Example 16: string_input_producer

def string_input_producer(string_tensor,
                          num_epochs=None,
                          shuffle=True,
                          seed=None,
                          capacity=32,
                          shared_name=None,
                          name=None,
                          cancel_op=None):
  """Output strings (e.g. filenames) to a queue for an input pipeline.

  Note: if `num_epochs` is not `None`, this function creates local counter
  `epochs`. Use `local_variables_initializer()` to initialize local variables.

  Args:
    string_tensor: A 1-D string tensor with the strings to produce.
    num_epochs: An integer (optional). If specified, `string_input_producer`
      produces each string from `string_tensor` `num_epochs` times before
      generating an `OutOfRange` error. If not specified,
      `string_input_producer` can cycle through the strings in `string_tensor`
      an unlimited number of times.
    shuffle: Boolean. If true, the strings are randomly shuffled within each
      epoch.
    seed: An integer (optional). Seed used if shuffle == True.
    capacity: An integer. Sets the queue capacity.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: A name for the operations (optional).
    cancel_op: Cancel op for the queue (optional).

  Returns:
    A queue with the output strings.  A `QueueRunner` for the Queue
    is added to the current `Graph`'s `QUEUE_RUNNER` collection.

  Raises:
    ValueError: If the string_tensor is a null Python list.  At runtime,
    will fail with an assertion if string_tensor becomes a null tensor.
  """
  not_null_err = "string_input_producer requires a non-null input tensor"
  if not isinstance(string_tensor, ops.Tensor) and not string_tensor:
    raise ValueError(not_null_err)

  with ops.name_scope(name, "input_producer", [string_tensor]) as name:
    string_tensor = ops.convert_to_tensor(string_tensor, dtype=dtypes.string)
    with ops.control_dependencies([
        control_flow_ops.Assert(
            math_ops.greater(array_ops.size(string_tensor), 0),
            [not_null_err])]):
      string_tensor = array_ops.identity(string_tensor)
    return input_producer(
        input_tensor=string_tensor,
        element_shape=[],
        num_epochs=num_epochs,
        shuffle=shuffle,
        seed=seed,
        capacity=capacity,
        shared_name=shared_name,
        name=name,
        summary_name="fraction_of_%d_full" % capacity,
        cancel_op=cancel_op)
Contributor: ComeOnGetMe | Project: tensorflow | Lines: 59 | Source: input.py


Example 17: _prune_invalid_ids

def _prune_invalid_ids(sparse_ids, sparse_weights):
    """Prune invalid IDs (< 0) from the input ids and weights."""
    is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
    if sparse_weights is not None:
        is_id_valid = math_ops.logical_and(is_id_valid, math_ops.greater(sparse_weights.values, 0))
    sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
    if sparse_weights is not None:
        sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
    return sparse_ids, sparse_weights
Contributor: yuikns | Project: tensorflow | Lines: 9 | Source: embedding_ops.py


Example 18: _logits_to_prediction

 def _logits_to_prediction(self, logits=None):
   predictions = {PredictionKey.LOGITS: logits}
   if self.logits_dimension == 1:
     predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
     logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
   predictions[PredictionKey.PROBABILITIES] = math_ops.sigmoid(logits)
   predictions[PredictionKey.CLASSES] = math_ops.to_int64(
       math_ops.greater(logits, 0))
   return predictions
Contributor: caikehe | Project: tensorflow | Lines: 9 | Source: head.py


Example 19: _SegmentMinGrad

def _SegmentMinGrad(op, grad):
  """Gradient for SegmentMin."""
  zeros = array_ops.zeros(array_ops.shape(op.inputs[0]),
                          dtype=op.inputs[0].dtype)
  gathered_grads = array_ops.gather(grad, op.inputs[1])
  gathered_outputs = array_ops.gather(op.outputs[0], op.inputs[1])
  return math_ops.select(math_ops.greater(op.inputs[0], gathered_outputs),
                         zeros,
                         gathered_grads), None
Contributor: TeMedy | Project: tensorflow | Lines: 9 | Source: math_grad.py


Example 20: AddForwardAccumulateLoop

  def AddForwardAccumulateLoop(self, value):
    """Add an accumulation loop for each value needed in backprop.

    This is added to the forward loop at the first time when a value
    in the forward loop is used by backprop gradient computation loop.

    The pseudocode is:
    ```
      acc;
      while (_pivot) {
        if (index == 0) [value] else Concat(acc, [value]);
      }
    ```

    Args:
      value: The tensor that is accumulated.

    Returns:
      The accumulated history of value.

    Raises:
      ValueError: If the shape of "value" is not known statically.
    """
    if not value.get_shape().is_fully_defined():
      raise ValueError("Must have known shape: %s" % value)
    self._grad_context.Exit()
    # TODO(irving): Now that acc starts out empty, most of the
    # conditional logic can go away.
    acc = constant_op.constant([],
                               value.dtype,
                               shape=[0] + value.get_shape().as_list(),
                               name="f_acc")
    self.Enter()
    self.AddName(acc.name)
    enter_acc = _Enter(acc, self._name, is_constant=False,
                       parallel_iterations=self._parallel_iterations,
                       name="f_acc")
    merge_acc = merge([enter_acc, enter_acc])[0]
    switch_acc = switch(merge_acc, self._pivot)

    # If index = 0 then [value] else Concat(acc, [value]).
    cond = math_ops.greater(self._index, 0)
    switch_add_acc = switch(switch_acc[1], cond)
    expand_value = array_ops.expand_dims(value, 0)
    true_branch = array_ops.concat(0, [switch_add_acc[1], expand_value])
    false_branch = array_ops.identity(switch_add_acc[0])
    false_branch = with_dependencies([false_branch], expand_value)
    add_acc = merge([false_branch, true_branch])[0]

    next_acc = next_iteration(add_acc)
    merge_acc.op._update_input(1, next_acc)

    exit_acc = exit(switch_acc[0], name="f_acc")
    self.Exit()
    self._grad_context.Enter()
    return exit_acc
Contributor: p-zhang | Project: tensorflow | Lines: 56 | Source: control_flow_ops.py



Note: The tensorflow.python.ops.math_ops.greater examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.

