
Python array_ops.strided_slice Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.strided_slice. If you have been wondering what strided_slice does and how to call it, the curated code examples below should help.



Twenty code examples of the strided_slice function are shown below, ordered by popularity.
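
The examples import the internal module tensorflow.python.ops.array_ops directly, as TensorFlow's own code base does; the same operation is also exposed through the public tf.strided_slice API. As a point of reference, here is a minimal sketch of the basic call (an illustration assuming TensorFlow 2.x with eager execution, not taken from the examples below): each dimension is sliced from begin (inclusive) to end (exclusive) with the given stride.

import tensorflow as tf

x = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])

# Take rows 1..2 and every second column: begin=[1, 0], end=[3, 3],
# strides=[1, 2].
s = tf.strided_slice(x, begin=[1, 0], end=[3, 3], strides=[1, 2])
print(s)  # [[4 6]
          #  [7 9]]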

Example 1: _get_diff_for_monotonic_comparison

def _get_diff_for_monotonic_comparison(x):
  """Gets the difference x[1:] - x[:-1]."""
  x = array_ops.reshape(x, [-1])
  if not is_numeric_tensor(x):
    raise TypeError('Expected x to be numeric, instead found: %s' % x)

  # If x has less than 2 elements, there is nothing to compare.  So return [].
  is_shorter_than_two = math_ops.less(array_ops.size(x), 2)
  short_result = lambda: ops.convert_to_tensor([], dtype=x.dtype)

  # With 2 or more elements, return x[1:] - x[:-1]
  s_len = array_ops.shape(x) - 1
  diff = lambda: (array_ops.strided_slice(x, [1], [1] + s_len) -
                  array_ops.strided_slice(x, [0], s_len))
  return control_flow_ops.cond(is_shorter_than_two, short_result, diff)
Author: Dr4KK, Project: tensorflow, Lines: 14, Source: check_ops.py
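
For readers who want to reproduce Example 1's x[1:] - x[:-1] pattern outside the TensorFlow code base, a minimal sketch with the public API (an illustration assuming TensorFlow 2.x and eager execution):

import tensorflow as tf

x = tf.constant([1.0, 3.0, 6.0, 10.0])
s_len = tf.shape(x) - 1  # length of the difference vector, here [3]
diff = (tf.strided_slice(x, [1], [1] + s_len, strides=[1]) -  # x[1:]
        tf.strided_slice(x, [0], s_len, strides=[1]))         # x[:-1]
print(diff)  # [2. 3. 4.]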


Example 2: _inverse

  def _inverse(self, y):
    # To derive the inverse mapping note that:
    #   y[i] = exp(x[i]) / normalization
    # and
    #   y[end] = 1 / normalization.
    # Thus:
    # x[i] = log(exp(x[i])) - log(y[end]) - log(normalization)
    #      = log(exp(x[i])/normalization) - log(y[end])
    #      = log(y[i]) - log(y[end])
    shape = (np.asarray(y.shape.as_list(), dtype=np.int32)
             if y.shape.is_fully_defined()
             else array_ops.shape(y, name="shape"))
    ndims = distribution_util.prefer_static_rank(y)

    # Do this first to make sure CSE catches that it'll happen again in
    # _inverse_log_det_jacobian.
    x = math_ops.log(y)

    # We now extract the last coordinate of the rightmost dimension.
    # Our trick is to slice from [0,0,...,shape[-1]-1] to shape[:-1]+[1].
    begin = array_ops.one_hot(indices=ndims-1,
                              depth=ndims,
                              on_value=shape[-1]-np.array(1, dtype=shape.dtype),
                              dtype=shape.dtype)
    size = array_ops.concat([shape[:-1], np.asarray([1], dtype=shape.dtype)], 0)
    log_normalization = -array_ops.strided_slice(x, begin, begin + size)

    # Here we slice out all but the last coordinate; see above for idea.
    begin = array_ops.zeros_like(shape)
    size = array_ops.concat([shape[:-1], [shape[-1] - 1]], 0)
    x = array_ops.strided_slice(x, begin, begin + size)

    x += log_normalization

    if self._static_event_ndims == 0:
      x = array_ops.squeeze(x, squeeze_dims=[ndims-1])

    # Set shape hints.
    if y.shape.ndims is not None:
      shape = y.shape.as_list()
      if self._static_event_ndims == 0:
        shape = shape[:-1]
      elif shape[-1] is not None:
        shape[-1] -= 1
      shape = tensor_shape.TensorShape(shape)
      x.shape.assert_is_compatible_with(shape)
      x.set_shape(shape)

    return x
Author: AbhinavJain13, Project: tensorflow, Lines: 49, Source: softmax_centered.py


Example 3: _flip_vector_to_matrix_dynamic

def _flip_vector_to_matrix_dynamic(vec, batch_shape):
  """flip_vector_to_matrix with dynamic shapes."""
  # Shapes associated with batch_shape
  batch_rank = array_ops.size(batch_shape)

  # Shapes associated with vec.
  vec = ops.convert_to_tensor(vec, name="vec")
  vec_shape = array_ops.shape(vec)
  vec_rank = array_ops.rank(vec)
  vec_batch_rank = vec_rank - 1

  m = vec_batch_rank - batch_rank
  # vec_shape_left = [M1,...,Mm] or [].
  vec_shape_left = array_ops.strided_slice(vec_shape, [0], [m])
  # If vec_shape_left = [], then condensed_shape = [1] since reduce_prod([]) = 1
  # If vec_shape_left = [M1,...,Mm], condensed_shape = [M1*...*Mm]
  condensed_shape = [math_ops.reduce_prod(vec_shape_left)]
  k = array_ops.gather(vec_shape, vec_rank - 1)
  new_shape = array_ops.concat(0, (batch_shape, [k], condensed_shape))

  def _flip_front_dims_to_back():
    # Permutation corresponding to [N1,...,Nn] + [k, M1,...,Mm]
    perm = array_ops.concat(
        0, (math_ops.range(m, vec_rank), math_ops.range(0, m)))
    return array_ops.transpose(vec, perm=perm)

  x_flipped = control_flow_ops.cond(
      math_ops.less(0, m),
      _flip_front_dims_to_back,
      lambda: array_ops.expand_dims(vec, -1))

  return array_ops.reshape(x_flipped, new_shape)
Author: Hwhitetooth, Project: tensorflow, Lines: 32, Source: operator_pd.py


Example 4: test3DNegativeStride

  def test3DNegativeStride(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[3, 4, 10])
        with self.test_scope():
          o = array_ops.strided_slice(i, [2, 2, 6], [0, 0, 2], [-1, -1, -2])
        params = {
          i: [[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
               [9, 8, 7, 6, 5, 4, 3, 2, 1, 0],
               [5, 3, 1, 7, 9, 2, 4, 6, 8, 0],
               [4, 5, 2, 4, 3, 7, 6, 8, 9, 4]],
              [[5, 5, 5, 5, 5, 5, 5, 5, 5, 5],
               [4, 3, 4, 5, 7, 6, 5, 3, 4, 5],
               [8, 7, 6, 5, 4, 3, 2, 1, 8, 7],
               [7, 1, 7, 1, 8, 1, 8, 1, 3, 1]],
              [[7, 5, 7, 5, 7, 5, 7, 5, 7, 5],
               [1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
               [9, 8, 7, 9, 8, 7, 9, 8, 7, 9],
               [9, 9, 5, 5, 6, 6, 3, 3, 6, 6]]]
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([[[9, 8],
                              [1, 1]],
                             [[2, 4],
                              [5, 7]]], result)
Author: AlbertXiebnu, Project: tensorflow, Lines: 26, Source: slice_ops_test.py


Example 5: _my_metric_op

 def _my_metric_op(predictions, labels):
   # For the case of binary classification, the 2nd column of "predictions"
   # denotes the model predictions.
   labels = math_ops.to_float(labels)
   predictions = array_ops.strided_slice(
       predictions, [0, 1], [-1, 2], end_mask=1)
   labels = math_ops.cast(labels, predictions.dtype)
   return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
Author: eduardofv, Project: tensorflow, Lines: 8, Source: debug_test.py


Example 6: testConcatSlice

 def testConcatSlice(self):
   r1 = test_ops.stub_resource_handle_op(container="a", shared_name="b")
   r2 = test_ops.stub_resource_handle_op(container="a", shared_name="c")
   c = array_ops.stack([r1, r2])
   s = array_ops.strided_slice(c, [1], [2])
   self.evaluate(test_ops.resource_create_op(s))
   with self.assertRaises(errors.AlreadyExistsError):
     self.evaluate(test_ops.resource_create_op(r2))
Author: ChengYuXiang, Project: tensorflow, Lines: 8, Source: array_ops_test.py


Example 7: testInt64GPU

 def testInt64GPU(self):
   if not test_util.is_gpu_available():
     self.skipTest("No GPU available")
   with self.test_session(use_gpu=True, force_gpu=True):
     x = constant_op.constant([1., 2., 3.])
     begin = constant_op.constant([2], dtype=dtypes.int64)
     end = constant_op.constant([3], dtype=dtypes.int64)
     strides = constant_op.constant([1], dtype=dtypes.int64)
     s = array_ops.strided_slice(x, begin, end, strides)
     self.assertAllEqual([3.], self.evaluate(s))
Author: ChengYuXiang, Project: tensorflow, Lines: 10, Source: array_ops_test.py


Example 8: testStridedSlice

  def testStridedSlice(self):
    self._testNAry(lambda x: array_ops.strided_slice(*x),
                   [np.array([[], [], []], dtype=np.float32),
                    np.array([1, 0], dtype=np.int32),
                    np.array([3, 0], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32)],
                   expected=np.array([[], []], dtype=np.float32))

    if np.int64 in self.int_types:
      self._testNAry(
          lambda x: array_ops.strided_slice(*x), [
              np.array([[], [], []], dtype=np.float32), np.array(
                  [1, 0], dtype=np.int64), np.array([3, 0], dtype=np.int64),
              np.array([1, 1], dtype=np.int64)
          ],
          expected=np.array([[], []], dtype=np.float32))

    self._testNAry(lambda x: array_ops.strided_slice(*x),
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32),
                    np.array([1, 1], dtype=np.int32),
                    np.array([3, 3], dtype=np.int32),
                    np.array([1, 1], dtype=np.int32)],
                   expected=np.array([[5, 6], [8, 9]], dtype=np.float32))

    self._testNAry(lambda x: array_ops.strided_slice(*x),
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32),
                    np.array([0, 2], dtype=np.int32),
                    np.array([2, 0], dtype=np.int32),
                    np.array([1, -1], dtype=np.int32)],
                   expected=np.array([[3, 2], [6, 5]], dtype=np.float32))

    self._testNAry(lambda x: x[0][0:2, array_ops.newaxis, ::-1],
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32)],
                   expected=np.array([[[3, 2, 1]], [[6, 5, 4]]],
                                     dtype=np.float32))

    self._testNAry(lambda x: x[0][1, :, array_ops.newaxis],
                   [np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                             dtype=np.float32)],
                   expected=np.array([[4], [5], [6]], dtype=np.float32))
Author: AbhinavJain13, Project: tensorflow, Lines: 43, Source: nary_ops_test.py


Example 9: training_graph

  def training_graph(self,
                     input_data,
                     input_labels,
                     data_spec=None,
                     **tree_kwargs):
    """Constructs a TF graph for training a random forest.

    Args:
      input_data: A tensor or SparseTensor or placeholder for input data.
      input_labels: A tensor or placeholder for labels associated with
        input_data.
      data_spec: A list of tf.dtype values specifying the original types of
        each column.
      **tree_kwargs: Keyword arguments passed to each tree's training_graph.

    Returns:
      The last op in the random forest training graph.
    """
    data_spec = [constants.DATA_FLOAT] if data_spec is None else data_spec
    tree_graphs = []
    for i in range(self.params.num_trees):
      with ops.device(self.device_assigner.get_device(i)):
        seed = self.params.base_random_seed
        if seed != 0:
          seed += i
        # If using bagging, randomly select some of the input.
        tree_data = input_data
        tree_labels = input_labels
        if self.params.bagging_fraction < 1.0:
          # TODO(thomaswc): This does sampling without replacement.  Consider
          # also allowing sampling with replacement as an option.
          batch_size = array_ops.strided_slice(
              array_ops.shape(input_data), [0], [1])
          r = random_ops.random_uniform(batch_size, seed=seed)
          mask = math_ops.less(
              r, array_ops.ones_like(r) * self.params.bagging_fraction)
          gather_indices = array_ops.squeeze(
              array_ops.where(mask), squeeze_dims=[1])
          # TODO(thomaswc): Calculate out-of-bag data and labels, and store
          # them for use in calculating statistics later.
          tree_data = array_ops.gather(input_data, gather_indices)
          tree_labels = array_ops.gather(input_labels, gather_indices)
        if self.params.bagged_features:
          tree_data = self._bag_features(i, tree_data)

        initialization = self.trees[i].tree_initialization()

        with ops.control_dependencies([initialization]):
          tree_graphs.append(
              self.trees[i].training_graph(
                  tree_data, tree_labels, seed, data_spec=data_spec,
                  **tree_kwargs))

    return control_flow_ops.group(*tree_graphs, name='train')
Author: BinRoot, Project: Tensorflow, Lines: 54, Source: tensor_forest.py


Example 10: _objective

  def _objective(self, x):
    """Rosenbrock function. (Carl Edward Rasmussen, 2001-07-21).

    f(x) = sum_{i=1:D-1} 100*(x(i+1) - x(i)^2)^2 + (1-x(i))^2

    Args:
      x: a Variable
    Returns:
      f: a tensor (objective value)
    """

    d = array_ops.size(x)
    s = math_ops.add(
        100 * math_ops.square(
            math_ops.subtract(
                array_ops.strided_slice(x, [1], [d]),
                math_ops.square(array_ops.strided_slice(x, [0], [d - 1])))),
        math_ops.square(
            math_ops.subtract(1.0, array_ops.strided_slice(x, [0], [d - 1]))))
    return math_ops.reduce_sum(s)
Author: Dr4KK, Project: tensorflow, Lines: 20, Source: external_optimizer_test.py


Example 11: _check_shapes_dynamic

  def _check_shapes_dynamic(self, operator, v, diag):
    """Return (v, diag) with Assert dependencies, which check shape."""
    checks = []
    with ops.name_scope("check_shapes", values=[operator, v, diag]):
      s_v = array_ops.shape(v)
      r_op = operator.rank()
      r_v = array_ops.rank(v)
      if diag is not None:
        s_d = array_ops.shape(diag)
        r_d = array_ops.rank(diag)

      # Check tensor rank.
      checks.append(check_ops.assert_rank(
          v, r_op, message="v is not the same rank as operator."))
      if diag is not None:
        checks.append(check_ops.assert_rank(
            diag, r_op - 1, message="diag is not the same rank as operator."))

      # Check batch shape
      checks.append(check_ops.assert_equal(
          operator.batch_shape(), array_ops.strided_slice(s_v, [0], [r_v - 2]),
          message="v does not have same batch shape as operator."))
      if diag is not None:
        checks.append(check_ops.assert_equal(
            operator.batch_shape(), array_ops.strided_slice(
                s_d, [0], [r_d - 1]),
            message="diag does not have same batch shape as operator."))

      # Check event shape
      checks.append(check_ops.assert_equal(
          operator.vector_space_dimension(), array_ops.gather(s_v, r_v - 2),
          message="v does not have same event shape as operator."))
      if diag is not None:
        checks.append(check_ops.assert_equal(
            array_ops.gather(s_v, r_v - 1), array_ops.gather(s_d, r_d - 1),
            message="diag does not have same event shape as v."))

      v = control_flow_ops.with_dependencies(checks, v)
      if diag is not None:
        diag = control_flow_ops.with_dependencies(checks, diag)
      return v, diag
Author: curtiszimmerman, Project: tensorflow, Lines: 41, Source: operator_pd_vdvt_update.py


Example 12: test1DNegtiveStride

  def test1DNegtiveStride(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[10])
        with self.test_scope():
          o = array_ops.strided_slice(i, [6], [2], [-2])
        params = {
          i: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
        }
        result = o.eval(feed_dict=params)

        self.assertAllEqual([6, 4], result)
Author: AlbertXiebnu, Project: tensorflow, Lines: 12, Source: slice_ops_test.py


Example 13: tree_initialization

  def tree_initialization(self):
    def _init_tree():
      return state_ops.scatter_update(self.variables.tree, [0], [[-1, -1]]).op

    def _nothing():
      return control_flow_ops.no_op()

    return control_flow_ops.cond(
        math_ops.equal(
            array_ops.squeeze(
                array_ops.strided_slice(self.variables.tree, [0, 0], [1, 1])),
            -2), _init_tree, _nothing)
Author: AutumnQYN, Project: tensorflow, Lines: 12, Source: tensor_forest.py


Example 14: test2DDegenerateNegativeStride

  def test2DDegenerateNegativeStride(self):
    for dtype in self.numeric_types:
      with self.test_session():
        i = array_ops.placeholder(dtype, shape=[2, 3])
        with self.test_scope():
          o = array_ops.strided_slice(i, [0, 0], [-1, 3], [-1, 1])
        params = {
            i: [[0, 1, 2],
                [3, 4, 5]]
        }
        result = o.eval(feed_dict=params)

        self.assertEqual(tensor_shape.TensorShape((0, 3)), result.shape)
Author: Brandon1016, Project: tensorflow, Lines: 13, Source: slice_ops_test.py


Example 15: _get_identity_operator

 def _get_identity_operator(self, v):
   """Get an `OperatorPDIdentity` to play the role of `D` in `VDV^T`."""
   with ops.name_scope("get_identity_operator", values=[v]):
     if v.get_shape().is_fully_defined():
       v_shape = v.get_shape().as_list()
       v_batch_shape = v_shape[:-2]
       r = v_shape[-1]
       id_shape = v_batch_shape + [r, r]
     else:
       v_shape = array_ops.shape(v)
       v_rank = array_ops.rank(v)
       v_batch_shape = array_ops.strided_slice(v_shape, [0], [v_rank - 2])
       r = array_ops.gather(v_shape, v_rank - 1)  # Last dim of v
       id_shape = array_ops.concat_v2((v_batch_shape, [r, r]), 0)
     return operator_pd_identity.OperatorPDIdentity(
         id_shape, v.dtype, verify_pd=self._verify_pd)
Author: curtiszimmerman, Project: tensorflow, Lines: 16, Source: operator_pd_vdvt_update.py


Example 16: _StridedSliceGradGrad

def _StridedSliceGradGrad(op, grad):
  """Gradient for StridedSliceGrad op."""
  begin = op.inputs[1]
  end = op.inputs[2]
  strides = op.inputs[3]

  return None, None, None, None, array_ops.strided_slice(
      grad,
      begin,
      end,
      strides,
      begin_mask=op.get_attr("begin_mask"),
      end_mask=op.get_attr("end_mask"),
      ellipsis_mask=op.get_attr("ellipsis_mask"),
      new_axis_mask=op.get_attr("new_axis_mask"),
      shrink_axis_mask=op.get_attr("shrink_axis_mask"))
Author: Jackhuang945, Project: tensorflow, Lines: 16, Source: array_grad.py


Example 17: batch_shape

  def batch_shape(self, name="batch_shape"):
    """Shape of batches associated with this operator.

    If this operator represents the batch matrix `A` with
    `A.shape = [N1,...,Nn, k, k]`, the `batch_shape` is `[N1,...,Nn]`.

    Args:
      name:  A name scope to use for ops added by this method.

    Returns:
      `int32` `Tensor`
    """
    # Derived classes get this "for free" once .shape() is implemented.
    with ops.name_scope(self.name):
      with ops.name_scope(name, values=self.inputs):
        return array_ops.strided_slice(self.shape(), [0], [self.rank() - 2])
Author: Hwhitetooth, Project: tensorflow, Lines: 16, Source: operator_pd.py


Example 18: testStridedSliceGradWithNonConstAxis

  def testStridedSliceGradWithNonConstAxis(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      end = array_ops.placeholder(dtype='int32')
      shape = array_ops.shape(conv)
      end_val = [1, 2, 3, 4]
      s = array_ops.strided_slice(
          conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
      s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
                                            [1, 2, 3, 1], s)
      output = array_ops.identity(s_grad)

      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={end: end_val})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                end: end_val
            })

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if node.name.startswith('LayoutOptimizerTranspose'):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self.assertIn('LayoutOptimizerTransposeNHWCToNCHW-Conv2D-0', nodes)
      self.assertIn('LayoutOptimizerTransposeNCHWToNHWC-StridedSliceGrad-0-0',
                    nodes)
      self.assertIn('LayoutOptimizerVecPermuteNHWCToNCHW_StridedSliceGrad_2',
                    nodes)
      self.assertIn('LayoutOptimizer-StridedSlice-StridedSliceGrad/begin',
                    nodes)
      self.assertIn('LayoutOptimizer-StridedSlice-StridedSliceGrad/strides',
                    nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Author: autodrive, Project: tensorflow, Lines: 45, Source: layout_optimizer_test.py


Example 19: extract_batch_shape

def extract_batch_shape(x, num_event_dims, name="extract_batch_shape"):
  """Extract the batch shape from `x`.

  Assuming `x.shape = batch_shape + event_shape`, where `event_shape` has
  `num_event_dims` dimensions.  This `Op` returns the batch shape `Tensor`.

  Args:
    x: `Tensor` with rank at least `num_event_dims`.  If rank is not high enough
      this `Op` will fail.
    num_event_dims:  `int32` scalar `Tensor`.  The number of trailing dimensions
      in `x` to be considered as part of `event_shape`.
    name:  A name to prepend to created `Ops`.

  Returns:
    batch_shape:  `1-D` `int32` `Tensor`
  """
  with ops.name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    return array_ops.strided_slice(
        array_ops.shape(x), [0], [array_ops.rank(x) - num_event_dims])
Author: Hwhitetooth, Project: tensorflow, Lines: 20, Source: operator_pd.py


Example 20: testStridedSliceWithNonConstAxis

  def testStridedSliceWithNonConstAxis(self):
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      end = array_ops.placeholder(dtype='int32')
      s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
      output = array_ops.identity(s)

      end_val = [1, 2, 3, 4]
      with session.Session() as sess:
        output_val_ref = sess.run(output, feed_dict={end: end_val})

      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                end: end_val
            })

      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)

      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
      self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
      self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
Author: ChengYuXiang, Project: tensorflow, Lines: 37, Source: layout_optimizer_test.py



Note: The tensorflow.python.ops.array_ops.strided_slice examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by their original authors, who retain the copyright; consult each project's License before distributing or using the code. Do not reproduce without permission.

