
Python batching.unbatch Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.data.experimental.ops.batching.unbatch. If you are unsure what unbatch does, how to call it, or what working code that uses it looks like, the curated examples below should help.



Listed below are 20 code examples of the unbatch function, ordered by popularity by default.
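
Before turning to the examples, a minimal sketch of the basic behavior may help: unbatch splits each dataset element along its first dimension, so one element of shape [N, ...] becomes N elements of shape [...]. The sketch below is written against the public wrapper tf.data.experimental.unbatch() under TF 2.x eager execution, an assumption on this article's part (in recent releases, dataset.unbatch() is the preferred spelling):

import tensorflow as tf

# Two elements of shape [3] become six scalar elements after unbatching.
dataset = tf.data.Dataset.from_tensor_slices([[1, 2, 3], [4, 5, 6]])
dataset = dataset.apply(tf.data.experimental.unbatch())

for element in dataset:      # eager iteration (TF 2.x default)
    print(element.numpy())   # prints 1, 2, 3, 4, 5, 6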

Example 1: testUnbatchDatasetWithDenseAndSparseTensor

  def testUnbatchDatasetWithDenseAndSparseTensor(self):
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    data = dataset_ops.Dataset.from_tensors((list(range(10)), st))
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.apply(batching.unbatch())
    expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]))
                       for i in range(10)]
    self.assertDatasetProduces(data, expected_output=expected_output)
Contributor: Wajih-O | Project: tensorflow | Lines: 12 | Source: unbatch_test.py


Example 2: testUnbatchDatasetWithRaggedTensor

  def testUnbatchDatasetWithRaggedTensor(self):
    rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
                                            [[5]], [[6]], [[7]], [[8]], [[9]]])
    data = dataset_ops.Dataset.from_tensors(rt)
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.batch(2)
    data = data.apply(batching.unbatch())
    expected_output = [
        ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]]]),
        ragged_factory_ops.constant_value([[[5]], [[6]], [[7]], [[8]], [[9]]]),
    ]
    self.assertDatasetProduces(
        data, expected_output=expected_output)
Contributor: aritratony | Project: tensorflow | Lines: 14 | Source: unbatch_test.py


Example 3: testUnbatchDatasetWithDenseSparseAndRaggedTensor

  def testUnbatchDatasetWithDenseSparseAndRaggedTensor(self):
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    rt = ragged_factory_ops.constant_value([[[0]], [[1]], [[2]], [[3]], [[4]],
                                            [[5]], [[6]], [[7]], [[8]], [[9]]])
    data = dataset_ops.Dataset.from_tensors((list(range(10)), st, rt))
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.apply(batching.unbatch())
    expected_output = [(i, sparse_tensor.SparseTensorValue([[i]], [i], [10]),
                        ragged_factory_ops.constant_value([[i]]))
                       for i in range(10)]
    self.assertDatasetProduces(
        data, expected_output=expected_output)
Contributor: aritratony | Project: tensorflow | Lines: 16 | Source: unbatch_test.py


Example 4: testSkipEagerUnbatchDynamicShapeMismatch

  def testSkipEagerUnbatchDynamicShapeMismatch(self):
    ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
    ph2 = array_ops.placeholder(dtypes.int32, shape=None)
    data = dataset_ops.Dataset.from_tensors((ph1, ph2))
    data = data.apply(batching.unbatch())
    iterator = dataset_ops.make_initializable_iterator(data)
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      # Mismatch in the 0th dimension.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: np.arange(8).astype(np.int32)
          })
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(next_element)

      # No 0th dimension (i.e. scalar value) for one component.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: 7
          })
      with self.assertRaises(errors.InvalidArgumentError):
        self.evaluate(next_element)
Contributor: Wajih-O | Project: tensorflow | Lines: 28 | Source: unbatch_test.py


Example 5: benchmarkNativeUnbatch

  def benchmarkNativeUnbatch(self):
    batch_sizes = [1, 2, 5, 10, 20, 50]
    elems_per_trial = 10000
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
      dataset = dataset.batch(batch_size_placeholder)
      dataset = dataset.apply(batching.unbatch())
      dataset = dataset.skip(elems_per_trial)
      iterator = dataset.make_initializable_iterator()
      next_element = iterator.get_next()

      with session.Session() as sess:
        for batch_size in batch_sizes:
          deltas = []
          for _ in range(5):
            sess.run(
                iterator.initializer,
                feed_dict={batch_size_placeholder: batch_size})
            start = time.time()
            sess.run(next_element.op)
            end = time.time()
            deltas.append((end - start) / elems_per_trial)

          median_wall_time = np.median(deltas)
          print("Unbatch (native) batch size: %d Median wall time per element:"
                " %f microseconds" % (batch_size, median_wall_time * 1e6))
          self.report_benchmark(
              iters=10000,
              wall_time=median_wall_time,
              name="benchmark_unbatch_dataset_native_batch_size_%d" %
              batch_size)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 32 | Source: batch_dataset_op_test.py


Example 6: benchmarkNativeUnbatch

  def benchmarkNativeUnbatch(self):
    batch_sizes = [1, 2, 5, 10, 20, 50]
    elems_per_trial = 10000
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
      dataset = dataset.batch(batch_size_placeholder)
      dataset = dataset.apply(batching.unbatch())
      dataset = dataset.skip(elems_per_trial)
      options = dataset_ops.Options()
      options.experimental_optimization.apply_default_optimizations = False
      dataset = dataset.with_options(options)
      iterator = dataset_ops.make_initializable_iterator(dataset)
      next_element = iterator.get_next()

      with session.Session() as sess:
        for batch_size in batch_sizes:
          deltas = []
          for _ in range(5):
            sess.run(
                iterator.initializer,
                feed_dict={batch_size_placeholder: batch_size})
            start = time.time()
            sess.run(next_element.op)
            end = time.time()
            deltas.append((end - start) / elems_per_trial)

          median_wall_time = np.median(deltas)
          self.report_benchmark(
              iters=10000,
              wall_time=median_wall_time,
              name="native_batch_size_%d" %
              batch_size)
Contributor: Wajih-O | Project: tensorflow | Lines: 33 | Source: unbatch_benchmark.py


Example 7: build_dataset

  def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
    components = (
        np.arange(tensor_slice_len),
        np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
        np.array(multiplier) * np.arange(tensor_slice_len))

    return dataset_ops.Dataset.from_tensor_slices(components).batch(
        batch_size).apply(batching.unbatch())
Contributor: JonathanRaiman | Project: tensorflow | Lines: 8 | Source: unbatch_dataset_serialization_test.py


Example 8: testUnbatchSingleElementTupleDataset

  def testUnbatchSingleElementTupleDataset(self):
    data = tuple([(math_ops.range(10),) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32,),) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    self.assertDatasetProduces(data, [((i,),) * 3 for i in range(10)])
Contributor: Wajih-O | Project: tensorflow | Lines: 10 | Source: unbatch_test.py


Example 9: testUnbatchScalarDataset

  def testUnbatchScalarDataset(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = (dtypes.int32,) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, dataset_ops.get_legacy_output_types(data))

    self.assertDatasetProduces(data, [(i,) * 3 for i in range(10)])
Contributor: aritratony | Project: tensorflow | Lines: 10 | Source: unbatch_test.py


Example 10: testUnbatchEmpty

  def testUnbatchEmpty(self):
    data = dataset_ops.Dataset.from_tensors(
        (constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
         constant_op.constant([], shape=[0, 4, 0])))
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 11 | Source: batch_dataset_op_test.py


Example 11: testUnbatchDatasetWithSparseTensor

  def testUnbatchDatasetWithSparseTensor(self):
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    data = dataset_ops.Dataset.from_tensors(st)
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        st_row = self.evaluate(next_element)
        self.assertEqual([i], st_row.indices)
        self.assertEqual([i], st_row.values)
        self.assertEqual([10], st_row.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 20 | Source: batch_dataset_op_test.py


Example 12: testUnbatchDatasetWithStrings

  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    self.assertDatasetProduces(
        data, [(i, compat.as_bytes(str(i)), i) for i in range(10)])
Contributor: Wajih-O | Project: tensorflow | Lines: 12 | Source: unbatch_test.py


Example 13: make_dataset_iterator

  def make_dataset_iterator(self, dataset):
    """Make iterators for each of the TPU hosts.

    We first unbatch the user's input dataset and then rebatch it with the
    per replica batch size that is calculated using
    `global_batch_size // num_replicas_in_sync`. The currently supported cases
    are as follows:
    `dataset.batch()` is the last operation on the dataset.
    `dataset.apply(map_and_batch)` is the last operation on the dataset.
    `dataset.batch().prefetch()` are the last 2 operations on the dataset.
    `dataset.apply(map_and_batch).prefetch()` are the last 2 operations.

    Args:
      dataset: The `tf.data` dataset passed by the user.

    Returns:
      iterator: InputIterator created for each of the host machines.
    """
    # TODO(sourabhbajaj): Remove this in lieu of distributed datasets
    def _get_dataset_batch_size(dataset):
      """Get the global batch size from the dataset object."""
      # pylint: disable=protected-access
      if isinstance(dataset, dataset_ops.BatchDataset):
        return tensor_util.constant_value(dataset._batch_size)
      elif isinstance(dataset, batching._MapAndBatchDataset):
        return dataset._batch_size
      elif isinstance(dataset, dataset_ops.PrefetchDataset):
        return _get_dataset_batch_size(dataset._input_dataset)
      # pylint: enable=protected-access
      raise ValueError(
          "Unable to fetch the batch size from the input dataset. `batch` "
          "`map_and_batch` need to be the last operations on the dataset. "
          "The batch operations can be followed by a prefetch.")

    global_batch_size = _get_dataset_batch_size(dataset)
    if global_batch_size % self.num_replicas_in_sync:
      raise ValueError(
          "Batch size %s cannot be sharded evenly across replicas %s" % (
              global_batch_size, self.num_replicas_in_sync))
    per_replica_batch_size = global_batch_size // self.num_replicas_in_sync
    dataset = dataset.apply(batching.unbatch())
    dataset = dataset.batch(per_replica_batch_size, drop_remainder=True)

    worker_devices = [
        (self.get_host(hid), [self.get_host_cpu_device(hid)])
        for hid in range(self.num_hosts)
    ]
    distributed_dataset = values.MultiWorkerDataset(
        functools.partial(self._call_dataset_fn, lambda: dataset),
        worker_devices)
    # TODO(priyag): Return distribution strategy specific InputIterator
    return distributed_dataset.make_initializable_iterator()
Contributor: abhinav-upadhyay | Project: tensorflow | Lines: 52 | Source: tpu_strategy.py


Example 14: testUnbatchWithUnknownRankInput

  def testUnbatchWithUnknownRankInput(self):
    placeholder = array_ops.placeholder(dtypes.int32)
    dataset = dataset_ops.Dataset.from_tensors(placeholder).apply(
        batching.unbatch())
    iterator = dataset.make_initializable_iterator()
    next_elem = iterator.get_next()

    with self.cached_session() as sess:
      sess.run(iterator.initializer, feed_dict={placeholder: [0, 1, 2, 3]})
      for i in range(4):
        self.assertEqual(i, self.evaluate(next_elem))
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_elem)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 13 | Source: batch_dataset_op_test.py


Example 15: testUnbatchMultiElementTupleDataset

  def testUnbatchMultiElementTupleDataset(self):
    data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                   array_ops.fill([10], "hi")) for i in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32, dtypes.string),) * 3
    data = data.batch(2)
    self.assertAllEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertAllEqual(expected_types, data.output_types)

    self.assertDatasetProduces(
        data,
        [((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")) for i in range(10)])
Contributor: Wajih-O | Project: tensorflow | Lines: 13 | Source: unbatch_test.py


Example 16: _split_dataset_batch

def _split_dataset_batch(dataset, split_batch_by):
  """Divide a batch-ed dataset's batches into smaller batches."""
  # TODO(sourabhbajaj): Remove this in lieu of distributed datasets
  # pylint: disable=protected-access
  def _get_batch_dataset(d):
    """Get the underlying batch dataset from the dataset object."""
    if isinstance(d, dataset_ops.DatasetV1Adapter):
      d = d._dataset

    if isinstance(d, (dataset_ops.BatchDataset, batching._MapAndBatchDataset)):
      return d
    elif isinstance(d, dataset_ops.PrefetchDataset):
      return _get_batch_dataset(d._input_dataset)
    raise ValueError(
        "Unable to get batched dataset from the input dataset. `batch` "
        "`map_and_batch` need to be the last operations on the dataset. "
        "The batch operations can be followed by a prefetch.")

  batched_dataset = _get_batch_dataset(dataset)
  if isinstance(batched_dataset, dataset_ops.BatchDataset):
    batch_size = batched_dataset._batch_size
    drop_remainder = batched_dataset._drop_remainder
  elif isinstance(batched_dataset, batching._MapAndBatchDataset):
    batch_size = batched_dataset._batch_size_t
    drop_remainder = batched_dataset._drop_remainder_t

  prefetch_buffer = None
  if isinstance(dataset, dataset_ops.PrefetchDataset):
    prefetch_buffer = dataset._buffer_size
  elif (isinstance(dataset, dataset_ops.DatasetV1Adapter)
        and isinstance(dataset._dataset, dataset_ops.PrefetchDataset)):
    prefetch_buffer = dataset._dataset._buffer_size
  # pylint: enable=protected-access

  if tensor_util.is_tensor(batch_size):
    batch_size = tensor_util.constant_value(batch_size)

  if tensor_util.is_tensor(drop_remainder):
    drop_remainder = tensor_util.constant_value(drop_remainder)

  if batch_size % split_batch_by:
    raise ValueError(
        "Batch size %s cannot be sharded evenly across replicas %s" % (
            batch_size, split_batch_by))
  new_batch_size = batch_size // split_batch_by

  dataset = dataset.apply(batching.unbatch())
  dataset = dataset.batch(new_batch_size, drop_remainder=drop_remainder)
  if prefetch_buffer is not None:
    dataset = dataset.prefetch(prefetch_buffer)
  return dataset
Contributor: rmlarsen | Project: tensorflow | Lines: 51 | Source: input_lib.py


Example 17: _split_dataset_batch

def _split_dataset_batch(dataset, split_batch_by):
  """Divide a batch-ed dataset's batches into smaller batches."""
  batch_size, drop_remainder, prefetch_buffer = (
      _get_dataset_attributes(dataset))

  if batch_size % split_batch_by:
    raise ValueError(
        "Batch size %s cannot be sharded evenly across replicas %s" % (
            batch_size, split_batch_by))
  new_batch_size = batch_size // split_batch_by

  dataset = dataset.apply(batching.unbatch())
  dataset = dataset.batch(new_batch_size, drop_remainder=drop_remainder)
  if prefetch_buffer is not None:
    dataset = dataset.prefetch(prefetch_buffer)
  return dataset
Contributor: ziky90 | Project: tensorflow | Lines: 16 | Source: input_lib.py
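
Both _split_dataset_batch variants above reduce to the same unbatch-then-rebatch pattern. The following is a minimal standalone sketch of that pattern using only public TF 2.x tf.data APIs, with the batch size passed in explicitly instead of recovered from private dataset attributes; the name rebatch and its signature are illustrative, not part of TensorFlow:

import tensorflow as tf

def rebatch(dataset, batch_size, split_batch_by, drop_remainder=False):
  """Split each batch of `dataset` into `split_batch_by` smaller batches."""
  if batch_size % split_batch_by:
    raise ValueError(
        "Batch size %s cannot be sharded evenly across replicas %s" % (
            batch_size, split_batch_by))
  new_batch_size = batch_size // split_batch_by
  dataset = dataset.unbatch()  # public equivalent of batching.unbatch()
  return dataset.batch(new_batch_size, drop_remainder=drop_remainder)

# A dataset batched by 4, resharded into batches of 2:
ds = rebatch(tf.data.Dataset.range(8).batch(4), batch_size=4, split_batch_by=2)
for batch in ds:
  print(batch.numpy())  # [0 1], [2 3], [4 5], [6 7]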


Example 18: testUnbatchSingleElementTupleDataset

  def testUnbatchSingleElementTupleDataset(self):
    data = tuple([(math_ops.range(10),) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32,),) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(((i,),) * 3, self.evaluate(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 18 | Source: batch_dataset_op_test.py


Example 19: testUnbatchDatasetWithStrings

  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual((i, compat.as_bytes(str(i)), i), self.evaluate(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 19 | Source: batch_dataset_op_test.py


Example 20: testUnbatchMultiElementTupleDataset

  def testUnbatchMultiElementTupleDataset(self):
    data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                   array_ops.fill([10], "hi")) for i in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32, dtypes.string),) * 3
    data = data.batch(2)
    self.assertAllEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertAllEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")),
                         sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Contributor: JonathanRaiman | Project: tensorflow | Lines: 20 | Source: batch_dataset_op_test.py



Note: The tensorflow.python.data.experimental.ops.batching.unbatch examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are drawn from open-source projects and remain the copyright of their original authors; consult each project's license before using or redistributing them. Do not repost without permission.

