
Python batching.unbatch Function Code Examples


This article collects typical usage examples of the Python function tensorflow.contrib.data.python.ops.batching.unbatch. If you have been wondering what exactly unbatch does, how to use it, or where to find it in working code, the curated examples below should help.



The sections below present 13 code examples of the unbatch function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
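Before the individual examples, here is a minimal self-contained usage sketch (assuming a TensorFlow 1.x environment where tf.contrib is still available). unbatch() reverses batch(): it splits each dataset element along its 0th dimension.

import tensorflow as tf
from tensorflow.contrib.data.python.ops import batching

# One dataset element containing a batch of four scalars.
dataset = tf.data.Dataset.from_tensors([1, 2, 3, 4])

# unbatch() splits each element along its 0th dimension, so the
# dataset now yields the scalars 1, 2, 3, 4 one at a time.
dataset = dataset.apply(batching.unbatch())

iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()

with tf.Session() as sess:
  for _ in range(4):
    print(sess.run(next_element))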

Example 1: benchmarkNativeUnbatch

  def benchmarkNativeUnbatch(self):
    batch_sizes = [1, 2, 5, 10, 20, 50]
    elems_per_trial = 10000
    with ops.Graph().as_default():
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      batch_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
      dataset = dataset.batch(batch_size_placeholder)
      dataset = dataset.apply(batching.unbatch())
      dataset = dataset.skip(elems_per_trial)
      iterator = dataset.make_initializable_iterator()
      next_element = iterator.get_next()

      with session.Session() as sess:
        for batch_size in batch_sizes:
          deltas = []
          for _ in range(5):
            sess.run(
                iterator.initializer,
                feed_dict={batch_size_placeholder: batch_size})
            start = time.time()
            sess.run(next_element.op)
            end = time.time()
            deltas.append((end - start) / elems_per_trial)

          median_wall_time = np.median(deltas)
          print("Unbatch (native) batch size: %d Median wall time per element:"
                " %f microseconds" % (batch_size, median_wall_time * 1e6))
          self.report_benchmark(
              iters=10000,
              wall_time=median_wall_time,
              name="benchmark_unbatch_dataset_native_batch_size_%d" %
              batch_size)
Developer: Jordan1237 | Project: tensorflow | Lines: 32 | Source: batch_dataset_op_test.py


Example 2: testUnbatchDynamicShapeMismatch

  def testUnbatchDynamicShapeMismatch(self):
    ph1 = array_ops.placeholder(dtypes.int32, shape=[None])
    ph2 = array_ops.placeholder(dtypes.int32, shape=None)
    data = dataset_ops.Dataset.from_tensors((ph1, ph2))
    data = data.apply(batching.unbatch())
    iterator = data.make_initializable_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      # Mismatch in the 0th dimension.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: np.arange(8).astype(np.int32)
          })
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(next_element)

      # No 0th dimension (i.e. scalar value) for one component.
      sess.run(
          iterator.initializer,
          feed_dict={
              ph1: np.arange(7).astype(np.int32),
              ph2: 7
          })
      with self.assertRaises(errors.InvalidArgumentError):
        sess.run(next_element)
Developer: Jordan1237 | Project: tensorflow | Lines: 28 | Source: batch_dataset_op_test.py


Example 3: build_dataset

  def build_dataset(self, multiplier=15.0, tensor_slice_len=2, batch_size=2):
    components = (
        np.arange(tensor_slice_len),
        np.array([[1, 2, 3]]) * np.arange(tensor_slice_len)[:, np.newaxis],
        np.array(multiplier) * np.arange(tensor_slice_len))

    return dataset_ops.Dataset.from_tensor_slices(components).batch(
        batch_size).apply(batching.unbatch())
Developer: jinxin0924 | Project: tensorflow | Lines: 8 | Source: batch_dataset_op_test.py


Example 4: _apply_fn

  def _apply_fn(dataset):
    """Function from `Dataset` to `Dataset` that applies the transformation."""
    dist_estimation_batch_size = 32
    target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist")
    class_values_ds = dataset.map(class_func)
    if initial_dist is not None:
      initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist")
      acceptance_dist = _calculate_acceptance_probs(initial_dist_t,
                                                    target_dist_t)
      initial_dist_ds = dataset_ops.Dataset.from_tensors(
          initial_dist_t).repeat()
      acceptance_dist_ds = dataset_ops.Dataset.from_tensors(
          acceptance_dist).repeat()
    else:
      num_classes = (target_dist_t.shape[0].value or
                     array_ops.shape(target_dist_t)[0])
      smoothing_constant = 10
      initial_examples_per_class_seen = array_ops.fill(
          [num_classes], np.int64(smoothing_constant))

      def update_estimate_and_tile(num_examples_per_class_seen, c):
        updated_examples_per_class_seen, dist = _estimate_data_distribution(
            c, num_examples_per_class_seen)
        tiled_dist = array_ops.tile(
            array_ops.expand_dims(dist, 0), [dist_estimation_batch_size, 1])
        return updated_examples_per_class_seen, tiled_dist

      initial_dist_ds = (class_values_ds.batch(dist_estimation_batch_size)
                         .apply(scan_ops.scan(initial_examples_per_class_seen,
                                              update_estimate_and_tile))
                         .apply(batching.unbatch()))
      acceptance_dist_ds = initial_dist_ds.map(
          lambda initial: _calculate_acceptance_probs(initial, target_dist_t))

    def maybe_warn_on_large_rejection(accept_dist, initial_dist):
      proportion_rejected = math_ops.reduce_sum(
          (1 - accept_dist) * initial_dist)
      return control_flow_ops.cond(
          math_ops.less(proportion_rejected, .5),
          lambda: accept_dist,
          lambda: logging_ops.Print(  # pylint: disable=g-long-lambda
              accept_dist, [proportion_rejected, initial_dist, accept_dist],
              message="Proportion of examples rejected by sampler is high: ",
              summarize=100,
              first_n=10))

    acceptance_dist_ds = (dataset_ops.Dataset.zip((acceptance_dist_ds,
                                                   initial_dist_ds))
                          .map(maybe_warn_on_large_rejection))

    def _gather_and_copy(class_val, acceptance_prob, data):
      return (class_val, array_ops.gather(acceptance_prob, class_val), data)
    current_probabilities_and_class_and_data_ds = dataset_ops.Dataset.zip(
        (class_values_ds, acceptance_dist_ds, dataset)).map(_gather_and_copy)
    filtered_ds = (
        current_probabilities_and_class_and_data_ds
        .filter(lambda _1, p, _2: random_ops.random_uniform([], seed=seed) < p))
    return filtered_ds.map(lambda class_value, _, data: (class_value, data))
Developer: ebrevdo | Project: tensorflow | Lines: 58 | Source: resampling.py
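The acceptance probabilities that drive this resampling come from _calculate_acceptance_probs, whose body is not shown in this excerpt. As a sketch of the usual rejection-resampling rule (an assumption, not taken from this file: accept class i with probability (t_i/p_i) / max_j(t_j/p_j)), in plain NumPy:

import numpy as np

def acceptance_probs(initial_dist, target_dist):
  # Assumed rule: the class most under-represented relative to the
  # target is always accepted (probability 1.0); all other classes
  # are scaled down proportionally.
  ratio = target_dist / initial_dist
  return ratio / ratio.max()

initial = np.array([0.5, 0.3, 0.2])       # observed class frequencies
target = np.array([1.0, 1.0, 1.0]) / 3.0  # desired: uniform
print(acceptance_probs(initial, target))  # [0.4, 0.666..., 1.0]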


Example 5: testUnbatchEmpty

  def testUnbatchEmpty(self):
    data = dataset_ops.Dataset.from_tensors(
        (constant_op.constant([]), constant_op.constant([], shape=[0, 4]),
         constant_op.constant([], shape=[0, 4, 0])))
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
Developer: Jordan1237 | Project: tensorflow | Lines: 11 | Source: batch_dataset_op_test.py


Example 6: testUnbatchDatasetWithSparseTensor

  def testUnbatchDatasetWithSparseTensor(self):
    st = sparse_tensor.SparseTensorValue(
        indices=[[i, i] for i in range(10)],
        values=list(range(10)),
        dense_shape=[10, 10])
    data = dataset_ops.Dataset.from_tensors(st)
    data = data.apply(batching.unbatch())
    data = data.batch(5)
    data = data.apply(batching.unbatch())
    iterator = data.make_one_shot_iterator()
    next_element = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        st_row = sess.run(next_element)
        self.assertEqual([i], st_row.indices)
        self.assertEqual([i], st_row.values)
        self.assertEqual([10], st_row.dense_shape)
      with self.assertRaises(errors.OutOfRangeError):
        sess.run(next_element)
Developer: Jordan1237 | Project: tensorflow | Lines: 20 | Source: batch_dataset_op_test.py


Example 7: testUnbatchSingleElementTupleDataset

  def testUnbatchSingleElementTupleDataset(self):
    data = tuple([(math_ops.range(10),) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32,),) * 3
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(((i,),) * 3, sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Developer: Jordan1237 | Project: tensorflow | Lines: 18 | Source: batch_dataset_op_test.py


Example 8: testUnbatchDatasetWithStrings

  def testUnbatchDatasetWithStrings(self):
    data = tuple([math_ops.range(10) for _ in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    data = data.map(lambda x, y, z: (x, string_ops.as_string(y), z))
    expected_types = (dtypes.int32, dtypes.string, dtypes.int32)
    data = data.batch(2)
    self.assertEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual((i, compat.as_bytes(str(i)), i), sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Developer: Jordan1237 | Project: tensorflow | Lines: 19 | Source: batch_dataset_op_test.py


Example 9: testUnbatchMultiElementTupleDataset

  def testUnbatchMultiElementTupleDataset(self):
    data = tuple([(math_ops.range(10 * i, 10 * i + 10),
                   array_ops.fill([10], "hi")) for i in range(3)])
    data = dataset_ops.Dataset.from_tensor_slices(data)
    expected_types = ((dtypes.int32, dtypes.string),) * 3
    data = data.batch(2)
    self.assertAllEqual(expected_types, data.output_types)
    data = data.apply(batching.unbatch())
    self.assertAllEqual(expected_types, data.output_types)

    iterator = data.make_one_shot_iterator()
    op = iterator.get_next()

    with self.cached_session() as sess:
      for i in range(10):
        self.assertEqual(((i, b"hi"), (10 + i, b"hi"), (20 + i, b"hi")),
                         sess.run(op))

      with self.assertRaises(errors.OutOfRangeError):
        sess.run(op)
Developer: Jordan1237 | Project: tensorflow | Lines: 20 | Source: batch_dataset_op_test.py


Example 10: _estimate_initial_dist_ds

def _estimate_initial_dist_ds(
    target_dist_t, class_values_ds, dist_estimation_batch_size=32,
    smoothing_constant=10):
  num_classes = (target_dist_t.shape[0].value or
                 array_ops.shape(target_dist_t)[0])
  initial_examples_per_class_seen = array_ops.fill(
      [num_classes], np.int64(smoothing_constant))

  def update_estimate_and_tile(num_examples_per_class_seen, c):
    updated_examples_per_class_seen, dist = _estimate_data_distribution(
        c, num_examples_per_class_seen)
    tiled_dist = array_ops.tile(
        array_ops.expand_dims(dist, 0), [dist_estimation_batch_size, 1])
    return updated_examples_per_class_seen, tiled_dist

  initial_dist_ds = (class_values_ds.batch(dist_estimation_batch_size)
                     .apply(scan_ops.scan(initial_examples_per_class_seen,
                                          update_estimate_and_tile))
                     .apply(batching.unbatch()))

  return initial_dist_ds
Developer: AnishShah | Project: tensorflow | Lines: 21 | Source: resampling.py


Example 11: testUnbatchStaticShapeMismatch

  def testUnbatchStaticShapeMismatch(self):
    data = dataset_ops.Dataset.from_tensors((np.arange(7), np.arange(8),
                                             np.arange(9)))
    with self.assertRaises(ValueError):
      data.apply(batching.unbatch())
Developer: Jordan1237 | Project: tensorflow | Lines: 5 | Source: batch_dataset_op_test.py


Example 12: unbatch

  def unbatch(self):
    """Deprecated: Use `Dataset.apply(tf.contrib.data.unbatch()`."""

    return self.apply(batching.unbatch())
Developer: DjangoPeng | Project: tensorflow | Lines: 4 | Source: dataset_ops.py
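For reference, a short sketch of the replacement form the docstring recommends (assuming a TF 1.x release where tf.contrib.data.unbatch is exported):

import tensorflow as tf

dataset = tf.data.Dataset.from_tensors([10, 20, 30])

# Instead of the deprecated instance method shown above
# (dataset = dataset.unbatch()), use the transformation form:
dataset = dataset.apply(tf.contrib.data.unbatch())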


Example 13: StreamingFilesDataset


#......... part of the code is omitted here .........
      generated. By default, it will repeat infinitely.
    filename_shuffle_buffer_size: An optional integer whose value controls the
      shuffling of the file names. If you would like to read from the files in
      the same order, set to 0 or False.
    num_parallel_reads: An optional integer controlling the number of files to
      read from concurrently. (Set to 1 for no parallelism.)
    batch_transfer_size: An optional integer controlling the batching used to
      amortize the remote function invocation overhead. Set to a very large
      number to increase throughput. Set to a very small number to reduce memory
      consumption. Set to False to skip batching.
    sloppy: (Optional.) If `False`, read input data while maintaining a
      deterministic order. (This may have significant performance impacts.)
      Defaults to `True`.
  Returns:
    A `tf.data.Dataset` with an infinite stream of elements generated by a
    parallel interleaving of the set of files matched (or generated) by
    `files`, whose type is the output of the dataset specified by `filetype`.

  Raises:
    ValueError: if any argument is not of the expected type.
  """
  if filetype is None:
    filetype = 'tfrecord'

  if isinstance(filetype, str):
    if filetype not in _FILETYPE_MAP:
      raise ValueError('Unexpected filetype: %s' % filetype)
    reader_fn = _FILETYPE_MAP[filetype]
  elif callable(filetype):
    reader_fn = filetype
  else:
    raise ValueError('filetype should be a string or a callable')

  file_reader_job = file_reader_job or 'coordinator'

  worker_job = worker_job or 'worker'

  if filename_shuffle_buffer_size is None:
    filename_shuffle_buffer_size = 4096

  num_parallel_reads = num_parallel_reads or 8

  if batch_transfer_size is None:
    batch_transfer_size = 256

  if sloppy is None:
    sloppy = True

  with ops.device('/job:%s' % file_reader_job):
    if isinstance(files, str):
      source_dataset = dataset_ops.Dataset.list_files(files)
    elif isinstance(files, dataset_ops.Dataset):
      source_dataset = files
    else:
      raise ValueError('files was not a string or a dataset: %s' % files)

    if filename_shuffle_buffer_size:
      source_dataset = source_dataset.shuffle(
          buffer_size=filename_shuffle_buffer_size)

    # NOTE: We perform the `repeat` on the source dataset, because the output
    # dataset does not currently have enough information to recreate an iterator
    # over the source dataset when it reaches the end.
    source_dataset = source_dataset.repeat(num_epochs)

    source_dataset = source_dataset.apply(
        interleave_ops.parallel_interleave(
            reader_fn, cycle_length=num_parallel_reads, sloppy=sloppy))

    if batch_transfer_size:
      source_dataset = source_dataset.batch(batch_transfer_size)

    source_dataset = source_dataset.prefetch(1)

    source_iterator = source_dataset.make_one_shot_iterator()
    source_handle = source_iterator.string_handle()

  @function.Defun(dtypes.string)
  def LoadingFunc(h):
    remote_iterator = iterator_ops.Iterator.from_string_handle(
        h, source_dataset.output_types, source_dataset.output_shapes)
    return remote_iterator.get_next()

  def MapFn(unused_input):
    return functional_ops.remote_call(
        args=[source_handle],
        Tout=[dtypes.string],
        f=LoadingFunc,
        target='/job:%s/replica:0/task:0/cpu:0' % file_reader_job)[0]

  with ops.device('/job:%s' % worker_job):
    output_dataset = dataset_ops.Dataset.range(2).repeat().map(
        MapFn, num_parallel_calls=4 if sloppy else None)
    output_dataset = output_dataset.prefetch(1)

    if batch_transfer_size:
      # Undo the batching used during the transfer.
      output_dataset = output_dataset.apply(batching.unbatch()).prefetch(1)

  return output_dataset
Developer: Jackiefan | Project: tensorflow | Lines: 101 | Source: datasets.py
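To put the fragment in context, a hedged invocation sketch: the full signature of StreamingFilesDataset is elided above, the argument names below follow the docstring fragment and the code, and the import path and file pattern are assumptions.

from tensorflow.contrib.tpu.python.tpu import datasets as tpu_datasets

dataset = tpu_datasets.StreamingFilesDataset(
    files='gs://my-bucket/train/*.tfrecord',  # hypothetical glob pattern
    filetype='tfrecord',      # must be a key of _FILETYPE_MAP
    num_parallel_reads=8,     # the default applied when None
    batch_transfer_size=256,  # the default applied when None
    sloppy=True)              # trade deterministic order for throughput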



Note: The tensorflow.contrib.data.python.ops.batching.unbatch function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.

