Python queue_runner.add_queue_runner Code Examples

This article collects typical usage examples of the Python function tensorflow.python.training.queue_runner.add_queue_runner. If you have been wondering what add_queue_runner does, how to call it, or where to find working examples, the curated snippets below should help.

The following shows 20 code examples of the add_queue_runner function, sorted by popularity by default.
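
Before the examples, here is a minimal, self-contained sketch of the pattern they all share, assuming TensorFlow 1.x (where tf.train.QueueRunner, tf.train.add_queue_runner, and tf.train.start_queue_runners are the public wrappers around the queue_runner module used below): build a queue, register a QueueRunner for its enqueue op, then start the runner threads inside a session.

import tensorflow as tf

# Queue of scalar floats; each run of the enqueue op adds one random value.
queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32], shapes=[[]])
enqueue_op = queue.enqueue(tf.random_uniform([]))

# Register a runner in the default QUEUE_RUNNERS collection; its two threads
# run enqueue_op repeatedly once started.
tf.train.add_queue_runner(tf.train.QueueRunner(queue, [enqueue_op] * 2))

dequeued = queue.dequeue()

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(dequeued))  # one value produced by the runner threads
  coord.request_stop()
  coord.join(threads)

Every example below is a variation on this registration step, differing mainly in how the queue and its enqueue ops are constructed.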

Example 1: _configure_readers_by

  def _configure_readers_by(self, queue):
    enqueue_ops = []
    for reader in self._readers:
      enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))

    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(self._common_queue, enqueue_ops))
Developer: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: parallel_reader.py


Example 2: _get_stratified_batch_from_tensors

def _get_stratified_batch_from_tensors(val, label, reject_probs, batch_size,
                                       queue_threads=3):
  """Reject examples one-at-a-time based on class."""
  # Make rejection probabilities into a tensor so they can be dynamically
  # accessed by tensors.
  reject_probs = constant_op.constant(
      reject_probs, dtype=dtypes.float32, name='rejection_probabilities')

  # Make queue that will have proper class proportions. Contains exactly one
  # batch at a time.
  val_shape = val.get_shape()
  label_shape = label.get_shape()
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=[val_shape, label_shape],
                                    dtypes=[val.dtype, label.dtype],
                                    name='batched_queue')

  # Conditionally enqueue.
  eq_tf = array_ops.reshape(math_ops.greater(
      random_ops.random_uniform([1]),
      array_ops.slice(reject_probs, [label], [1])),
                            [])
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue([val, label]),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  return final_q.dequeue_many(batch_size)
Developer: Bala96 | Project: tensorflow | Lines: 30 | Source: sampling_ops.py


Example 3: _apply_transform

  def _apply_transform(self, transform_input):
    filename_queue = input_ops.string_input_producer(self._work_units,
                                                     shuffle=self.shuffle,
                                                     seed=self._seed)

    if self.shuffle:
      queue = data_flow_ops.RandomShuffleQueue(
          capacity=self.queue_capacity,
          min_after_dequeue=self.min_after_dequeue,
          dtypes=[dtypes.string, dtypes.string],
          shapes=[[], []],
          seed=self.seed)
    else:
      queue = data_flow_ops.FIFOQueue(capacity=self.queue_capacity,
                                      dtypes=[dtypes.string, dtypes.string],
                                      shapes=[[], []])

    enqueue_ops = []
    for _ in range(self.num_threads):
      reader = self._reader_cls(**self._reader_kwargs)
      enqueue_ops.append(queue.enqueue(reader.read(filename_queue)))

    runner = queue_runner.QueueRunner(queue, enqueue_ops)
    queue_runner.add_queue_runner(runner)
    dequeued = queue.dequeue_many(self.batch_size)

    # pylint: disable=not-callable
    return self.return_type(*dequeued)
Developer: 0ruben | Project: tensorflow | Lines: 28 | Source: reader_source.py


Example 4: _get_stratified_batch_from_tensors

def _get_stratified_batch_from_tensors(val_list, label, accept_probs,
                                       batch_size, queue_threads=3):
  """Accepts examples one-at-a-time based on class."""
  # Make queue that will have proper class proportions. Contains exactly one
  # batch at a time.
  vals_shapes = [val.get_shape() for val in val_list]
  vals_dtypes = [val.dtype for val in val_list]
  label_shape = label.get_shape()
  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=vals_shapes + [label_shape],
                                    dtypes=vals_dtypes + [label.dtype],
                                    name='batched_queue')

  # Conditionally enqueue.
  tensors_to_enqueue = val_list + [label]
  eq_tf = array_ops.reshape(math_ops.less(
      random_ops.random_uniform([1]),
      array_ops.slice(accept_probs, [label], [1])),
                            [])
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue(tensors_to_enqueue),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  return final_q.dequeue_many(batch_size)
Developer: 2020zyc | Project: tensorflow | Lines: 27 | Source: sampling_ops.py
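
Examples 2 and 4 are two revisions of the same stratified-sampling helper: Example 2 enqueues an element when a uniform draw exceeds its class's rejection probability, while Example 4 enqueues when the draw falls below its class's acceptance probability and generalizes the value to a list of tensors; the FIFOQueue and QueueRunner wiring is otherwise identical.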


Example 5: _make_per_class_queues

def _make_per_class_queues(data, labels, num_classes, queue_capacity,
                           threads_per_queue):
  """Creates per-class-queues based on data and labels."""
  # Create one queue per class.
  queues = []
  per_data_shape = data.get_shape().with_rank_at_least(1)[1:]
  per_data_shape.assert_is_fully_defined()

  for i in range(num_classes):
    q = data_flow_ops.FIFOQueue(capacity=queue_capacity,
                                shapes=per_data_shape, dtypes=[data.dtype],
                                name='stratified_sample_class%d_queue' % i)
    logging_ops.scalar_summary('queue/stratified_sample_class%d' % i, q.size())
    queues.append(q)

  # Partition tensors according to labels.
  partitions = data_flow_ops.dynamic_partition(data, labels, num_classes)

  # Enqueue each tensor on the per-class-queue.
  for i in range(num_classes):
    enqueue_op = queues[i].enqueue_many(partitions[i])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        queues[i], [enqueue_op] * threads_per_queue))

  return queues
Developer: Brandon-Tai | Project: tensorflow | Lines: 25 | Source: sampling_ops.py


Example 6: prefetch_queue

def prefetch_queue(tensors,
                   capacity=8,
                   num_threads=1,
                   dynamic_pad=False,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetch tensors from `tensors`.

  A queue runner for enqueuing tensors into the prefetch_queue is automatically
  added to the TF QueueRunners collection.

  Example:
  This is useful, for example, to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches.  Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    num_threads: An integer.  Number of threads running the enqueue op.
    dynamic_pad: Boolean.  Whether to allow variable dimensions in input shapes.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    # We sort the keys so the order is consistent across runs.
    names = list(sorted(tensors.keys()))
    tensor_list = list([tensors[n] for n in names])
  else:
    names = None
    tensor_list = tensors

  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    queue = _which_queue(dynamic_pad)(
        capacity=capacity,
        dtypes=dtypes,
        shapes=shapes,
        names=names,
        shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors)
    queue_runner.add_queue_runner(
        queue_runner.QueueRunner(queue, [enqueue_op] * num_threads))
    summary.scalar(
        "fraction_of_%d_full" % capacity,
        math_ops.cast(queue.size(), _dtypes.float32) * (1. / capacity))
    return queue
Developer: Albert-Z-Guo | Project: tensorflow | Lines: 60 | Source: prefetch_queue.py
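
Nothing is prefetched until the queue runners are started. A hedged driver sketch for the snippet in the docstring above (assuming TF 1.x and the loss op defined there):

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  # Starts both tf.train.batch's runners and the prefetch_queue runner.
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  try:
    while not coord.should_stop():
      sess.run(loss)  # each step dequeues one pre-assembled batch
  finally:
    coord.request_stop()
    coord.join(threads)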


Example 7: _make_per_class_queues

def _make_per_class_queues(tensor_list, labels, num_classes, queue_capacity, threads_per_queue):
    """Creates per-class-queues based on data and labels."""
    # Create one queue per class.
    queues = []
    data_shapes = []
    data_dtypes = []
    for data_tensor in tensor_list:
        per_data_shape = data_tensor.get_shape().with_rank_at_least(1)[1:]
        per_data_shape.assert_is_fully_defined()
        data_shapes.append(per_data_shape)
        data_dtypes.append(data_tensor.dtype)

    for i in range(num_classes):
        q = data_flow_ops.FIFOQueue(
            capacity=queue_capacity, shapes=data_shapes, dtypes=data_dtypes, name="stratified_sample_class%d_queue" % i
        )
        logging_ops.scalar_summary("queue/%s/stratified_sample_class%d" % (q.name, i), q.size())
        queues.append(q)

    # Partition tensors according to labels. `partitions` is a list of lists, of
    # size num_classes X len(tensor_list). The number of tensors in partition `i`
    # should be the same for all tensors.
    all_partitions = [data_flow_ops.dynamic_partition(data, labels, num_classes) for data in tensor_list]
    partitions = [[cur_partition[i] for cur_partition in all_partitions] for i in range(num_classes)]

    # Enqueue each tensor on the per-class-queue.
    for i in range(num_classes):
        enqueue_op = queues[i].enqueue_many(partitions[i])
        queue_runner.add_queue_runner(queue_runner.QueueRunner(queues[i], [enqueue_op] * threads_per_queue))

    return queues
Developer: pronobis | Project: tensorflow | Lines: 31 | Source: sampling_ops.py


Example 8: create_input_queues

def create_input_queues(image, label, capacity=100):
    """Creates Queues a FIFO Queue out of Input tensor objects.
     
     This function is no longer used in the input pipeline.
     However it took me a while to understand queuing and it might be useful
     fot someone at some point.

    Args:
       image: an image tensor object, generated by queues.
       label: an label tensor object, generated by queues.
      
    Returns: Two FiFO Queues
    """
    
    #create input queues

    im_queue = tf.FIFOQueue(capacity, dtypes.uint8)
    enqueue_op = im_queue.enqueue(image)
    
    queue_runner.add_queue_runner(queue_runner.QueueRunner(im_queue,
                                                           [enqueue_op]))

    label_queue = tf.FIFOQueue(capacity, dtypes.uint8)
    enqueue_op = label_queue.enqueue(label)
    
    queue_runner.add_queue_runner(queue_runner.QueueRunner(label_queue,
                                                           [enqueue_op]))
                                                           
    return im_queue, label_queue
Developer: MarvinTeichmann | Project: tensorflow_examples | Lines: 29 | Source: data_input.py


Example 9: _fn

def _fn():
  queue = data_flow_ops.FIFOQueue(
      capacity=10, dtypes=dtypes.float32, shapes=[10, 3])
  enqueue_op = queue.enqueue(array_ops.zeros([10, 3], dtype=dtypes.float32))
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(queue, [enqueue_op]))
  return queue.dequeue(), None
Developer: AndrewTwinz | Project: tensorflow | Lines: 7 | Source: kmeans_test.py


Example 10: read

    def read(self, queue, name=None):
        """Returns the next record (key, value pair) produced by the reader.

    The multiple reader instances are all configured to `read()` from the
    filenames listed in `queue` and enqueue their output into the `common_queue`
    passed to the constructor, and this method returns the next record dequeued
    from that `common_queue`.


    Readers dequeue a work unit from `queue` if necessary (e.g. when a
    reader needs to start reading from a new file since it has finished with
    the previous file).

    A queue runner for enqueueing in the `common_queue` is automatically added to
    the TF QueueRunners collection.

    Args:
      queue: A Queue or a mutable string Tensor representing a handle
        to a Queue, with string work items.
      name: A name for the operation (optional).

    Returns:
      The next record (i.e. (key, value pair)) from the common_queue.
    """

        enqueue_ops = []
        for reader in self._readers:
            enqueue_ops.append(self._common_queue.enqueue(reader.read(queue)))

        queue_runner.add_queue_runner(queue_runner.QueueRunner(self._common_queue, enqueue_ops))

        return self._common_queue.dequeue(name=name)
Developer: brchiu | Project: tensorflow | Lines: 32 | Source: parallel_reader.py


Example 11: input_producer

def input_producer(input_tensor, element_shape=None, num_epochs=None,
                   shuffle=True, seed=None, capacity=32, shared_name=None,
                   summary_name=None, name=None):
  """Output the rows of `input_tensor` to a queue for an input pipeline.

  Args:
    input_tensor: A tensor with the rows to produce. Must be at least
      one-dimensional. Must either have a fully-defined shape, or
      `element_shape` must be defined.
    element_shape: (Optional.) A `TensorShape` representing the shape of a
      row of `input_tensor`, if it cannot be inferred.
    num_epochs: (Optional.) An integer. If specified `input_producer` produces
      each row of `input_tensor` `num_epochs` times before generating an
      `OutOfRange` error. If not specified, `input_producer` can cycle through
      the rows of `input_tensor` an unlimited number of times.
    shuffle: (Optional.) A boolean. If true, the rows are randomly shuffled
      within each epoch.
    seed: (Optional.) An integer. The seed to use if `shuffle` is true.
    capacity: (Optional.) The capacity of the queue to be used for buffering
      the input.
    shared_name: (Optional.) If set, this queue will be shared under the given
      name across multiple sessions.
    summary_name: (Optional.) If set, a scalar summary for the current queue
      size will be generated, using this name as part of the tag.
    name: (Optional.) A name for queue.

  Returns:
    A queue with the output rows.  A `QueueRunner` for the queue is
    added to the current `QUEUE_RUNNER` collection of the current
    graph.

  Raises:
    ValueError: If the shape of the input cannot be inferred from the arguments.
  """
  with ops.name_scope(name, "input_producer", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    element_shape = input_tensor.get_shape()[1:].merge_with(element_shape)
    if not element_shape.is_fully_defined():
      raise ValueError("Either `input_tensor` must have a fully defined shape "
                       "or `element_shape` must be specified")

    if shuffle:
      input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)

    input_tensor = limit_epochs(input_tensor, num_epochs)

    q = data_flow_ops.FIFOQueue(capacity=capacity,
                                dtypes=[input_tensor.dtype.base_dtype],
                                shapes=[element_shape],
                                shared_name=shared_name, name=name)
    enq = q.enqueue_many([input_tensor])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
    if summary_name is not None:
      logging_ops.scalar_summary("queue/%s/%s" % (q.name, summary_name),
                                 math_ops.cast(q.size(), dtypes.float32) *
                                 (1. / capacity))
    return q
Developer: marevol | Project: tensorflow | Lines: 57 | Source: input.py
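
In the same module, string_input_producer and range_input_producer are thin wrappers around input_producer (and slice_input_producer builds on range_input_producer), so the queue-runner registration shown here backs all of the standard TF 1.x input producers.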


Example 12: _input_producer

def _input_producer(input_tensor, dtype, num_epochs, shuffle, seed, capacity, name, summary_name):
    if shuffle:
        input_tensor = random_ops.random_shuffle(input_tensor, seed=seed)
    input_tensor = limit_epochs(input_tensor, num_epochs)

    q = data_flow_ops.FIFOQueue(capacity=capacity, dtypes=[dtype], shapes=[[]], name=name)
    enq = q.enqueue_many([input_tensor])
    queue_runner.add_queue_runner(queue_runner.QueueRunner(q, [enq]))
    summary_ops.scalar_summary(
        "queue/%s/%s" % (q.name, summary_name), math_ops.cast(q.size(), dtypes.float32) * (1.0 / capacity)
    )
    return q
Developer: peace195 | Project: tensorflow | Lines: 12 | Source: input.py


Example 13: prefetch_queue

def prefetch_queue(tensors,
                   capacity=8,
                   shared_name=None,
                   name=None):
  """Creates a queue to prefetech tensors from `tensors`.

  A queue runner for enqueing tensors into the prefetch_queue is automatically
  added to the TF QueueRunners collection.

  Example:
  This is useful, for example, to pre-assemble input batches read with
  `tf.train.batch()` and enqueue the pre-assembled batches.  Ops that dequeue
  from the pre-assembled queue will not pay the cost of assembling the batch.

  images, labels = tf.train.batch([image, label], batch_size=32, num_threads=4)
  batch_queue = prefetch_queue([images, labels])
  images, labels = batch_queue.dequeue()
  logits = Net(images)
  loss = Loss(logits, labels)

  Args:
    tensors: A list or dictionary of `Tensors` to enqueue in the buffer.
    capacity: An integer. The maximum number of elements in the queue.
    shared_name: (optional). If set, this queue will be shared under the given
      name across multiple sessions.
    name: (Optional) A name for the operations.

  Returns:
    A queue from which you can dequeue tensors with the same type and shape
    as `tensors`.
  """
  if isinstance(tensors, dict):
    # Need to wrap the keys and values in list() since Python3 returns views.
    names = list(tensors.keys())
    tensor_list = list(tensors.values())
  else:
    names = None
    tensor_list = tensors

  with ops.name_scope(name, "prefetch_queue", tensor_list) as name:
    dtypes = [t.dtype for t in tensor_list]
    shapes = [t.get_shape() for t in tensor_list]
    queue = data_flow_ops.FIFOQueue(capacity=capacity,
                                    dtypes=dtypes,
                                    shapes=shapes,
                                    names=names,
                                    shared_name=shared_name)
    enqueue_op = queue.enqueue(tensors, name=name)
    queue_runner.add_queue_runner(queue_runner.QueueRunner(queue, [enqueue_op]))
    logging_ops.scalar_summary(
        "queue/%s/fraction_of_%d_full" % (queue.name, capacity),
        math_ops.to_float(queue.size()) * (1. / capacity))
    return queue
Developer: AriaAsuka | Project: tensorflow | Lines: 53 | Source: prefetch_queue.py


Example 14: _queue_parsed_features

def _queue_parsed_features(feature_map):
  tensors_to_enqueue = []
  keys = []
  for key, tensor in six.iteritems(feature_map):
    keys.append(key)
    tensors_to_enqueue.append(tensor)
  queue_dtypes = [x.dtype for x in tensors_to_enqueue]
  input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
  queue_runner.add_queue_runner(
      queue_runner.QueueRunner(input_queue,
                               [input_queue.enqueue(tensors_to_enqueue)]))
  dequeued_tensors = input_queue.dequeue()
  return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
Developer: AnishShah | Project: tensorflow | Lines: 13 | Source: dnn_with_layer_annotations_test.py


Example 15: _conditional_batch

def _conditional_batch(tensors, accept_prob, batch_size, queue_threads=10):
  """Conditionally enqueue tensors based on accept_prob.

  Specifically, enqueue the element if accept_prob > rand_unif([0, 1]).

  Args:
      tensors: List of tensors to enqueue.
      accept_prob: Acceptance probability per example.
      batch_size: Size of batch.
      queue_threads: Number of threads enqueuing in the final queue.

  Returns:
      List of batched tensors.

  Raises:
      ValueError: `accept_prob` isn't 0D.
  """
  accept_prob.get_shape().assert_has_rank(0)
  # Determine shapes and types of to-be-enqueued-tensors.
  shapes_list = []
  dtypes_list = []
  for tensor in tensors:
    cur_shape = tensor.get_shape()
    cur_shape.assert_is_fully_defined()
    shapes_list.append(cur_shape)
    dtypes_list.append(tensor.dtype)

  final_q = data_flow_ops.FIFOQueue(capacity=batch_size,
                                    shapes=shapes_list,
                                    dtypes=dtypes_list,
                                    name='batched_queue')
  logging_ops.scalar_summary('queue/%s/size' % final_q.name, final_q.size())

  # Conditionally enqueue: keep the element when a single uniform draw in
  # [0, 1) falls below accept_prob.
  eq_tf = math_ops.less(random_ops.random_uniform([]), accept_prob)
  conditional_enqueue = control_flow_ops.cond(
      eq_tf,
      lambda: final_q.enqueue(tensors),
      control_flow_ops.no_op)
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      final_q, [conditional_enqueue] * queue_threads))

  out_tensor = final_q.dequeue_many(batch_size)
  # Queues return a single tensor if the list of enqueued tensors is one. Since we
  # want the type to be the same in all cases, always return a list.
  if isinstance(out_tensor, ops.Tensor):
    out_tensor = [out_tensor]

  return out_tensor
Developer: govindap | Project: tensorflow | Lines: 50 | Source: sampling_ops.py
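
The design mirrors Examples 2 and 4: capacity=batch_size means the final queue buffers at most one batch, and the conditional enqueue op is handed to a single QueueRunner queue_threads times rather than spread over several runners; as the comments in Example 18 note, threads inside one runner are coordinated on shutdown, unlike threads split across multiple runners.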


Example 16: _add_remote_queue_runner

  def _add_remote_queue_runner(self, queue, enq_ops):
    """Adds a remote queue runner to the graph.

    These queue runners differ from the standard in two ways: First,
    they never close their queue. Second, they are added to the
    `Feeder.REMOTE_QUEUE_RUNNERS` collection, rather than
    `ops.GraphKeys.QUEUE_RUNNERS`, so they can be started/stopped
    separately.

    Args:
      queue: The queue.
      enq_ops: A list of ops which perform enqueues (each on its own thread).
    """

    runner = queue_runner.QueueRunner(
        queue,
        enq_ops,
        cancel_op=self._fake_op,
        close_op=self._fake_op)
    queue_runner.add_queue_runner(
        runner, collection=Feeder.REMOTE_QUEUE_RUNNERS)
Developer: AlbertXiebnu | Project: tensorflow | Lines: 21 | Source: feeder.py
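
Because these runners live in the Feeder.REMOTE_QUEUE_RUNNERS collection rather than the default one, tf.train.start_queue_runners will not pick them up unless it is pointed at that collection, e.g. tf.train.start_queue_runners(sess=sess, coord=coord, collection=Feeder.REMOTE_QUEUE_RUNNERS) (assuming the TF 1.x signature, which accepts a collection keyword).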


Example 17: enqueue_data

def enqueue_data(data,
                 capacity,
                 shuffle=False,
                 min_after_dequeue=None,
                 seed=None):
  """Creates a queue filled from a numpy array or pandas `DataFrame`.

    Returns a queue filled with the rows of the given array or `DataFrame`. In
    the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
    the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
    contains the row number.

  Args:
    data: a numpy `ndarray` or pandas `DataFrame` that will be read into the
      queue.
    capacity: the capacity of the queue.
    shuffle: whether or not to shuffle the rows of the array.
    min_after_dequeue: minimum number of elements that can remain in the queue
      after a dequeue operation. Only used when `shuffle` is true. If not set,
      defaults to `capacity` / 4.
    seed: used to seed RandomShuffleQueue. Only used when `shuffle` is True.

  Returns:
    A queue filled with the rows of the given array or `DataFrame`.

  Raises:
    TypeError: `data` is not a Pandas `DataFrame` or a numpy `ndarray`.
  """
  # TODO(jamieas): create multithreaded version of enqueue_data.
  if isinstance(data, np.ndarray):
    types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
    shapes = [(), data.shape[1:]]
    get_feed_fn = _ArrayFeedFn
  elif HAS_PANDAS and isinstance(data, pd.DataFrame):
    types = [dtypes.as_dtype(dt)
             for dt in [data.index.dtype] + list(data.dtypes)]
    shapes = [() for _ in types]
    get_feed_fn = _PandasFeedFn
  else:
    raise TypeError(
        "data must be either a numpy array or pandas DataFrame if pandas is "
        "installed; got {}".format(
            type(data).__name__))

  placeholders = [array_ops.placeholder(*type_and_shape)
                  for type_and_shape in zip(types, shapes)]
  if shuffle:
    min_after_dequeue = (capacity // 4 if min_after_dequeue is None else
                         min_after_dequeue)
    queue = data_flow_ops.RandomShuffleQueue(capacity,
                                             min_after_dequeue,
                                             dtypes=types,
                                             shapes=shapes,
                                             seed=seed)
  else:
    queue = data_flow_ops.FIFOQueue(capacity, dtypes=types, shapes=shapes)
  enqueue_op = queue.enqueue(placeholders)
  feed_fn = get_feed_fn(placeholders, data)
  runner = fqr.FeedingQueueRunner(queue=queue,
                                  enqueue_ops=[enqueue_op],
                                  feed_fn=feed_fn)
  queue_runner.add_queue_runner(runner)
  return queue
Developer: Baaaaam | Project: tensorflow | Lines: 63 | Source: feeding_functions.py
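
A hedged usage sketch for enqueue_data (the internal helper above, not a public API; assumes TF 1.x, numpy, and the module's own imports):

import numpy as np
import tensorflow as tf

data = np.arange(12, dtype=np.float32).reshape(6, 2)
queue = enqueue_data(data, capacity=8, shuffle=False)
index, row = queue.dequeue()  # first component is the row number

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  # start_queue_runners also starts the FeedingQueueRunner registered above.
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run([index, row]))  # expected: [0, array([0., 1.], dtype=float32)]
  coord.request_stop()
  coord.join(threads)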


Example 18: queue_parsed_features

def queue_parsed_features(parsed_features,
                          keys=None,
                          feature_queue_capacity=100,
                          num_queue_runners=None,
                          num_enqueue_threads=None,
                          name=None):
  """Speeds up parsing by using queues to do it asynchronously.

  This function adds the tensors in `parsed_features` to a queue, which allows
  the parsing (or any other expensive op before this) to be asynchronous wrt the
  rest of the training graph. This greatly improves read latency and speeds up
  training since the data will already be parsed and ready when each step of
  training needs it.

  All queue runners are added to the queue runners collection, and may be
  started via `start_queue_runners`.

  All ops are added to the default graph.

  Args:
    parsed_features: A dict of string key to `Tensor` or `SparseTensor` objects.
    keys: `Tensor` of string keys.
    feature_queue_capacity: Capacity of the parsed features queue.
    num_queue_runners: Deprecated. Defaults to 2 if this and
      `num_enqueue_threads` are both `None`. This is the number of queue
      runners to start for the feature queue. Adding multiple queue runners for
      the parsed example queue helps maintain a full queue when the subsequent
      computations overall are cheaper than parsing. This argument will be
      deprecated and replaced with `num_enqueue_threads`.
    num_enqueue_threads: Number of threads to enqueue the parsed example queue.
      Using multiple threads to enqueue the parsed example queue helps maintain
      a full queue when the subsequent computations overall are cheaper than
      parsing. This argument will replace `num_queue_runners`. This and
      `num_queue_runners` can not both be set.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` corresponding to `keys` if provided, otherwise `None`.
    -  A dict of string key to `Tensor` or `SparseTensor` objects corresponding
       to `parsed_features`.
  Raises:
    ValueError: for invalid inputs.
  """
  num_queue_runners, num_enqueue_threads = _check_enqueue_params(
      num_queue_runners, num_enqueue_threads)

  args = list(parsed_features.values())
  if keys is not None:
    args += [keys]

  with ops.name_scope(name, 'queue_parsed_features', args):
    # Lets also add preprocessed tensors into the queue types for each item of
    # the queue.
    tensors_to_enqueue = []
    # Each entry contains the key, and a boolean which indicates whether the
    # tensor was a sparse tensor.
    tensors_mapping = []
    # TODO(sibyl-Aix6ihai): Most of the functionality here is about pushing sparse
    # tensors into a queue. This could be taken care in somewhere else so others
    # can reuse it. Also, QueueBase maybe extended to handle sparse tensors
    # directly.
    for key in sorted(parsed_features.keys()):
      tensor = parsed_features[key]
      if isinstance(tensor, sparse_tensor.SparseTensor):
        tensors_mapping.append((key, True))
        tensors_to_enqueue.extend([tensor.indices, tensor.values, tensor.shape])
      else:
        tensors_mapping.append((key, False))
        tensors_to_enqueue.append(tensor)

    if keys is not None:
      tensors_to_enqueue.append(keys)

    queue_dtypes = [x.dtype for x in tensors_to_enqueue]
    input_queue = data_flow_ops.FIFOQueue(feature_queue_capacity, queue_dtypes)

    # Add a summary op to debug if our feature queue is full or not.
    summary.scalar('queue/parsed_features/%s/fraction_of_%d_full' %
                   (input_queue.name, feature_queue_capacity),
                   math_ops.cast(input_queue.size(), dtypes.float32) *
                   (1. / feature_queue_capacity))

    # Add multiple queue runners so that the queue is always full. Adding more
    # than two queue-runners may hog the cpu on the worker to fill up the queue.
    #
    # Note: this can result in large last batch being lost as the multiple queue
    # runner threads do not coordinate with each other. Please use
    # `num_enqueue_threads` instead.
    if num_queue_runners is not None:
      for _ in range(num_queue_runners):
        queue_runner.add_queue_runner(
            queue_runner.QueueRunner(
                input_queue, [input_queue.enqueue(tensors_to_enqueue)],
                queue_closed_exception_types=(errors.OutOfRangeError,
                                              errors.CancelledError)))
    # Use a single QueueRunner with multiple threads to enqueue so the queue is
    # always full. The threads are coordinated so the last batch will not be
    # lost.
    elif num_enqueue_threads is not None:
#......... remainder of code omitted .........
Developer: DavidNemeskey | Project: tensorflow | Lines: 101 | Source: graph_io.py


Example 19: _read_keyed_batch_examples_helper

def _read_keyed_batch_examples_helper(file_pattern,
                                      batch_size,
                                      reader,
                                      randomize_input=True,
                                      num_epochs=None,
                                      queue_capacity=10000,
                                      num_threads=1,
                                      read_batch_size=1,
                                      parse_fn=None,
                                      setup_shared_queue=False,
                                      name=None):
  """Adds operations to read, queue, batch `Example` protos.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    reader: A function or class that returns an object with
      `read` method, (filename tensor) -> (example tensor).
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If `None`, cycles through the dataset forever.
      NOTE - If specified, creates a variable that must be initialized, so call
      `tf.initialize_all_variables()` as shown in the tests.
    queue_capacity: Capacity for input queue.
    num_threads: The number of threads enqueuing examples.
    read_batch_size: An int or scalar `Tensor` specifying the number of
      records to read at once
    parse_fn: Parsing function, takes `Example` Tensor returns parsed
      representation. If `None`, no parsing is done.
    setup_shared_queue: Whether to set up a shared queue for file names.
    name: Name of resulting op.

  Returns:
    Returns tuple of:
    - `Tensor` of string keys.
    - String `Tensor` of batched `Example` proto.

  Raises:
    ValueError: for invalid inputs.
  """
  # Retrieve files to read.
  file_names = _get_file_names(file_pattern, randomize_input)

  # Check input parameters are given and reasonable.
  if (not queue_capacity) or (queue_capacity <= 0):
    raise ValueError('Invalid queue_capacity %s.' % queue_capacity)
  if (batch_size is None) or (
      (not isinstance(batch_size, ops.Tensor)) and
      (batch_size <= 0 or batch_size > queue_capacity)):
    raise ValueError(
        'Invalid batch_size %s, with queue_capacity %s.' %
        (batch_size, queue_capacity))
  if (read_batch_size is None) or (
      (not isinstance(read_batch_size, ops.Tensor)) and
      (read_batch_size <= 0)):
    raise ValueError('Invalid read_batch_size %s.' % read_batch_size)
  if (not num_threads) or (num_threads <= 0):
    raise ValueError('Invalid num_threads %s.' % num_threads)
  if (num_epochs is not None) and (num_epochs <= 0):
    raise ValueError('Invalid num_epochs %s.' % num_epochs)

  with ops.name_scope(name, 'read_batch_examples', [file_pattern]) as scope:
    with ops.name_scope('file_name_queue') as file_name_queue_scope:
      if setup_shared_queue:
        shared_file_name_queue = _get_shared_file_name_queue(
            file_names, randomize_input, num_epochs, file_name_queue_scope)
        file_name_queue = data_flow_ops.FIFOQueue(
            capacity=1, dtypes=[dtypes.string], shapes=[[]])
        enqueue_op = file_name_queue.enqueue(shared_file_name_queue.dequeue())
        queue_runner.add_queue_runner(
            queue_runner.QueueRunner(file_name_queue, [enqueue_op]))
      else:
        file_name_queue = input_ops.string_input_producer(
            constant_op.constant(
                file_names, name='input'),
            shuffle=randomize_input,
            num_epochs=num_epochs,
            name=file_name_queue_scope)

    example_list = _get_examples(file_name_queue, reader, num_threads,
                                 read_batch_size, parse_fn)

    enqueue_many = read_batch_size > 1

    if num_epochs is None:
      allow_smaller_final_batch = False
    else:
      allow_smaller_final_batch = True

    # Setup batching queue given list of read example tensors.
    if randomize_input:
      if isinstance(batch_size, ops.Tensor):
        min_after_dequeue = int(queue_capacity * 0.4)
      else:
        min_after_dequeue = max(queue_capacity - (3 * batch_size), batch_size)
      queued_examples_with_keys = input_ops.shuffle_batch_join(
          example_list, batch_size, capacity=queue_capacity,
          min_after_dequeue=min_after_dequeue,
          enqueue_many=enqueue_many, name=scope,
#......... remainder of code omitted .........
Developer: DavidNemeskey | Project: tensorflow | Lines: 101 | Source: graph_io.py


Example 20: set_many_fed_tensors

  def set_many_fed_tensors(self, tensors):
    """Sets batches of fed tensors."""
    enq_op = self._local_q.enqueue_many(tensors)
    queue_runner.add_queue_runner(queue_runner.QueueRunner(
        self._local_q, [enq_op]))
Developer: AlbertXiebnu | Project: tensorflow | Lines: 5 | Source: feeder.py



Note: The tensorflow.python.training.queue_runner.add_queue_runner examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub/MSDocs and similar platforms; the snippets were selected from open-source projects contributed by the community. Copyright of the source code remains with the original authors; consult each project's license before redistributing or using the code, and do not repost without permission.

