
Python logging.info Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.platform.logging.info. If you are wondering what the info function does, how to call it, or what it looks like in real code, the curated examples below should help.

Below, 20 code examples of the info function are shown, sorted by popularity by default.
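Before the examples, here is a minimal sketch of the call pattern they all share: info takes a printf-style format string followed by its arguments, and set_verbosity controls which levels are emitted. The import path is the old internal TensorFlow module used throughout this page; treat this as an illustrative sketch, not the current public API.

# Minimal usage sketch. The import below is the old internal module used
# by the examples on this page; in modern TensorFlow the equivalent calls
# live on tf.compat.v1.logging (or Python's standard logging module).
from tensorflow.python.platform import logging

logging.set_verbosity(logging.DEBUG)  # show DEBUG and above
logging.info('Loaded %d runs in %0.1f secs', 12, 3.4)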

Example 1: main

def main(unused_argv=None):
  if FLAGS.debug:
    logging.set_verbosity(logging.DEBUG)

  if not FLAGS.logdir:
    logging.error('A logdir must be specified. Run `tensorboard --help` for '
                  'details and examples.')
    return -1

  if FLAGS.debug:
    logging.info('Starting TensorBoard in directory %s', os.getcwd())

  path_to_run = ParseEventFilesFlag(FLAGS.logdir)
  multiplexer = event_multiplexer.AutoloadingMultiplexer(
      path_to_run=path_to_run, interval_secs=60,
      size_guidance=TENSORBOARD_SIZE_GUIDANCE)

  multiplexer.AutoUpdate(interval=30)

  factory = functools.partial(tensorboard_handler.TensorboardHandler,
                              multiplexer)
  try:
    server = ThreadedHTTPServer((FLAGS.host, FLAGS.port), factory)
  except socket.error:
    logging.error('Tried to connect to port %d, but that address is in use.',
                  FLAGS.port)
    return -2

  status_bar.SetupStatusBarInsideGoogle('TensorBoard', FLAGS.port)
  print('Starting TensorBoard on port %d' % FLAGS.port)
  print('(You can navigate to http://localhost:%d)' % FLAGS.port)
  server.serve_forever()
Developer: nikhilk, Project: tensorflow, Lines: 32, Source: tensorboard.py


Example 2: l1_regularizer

def l1_regularizer(scale):
    """Returns a function that can be used to apply L1 regularization to weights.

  L1 regularization encourages sparsity.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.

  Returns:
    A function with signature `l1(weights, name=None)` that applies L1
    regularization.

  Raises:
    ValueError: If scale is outside of the range [0.0, 1.0] or if scale is not a
    float.
  """
    if isinstance(scale, numbers.Integral):
        raise ValueError("scale cannot be an integer: %s" % scale)
    if isinstance(scale, numbers.Real):
        if scale < 0.0:
            raise ValueError("Setting a scale less than 0 on a regularizer: %g" % scale)
        if scale >= 1.0:
            raise ValueError("Setting a scale greater than 1 on a regularizer: %g" % scale)
        if scale == 0.0:
            logging.info("Scale of 0 disables regularizer.")
            return lambda _, name=None: None

    def l1(weights, name=None):
        """Applies L1 regularization to weights."""
        with ops.op_scope([weights], name, "l1_regularizer") as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name="scale")
            return standard_ops.mul(my_scale, standard_ops.reduce_sum(standard_ops.abs(weights)), name=scope)

    return l1
Developer: hlt-mt, Project: tensorflow, Lines: 34, Source: learn.py
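The function above returns a closure; the op it builds evaluates to scale * sum(|w|). As a self-contained illustration of that quantity, here is a NumPy stand-in (hypothetical names, not the TensorFlow API):

import numpy as np

def l1_penalty(weights, scale=0.1):
  # The same quantity the returned l1() op computes: scale * sum(|w|).
  return scale * np.sum(np.abs(weights))

w = np.array([0.5, -0.2, 0.0, 1.3])
print(l1_penalty(w))  # 0.1 * (0.5 + 0.2 + 0.0 + 1.3) = 0.2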


Example 3: Load

def Load():
  for (path, name) in six.iteritems(path_to_run):
    logging.info('Checking for new runs in %s', path)
    multiplexer.AddRunsFromDirectory(path, name)
  t = threading.Timer(interval_secs, Load)
  t.daemon = True
  t.start()
Developer: adam-erickson, Project: tensorflow, Lines: 7, Source: event_multiplexer.py


Example 4: ListRecursively

def ListRecursively(top):
  """Walks a directory tree, yielding (dir_path, file_paths) tuples.

  For the directory |top| and each of its subdirectories, yields a tuple
  containing the path to the directory and the path to each of the contained
  files.  Note that unlike os.walk()/gfile.Walk(), this does not list
  subdirectories, and the file paths are all absolute.

  Args:
    top: A path to a GCS directory.
  Returns:
    A list of (dir_path, file_paths) tuples.
  """
  if top.endswith('/'):
    wildcard = top + '**'
  else:
    wildcard = top + '/**'
  tuples = []
  try:
    file_paths = ListDirectory(wildcard)
  except subprocess.CalledProcessError as e:
    logging.info('%s, assuming it means no files were found', e)
    return []
  for file_path in file_paths:
    dir_path = os.path.dirname(file_path)
    if tuples and tuples[-1][0] == dir_path:
      tuples[-1][1].append(file_path)
    else:
      tuples.append((dir_path, [file_path]))
  return tuples
Developer: 13683116633, Project: tensorflow, Lines: 31, Source: gcs.py
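The accumulation loop above folds consecutive files that share a directory into one tuple, which relies on ListDirectory returning paths grouped by directory. A self-contained sketch of just that grouping step (hypothetical inputs, for illustration):

import os.path

def group_by_directory(file_paths):
  # Same accumulation as the loop above: consecutive files that share a
  # directory are folded into one (dir_path, file_paths) tuple.
  tuples = []
  for file_path in file_paths:
    dir_path = os.path.dirname(file_path)
    if tuples and tuples[-1][0] == dir_path:
      tuples[-1][1].append(file_path)
    else:
      tuples.append((dir_path, [file_path]))
  return tuples

paths = ['gs://bucket/run1/events.out.1', 'gs://bucket/run1/events.out.2',
         'gs://bucket/run2/events.out.1']
print(group_by_directory(paths))
# [('gs://bucket/run1', [...2 files...]), ('gs://bucket/run2', [...1 file...])]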


Example 5: wait_for_session

  def wait_for_session(self, master, config=None):
    """Creates a new `Session` and waits for model to be ready.

    Creates a new `Session` on 'master'.  Waits for the model to be
    initialized or recovered from a checkpoint.  It's expected that
    another thread or process will make the model ready, and that this
    is intended to be used by threads/processes that participate in a
    distributed training configuration where a different thread/process
    is responsible for initializing or recovering the model being trained.

    Args:
      master: `String` representation of the TensorFlow master to use.
      config: Optional ConfigProto proto used to configure the session.

    Returns:
      sess: A `Session`.
    """
    target = self._maybe_launch_in_process_server(master)
    sess = session.Session(target, graph=self._graph, config=config)
    if self._local_init_op:
      sess.run([self._local_init_op])
    while True:
      not_ready = self._model_not_ready(sess)
      if not not_ready:
        break
      self._safe_close(sess)
      logging.info("Waiting for model to be ready: %s", not_ready)
      time.sleep(self._recovery_wait_secs)
      sess = session.Session(target, graph=self._graph, config=config)

    return sess
Developer: UIUC-SULLIVAN, Project: tensorflow, Lines: 31, Source: session_manager.py


Example 6: _serve_static_file

  def _serve_static_file(self, path):
    """Serves the static file located at the given path.

    Args:
      path: The path of the static file, relative to the tensorboard/ directory.
    """
    # Strip off the leading forward slash.
    path = path.lstrip('/')
    if not self._path_is_safe(path):
      logging.info('path %s not safe, sending 404', path)
      # Traversal attack, so 404.
      self.send_error(404)
      return

    if path.startswith('external'):
      path = os.path.join('../', path)
    else:
      path = os.path.join('tensorboard', path)
    # Open the file and read it.
    try:
      contents = resource_loader.load_resource(path)
    except IOError:
      logging.info('path %s not found, sending 404', path)
      self.send_error(404)
      return

    self.send_response(200)

    mimetype = mimetypes.guess_type(path)[0] or 'application/octet-stream'
    self.send_header('Content-Type', mimetype)
    self.end_headers()
    self.wfile.write(contents)
Developer: adam-erickson, Project: tensorflow, Lines: 32, Source: tensorboard_handler.py
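_path_is_safe is not shown in the snippet. A minimal sketch of what such a traversal check might look like (an assumption, not TensorBoard's actual implementation):

import posixpath

def path_is_safe(path):
  # Hypothetical sketch of a traversal check like _path_is_safe above:
  # after normalization the path must be relative and must not climb
  # out of the serving root via '..'.
  normalized = posixpath.normpath(path)
  return not posixpath.isabs(normalized) and not normalized.startswith('..')

print(path_is_safe('js/app.js'))       # True
print(path_is_safe('../secrets.txt'))  # False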


Example 7: l2_regularizer

def l2_regularizer(scale):
    """Returns a function that can be used to apply L2 regularization to weights.

  Small values of L2 can help prevent overfitting the training data.

  Args:
    scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.

  Returns:
    A function with signature `l2(weights, name=None)` that applies L2
    regularization.

  Raises:
    ValueError: If scale is outside of the range [0.0, 1.0] or if scale is not a
    float.
  """
    if isinstance(scale, numbers.Integral):
        raise ValueError("scale cannot be an integer: %s" % (scale,))
    if isinstance(scale, numbers.Real):
        if scale < 0.0:
            raise ValueError("Setting a scale less than 0 on a regularizer: %g." % scale)
        if scale >= 1.0:
            raise ValueError("Setting a scale greater than 1 on a regularizer: %g." % scale)
        if scale == 0.0:
            logging.info("Scale of 0 disables regularizer.")
            return lambda _, name=None: None

    def l2(weights, name=None):
        """Applies l2 regularization to weights."""
        with ops.op_scope([weights], name, "l2_regularizer") as scope:
            my_scale = ops.convert_to_tensor(scale, dtype=weights.dtype.base_dtype, name="scale")
            return standard_ops.mul(my_scale, nn.l2_loss(weights), name=scope)

    return l2
Developer: hlt-mt, Project: tensorflow, Lines: 34, Source: learn.py
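For comparison with Example 2: nn.l2_loss(w) in TensorFlow computes sum(w ** 2) / 2, so the returned l2() op evaluates to scale * sum(w ** 2) / 2. A NumPy stand-in (hypothetical names, illustration only):

import numpy as np

def l2_penalty(weights, scale=0.1):
  # nn.l2_loss(w) is sum(w ** 2) / 2, so the returned l2() op
  # evaluates to scale * sum(w ** 2) / 2.
  return scale * np.sum(np.square(weights)) / 2.0

w = np.array([3.0, 4.0])
print(l2_penalty(w))  # 0.1 * (9 + 16) / 2 = 1.25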


Example 8: predicate

def predicate(e):
    err_str = e.message
    op = e.op
    while op is not None:
        err_str += "\nCaused by: " + op.name
        op = op._original_op
    logging.info("Searching within error strings: '%s' within '%s'",
                 expected_err_re_or_predicate, err_str)
    return re.search(expected_err_re_or_predicate, err_str)
Developer: chingfongsu, Project: tensorflow, Lines: 8, Source: test_util.py


Example 9: testCoNLLFormat

def testCoNLLFormat(self):
  self.WriteContext('conll-sentence')
  logging.info('Writing conll file to: %s', self.corpus_file)
  with open(self.corpus_file, 'w') as f:
    f.write((CONLL_DOC1 + u'\n\n' + CONLL_DOC2 + u'\n')
            .replace(' ', '\t').encode('utf-8'))
  self.ValidateDocuments()
  self.BuildLexicon()
  self.ValidateTagToCategoryMap()
Developer: 1206lyp, Project: models, Lines: 9, Source: lexicon_builder_test.py


Example 10: testParseUntilNotAlive

def testParseUntilNotAlive(self):
  """Ensures that the 'alive' condition works in the Cond ops."""
  with self.test_session(graph=tf.Graph()) as sess:
    t = self.MakeGraph(batch_size=3, beam_size=2, max_steps=5).training
    sess.run(t['inits'])
    for i in range(5):
      logging.info('run %d', i)
      tf_alive = t['alive'].eval()
      self.assertFalse(any(tf_alive))
Developer: 1206lyp, Project: models, Lines: 9, Source: beam_reader_ops_test.py


Example 11: WriteContext

def WriteContext(self, corpus_format):
  context = task_spec_pb2.TaskSpec()
  self.AddInput('documents', self.corpus_file, corpus_format, context)
  for name in ('word-map', 'lcword-map', 'tag-map',
               'category-map', 'label-map', 'prefix-table',
               'suffix-table', 'tag-to-category'):
    self.AddInput(name, os.path.join(FLAGS.test_tmpdir, name), '', context)
  logging.info('Writing context to: %s', self.context_file)
  with open(self.context_file, 'w') as f:
    f.write(str(context))
Developer: 1206lyp, Project: models, Lines: 10, Source: text_formats_test.py


Example 12: _Load

def _Load():
  start = time.time()
  for (path, name) in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  multiplexer.Reload()
  duration = time.time() - start
  logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
  t = threading.Timer(LOAD_INTERVAL, _Load)
  t.daemon = True
  t.start()
Developer: hlt-mt, Project: tensorflow, Lines: 10, Source: tensorboard.py
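Examples 3 and 12 both use the same rescheduling idiom: at the end of each run, the function arms a daemon threading.Timer on itself, so the work repeats without blocking and without keeping the process alive. A self-contained sketch of that idiom (interval and body are placeholders):

import threading

def tick(interval_secs=60):
  # ... do the periodic work here ...
  t = threading.Timer(interval_secs, tick, args=(interval_secs,))
  t.daemon = True  # don't keep the process alive just for the timer
  t.start()

tick(1)  # the first call arms the chain of timers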


Example 13: convert_variables_to_constants

def convert_variables_to_constants(sess, input_graph_def, output_node_names):
  """Replaces all the variables in a graph with constants of the same values.

  If you have a trained graph containing Variable ops, it can be convenient to
  convert them all to Const ops holding the same values. This makes it possible
  to describe the network fully with a single GraphDef file, and allows the
  removal of a lot of ops related to loading and saving the variables.

  Args:
    sess: Active TensorFlow session containing the variables.
    input_graph_def: GraphDef object holding the network.
    output_node_names: List of name strings for the result nodes of the graph.

  Returns:
    GraphDef containing a simplified version of the original.
  """
  found_variables = {}
  variable_names = []
  variable_dict_names = []
  for node in input_graph_def.node:
    if node.op == "Assign":
      variable_name = node.input[0]
      variable_dict_names.append(variable_name)
      variable_names.append(variable_name + ":0")
  if variable_names:
    returned_variables = sess.run(variable_names)
  else:
    returned_variables = []
  found_variables = dict(zip(variable_dict_names, returned_variables))
  logging.info("Frozen %d variables." % len(returned_variables))

  # This graph only includes the nodes needed to evaluate the output nodes, and
  # removes unneeded nodes like those involved in saving and assignment.
  inference_graph = extract_sub_graph(input_graph_def, output_node_names)

  output_graph_def = graph_pb2.GraphDef()
  how_many_converted = 0
  for input_node in inference_graph.node:
    output_node = graph_pb2.NodeDef()
    if input_node.name in found_variables:
      output_node.op = "Const"
      output_node.name = input_node.name
      dtype = input_node.attr["dtype"]
      data = found_variables[input_node.name]
      output_node.attr["dtype"].CopyFrom(dtype)
      output_node.attr["value"].CopyFrom(attr_value_pb2.AttrValue(
          tensor=tensor_util.make_tensor_proto(data,
                                               dtype=dtype.type,
                                               shape=data.shape)))
      how_many_converted += 1
    else:
      output_node.CopyFrom(input_node)
    output_graph_def.node.extend([output_node])
  print("Converted %d variables to const ops." % how_many_converted)
  return output_graph_def
Developer: 2php, Project: tensorflow, Lines: 55, Source: graph_util.py
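A hedged end-to-end usage sketch of the function above, assuming a TensorFlow 1.x-style environment; the tiny graph and the node names are placeholders invented for the demonstration:

import tensorflow as tf  # assumes a 1.x-style API
from tensorflow.python.framework.graph_util import convert_variables_to_constants

# A trivial graph with one variable, so there is something to freeze.
x = tf.placeholder(tf.float32, shape=[None], name='x')
w = tf.Variable(2.0, name='w')
y = tf.multiply(x, w, name='y')

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  frozen = convert_variables_to_constants(sess, sess.graph.as_graph_def(), ['y'])
  with open('frozen.pb', 'wb') as f:
    f.write(frozen.SerializeToString())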


Example 14: CheckTokenization

def CheckTokenization(self, sentence, tokenization):
  self.WriteContext('english-text')
  logging.info('Writing text file to: %s', self.corpus_file)
  with open(self.corpus_file, 'w') as f:
    f.write(sentence)
  sentence, _ = gen_parser_ops.document_source(
      self.context_file, batch_size=1)
  with self.test_session() as sess:
    sentence_doc = self.ReadNextDocument(sess, sentence)
    self.assertEqual(' '.join([t.word for t in sentence_doc.token]),
                     tokenization)
Developer: 1206lyp, Project: models, Lines: 11, Source: text_formats_test.py


Example 15: AddRunsFromDirectory

  def AddRunsFromDirectory(self, path, name=None):
    """Load runs from a directory; recursively walks subdirectories.

    If path doesn't exist, no-op. This ensures that it is safe to call
      `AddRunsFromDirectory` multiple times, even before the directory is made.

    If path is a directory, load event files in the directory (if any exist) and
      recursively call AddRunsFromDirectory on any subdirectories. This means you
      can call AddRunsFromDirectory at the root of a tree of event logs and
      TensorBoard will load them all.

    If the `EventMultiplexer` is already loaded this will cause
    the newly created accumulators to `Reload()`.

    Args:
      path: A string path to a directory to load runs from.
      name: Optionally, what name to apply to the runs. If name is provided
        and the directory contains run subdirectories, the name of each subrun
        is the concatenation of the parent name and the subdirectory name. If
        name is provided and the directory contains event files, then a run
        is added called "name" and with the events from the path.

    Raises:
      ValueError: If the path exists and isn't a directory.

    Returns:
      The `EventMultiplexer`.
    """
    subdirs = []
    if gcs.IsGCSPath(path):
      subdirs = [
          subdir
          for (subdir, files) in gcs.ListRecursively(path)
          if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
      ]
    else:
      if not gfile.Exists(path):
        return  # Maybe it hasn't been created yet, fail silently to retry later
      if not gfile.IsDirectory(path):
        raise ValueError('AddRunsFromDirectory: path exists and is not a '
                         'directory, %s' % path)
      subdirs = [
          subdir
          for (subdir, _, files) in gfile.Walk(path)
          if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
      ]

    for subdir in subdirs:
      logging.info('Adding events from directory %s', subdir)
      rpath = os.path.relpath(subdir, path)
      subname = os.path.join(name, rpath) if name else rpath
      self.AddRun(subdir, name=subname)

    return self
Developer: 4chin, Project: tensorflow, Lines: 53, Source: event_multiplexer.py


Example 16: Load

  def Load(self):
    """Loads new values from disk.

    The watcher will load from one file at a time; as soon as that file stops
    yielding events, it will move on to the next file. We assume that old files
    are never modified after a newer file has been written. As a result, Load()
    can be called multiple times in a row without losing events that have not
    been yielded yet. In other words, we guarantee that every event will be
    yielded exactly once.

    Yields:
      All values that were written to disk that have not been yielded yet.
    """

    # Create a loader if one does not exist yet.
    if not self._loader:
      self._InitializeLoader()

    while True:
      # Yield all the new events in the file we're currently loading from.
      for event in self._loader.Load():
        yield event

      next_path = self._GetNextPath()
      if not next_path:
        logging.info('No more files in %s', self._directory)
        # Current file is empty and there are no new files, so we're done.
        return

      # There's a new file, so check to make sure there weren't any events
      # written between when we finished reading the current file and when we
      # checked for the new one. The sequence of events might look something
      # like this:
      #
      # 1. Event #1 written to file #1.
      # 2. We check for events and yield event #1 from file #1
      # 3. We check for events and see that there are no more events in file #1.
      # 4. Event #2 is written to file #1.
      # 5. Event #3 is written to file #2.
      # 6. We check for a new file and see that file #2 exists.
      #
      # Without this loop, we would miss event #2. We're also guaranteed by the
      # loader contract that no more events will be written to file #1 after
      # events start being written to file #2, so we don't have to worry about
      # that.
      for event in self._loader.Load():
        yield event

      logging.info('Directory watcher for %s advancing to file %s',
                   self._directory, next_path)

      # Advance to the next file and start over.
      self._SetPath(next_path)
Developer: ray2020, Project: tensorflow, Lines: 53, Source: directory_watcher.py
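The key subtlety above is the second _loader.Load() pass before advancing, which catches events written to the current file after the first drain. A self-contained sketch of that control flow over in-memory lists standing in for files (all names hypothetical):

def watch(files):
  # files: a list of lists; each inner list stands in for one event file.
  # Mirror the generator above: drain the current file, look for a newer
  # file, drain the current file once more, then advance.
  index, cursor = 0, 0
  while True:
    while cursor < len(files[index]):  # yield new events in this file
      yield files[index][cursor]
      cursor += 1
    if index + 1 >= len(files):        # no newer file: done for now
      return
    while cursor < len(files[index]):  # re-check for late writes
      yield files[index][cursor]
      cursor += 1
    index, cursor = index + 1, 0       # advance to the next file

print(list(watch([[1, 2], [3]])))  # [1, 2, 3]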


Example 17: wait_for_session

  def wait_for_session(self, master, config=None, max_wait_secs=float("Inf")):
    """Creates a new `Session` and waits for model to be ready.

    Creates a new `Session` on 'master'.  Waits for the model to be
    initialized or recovered from a checkpoint.  It's expected that
    another thread or process will make the model ready, and that this
    is intended to be used by threads/processes that participate in a
    distributed training configuration where a different thread/process
    is responsible for initializing or recovering the model being trained.

    NB: The amount of time this method waits for the session is bounded
    by max_wait_secs. By default, this function will wait indefinitely.

    Args:
      master: `String` representation of the TensorFlow master to use.
      config: Optional ConfigProto proto used to configure the session.
      max_wait_secs: Maximum time to wait for the session to become available.

    Returns:
      A `Session`. May be None if the operation exceeds the timeout
      specified by config.operation_timeout_in_ms.

    Raises:
      tf.DeadlineExceededError: if the session is not available after
        max_wait_secs.
    """
    target = self._maybe_launch_in_process_server(master)

    if max_wait_secs is None:
      max_wait_secs = float("Inf")
    timer = _CountDownTimer(max_wait_secs)

    while True:
      sess = session.Session(target, graph=self._graph, config=config)
      if self._local_init_op:
        sess.run([self._local_init_op])
      not_ready = self._model_not_ready(sess)
      if not not_ready:
        return sess

      self._safe_close(sess)

      # Do we have enough time left to try again?
      remaining_ms_after_wait = (
          timer.secs_remaining() - self._recovery_wait_secs)
      if remaining_ms_after_wait < 0:
        raise errors.DeadlineExceededError(
            None, None,
            "Session was not ready after waiting %d secs." % (max_wait_secs,))

      logging.info("Waiting for model to be ready: %s", not_ready)
      time.sleep(self._recovery_wait_secs)
Developer: 2php, Project: tensorflow, Lines: 52, Source: session_manager.py
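_CountDownTimer is referenced but not included in the snippet. A minimal sketch of what such a helper might look like (an assumption, not the actual TensorFlow class):

import time

class CountDownTimer(object):
  """Hypothetical stand-in for the _CountDownTimer used above."""

  def __init__(self, duration_secs):
    self._start = time.time()
    self._duration = duration_secs

  def secs_remaining(self):
    # How much of the original budget is left; never negative.
    return max(0.0, self._duration - (time.time() - self._start))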


Example 18: testParsingReaderOp

    def testParsingReaderOp(self):
        # Runs the reader over the test input for two epochs.
        num_steps_a = 0
        num_actions = 0
        num_word_ids = 0
        num_tag_ids = 0
        num_label_ids = 0
        batch_size = 10
        with self.test_session() as sess:
            (words, tags, labels), epochs, gold_actions = gen_parser_ops.gold_parse_reader(
                self._task_context, 3, batch_size, corpus_name="training-corpus"
            )
            while True:
                tf_gold_actions, tf_epochs, tf_words, tf_tags, tf_labels = sess.run(
                    [gold_actions, epochs, words, tags, labels]
                )
                num_steps_a += 1
                num_actions = max(num_actions, max(tf_gold_actions) + 1)
                num_word_ids = max(num_word_ids, self.GetMaxId(tf_words) + 1)
                num_tag_ids = max(num_tag_ids, self.GetMaxId(tf_tags) + 1)
                num_label_ids = max(num_label_ids, self.GetMaxId(tf_labels) + 1)
                self.assertIn(tf_epochs, [0, 1, 2])
                if tf_epochs > 1:
                    break

        # Runs the reader again, this time with a lot of added graph nodes.
        num_steps_b = 0
        with self.test_session() as sess:
            num_features = [6, 6, 4]
            num_feature_ids = [num_word_ids, num_tag_ids, num_label_ids]
            embedding_sizes = [8, 8, 8]
            hidden_layer_sizes = [32, 32]
            # Here we aim to test the iteration of the reader op in a complex network,
            # not the GraphBuilder.
            parser = graph_builder.GreedyParser(
                num_actions, num_features, num_feature_ids, embedding_sizes, hidden_layer_sizes
            )
            parser.AddTraining(self._task_context, batch_size, corpus_name="training-corpus")
            sess.run(parser.inits.values())
            while True:
                tf_epochs, tf_cost, _ = sess.run(
                    [parser.training["epochs"], parser.training["cost"], parser.training["train_op"]]
                )
                num_steps_b += 1
                self.assertGreaterEqual(tf_cost, 0)
                self.assertIn(tf_epochs, [0, 1, 2])
                if tf_epochs > 1:
                    break

        # Assert that the two runs made the exact same number of steps.
        logging.info("Number of steps in the two runs: %d, %d", num_steps_a, num_steps_b)
        self.assertEqual(num_steps_a, num_steps_b)
Developer: RTsien, Project: models, Lines: 52, Source: reader_ops_test.py


Example 19: pin_to_cpu

def pin_to_cpu(op):
  """Returns a CPU device for the given node."""
  device = op.device if op.device is not None else ""
  dev = pydev.from_string(device)

  if not dev.device_type:
    return set_cpu0(device)
  if dev.device_type == "CPU":
    return device

  logging.info("Operation %s has been assigned to a non-CPU (%s), so "
               "it will not be pinned to the CPU.", op.name, dev.device_type)
  return device
Developer: gatech-cse6242, Project: testbed, Lines: 13, Source: graph_util.py


Example 20: ReloadMultiplexer

def ReloadMultiplexer(multiplexer, path_to_run):
  """Loads all runs into the multiplexer.

  Args:
    multiplexer: The `EventMultiplexer` to add runs to and reload.
    path_to_run: A dict mapping from paths to run names, where `None` as the run
      name is interpreted as a run name equal to the path.
  """
  start = time.time()
  for (path, name) in six.iteritems(path_to_run):
    multiplexer.AddRunsFromDirectory(path, name)
  multiplexer.Reload()
  duration = time.time() - start
  logging.info('Multiplexer done loading. Load took %0.1f secs', duration)
Developer: 13331151, Project: tensorflow, Lines: 14, Source: server.py
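A hypothetical call sketch, grounded in the docstring above: path_to_run maps log directories to run names, and a value of None means "use the path as the run name". The directories here are placeholders.

# Hypothetical usage sketch (directory names invented for illustration).
path_to_run = {'/tmp/logs/run1': 'run1', '/tmp/logs/run2': None}
ReloadMultiplexer(multiplexer, path_to_run)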



Note: The tensorflow.python.platform.logging.info examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not reproduce without permission.

