Python distribute_coordinator.run_distribute_coordinator Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.distribute.distribute_coordinator.run_distribute_coordinator. If you have been wondering what exactly run_distribute_coordinator does, how to call it, or what real-world usage looks like, the curated code examples below may help.



A total of 20 code examples of run_distribute_coordinator are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
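Before diving into the examples, a quick orientation may help: every snippet below funnels into the same call, where a worker_fn is handed a strategy object and executed inside a per-task worker context. The following sketch is not taken from any of the examples; it assumes a TensorFlow release (roughly 1.13 through 2.x) that still ships the private tensorflow.python.distribute.distribute_coordinator module, and it uses MirroredStrategy only as an illustrative stand-in for a strategy object.

import tensorflow as tf
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import distribute_coordinator_context as dc_context


def _worker_fn(strategy):
  # Each worker_fn receives a per-task copy of the strategy and can inspect
  # its own task through the current worker context.
  context = dc_context.get_current_worker_context()
  print("master_target=%s num_workers=%d is_chief=%s distributed_mode=%s" %
        (context.master_target, context.num_workers, context.is_chief,
         context.distributed_mode))


# With cluster_spec=None the coordinator runs a single local worker and the
# mode argument is effectively ignored; with a cluster_spec dict it runs one
# worker per task (STANDALONE_CLIENT mode spawns them as threads), which is
# the setup exercised by the tests below.
dc.run_distribute_coordinator(
    _worker_fn,
    tf.distribute.MirroredStrategy(),  # illustrative; any strategy exposing configure()
    mode=dc.CoordinatorMode.STANDALONE_CLIENT,
    cluster_spec=None)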

Example 1: testBetweenGraphContextWithChief

  def testBetweenGraphContextWithChief(self):
    # Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[CHIEF] = ["fake_chief"]

    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=cluster_spec,
        rpc_layer="grpc")

    # There is one CHIEF and there are NUM_WORKERS workers.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue(CHIEF in self._worker_context)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[CHIEF]), 1)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context[CHIEF][0],
                     ("grpc://fake_chief", 4, True, True))
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))
Contributor: aritratony, Project: tensorflow, Lines of code: 32, Source file: distribute_coordinator_test.py


Example 2: testInGraph

  def testInGraph(self):
    """Test it runs in-graph replicated training correctly."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        cluster_spec=self._cluster_spec,
        between_graph=False)
    self.assertEqual(self._result_correct, 1)
Contributor: dan-lennox, Project: tensorflow, Lines of code: 7, Source file: distribute_coordinator_test.py


Example 3: testRpcLayerEnvironmentVariable

  def testRpcLayerEnvironmentVariable(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
    tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}

    rpc_layer_from_coordinator = [None]

    def _run_mock_server(cluster_spec=None,
                         task_type=None,
                         task_id=None,
                         session_config=None,
                         rpc_layer=None,
                         environment=None):
      del cluster_spec, task_type, task_id, session_config, environment
      rpc_layer_from_coordinator[0] = rpc_layer
      return MockServer()

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            distribute_coordinator, "_run_std_server", _run_mock_server):
      distribute_coordinator.run_distribute_coordinator(
          None,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="ps",
          task_id=0)
    self.assertEqual(rpc_layer_from_coordinator[0], "cake")
Contributor: aritratony, Project: tensorflow, Lines of code: 28, Source file: distribute_coordinator_test.py


Example 4: testInGraphContextWithEval

  def testInGraphContextWithEval(self):
    # Adds an EVALUATOR job.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[EVALUATOR] = ["fake_evaluator"]

    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=cluster_spec,
        rpc_layer=None)

    # There are one "None" task and one EVALUATOR task.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue("None" in self._worker_context)
    self.assertTrue(EVALUATOR in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    self.assertEqual(len(self._worker_context[EVALUATOR]), 1)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
        _bytes_to_str(self._workers[0].target)), 3, True, True))
    self.assertEqual(self._worker_context[EVALUATOR][0],
                     ("fake_evaluator", 3, True, False))
Contributor: aritratony, Project: tensorflow, Lines of code: 25, Source file: distribute_coordinator_test.py


Example 5: _run_standalone_client

def _run_standalone_client(test_obj, strategy, cluster_spec):
  input_shape = (28, 28, 1)
  with strategy.scope():
    orig_model = _get_model(input_shape)

  def worker_fn(strategy):
    with ops.Graph().as_default():
      batch_size = 64
      steps = 2

      with strategy.scope():
        train_ds, _ = _mnist_synthetic_dataset(batch_size, steps)
        model = _clone_and_build_model(orig_model, strategy)

        orig_loss, orig_acc = model.evaluate(train_ds, steps=steps)

        # Workaround for the metrics issue (b/122928955) in async training. This
        # can only be used in standalone client mode.
        dc_context.get_current_worker_context().wait_for_other_workers()

        model.fit(x=train_ds, epochs=2, steps_per_epoch=steps)

        dc_context.get_current_worker_context().wait_for_other_workers()

        trained_loss, trained_acc = model.evaluate(train_ds, steps=steps)

      test_obj.assertLessEqual(trained_loss, orig_loss)
      test_obj.assertGreaterEqual(trained_acc, orig_acc)

  dc.run_distribute_coordinator(
      worker_fn,
      strategy,
      mode=dc.CoordinatorMode.STANDALONE_CLIENT,
      cluster_spec=cluster_spec)
Contributor: aritratony, Project: tensorflow, Lines of code: 34, Source file: multi_worker_test.py


Example 6: testInGraphStandaloneMode

  def testInGraphStandaloneMode(self):
    """Test it runs in-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)
    self.assertEqual(self._result_correct, 1)
Contributor: aritratony, Project: tensorflow, Lines of code: 7, Source file: distribute_coordinator_test.py


Example 7: testInGraphSplitMode

  def testInGraphSplitMode(self):
    """Test it runs in-graph replication in split client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        cluster_spec=self._cluster_spec,
        between_graph=False)
    self.assertEqual(self._result_correct, 1)
Contributor: ZhangXinNan, Project: tensorflow, Lines of code: 7, Source file: distribute_coordinator_test.py


Example 8: _thread_fn

  def _thread_fn(cluster_spec):
    distribute_coordinator.run_distribute_coordinator(
        None,
        None,
        mode=INDEPENDENT_WORKER,
        cluster_spec=cluster_spec,
        task_type="ps",
        task_id=0)
Contributor: mrlittlepig, Project: tensorflow, Lines of code: 8, Source file: distribute_coordinator_test.py


Example 9: _thread_fn

  def _thread_fn(cluster_spec):
    distribute_coordinator.run_distribute_coordinator(
        None,
        MockStrategy(between_graph=True),
        mode=INDEPENDENT_WORKER,
        cluster_spec=cluster_spec,
        task_type="ps",
        task_id=0)
Contributor: aritratony, Project: tensorflow, Lines of code: 8, Source file: distribute_coordinator_test.py


Example 10: testBetweenGraph

  def testBetweenGraph(self):
    """Test it runs between-graph replicated training correctly."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_worker_fn,
        cluster_spec=self._cluster_spec,
        between_graph=True)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)
Contributor: dan-lennox, Project: tensorflow, Lines of code: 9, Source file: distribute_coordinator_test.py


Example 11: testBetweenGraph

  def testBetweenGraph(self):
    """Test it runs between-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_worker_fn,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)
Contributor: aritratony, Project: tensorflow, Lines of code: 9, Source file: distribute_coordinator_test.py


Example 12: testBetweenGraphWithMonitoredSession

  def testBetweenGraphWithMonitoredSession(self):
    """Test monitored session in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_with_monitored_session,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)
Contributor: aritratony, Project: tensorflow, Lines of code: 9, Source file: distribute_coordinator_test.py


Example 13: testLocalContext

  def testLocalContext(self):
    # Dumps the task contexts to the self._task_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_task_context, cluster_spec=None, between_graph=True)

    # There is only a "None" task.
    self.assertEqual(len(self._task_context), 1)
    self.assertTrue("None" in self._task_context)
    self.assertEqual(len(self._task_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(self._task_context["None"][0], ("local", 0, True, False))
Contributor: dan-lennox, Project: tensorflow, Lines of code: 13, Source file: distribute_coordinator_test.py


Example 14: test_session_config_in_session_creator

  def test_session_config_in_session_creator(self):
    cluster_spec = {"worker": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict("os.environ",
                              {"TF_CONFIG": json.dumps(tf_config)}):
      distribute_coordinator.run_distribute_coordinator(
          self._worker_fn,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="worker",
          task_id=0)
    self.assertEqual(self._device_filters, ["/job:worker/task:0", "/job:ps"])
    self.assertEqual(self._intra_op_parallelism_threads, 2)
    self.assertEqual(self._inter_op_parallelism_threads, 0)
Contributor: AnishShah, Project: tensorflow, Lines of code: 16, Source file: distribute_coordinator_test.py


Example 15: testInGraphContext

  def testInGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)

    # There is only a "None" task in the dumped task context.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context["None"][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
Contributor: aritratony, Project: tensorflow, Lines of code: 17, Source file: distribute_coordinator_test.py


Example 16: testBetweenGraphStrategyProperties

  def testBetweenGraphStrategyProperties(self):
    # Dumps properties of the strategy objects.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_strategy_property,
        MockStrategy(between_graph=True, should_init=True),
        cluster_spec=self._cluster_spec)

    # There is only one type of task and there are three such tasks.
    self.assertEqual(len(self._strategy_property), 1)
    self.assertTrue(WORKER in self._strategy_property)
    self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)

    # Check whether each task has the right properties of should_init,
    # should_checkpoint and should_save_summary.
    self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
    self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
    self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))
Contributor: aritratony, Project: tensorflow, Lines of code: 17, Source file: distribute_coordinator_test.py


Example 17: test_eval_strategy_configure

  def test_eval_strategy_configure(self):
    cluster_spec = {"evaluator": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict("os.environ",
                              {"TF_CONFIG": json.dumps(tf_config)}):
      distribute_coordinator.run_distribute_coordinator(
          lambda _: None,
          MockStrategy(between_graph=False),
          eval_fn=self._worker_fn,
          eval_strategy=MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="evaluator",
          task_id=0)
    self.assertEqual(self._device_filters, ["/job:somejob"])
    self.assertEqual(self._intra_op_parallelism_threads, 0)
    self.assertEqual(self._inter_op_parallelism_threads, 2)
Contributor: aritratony, Project: tensorflow, Lines of code: 18, Source file: distribute_coordinator_test.py


Example 18: test_session_config_in_std_server

  def test_session_config_in_std_server(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            distribute_coordinator, "_run_std_server",
            self._dump_device_filters):
      distribute_coordinator.run_distribute_coordinator(
          lambda _: None,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="worker",
          task_id=0)
    self.assertEqual(self._intra_op_parallelism_threads, 1)
    self.assertEqual(self._inter_op_parallelism_threads, 0)
Contributor: aritratony, Project: tensorflow, Lines of code: 18, Source file: distribute_coordinator_test.py


Example 19: testBetweenGraphContext

  def testBetweenGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # There is only one type of task and there are three such tasks.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)

    # Check whether each task has the right master_target, num_workers, is_chief
    # and distributed_mode.
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS, False, True))
Contributor: aritratony, Project: tensorflow, Lines of code: 23, Source file: distribute_coordinator_test.py


Example 20: estimator_train

def estimator_train(estimator, train_distributed_fn, hooks):
  """Run distribute coordinator for Estimator's `train` method."""
  assert estimator._config._distribute_coordinator_mode
  run_config = estimator._config
  assert estimator._config.cluster_spec
  cluster_spec = multi_worker_util.normalize_cluster_spec(
      estimator._config.cluster_spec)
  assert estimator._config._train_distribute

  if 'evaluator' in cluster_spec.jobs:
    raise ValueError("'evaluator' job is not supported if you don't use "
                     '`train_and_evaluate`')

  if (estimator._config._distribute_coordinator_mode !=  # pylint: disable=protected-access
      dc.CoordinatorMode.STANDALONE_CLIENT):
    raise ValueError('Only `STANDALONE_CLIENT` mode is supported when you call '
                     '`estimator.train`')

  if estimator._config._train_distribute.extended.experimental_between_graph:
    # TODO(yuefengz): remove this limitation once we figure out how to merge
    # return values from `_worker_fn`s.
    raise ValueError('`Estimator.train` API is not supported for %s with '
                     '`STANDALONE_CLIENT` mode.' %
                     estimator._config._train_distribute.__class__.__name__)

  def _worker_fn(strategy):
    """Function for worker task."""
    local_estimator = copy.deepcopy(estimator)
    local_estimator._config._train_distribute = strategy
    context = dc_context.get_current_worker_context()
    _init_run_config_from_worker_context(local_estimator._config, context)
    logging.info('Updated config: %s', str(vars(local_estimator._config)))
    local_estimator._train_distribution = strategy

    if context.is_chief:
      chief_hooks = hooks
    else:
      chief_hooks = []
    train_distributed_fn(local_estimator, strategy, chief_hooks)
    return local_estimator

  return dc.run_distribute_coordinator(
      _worker_fn,
      estimator._config.train_distribute,
      mode=run_config._distribute_coordinator_mode,
      cluster_spec=cluster_spec,
      session_config=run_config.session_config)
Contributor: kylin9872, Project: tensorflow, Lines of code: 47, Source file: estimator_training.py



Note: The tensorflow.python.distribute.distribute_coordinator.run_distribute_coordinator examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation hosting platforms. The snippets were selected from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.

