
Python partitioned_variables.variable_axis_size_partitioner Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.partitioned_variables.variable_axis_size_partitioner. If you have been wondering what variable_axis_size_partitioner does, how to call it, or how it is used in practice, the curated code examples below should help.



Below are 10 code examples of the variable_axis_size_partitioner function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
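All ten examples share the same call pattern: build a partitioner with a per-shard byte budget and hand it to variable_scope.get_variable, either directly or via an enclosing variable scope. Here is a minimal sketch of that pattern (TF1 graph-mode style, matching the examples; the scope name "example", the shape, and the 256 KB budget are illustrative assumptions, not values taken from any example below):

from tensorflow.python.framework import dtypes
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope

# Split variables along axis 0 so that each shard stays under max_shard_bytes.
partitioner = partitioned_variables.variable_axis_size_partitioner(
    max_shard_bytes=256 << 10)  # 256 KB per shard (illustrative value)

with variable_scope.variable_scope("example", partitioner=partitioner):
  # With a partitioner in effect, get_variable returns a PartitionedVariable;
  # call as_tensor() on it to read it back as one concatenated tensor.
  v = variable_scope.get_variable(
      "weights", shape=[1024, 1024], dtype=dtypes.float32)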

Example 1: testControlDepsNone

  def testControlDepsNone(self):
    with self.test_session() as session:
      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        # d gets the control dependency.
        d = constant_op.constant(2.0)
        # Partitioned variables do not.
        var_x = variable_scope.get_variable(
            "x",
            shape=[2],
            initializer=init_ops.ones_initializer(),
            partitioner=partitioned_variables.variable_axis_size_partitioner(4))

        ops_before_read = session.graph.get_operations()
        var_x.as_tensor()  # Caches the ops for subsequent reads.
        reading_ops = [
            op for op in session.graph.get_operations()
            if op not in ops_before_read
        ]

      self.assertEqual([c.op], d.op.control_inputs)
      # Tests that reading a partitioned variable, like reading a regular
      # variable, adds no control dependencies.
      for op in reading_ops:
        self.assertEqual([], op.control_inputs)
Author: AnishShah, Project: tensorflow, Lines of code: 25, Source: partitioned_variables_test.py


Example 2: benchmark_create_1000_partitions_with_100_parameter_servers

  def benchmark_create_1000_partitions_with_100_parameter_servers(self):
    workers, _ = test.create_local_cluster(num_workers=1, num_ps=100)
    worker_sessions = [session_lib.Session(w.target) for w in workers]
    worker = worker_sessions[0]
    partition_sizes = (1, 512, 1024 * 32, 1024 * 128)

    partitioned = []

    for partition_size in partition_sizes:
      # max_shard_bytes is 4 * partition_size, and the shape holds
      # 1000 * partition_size float32s (4 bytes each), so the variable should
      # partition into 1000 shards, each containing partition_size float32s.
      print("Building partitioned variable with %d floats per partition" %
            partition_size)
      with ops.device(device_setter.replica_device_setter(ps_tasks=100)):
        partitioned_ix = variable_scope.get_variable(
            "partitioned_%d" % partition_size,
            shape=[1000 * partition_size],
            dtype=dtypes.float32,
            # Each shard holds exactly partition_size float32s.
            partitioner=partitioned_variables.variable_axis_size_partitioner(
                max_shard_bytes=4 * partition_size))
        # Concatenates along axis 0
        partitioned.append(ops.convert_to_tensor(partitioned_ix))

    variables.global_variables_initializer().run(session=worker)

    for ix, partition_size in enumerate(partition_sizes):
      print("Running benchmark having partitions with %d floats" %
            partition_size)
      self.run_op_benchmark(
          worker,
          partitioned[ix],
          name=("read_concat_1000_partitions_from_"
                "100_parameter_servers_partsize_%d_floats" % partition_size))
Author: 1000sprites, Project: tensorflow, Lines of code: 34, Source: localhost_cluster_performance_test.py
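As a quick sanity check on the comment above, in plain Python arithmetic (not TensorFlow code): for every partition_size in the benchmark, a shard budget of 4 * partition_size bytes divides the variable's 4 * 1000 * partition_size total bytes into exactly 1000 shards.

# float32 is 4 bytes; the shard budget is 4 * partition_size bytes.
for partition_size in (1, 512, 1024 * 32, 1024 * 128):
    total_bytes = 1000 * partition_size * 4
    assert total_bytes // (4 * partition_size) == 1000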


Example 3: testVariableCreationInALoop

  def testVariableCreationInALoop(self):
    """Tests the variable created inside a loop can be used outside the loop."""
    with self.test_session():
      with variable_scope.variable_scope("ascope") as scope:
        def Body(i, _):
          var_x = variable_scope.get_variable(
              "x",
              shape=[2],
              initializer=init_ops.ones_initializer(),
              partitioner=partitioned_variables.variable_axis_size_partitioner(
                  4))
          return (i + 1, var_x.as_tensor())

        cond = lambda i, _: i < 2
        _, x = control_flow_ops.while_loop(
            cond, Body, (0, constant_op.constant([7, 8], dtypes.float32)))
        variables.global_variables_initializer().run()
        self.assertAllClose([1.0, 1.0], x.eval())

        scope.reuse_variables()
        var_x = variable_scope.get_variable(
            "x",
            shape=[2],
            initializer=init_ops.ones_initializer(),
            partitioner=partitioned_variables.variable_axis_size_partitioner(4))

        self.assertAllClose([1.0, 1.0], var_x.as_tensor().eval())
Author: ZhangXinNan, Project: tensorflow, Lines of code: 27, Source: partitioned_variables_test.py


Example 4: Body

 def Body(i, _):
   var_x = variable_scope.get_variable(
       "x",
       shape=[2],
       initializer=init_ops.ones_initializer(),
       partitioner=partitioned_variables.variable_axis_size_partitioner(
           4))
   return (i + 1, var_x.as_tensor())
Author: ZhangXinNan, Project: tensorflow, Lines of code: 8, Source: partitioned_variables_test.py


Example 5: testPartitions

 def testPartitions(self):
   shape = (10, 10)
   init = init_ops.identity_initializer()
   partitioner = partitioned_variables.variable_axis_size_partitioner(1)
   with self.test_session(graph=ops.Graph(), use_gpu=True):
     with variable_scope.variable_scope(
         "foo", partitioner=partitioner, initializer=init):
       v = array_ops.identity(variable_scope.get_variable("bar", shape=shape))
     variables.global_variables_initializer().run()
     self.assertAllClose(v.eval(), np.eye(*shape))
Author: HughKu, Project: tensorflow, Lines of code: 10, Source: init_ops_test.py


Example 6: _testVariableAxisSizePartitioner

  def _testVariableAxisSizePartitioner(self,
                                       name,
                                       axis,
                                       max_shard_bytes,
                                       expected_axis_shards,
                                       expected_partitions,
                                       max_shards=None):
    partitioner = partitioned_variables.variable_axis_size_partitioner(
        axis=axis, max_shard_bytes=max_shard_bytes, max_shards=max_shards)

    with variable_scope.variable_scope("root", partitioner=partitioner):
      v0 = variable_scope.get_variable(
          name, dtype=dtypes.float32, shape=(4, 8, 16, 32))
      v0_list = v0._get_variable_list()
      v0_part = v0._get_partitions()
      self.assertEqual(len(v0_list), expected_axis_shards)
      self.assertAllEqual(v0_part, expected_partitions)
Author: AnishShah, Project: tensorflow, Lines of code: 17, Source: partitioned_variables_test.py


Example 7: testPartitionedVariableMasking

 def testPartitionedVariableMasking(self):
   partitioner = partitioned_variables.variable_axis_size_partitioner(40)
   with self.test_session() as session:
     with variable_scope.variable_scope("", partitioner=partitioner):
       sparsity = variables.Variable(0.5, name="Sparsity")
       weights = variable_scope.get_variable(
           "weights", initializer=math_ops.linspace(1.0, 100.0, 100))
       masked_weights = pruning.apply_mask(
           weights, scope=variable_scope.get_variable_scope())
     p = pruning.Pruning(sparsity=sparsity)
     p._spec.threshold_decay = 0.0
     mask_update_op = p.mask_update_op()
     variables.global_variables_initializer().run()
     masked_weights_val = masked_weights.eval()
     session.run(mask_update_op)
     masked_weights_val = masked_weights.eval()
     self.assertAllEqual(np.count_nonzero(masked_weights_val), 51)
Author: Kongsea, Project: tensorflow, Lines of code: 17, Source: pruning_test.py


Example 8: testReadInWhileLoop

  def testReadInWhileLoop(self):
    """Tests the value is current (not cached) when read within a loop."""
    with self.test_session():
      var_x = variable_scope.get_variable(
          "x",
          shape=[2],
          initializer=init_ops.ones_initializer(),
          partitioner=partitioned_variables.variable_axis_size_partitioner(4))

      def Body(i, _):
        # Use an SGD step to update the variable's value.
        loss = math_ops.reduce_sum(var_x)
        optimizer = gradient_descent.GradientDescentOptimizer(1.0)
        minimize = optimizer.minimize(loss * 0.7)
        with ops.control_dependencies([minimize]):
          return (i + 1, var_x.as_tensor())

      cond = lambda i, _: i < 2
      _, x = control_flow_ops.while_loop(
          cond, Body, (0, constant_op.constant([7, 8], dtypes.float32)))
      variables.global_variables_initializer().run()
      self.assertAllClose([-0.4, -0.4], x.eval())
Author: ZhangXinNan, Project: tensorflow, Lines of code: 22, Source: partitioned_variables_test.py


Example 9: testConcat

  def testConcat(self):
    with self.test_session() as session:
      var_x = variable_scope.get_variable(
          "x",
          initializer=constant_op.constant([1., 2.]),
          partitioner=partitioned_variables.variable_axis_size_partitioner(4))

      c = constant_op.constant(1.0)
      with ops.control_dependencies([c]):
        ops_before_concat = session.graph.get_operations()
        value = var_x._concat()  # pylint: disable=protected-access
        concat_ops = [
            op for op in session.graph.get_operations()
            if op not in ops_before_concat
        ]

      concat_control_inputs = [
          ci for op in concat_ops for ci in op.control_inputs
      ]
      self.assertTrue(
          c.op in concat_control_inputs,
          "var_x._concat() should get control dependencies from its scope.")
      variables.global_variables_initializer().run()
      self.assertAllClose(value.eval(), var_x.as_tensor().eval())
Author: AnishShah, Project: tensorflow, Lines of code: 24, Source: partitioned_variables_test.py


Example 10: testVariableAxisSizePartitioner

  def testVariableAxisSizePartitioner(self):
    with self.test_session():
      # Create a partitioned variable of shape (4, 8, 16, 32) type float32
      # Bytes per slice along the given axes:

      # 8 * 16 * 32 * sizeof(float32) = 16384 / slice on axis 0
      # 4 * 16 * 32 * sizeof(float32) = 8192 / slice on axis 1
      # 4 * 8 * 32 * sizeof(float32) = 4096 / slice on axis 2
      # 4 * 8 * 16 * sizeof(float32) = 2048 / slice on axis 3

      # Now partition it in different ways...

      # No need to slice: bytes_per_slice * dim0 = 65536 < max_shard_bytes
      self._testVariableAxisSizePartitioner(
          "v0",
          axis=0,
          max_shard_bytes=131072,
          expected_axis_shards=1,
          expected_partitions=(1, 1, 1, 1))

      # Slice exactly once: bytes_per_slice * dim1 = 65536 = max_shard_bytes
      self._testVariableAxisSizePartitioner(
          "v1",
          axis=1,
          max_shard_bytes=65536,
          expected_axis_shards=1,
          expected_partitions=(1, 1, 1, 1))

      # Slice into 2 parts:
      # bytes_per_slice = 4096
      # slices_per_shard = 32768 / 4096 = 8
      # axis_shards = 16 / 8 = 2
      self._testVariableAxisSizePartitioner(
          "v2",
          axis=2,
          max_shard_bytes=32768,
          expected_axis_shards=2,
          expected_partitions=(1, 1, 2, 1))

      # This partitioner makes sure we maximize the number of shards along
      # axis 3. Slice it into 32 parts:
      # bytes_per_slice = 2048
      # slices_per_shard = 2048 / 2048 = 1
      # axis_shards = 32 / 1 = 32
      self._testVariableAxisSizePartitioner(
          "v3a",
          axis=3,
          max_shard_bytes=2048,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32))

      # This partitioner makes sure we do not go past the bound of allowable
      # number of shards along axis 3.
      # Slice into 32 parts:
      # bytes_per_slice = 2048
      # slices_per_shard = max(1, 1024 / 2048) = 1
      # axis_shards = 32 / 1 = 32
      # Slice into max of 32 parts because: max_shard_bytes < bytes_per_slice
      self._testVariableAxisSizePartitioner(
          "v3b",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32))

      # Specify max_shards so that it won't affect sharding.
      self._testVariableAxisSizePartitioner(
          "v3c",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=32,
          expected_partitions=(1, 1, 1, 32),
          max_shards=33)

      # Specify max_shards so that it will affect sharding.
      self._testVariableAxisSizePartitioner(
          "v3d",
          axis=3,
          max_shard_bytes=1024,
          expected_axis_shards=2,
          expected_partitions=(1, 1, 1, 2),
          max_shards=2)

      # Use the partitioner with strings
      partitioner_axis3_str = partitioned_variables.variable_axis_size_partitioner(  # pylint: disable=line-too-long
          axis=3,
          max_shard_bytes=32768,
          bytes_per_string_element=8)

      with variable_scope.variable_scope(
          "root", partitioner=partitioner_axis3_str):
        v3str = variable_scope.get_variable(
            "v3str",
            initializer=np.array([""] * 4 * 8 * 16 * 32).reshape(4, 8, 16, 32),
            dtype=dtypes.string,
            shape=(4, 8, 16, 32))
        v3str_list = v3str._get_variable_list()
        v3str_part = v3str._get_partitions()

        # Now the estimated bytes_per_slice = 4*8*16*bytes_per_string_element
#......... (part of the code omitted here) .........
Author: AnishShah, Project: tensorflow, Lines of code: 101, Source: partitioned_variables_test.py
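The per-case comments in this example all follow the same arithmetic. Below is a back-of-the-envelope reimplementation of that arithmetic as implied by those comments (a sketch, not TensorFlow's actual implementation, which may differ in rounding details):

import math

def expected_axis_shards(shape, axis, max_shard_bytes,
                         bytes_per_element=4, max_shards=None):
  # Bytes in one slice of thickness 1 along `axis`: the product of all
  # other dimensions times the element size (4 bytes for float32).
  bytes_per_slice = bytes_per_element
  for i, dim in enumerate(shape):
    if i != axis:
      bytes_per_slice *= dim
  # Number of slices that fit in one shard, but never fewer than one.
  slices_per_shard = max(1, max_shard_bytes // bytes_per_slice)
  shards = math.ceil(shape[axis] / slices_per_shard)
  return shards if max_shards is None else min(shards, max_shards)

# "v2": 4096 bytes/slice, 32768 // 4096 = 8 slices/shard, 16 / 8 = 2 shards.
assert expected_axis_shards((4, 8, 16, 32), axis=2, max_shard_bytes=32768) == 2
# "v3d": max_shard_bytes < bytes_per_slice gives 32 shards, capped at 2.
assert expected_axis_shards((4, 8, 16, 32), axis=3, max_shard_bytes=1024,
                            max_shards=2) == 2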



Note: the tensorflow.python.ops.partitioned_variables.variable_axis_size_partitioner examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with them; consult each project's License before distributing or using the code. Do not repost without permission.

