
Python control_flow_ops.group Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.ops.control_flow_ops.group. If you are unsure what group does, how to call it, or where to find working examples of it, the curated code samples below should help.



The following presents 20 code examples of the group function, sorted by popularity by default.
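Before the compiled examples, a minimal self-contained sketch may help clarify what group actually does: it bundles several ops into a single tf.Operation (implemented as a NoOp with control dependencies on its inputs) that, when run, forces all of the grouped ops to execute but produces no value itself. The snippet below is illustrative rather than taken from any of the projects listed, and the variable names are made up; it uses the same graph-mode internal imports as the examples that follow.

# Minimal illustration of control_flow_ops.group (graph mode).
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables

with ops.Graph().as_default():
  v = variables.Variable(0)
  w = variables.Variable(0)
  inc_v = state_ops.assign_add(v, 1)   # first update op
  inc_w = state_ops.assign_add(w, 10)  # second update op
  # group() returns an Operation, not a Tensor: running it executes both
  # updates via control dependencies, but it has no output value of its own.
  update = control_flow_ops.group(inc_v, inc_w, name="update")
  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    sess.run(update)           # runs inc_v and inc_w together
    print(sess.run([v, w]))    # [1, 10]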

Example 1: benchmarkCholeskyOp

  def benchmarkCholeskyOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/cpu:0"):
        matrix = variables.Variable(self._GenerateMatrix(shape))
        l = linalg_ops.cholesky(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
                l,),
            min_iters=25,
            name="cholesky_cpu_{shape}".format(shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/device:GPU:0"):
          matrix = variables.Variable(self._GenerateMatrix(shape))
          l = linalg_ops.cholesky(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(
                  l,),
              min_iters=25,
              name="cholesky_gpu_{shape}".format(shape=shape))
Author: AbhinavJain13 | Project: tensorflow | Lines: 28 | Source: cholesky_op_test.py


Example 2: benchmarkMatrixBandPartOp

  def benchmarkMatrixBandPartOp(self):
    for shape_ in self.shapes:
      for limits in (-1, -1), (-1, 0), (0, -1), (2, 2):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/cpu:0"):
          matrix = variables.Variable(array_ops.ones(shape_))
          band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(band),
              min_iters=10,
              name="matrix_band_part_cpu_{shape}_{limits}".format(
                  shape=shape_, limits=limits))

        if test_lib.is_gpu_available(True):
          with ops.Graph().as_default(), \
              session.Session() as sess, \
              ops.device("/gpu:0"):
            matrix = variables.Variable(array_ops.ones(shape_))
            band = array_ops.matrix_band_part(matrix, limits[0], limits[1])
            variables.global_variables_initializer().run()
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(band),
                min_iters=10,
                name="matrix_band_part_gpu_{shape}_{limits}".format(
                    shape=shape_, limits=limits))
Author: AndrewTwinz | Project: tensorflow | Lines: 29 | Source: matrix_band_part_op_test.py


Example 3: benchmarkMatrixExponentialOp

  def benchmarkMatrixExponentialOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), \
          session.Session() as sess, \
          ops.device("/cpu:0"):
        matrix = self._GenerateMatrix(shape)
        expm = linalg_impl.matrix_exponential(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(expm),
            min_iters=25,
            name="matrix_exponential_cpu_{shape}".format(
                shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/gpu:0"):
          matrix = self._GenerateMatrix(shape)
          expm = linalg_impl.matrix_exponential(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(expm),
              min_iters=25,
              name="matrix_exponential_gpu_{shape}".format(
                  shape=shape))
Author: AnishShah | Project: tensorflow | Lines: 28 | Source: matrix_exponential_op_test.py


Example 4: benchmarkMatrixDeterminantOp

  def benchmarkMatrixDeterminantOp(self):
    for shape in self.shapes:
      with ops.Graph().as_default(), session.Session(
          config=benchmark.benchmark_config()) as sess, ops.device("/cpu:0"):
        matrix = self._GenerateMatrix(shape)
        d = linalg_ops.matrix_determinant(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(
                d,),
            min_iters=25,
            name="matrix_determinant_cpu_{shape}".format(shape=shape))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), session.Session(
            config=benchmark.benchmark_config()) as sess, ops.device("/gpu:0"):
          matrix = self._GenerateMatrix(shape)
          d = linalg_ops.matrix_determinant(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(
                  d,),
              min_iters=25,
              name="matrix_determinant_gpu_{shape}".format(shape=shape))
Author: abhinav-upadhyay | Project: tensorflow | Lines: 26 | Source: determinant_op_test.py


Example 5: benchmarkMatrixInverseOp

  def benchmarkMatrixInverseOp(self):
    for adjoint in False, True:
      for size in self.sizes:
        data = self._GenerateData(size)

        with ops.Graph().as_default(), \
            session.Session() as sess, \
            ops.device("/cpu:0"):
          inv = linalg_ops.matrix_inverse(data, adjoint=adjoint)
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(inv),
              min_iters=25,
              name="matrix_inverse_cpu_{size}_{adjoint}".format(
                  size=size, adjoint="adjoint" if adjoint else "noadjoint"))

        if test.is_gpu_available(True):
          with ops.Graph().as_default(), \
              session.Session() as sess, \
              ops.device("/gpu:0"):
            inv = linalg_ops.matrix_inverse(data, adjoint=adjoint)
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(inv),
                min_iters=25,
                name="matrix_inverse_gpu_{size}_{adjoint}".format(
                    size=size, adjoint="adjoint" if adjoint else "noadjoint"))
Author: LiorPe | Project: tensorflow | Lines: 27 | Source: matrix_inverse_op_test.py


Example 6: benchmarkQROp

  def benchmarkQROp(self):
    for shape_ in self.shapes:
      with ops.Graph().as_default(), \
          session.Session(config=benchmark.benchmark_config()) as sess, \
          ops.device("/cpu:0"):
        matrix_value = np.random.uniform(
            low=-1.0, high=1.0, size=shape_).astype(np.float32)
        matrix = variables.Variable(matrix_value)
        q, r = linalg_ops.qr(matrix)
        variables.global_variables_initializer().run()
        self.run_op_benchmark(
            sess,
            control_flow_ops.group(q, r),
            min_iters=25,
            name="QR_cpu_{shape}".format(shape=shape_))

      if test.is_gpu_available(True):
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/device:GPU:0"):
          matrix_value = np.random.uniform(
              low=-1.0, high=1.0, size=shape_).astype(np.float32)
          matrix = variables.Variable(matrix_value)
          q, r = linalg_ops.qr(matrix)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(q, r),
              min_iters=25,
              name="QR_gpu_{shape}".format(shape=shape_))
Author: aritratony | Project: tensorflow | Lines: 30 | Source: qr_op_test.py


Example 7: benchmarkMatrixSolveLsOp

  def benchmarkMatrixSolveLsOp(self):
    run_gpu_test = test_lib.is_gpu_available(True)
    regularizer = 1.0
    for matrix_shape in self.matrix_shapes:
      for num_rhs in 1, 2, matrix_shape[-1]:

        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/cpu:0"):
          matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
          x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(x),
              min_iters=25,
              store_memory_usage=False,
              name=("matrix_solve_ls_cpu_shape_{matrix_shape}_num_rhs_{num_rhs}"
                   ).format(matrix_shape=matrix_shape, num_rhs=num_rhs))

        if run_gpu_test and (len(matrix_shape) < 3 or matrix_shape[0] < 513):
          with ops.Graph().as_default(), \
                session.Session(config=benchmark.benchmark_config()) as sess, \
                ops.device("/gpu:0"):
            matrix, rhs = _GenerateTestData(matrix_shape, num_rhs)
            x = linalg_ops.matrix_solve_ls(matrix, rhs, regularizer)
            variables.global_variables_initializer().run()
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(x),
                min_iters=25,
                store_memory_usage=False,
                name=("matrix_solve_ls_gpu_shape_{matrix_shape}_num_rhs_"
                      "{num_rhs}").format(
                          matrix_shape=matrix_shape, num_rhs=num_rhs))
Author: JonathanRaiman | Project: tensorflow | Lines: 35 | Source: matrix_solve_ls_op_test.py


Example 8: benchmarkTridiagonalMulOp

    def benchmarkTridiagonalMulOp(self):
      devices = [('/cpu:0', 'cpu')]
      if test.is_gpu_available(cuda_only=True):
        devices += [('/gpu:0', 'gpu')]

      for device_option, size_option in itertools.product(devices, self.sizes):
        device_id, device_name = device_option
        m, batch_size, n = size_option

        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device(device_id):
          upper, diag, lower, vec = self._generateData(batch_size, m, n)
          x1 = self.baseline(upper, diag, lower, vec)
          x2 = linalg_impl.tridiagonal_matmul((upper, diag, lower),
                                              vec,
                                              diagonals_format='sequence')

          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(x1),
              min_iters=10,
              store_memory_usage=False,
              name=('tridiagonal_matmul_baseline_%s'
                    '_batch_size_%d_m_%d_n_%d' %
                    (device_name, batch_size, m, n)))

          self.run_op_benchmark(
              sess,
              control_flow_ops.group(x2),
              min_iters=10,
              store_memory_usage=False,
              name=('tridiagonal_matmul_%s_batch_size_%d_m_%d_n_%d' %
                    (device_name, batch_size, m, n)))
Author: aritratony | Project: tensorflow | Lines: 35 | Source: tridiagonal_matmul_op_test.py


Example 9: testGroup_MultiDevice

 def testGroup_MultiDevice(self):
   with ops.Graph().as_default() as g:
     with g.device("/task:0"):
       a = constant_op.constant(0, name="a")
       b = constant_op.constant(0, name="b")
     with g.device("/task:1"):
       c = constant_op.constant(0, name="c")
       d = constant_op.constant(0, name="d")
     with g.device("/task:2"):
       control_flow_ops.group(a.op, b.op, c.op, d.op, name="root")
   gd = g.as_graph_def()
   self.assertProtoEquals("""
     node { name: "a" op: "Const" device: "/task:0"}
     node { name: "b" op: "Const" device: "/task:0"}
     node { name: "c" op: "Const" device: "/task:1"}
     node { name: "d" op: "Const" device: "/task:1"}
     node { name: "root/NoOp" op: "NoOp" input: "^a" input: "^b"
            device: "/task:0" }
     node { name: "root/NoOp_1" op: "NoOp" input: "^c" input: "^d"
            device: "/task:1" }
     node { name: "root" op: "NoOp" input: "^root/NoOp" input: "^root/NoOp_1"
            device: "/task:2" }
   """, self._StripGraph(gd))
Author: tensorflow | Project: tensorflow | Lines: 26 | Source: control_flow_ops_test.py


Example 10: benchmarkMatrixInverseOp

  def benchmarkMatrixInverseOp(self):
    for adjoint in False, True:
      for shape in self.shapes:
        with ops.Graph().as_default(), \
            session.Session(config=benchmark.benchmark_config()) as sess, \
            ops.device("/cpu:0"):
          matrix = self._GenerateMatrix(shape)
          inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
          variables.global_variables_initializer().run()
          self.run_op_benchmark(
              sess,
              control_flow_ops.group(inv),
              min_iters=25,
              name="matrix_inverse_cpu_{shape}_adjoint_{adjoint}".format(
                  shape=shape, adjoint=adjoint))

        if test.is_gpu_available(True):
          with ops.Graph().as_default(), \
              session.Session(config=benchmark.benchmark_config()) as sess, \
              ops.device("/gpu:0"):
            matrix = self._GenerateMatrix(shape)
            inv = linalg_ops.matrix_inverse(matrix, adjoint=adjoint)
            variables.global_variables_initializer().run()
            self.run_op_benchmark(
                sess,
                control_flow_ops.group(inv),
                min_iters=25,
                name="matrix_inverse_gpu_{shape}_adjoint_{adjoint}".format(
                    shape=shape, adjoint=adjoint))
Author: bunbutter | Project: tensorflow | Lines: 29 | Source: matrix_inverse_op_test.py


Example 11: make_ops_and_vars_round_robin

  def make_ops_and_vars_round_robin(self, scope=None, cov_devices=None,
                                    inv_devices=None):
    """Make ops and vars with a round-robin device placement strategy.

    For each factor, all of that factor's cov variables and their associated
    update ops will be placed on a particular device.  A new device is chosen
    for each factor by cycling through the list of devices in the cov_devices
    argument. If cov_devices is None then no explicit device placement occurs.

    An analogous strategy is followed for inverse update ops, with the list of
    devices being given by the inv_devices argument.

    Inverse variables, on the other hand, are not placed on any specific
    device (they will just use the current device placement context, whatever
    that happens to be).  The idea is that the inverse variables belong where
    they will be accessed most often, which is the device that actually
    applies the preconditioner to the gradient. The user is responsible for
    setting the device context for this.

    Args:
      scope: A string or None.  If None it will be set to the name of this
        estimator (given by the name property). All variables will be created,
        and all ops will execute, inside of a variable scope of the given
        name. (Default: None)
      cov_devices: Iterable of device strings (e.g. '/gpu:0'). Covariance
        computations will be placed on these devices in a round-robin fashion.
        Can be None, which means that no devices are specified.
      inv_devices: Iterable of device strings (e.g. '/gpu:0'). Inversion
        computations will be placed on these devices in a round-robin fashion.
        Can be None, which means that no devices are specified.

    Returns:
      cov_update_ops: List of ops that compute the cov updates. Corresponds
        one-to-one with the list of factors given by the "factors" property.
      cov_update_op: cov_update_ops grouped into a single op.
      inv_update_ops: List of ops that compute the inv updates. Corresponds
        one-to-one with the list of factors given by the "factors" property.
      inv_update_op: inv_update_ops grouped into a single op.
      cov_update_thunks: Thunks that make the ops in cov_update_ops.
      inv_update_thunks: Thunks that make the ops in inv_update_ops.
    """
    (cov_update_thunks,
     inv_update_thunks) = self.make_vars_and_create_op_thunks_round_robin(
         scope=scope,
         cov_devices=cov_devices,
         inv_devices=inv_devices)
    cov_update_ops = [thunk() for thunk in cov_update_thunks]
    inv_update_ops = [thunk() for thunk in inv_update_thunks]

    scope = self.name if scope is None else scope
    with variable_scope.variable_scope(scope):
      cov_update_op = control_flow_ops.group(cov_update_ops,
                                             name="cov_update_op")
      inv_update_op = control_flow_ops.group(inv_update_ops,
                                             name="inv_update_op")

    return (cov_update_ops, cov_update_op, inv_update_ops, inv_update_op,
            cov_update_thunks, inv_update_thunks)
Author: PuchatekwSzortach | Project: tensorflow | Lines: 58 | Source: estimator.py


Example 12: _create_transient_vars

  def _create_transient_vars(self):
    """Creates local cache of factors, weights and gramian for rows and columns.

    Note that currently the caching strategy is as follows:
    When initiating a row (resp. column) update:
      - The column (resp. row) gramian is computed.
      - Optionally, if use_gramian_cache is True, the column (resp. row) Gramian
        is cached, while the row (resp. column) gramian is reset.
      - Optionally, if use_factors_weights_cache is True, the column (resp. row)
        factors and weights are cached, while the row (resp. column) factors and
        weights are reset.
    """

    (self._row_factors_cache, row_factors_cache_init,
     row_factors_cache_reset) = self._cached_copy(
         self._row_factors,
         "row_factors_cache",
         pass_through=not self._use_factors_weights_cache)
    (self._col_factors_cache, col_factors_cache_init,
     col_factors_cache_reset) = self._cached_copy(
         self._col_factors,
         "col_factors_cache",
         pass_through=not self._use_factors_weights_cache)
    (self._row_wt_cache, row_wt_cache_init, _) = self._cached_copy(
        self._row_weights,
        "row_wt_cache",
        pass_through=not self._use_factors_weights_cache)
    (self._col_wt_cache, col_wt_cache_init, _) = self._cached_copy(
        self._col_weights,
        "col_wt_cache",
        pass_through=not self._use_factors_weights_cache)
    (self._row_gramian_cache, row_gramian_cache_init,
     row_gramian_cache_reset) = self._cached_copy(
         self._row_gramian,
         "row_gramian_cache",
         pass_through=not self._use_gramian_cache)
    (self._col_gramian_cache, col_gramian_cache_init,
     col_gramian_cache_reset) = self._cached_copy(
         self._col_gramian,
         "col_gramian_cache",
         pass_through=not self._use_gramian_cache)

    self._row_updates_init = control_flow_ops.group(col_factors_cache_init,
                                                    row_factors_cache_reset,
                                                    col_gramian_cache_init,
                                                    row_gramian_cache_reset)
    self._col_updates_init = control_flow_ops.group(row_factors_cache_init,
                                                    col_factors_cache_reset,
                                                    row_gramian_cache_init,
                                                    col_gramian_cache_reset)

    if self._row_wt_cache is not None:
      assert self._col_wt_cache is not None
      self._worker_init = control_flow_ops.group(
          row_wt_cache_init, col_wt_cache_init, name="worker_init")
    else:
      self._worker_init = control_flow_ops.no_op(name="worker_init")
Author: AlbertXiebnu | Project: tensorflow | Lines: 57 | Source: factorization_ops.py


Example 13: head_ops

  def head_ops(self, features, target, mode, train_op_fn, logits=None,
               logits_input=None):
    """Returns ops for a model_fn.

    Args:
      features: input dict.
      target: target dict or tensor.
      mode: estimator's ModeKeys
      train_op_fn: function that takes a scalar loss and returns an op to
          optimize with the loss.
      logits: logits to be used for the head.
      logits_input: tensor to build logits from.

    Returns:
      `estimator.ModelFnOps`

    Raises:
      ValueError: if mode is not recognized.
    """
    _check_logits_input_not_supported(logits, logits_input)
    if mode == estimator.ModeKeys.TRAIN:
      loss, additional_train_op = self._training_loss(features, target,
                                                      logits, logits_input)

      train_op = train_op_fn(loss)

      if additional_train_op:
        if train_op:
          train_op = control_flow_ops.group(train_op, *additional_train_op)
        else:
          train_op = control_flow_ops.group(*additional_train_op)

      return estimator.ModelFnOps(
          mode=estimator.ModeKeys.TRAIN,
          loss=loss,
          training_op=train_op,
          default_metrics=self._default_metric(),
          signature_fn=self._create_signature_fn())

    if mode == estimator.ModeKeys.INFER:
      return estimator.ModelFnOps(
          mode=estimator.ModeKeys.INFER,
          predictions=self._infer_op(logits, logits_input),
          default_metrics=self._default_metric(),
          signature_fn=self._create_signature_fn())

    if mode == estimator.ModeKeys.EVAL:
      predictions, loss = self._eval_op(features, target, logits, logits_input)
      return estimator.ModelFnOps(
          mode=estimator.ModeKeys.EVAL,
          predictions=predictions,
          loss=loss,
          default_metrics=self._default_metric(),
          signature_fn=self._create_signature_fn())

    raise ValueError("mode=%s unrecognized." % str(mode))
Author: caikehe | Project: tensorflow | Lines: 56 | Source: head.py


Example 14: _resource_apply_sparse

 def _resource_apply_sparse(self, grad, var, indices):
   var_dtype = var.dtype.base_dtype
   lr_t = self._decayed_lr(var_dtype)
   rms = self.get_slot(var, "rms")
   rho = self._get_hyper("rho", var_dtype)
   momentum = self._get_hyper("momentum", var_dtype)
   epsilon = self._get_hyper("epsilon", var_dtype)
   if self._momentum:
     mom = self.get_slot(var, "momentum")
     if self.centered:
       mg = self.get_slot(var, "mg")
       return training_ops.resource_sparse_apply_centered_rms_prop(
           var.handle,
           mg.handle,
           rms.handle,
           mom.handle,
           lr_t,
           rho,
           momentum,
           epsilon,
           grad,
           indices,
           use_locking=self._use_locking)
     else:
       return training_ops.resource_sparse_apply_rms_prop(
           var.handle,
           rms.handle,
           mom.handle,
           lr_t,
           rho,
           momentum,
           epsilon,
           grad,
           indices,
           use_locking=self._use_locking)
   else:
     rms_scaled_g_values = (grad * grad) * (1. - rho)
     rms_t = state_ops.assign(rms, rms * rho, use_locking=self._use_locking)
     with ops.control_dependencies([rms_t]):
       rms_t = self._resource_scatter_add(rms, indices, rms_scaled_g_values)
       rms_slice = array_ops.gather(rms_t, indices)
     denom_slice = rms_slice
     if self.centered:
       mg = self.get_slot(var, "mg")
       mg_scaled_g_values = grad * (1. - rho)
       mg_t = state_ops.assign(mg, mg * rho, use_locking=self._use_locking)
       with ops.control_dependencies([mg_t]):
         mg_t = self._resource_scatter_add(mg, indices, mg_scaled_g_values)
         mg_slice = array_ops.gather(mg_t, indices)
         denom_slice = rms_slice - math_ops.square(mg_slice)
     var_update = self._resource_scatter_add(
         var, indices, -lr_t * grad / (math_ops.sqrt(denom_slice) + epsilon))
     if self.centered:
       return control_flow_ops.group(*[var_update, rms_t, mg_t])
     return control_flow_ops.group(*[var_update, rms_t])
Author: adit-chandra | Project: tensorflow | Lines: 55 | Source: rmsprop.py


Example 15: testPassingList

 def testPassingList(self):
   with ops.Graph().as_default() as g:
     a = constant_op.constant(0, name="a")
     b = constant_op.constant(0, name="b")
     control_flow_ops.group([a.op, b.op], name="root")
   gd = g.as_graph_def()
   self.assertProtoEquals("""
     node { name: "a" op: "Const"}
     node { name: "b" op: "Const"}
     node { name: "root" op: "NoOp" input: "^a" input: "^b" }
   """, self._StripGraph(gd))
Author: AndrewTwinz | Project: tensorflow | Lines: 11 | Source: control_flow_ops_test.py


Example 16: testGroup_OneDevice

 def testGroup_OneDevice(self):
   with ops.Graph().as_default() as g:
     with g.device("/task:0"):
       a = constant_op.constant(0, name="a")
       b = constant_op.constant(0, name="b")
     control_flow_ops.group(a.op, b.op, name="root")
   gd = g.as_graph_def()
   self.assertProtoEquals("""
     node { name: "a" op: "Const" device: "/task:0" }
     node { name: "b" op: "Const" device: "/task:0" }
     node { name: "root" op: "NoOp" input: "^a" input: "^b" device: "/task:0" }
   """, self._StripGraph(gd))
Author: AlbertXiebnu | Project: tensorflow | Lines: 12 | Source: control_flow_ops_test.py


Example 17: testGroup_NoDevices

 def testGroup_NoDevices(self):
   with ops.Graph().as_default() as g:
     a = constant_op.constant(0, name="a")
     b = constant_op.constant(0, name="b")
     c = constant_op.constant(0, name="c")
     control_flow_ops.group(a.op, b.op, c.op, name="root")
   gd = g.as_graph_def()
   self.assertProtoEquals("""
     node { name: "a" op: "Const"}
     node { name: "b" op: "Const"}
     node { name: "c" op: "Const"}
     node { name: "root" op: "NoOp" input: "^a" input: "^b" input: "^c" }
   """, self._StripGraph(gd))
Author: AlbertXiebnu | Project: tensorflow | Lines: 13 | Source: control_flow_ops_test.py


Example 18: randn_sampler_switchover

def randn_sampler_switchover(shape, num_iters, use_gpu=False):
  # Benchmark by constructing samplers on the threshold of using the randn
  # rejection sampling and check that this threshold is set correctly by
  # benchmarking with bounds just above and below this threshold.
  # The uniform and randn samplers should have about the same performance
  # at this point.

  stddev_inside_bounds_before_using_randn = (
      _get_stddev_inside_bounds_before_using_randn(use_gpu))

  epsilon = 0.001

  np.random.seed(1618)  # Make it reproducible.

  # No CSE/CF.
  optimizer_options = config_pb2.OptimizerOptions(
      opt_level=config_pb2.OptimizerOptions.L0)
  config = config_pb2.ConfigProto(
      graph_options=config_pb2.GraphOptions(
          optimizer_options=optimizer_options))

  with session.Session(config=config) as sess:
    with ops.device("/cpu:0" if not use_gpu else "/gpu:0"):
      uniform_sampler_op = control_flow_ops.group(
          random_ops.parameterized_truncated_normal(
              shape,
              means=0.,
              stddevs=1.0,
              minvals=-stddev_inside_bounds_before_using_randn + epsilon,
              maxvals=0.01))
      randn_sampler_op = control_flow_ops.group(
          random_ops.parameterized_truncated_normal(
              shape,
              means=0.,
              stddevs=1.0,
              minvals=-stddev_inside_bounds_before_using_randn - epsilon,
              maxvals=0.01))

    # Burn-in to avoid session setup costs in the timing.
    sess.run(uniform_sampler_op)
    sess.run(uniform_sampler_op)
    uniform_dt = timeit.timeit(
        lambda: sess.run(uniform_sampler_op), number=num_iters)

    sess.run(randn_sampler_op)
    sess.run(randn_sampler_op)
    randn_dt = timeit.timeit(
        lambda: sess.run(randn_sampler_op), number=num_iters)

    return randn_dt, uniform_dt
Author: Wajih-O | Project: tensorflow | Lines: 50 | Source: parameterized_truncated_normal_op_test.py


Example 19: build_graph

def build_graph(device, dtype, data_format, input_shape, filter_shape, strides,
                padding, num_iters, warmup_iters):
  """builds a graph containing a sequence of conv2d operations.

  Args:
    device: String, the device to run on.
    dtype: Data type for the convolution.
    data_format: A string from: "NHWC" or "NCHW". Data format for input and
                 output data.
    input_shape: Shape of the input tensor.
    filter_shape: Shape of the filter tensor.
    strides: A list of ints. 1-D of length 4. The stride of sliding
             window for each dimension of input.
    padding: A string from: "SAME", "VALID". The type of padding
             algorithm to use.
    num_iters: number of iterations to run conv2d.
    warmup_iters: number of iterations for warmup runs.

  Returns:
    An array of tensors to run()
  """
  with ops.device("/%s:0" % device):
    inp = variables.Variable(
        random_ops.truncated_normal(input_shape, dtype=dtype))
    filt = variables.Variable(
        random_ops.truncated_normal(filter_shape, dtype=dtype))

    outputs = []
    conv2d_op = nn_ops.conv2d(
        inp, filt, strides, padding, data_format=data_format)
    outputs.append(conv2d_op)
    for _ in range(1, num_iters):
      with ops.control_dependencies([conv2d_op]):
        conv2d_op = nn_ops.conv2d(
            inp, filt, strides, padding, data_format=data_format)
        outputs.append(conv2d_op)

    warmup_groups = []
    warmup_conv2d_op = nn_ops.conv2d(
        inp, filt, strides, padding, data_format=data_format)
    warmup_groups.append(warmup_conv2d_op)
    for _ in range(1, warmup_iters):
      with ops.control_dependencies([warmup_conv2d_op]):
        warmup_conv2d_op = nn_ops.conv2d(
            inp, filt, strides, padding, data_format=data_format)
        warmup_groups.append(warmup_conv2d_op)
    return control_flow_ops.group(*warmup_groups), control_flow_ops.group(
        *outputs)
Author: AnishShah | Project: tensorflow | Lines: 48 | Source: conv2d_benchmark.py


Example 20: restore

 def restore(self, restored_tensors, restored_shapes):
   if (self._cudnn_rnn.direction == CUDNN_RNN_UNIDIRECTION and
       self._cudnn_rnn.rnn_mode == CUDNN_LSTM):
     if len(restored_tensors) % 4 != 0:
       raise ValueError(
           "Invalid count of restored_tensors, expecting a multiple of 4.")
     weights = restored_tensors[:len(restored_tensors) // 4]
     biases = restored_tensors[len(restored_tensors) // 4:]
   elif (self._cudnn_rnn.direction == CUDNN_RNN_UNIDIRECTION and
         self._cudnn_rnn.rnn_mode == CUDNN_GRU):
     if len(restored_tensors) % 8 != 0:
       raise ValueError(
           "Invalid count of restored_tensors, expecting a multiple of 8.")
     weights = restored_tensors[:len(restored_tensors) // 8 * 3]
     biases = restored_tensors[len(restored_tensors) // 8 * 3:]
   else:
     weights = restored_tensors[:len(restored_tensors) // 2]
     biases = restored_tensors[len(restored_tensors) // 2:]
   weights, biases = self._untransform_canonical(weights, biases)
   params = self._canonical_to_params(weights, biases)
   if not isinstance(params, tuple):
     params = (params,)
   assign_ops = [
       state_ops.assign(variable, param, validate_shape=False)
       for variable, param in zip(self._variables, params)
   ]
   return control_flow_ops.group(*assign_ops)
Author: Dr4KK | Project: tensorflow | Lines: 27 | Source: cudnn_rnn_ops.py



Note: The tensorflow.python.ops.control_flow_ops.group examples in this article were compiled by 纯净天空 from source code and documentation hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not repost without permission.

