
Python tensorflow.as_dtype Function Code Examples


This article collects typical usage examples of the tensorflow.as_dtype function in Python. If you are wondering what as_dtype does, how to call it, or what its use looks like in real projects, the curated code samples below should help.



The following presents 20 code examples of the as_dtype function, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code samples.
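
Before diving into the examples, here is a minimal sketch (written against the TensorFlow 1.x API used throughout the examples below; the variable names are illustrative only) of what as_dtype does: it converts a type name string, a numpy dtype, or a DataType enum value into a tf.DType object whose properties can then be inspected.

    import numpy as np
    import tensorflow as tf

    # tf.as_dtype accepts a string, a numpy dtype, or a DataType enum value
    # and returns the corresponding tf.DType object.
    dt_from_string = tf.as_dtype("float32")                      # tf.float32
    dt_from_numpy = tf.as_dtype(np.int64)                        # tf.int64
    dt_from_enum = tf.as_dtype(dt_from_string.as_datatype_enum)  # round-trips to tf.float32

    # The returned DType exposes the properties exercised in the tests below.
    print(dt_from_string.is_floating)     # True
    print(dt_from_numpy.is_integer)       # True
    print(dt_from_string.as_numpy_dtype)  # <class 'numpy.float32'>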

Example 1: _setup_training

    def _setup_training(self):
        """Sets up graph, model and trainer."""
        self._graph = tf.Graph()
        with self._graph.as_default():
            tf.set_random_seed(self.tf_random_seed)
            self._global_step = tf.Variable(0, name="global_step", trainable=False)

            # Setting up input and output placeholders.
            input_shape = [None] + self._data_feeder.input_shape[1:]
            output_shape = [None] + self._data_feeder.output_shape[1:]
            self._inp = tf.placeholder(
                tf.as_dtype(self._data_feeder.input_dtype), input_shape,
                name="input")
            self._out = tf.placeholder(
                tf.as_dtype(self._data_feeder.output_dtype), output_shape,
                name="output")

            # Create model's graph.
            self._model_predictions, self._model_loss = self.model_fn(self._inp, self._out)

            # Create trainer and augment graph with gradients and optimizer.
            self._trainer = TensorFlowTrainer(self._model_loss,
                self._global_step, self.optimizer, self.learning_rate)
            self._session = tf.Session(self.tf_master,
             config=tf.ConfigProto(log_device_placement=self.log_device_placement))
Developer ID: priyamuurali, Project: skflow, Lines of code: 25, Source file: __init__.py


Example 2: _setup_training

    def _setup_training(self):
        """Sets up graph, model and trainer."""
        self._graph = tf.Graph()
        self._graph.add_to_collection("IS_TRAINING", True)
        with self._graph.as_default():
            tf.set_random_seed(self.tf_random_seed)
            self._global_step = tf.Variable(
                0, name="global_step", trainable=False)

            # Setting up input and output placeholders.
            input_shape = [None] + self._data_feeder.input_shape[1:]
            output_shape = [None] + self._data_feeder.output_shape[1:]
            self._inp = tf.placeholder(
                tf.as_dtype(self._data_feeder.input_dtype), input_shape,
                name="input")
            self._out = tf.placeholder(
                tf.as_dtype(self._data_feeder.output_dtype), output_shape,
                name="output")

            # If class weights are provided, add them to the graph.
            # Different loss functions can use this tensor by name.
            if self.class_weight:
                self._class_weight_node = tf.constant(
                    self.class_weight, name='class_weight')

            # Add histograms for X and y if they are floats.
            if self._data_feeder.input_dtype in (np.float32, np.float64):
                tf.histogram_summary("X", self._inp)
            if self._data_feeder.output_dtype in (np.float32, np.float64):
                tf.histogram_summary("y", self._out)

            # Create model's graph.
            self._model_predictions, self._model_loss = self.model_fn(
                self._inp, self._out)

            # Create summary to monitor loss
            tf.scalar_summary("loss", self._model_loss)

            # Set up a single operator to merge all the summaries
            self._summaries = tf.merge_all_summaries()

            # Create trainer and augment graph with gradients and optimizer.
            # Additionally creates initialization ops.
            self._trainer = TensorFlowTrainer(
                loss=self._model_loss, global_step=self._global_step,
                optimizer=self.optimizer, learning_rate=self.learning_rate)

            # Create model's saver capturing all the nodes created up until now.
            self._saver = tf.train.Saver(
                max_to_keep=self.max_to_keep,
                keep_checkpoint_every_n_hours=self.keep_checkpoint_every_n_hours)

            # Enable monitor to create validation data dict with appropriate tf placeholders
            self._monitor.create_val_feed_dict(self._inp, self._out)

            # Create session to run model with.
            if self.config_addon is None:
                self.config_addon = ConfigAddon(verbose=self.verbose)
            self._session = tf.Session(self.tf_master, config=self.config_addon.config)
Developer ID: 6779660, Project: tensorflow, Lines of code: 59, Source file: base.py


Example 3: _setup_training

    def _setup_training(self):
        """Sets up graph, model and trainer."""
        self._graph = tf.Graph()
        self._graph.add_to_collection("IS_TRAINING", True)
        with self._graph.as_default():
            tf.set_random_seed(self.tf_random_seed)
            self._global_step = tf.Variable(
                0, name="global_step", trainable=False)

            # Setting up input and output placeholders.
            input_shape = [None] + self._data_feeder.input_shape[1:]
            output_shape = [None] + self._data_feeder.output_shape[1:]
            self._inp = tf.placeholder(
                tf.as_dtype(self._data_feeder.input_dtype), input_shape,
                name="input")
            self._out = tf.placeholder(
                tf.as_dtype(self._data_feeder.output_dtype), output_shape,
                name="output")

            # Add histograms for X and y if they are floats.
            if self._data_feeder.input_dtype in (np.float32, np.float64):
                tf.histogram_summary("X", self._inp)
            if self._data_feeder.output_dtype in (np.float32, np.float64):
                tf.histogram_summary("y", self._out)

            # Create model's graph.
            self._model_predictions, self._model_loss = self.model_fn(
                self._inp, self._out)

            # Create summary to monitor loss
            tf.scalar_summary("loss", self._model_loss)

            # Set up a single operator to merge all the summaries
            self._summaries = tf.merge_all_summaries()

            with tf.name_scope("test") as scope:
                if self.n_classes > 0:
                    correct_prediction = tf.equal(tf.argmax(self._out,1), tf.argmax(self._model_predictions,1))
                    self._cv_accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
                    self._cv_accuracy_summary = tf.scalar_summary("cv accuracy", self._cv_accuracy)

            # Create trainer and augment graph with gradients and optimizer.
            # Additionally creates initialization ops.
            self._trainer = TensorFlowTrainer(
                loss=self._model_loss, global_step=self._global_step,
                optimizer=self.optimizer, learning_rate=self.learning_rate)

            # Create model's saver capturing all the nodes created up until now.
            self._saver = tf.train.Saver(
                max_to_keep=self.max_to_keep,
                keep_checkpoint_every_n_hours=self.keep_checkpoint_every_n_hours)

            # Create session to run model with.
            self._session = tf.Session(self.tf_master,
                                       config=tf.ConfigProto(
                                           log_device_placement=self.verbose > 1,
                                           inter_op_parallelism_threads=self.num_cores,
                                           intra_op_parallelism_threads=self.num_cores))
Developer ID: Phybbit, Project: skflow, Lines of code: 59, Source file: base.py


Example 4: testAllTypesConvertibleToNumpyDtype

 def testAllTypesConvertibleToNumpyDtype(self):
     for datatype_enum in types_pb2.DataType.values():
         if not _is_numeric_dtype_enum(datatype_enum):
             continue
         dtype = tf.as_dtype(datatype_enum)
         numpy_dtype = dtype.as_numpy_dtype
         _ = np.empty((1, 1, 1, 1), dtype=numpy_dtype)
         if dtype.base_dtype != tf.bfloat16:
             # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
             self.assertEqual(tf.as_dtype(datatype_enum).base_dtype, tf.as_dtype(numpy_dtype))
Developer ID: paolodedios, Project: tensorflow, Lines of code: 10, Source file: dtypes_test.py


Example 5: testIsInteger

 def testIsInteger(self):
   self.assertEqual(tf.as_dtype("int8").is_integer, True)
   self.assertEqual(tf.as_dtype("int16").is_integer, True)
   self.assertEqual(tf.as_dtype("int32").is_integer, True)
   self.assertEqual(tf.as_dtype("int64").is_integer, True)
   self.assertEqual(tf.as_dtype("uint8").is_integer, True)
   self.assertEqual(tf.as_dtype("complex64").is_integer, False)
   self.assertEqual(tf.as_dtype("float").is_integer, False)
   self.assertEqual(tf.as_dtype("double").is_integer, False)
   self.assertEqual(tf.as_dtype("string").is_integer, False)
   self.assertEqual(tf.as_dtype("bool").is_integer, False)
Developer ID: DapengLan, Project: tensorflow, Lines of code: 11, Source file: dtypes_test.py


Example 6: testIsFloating

 def testIsFloating(self):
   self.assertEqual(tf.as_dtype("int8").is_floating, False)
   self.assertEqual(tf.as_dtype("int16").is_floating, False)
   self.assertEqual(tf.as_dtype("int32").is_floating, False)
   self.assertEqual(tf.as_dtype("int64").is_floating, False)
   self.assertEqual(tf.as_dtype("uint8").is_floating, False)
   self.assertEqual(tf.as_dtype("complex64").is_floating, False)
   self.assertEqual(tf.as_dtype("float32").is_floating, True)
   self.assertEqual(tf.as_dtype("float64").is_floating, True)
   self.assertEqual(tf.as_dtype("string").is_floating, False)
   self.assertEqual(tf.as_dtype("bool").is_floating, False)
Developer ID: DapengLan, Project: tensorflow, Lines of code: 11, Source file: dtypes_test.py


Example 7: testIsUnsigned

 def testIsUnsigned(self):
   self.assertEqual(tf.as_dtype("int8").is_unsigned, False)
   self.assertEqual(tf.as_dtype("int16").is_unsigned, False)
   self.assertEqual(tf.as_dtype("int32").is_unsigned, False)
   self.assertEqual(tf.as_dtype("int64").is_unsigned, False)
   self.assertEqual(tf.as_dtype("uint8").is_unsigned, True)
   self.assertEqual(tf.as_dtype("float32").is_unsigned, False)
   self.assertEqual(tf.as_dtype("float64").is_unsigned, False)
   self.assertEqual(tf.as_dtype("bool").is_unsigned, False)
   self.assertEqual(tf.as_dtype("string").is_unsigned, False)
   self.assertEqual(tf.as_dtype("complex64").is_unsigned, False)
Developer ID: DapengLan, Project: tensorflow, Lines of code: 11, Source file: dtypes_test.py


Example 8: fprop

 def fprop(self, x, **kwargs):
     del kwargs
     with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE):
         w1 = tf.constant([[1.5, .3], [-2, 0.3]],
                          dtype=tf.as_dtype(x.dtype))
         w2 = tf.constant([[-2.4, 1.2], [0.5, -2.3]],
                          dtype=tf.as_dtype(x.dtype))
     h1 = tf.nn.sigmoid(tf.matmul(x, w1))
     res = tf.matmul(h1, w2)
     return {self.O_LOGITS: res,
             self.O_PROBS: tf.nn.softmax(res)}
Developer ID: limin24kobe, Project: cleverhans, Lines of code: 11, Source file: test_attacks.py


Example 9: __init__

    def __init__(self, band_count, images, labels, dtype=tf.float32):
        """
        Construct a DataSet.
        `dtype` can be either `uint8` to leave the input as `[0, 255]`,
        or `float32` to rescale into `[0, 1]`.
        """
        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype)
        assert images.shape[0] == labels.shape[0], (
                            'images.shape: %s labels.shape: %s' % (images.shape, labels.shape))
        self._num_examples = images.shape[0]

        # Convert shape from [num examples, rows, columns, depth]
        # to [num examples, rows*columns] (assuming depth == 1)
        assert images.shape[3] == band_count

        # Store the width and height of the images before flattening it, if only for reference.
        image_height, image_width = images.shape[1], images.shape[2]
        self.original_image_width = image_width
        self.original_image_height = image_height

        images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]*images.shape[3])
        if dtype == tf.float32:
            # Convert from [0, 255] -> [0.0, 1.0]
            images = images.astype(numpy.float32)
            images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
Developer ID: giserh, Project: DeepOSM, Lines of code: 31, Source file: DataSet.py


Example 10: random_sign_uniform

def random_sign_uniform(
    shape, minval=None, maxval=None, dtype=tf.float32, seed=None):
  """Tensor with (possibly complex) random entries from a "sign Uniform".

  Letting `Z` be a random variable equal to `-1` and `1` with equal probability,
  Samples from this `Op` are distributed like

  ```
  Z * X, where X ~ Uniform[minval, maxval], if dtype is real,
  Z * (X + iY),  where X, Y ~ Uniform[minval, maxval], if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    minval:  `0-D` `Tensor` giving the minimum values.
    maxval:  `0-D` `Tensor` giving the maximum values.
    dtype:  `TensorFlow` `dtype` or Python dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = tf.as_dtype(dtype)

  with tf.name_scope("random_sign_uniform"):
    unsigned_samples = random_uniform(
        shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
    if seed is not None:
      seed += 12
    signs = tf.sign(tf.random_uniform(shape, minval=-1., maxval=1., seed=seed))
    return unsigned_samples * tf.cast(signs, unsigned_samples.dtype)
Developer ID: curtiszimmerman, Project: tensorflow, Lines of code: 31, Source file: linear_operator_test_util.py


Example 11: random_normal

def random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None):
  """Tensor with (possibly complex) Gaussian entries.

  Samples are distributed like

  ```
  N(mean, stddev^2), if dtype is real,
  X + iY,  where X, Y ~ N(mean, stddev^2) if dtype is complex.
  ```

  Args:
    shape:  `TensorShape` or Python list.  Shape of the returned tensor.
    mean:  `Tensor` giving mean of normal to sample from.
    stddev:  `Tensor` giving stdev of normal to sample from.
    dtype:  `TensorFlow` `dtype` or numpy dtype
    seed:  Python integer seed for the RNG.

  Returns:
    `Tensor` with desired shape and dtype.
  """
  dtype = tf.as_dtype(dtype)

  with tf.name_scope("random_normal"):
    samples = tf.random_normal(
        shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
    if dtype.is_complex:
      if seed is not None:
        seed += 1234
      more_samples = tf.random_normal(
          shape, mean=mean, stddev=stddev, dtype=dtype.real_dtype, seed=seed)
      samples = tf.complex(samples, more_samples)
    return samples
Developer ID: curtiszimmerman, Project: tensorflow, Lines of code: 32, Source file: linear_operator_test_util.py


Example 12: __init__

    def __init__(self, instances, labels, xlength, ylength, imagedata, 
                 patch_image_width, dtype = tf.float32):

        dtype = tf.as_dtype(dtype).base_dtype
        if dtype not in (tf.uint8, tf.float32):
          raise TypeError('Invalid image dtype %r, expected uint8 or float32'
                            % dtype)
        assert instances.shape[0] == labels.shape[0], (
          'instances.shape: %s labels.shape: %s' % (instances.shape,
                                                    labels.shape))
        self._num_examples = instances.shape[0]

        if dtype == tf.float32:
          for i in range(len(imagedata)):
            imagedata[i] = imagedata[i].astype(np.float32)
            imagedata[i] = np.multiply(imagedata[i], 1.0 / 255.0)

        self._instances        = instances
        self._labels           = labels
        self._epochs_completed = 0
        self._index_in_epoch   = 0
        self._xlength          = xlength
        self._ylength          = ylength
        self._imagedata        = imagedata
        self._patch_image_width= patch_image_width
Developer ID: liuyifly06, Project: bubblecount, Lines of code: 25, Source file: dataset.py


Example 13: __init__

  def __init__(self, expression_profile, labels, feature_name, label_name, dtype=tf.float32):
    """Construct a DataSet.
    `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid Dataset input dtype %r, expected uint8 or float32' %
                      dtype)
    assert expression_profile.shape[0] == labels.shape[0], (
      'expression_profile.shape: %s labels.shape: %s' % (expression_profile.shape,
                                             labels.shape))
    self._num_examples = expression_profile.shape[0]
    # Convert shape from [num examples, rows, columns, depth]
    # to [num examples, rows*columns] (assuming depth == 1)
    assert expression_profile.shape[1] == 77
    expression_profile = expression_profile.reshape(expression_profile.shape[0], expression_profile.shape[1])

    self._expression_profile = expression_profile
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
    self._feature_name = feature_name
    self._label_name = label_name
Developer ID: shkr, Project: tensorflow_examples, Lines of code: 25, Source file: input_protein_data.py


Example 14: _ProcessHealthPill

  def _ProcessHealthPill(self, wall_time, step, device_name, node_name,
                         output_slot, elements):
    """Processes a health pill value by adding it to accumulated state.

    Args:
      wall_time: The time at which the health pill was created. Provided by the
        debugger.
      step: The step at which the health pill was created. Provided by the
        debugger.
      device_name: The name of the node's device.
      node_name: The name of the node for this health pill.
      output_slot: The output slot for this health pill.
      elements: An ND array of 20 floats. The elements of the health pill.
    """
    # Key by the node name for fast retrieval of health pills by node name. The
    # array is cast to a list so that it is JSON-able. The debugger data plugin
    # serves a JSON response.
    self._health_pills.AddItem(node_name,
                               HealthPillEvent(
                                   wall_time=wall_time,
                                   step=step,
                                   device_name=device_name,
                                   node_name=node_name,
                                   output_slot=output_slot,
                                   dtype=repr(tf.as_dtype(elements[12])),
                                   shape=list(elements[14:]),
                                   value=list(elements)))
Developer ID: adityaatluri, Project: tensorflow, Lines of code: 27, Source file: event_accumulator.py


Example 15: __init__

  def __init__(self, images, labels, fake_data=False, one_hot=False,
               dtype=tf.float32):
    """Construct a DataSet.

    one_hot arg is used only if fake_data is true.  `dtype` can be either
    `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
    `[0, 1]`.
    """
    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)
    if fake_data:
      self._num_examples = 10000
      self.one_hot = one_hot
    else:
      assert images.shape[0] == labels.shape[0], (
          'images.shape: %s labels.shape: %s' % (images.shape,
                                                 labels.shape))
      self._num_examples = images.shape[0]

    self._images = images
    self._labels = labels
    self._epochs_completed = 0
    self._index_in_epoch = 0
Developer ID: j-pong, Project: tensorflow_test, Lines of code: 25, Source file: BEDLDC_input_distance.py


Example 16: __init__

 def __init__(self, images, labels, fake_data=False, one_hot=False,
              dtype=tf.float32):
   """Construct a DataSet.
   one_hot arg is used only if fake_data is true.  `dtype` can be either
   `uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
   `[0, 1]`.
   """
   dtype = tf.as_dtype(dtype).base_dtype
   if dtype not in (tf.uint8, tf.float32):
     raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                     dtype)
   if fake_data:
     self._num_examples = 10000
     self.one_hot = one_hot
   else:
     assert images.shape[0] == labels.shape[0], (
         'images.shape: %s labels.shape: %s' % (images.shape,
                                                labels.shape))
     self._num_examples = images.shape[0]
     # Convert shape from [num examples, rows, columns, depth]
     # to [num examples, rows*columns] (assuming depth == 1)
     assert images.shape[3] == 1
     images = images.reshape(images.shape[0],
                             images.shape[1] * images.shape[2])
     if dtype == tf.float32:
       # Convert from [0, 255] -> [0.0, 1.0].
       images = images.astype(numpy.float32)
       images = numpy.multiply(images, 1.0 / 255.0)
   self._images = images
   self._labels = labels
   self._epochs_completed = 0
   self._index_in_epoch = 0
Developer ID: TextMiningCapstone, Project: Library, Lines of code: 32, Source file: input_data.py


Example 17: __init__

  def __init__(self, images, labels, preprocess="scale", da=False,
               dtype=tf.float32):

    dtype = tf.as_dtype(dtype).base_dtype
    if dtype not in (tf.uint8, tf.float32):
      raise TypeError('Invalid image dtype %r, expected uint8 or float32' %
                      dtype)

    assert images.shape[0] == labels.shape[0], (
        'images.shape: %s labels.shape: %s' % (images.shape,
                                               labels.shape))
    self._num_examples = images.shape[0]


    if dtype == tf.float32:
        # Convert from [0, 255] -> [0.0, 1.0].
        images = images.astype(np.float32)
        
        if preprocess=="IMAGENET":
            
            VGG_MEAN=np.array([123.68, 116.779, 103.99])
            
            images[:,:,:,0]-= VGG_MEAN[0]
            images[:,:,:,1]-= VGG_MEAN[1]
            images[:,:,:,2]-= VGG_MEAN[2]
            
        elif preprocess=="scale":            
            images = np.multiply(images, 1.0 / 255.0)
        
    self._images = images
    self._labels = labels
    self._DA=da
    self._epochs_completed = 0
    self._index_in_epoch = 0
Developer ID: jeandut, Project: tensorflow-models, Lines of code: 35, Source file: dataset_class.py


Example 18: _compute_health_pill

  def _compute_health_pill(self, x):
    x_clean = x[np.where(
        np.logical_and(
            np.logical_not(np.isnan(x)), np.logical_not(np.isinf(x))))]
    if np.size(x_clean):
      x_min = np.min(x_clean)
      x_max = np.max(x_clean)
      x_mean = np.mean(x_clean)
      x_var = np.var(x_clean)
    else:
      x_min = np.inf
      x_max = -np.inf
      x_mean = np.nan
      x_var = np.nan

    return np.array([
        1.0,  # Assume is initialized.
        np.size(x),
        np.sum(np.isnan(x)),
        np.sum(x == -np.inf),
        np.sum(np.logical_and(x < 0.0, x != -np.inf)),
        np.sum(x == 0.0),
        np.sum(np.logical_and(x > 0.0, x != np.inf)),
        np.sum(x == np.inf),
        x_min,
        x_max,
        x_mean,
        x_var,
        float(tf.as_dtype(x.dtype).as_datatype_enum),
        float(len(x.shape)),
    ] + list(x.shape))
Developer ID: jlewi, Project: tensorboard, Lines of code: 31, Source file: session_debug_test.py


Example 19: _build_graph

 def _build_graph(self, tf_graph, scope, model_dir):
   """Construct a TensorGraph containing the policy and loss calculations."""
   state_shape = self._env.state_shape
   state_dtype = self._env.state_dtype
   if not self._state_is_list:
     state_shape = [state_shape]
     state_dtype = [state_dtype]
   features = []
   for s, d in zip(state_shape, state_dtype):
     features.append(Feature(shape=[None] + list(s), dtype=tf.as_dtype(d)))
   policy_layers = self._policy.create_layers(features)
   action_prob = policy_layers['action_prob']
   value = policy_layers['value']
   search_prob = Label(shape=(None, self._env.n_actions))
   search_value = Label(shape=(None,))
   loss = MCTSLoss(
       self.value_weight,
       in_layers=[action_prob, value, search_prob, search_value])
   graph = TensorGraph(
       batch_size=self.max_search_depth,
       use_queue=False,
       graph=tf_graph,
       model_dir=model_dir)
   for f in features:
     graph._add_layer(f)
   graph.add_output(action_prob)
   graph.add_output(value)
   graph.set_loss(loss)
   graph.set_optimizer(self._optimizer)
   with graph._get_tf("Graph").as_default():
     with tf.variable_scope(scope):
       graph.build()
   if len(graph.rnn_initial_states) > 0:
     raise ValueError('MCTS does not support policies with recurrent layers')
   return graph, features, action_prob, value, search_prob, search_value
Developer ID: AhlamMD, Project: deepchem, Lines of code: 35, Source file: mcts.py


Example 20: testDtypeErrors

  def testDtypeErrors(self):
    def _TryMakingScalarSummary(dtype):
      base = dtype.base_dtype
      if base == tf.bool:
        v = False
      elif base == tf.string:
        v = ''
      elif base.is_complex:
        v = complex(0, 0)
      else:
        v = base.min
      c = tf.constant(v, dtype)
      return tf.summary.scalar('name', c)

    for datatype_enum in types_pb2.DataType.values():
      if datatype_enum == types_pb2.DT_INVALID:
        continue
      dtype = tf.as_dtype(datatype_enum)
      if dtype.is_quantized:
        # Quantized ops are funky, and not expected to work.
        continue
      if dtype.is_integer or dtype.is_floating:
        _TryMakingScalarSummary(dtype)
        # No exception should be thrown
      else:
        with self.assertRaises(ValueError):
          _TryMakingScalarSummary(dtype)
Developer ID: MrCrumpets, Project: tensorflow, Lines of code: 27, Source file: summary_test.py



Note: The tensorflow.as_dtype examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's license before redistributing or using the code. Do not reproduce this article without permission.

