
Python tensorflow.to_int64 Function Code Examples


This article collects typical usage examples of the tensorflow.to_int64 function in Python. If you have been struggling with questions such as what to_int64 does, how to call it, or what real-world usage looks like, the curated examples below should help.



Twenty code examples of the to_int64 function are shown below, ordered by popularity by default.
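Before the collected examples, here is a minimal self-contained sketch (mine, not drawn from any of the projects below) of what the function actually does: tf.to_int64(x) casts a tensor to tf.int64 and is the TF 1.x shorthand for tf.cast(x, tf.int64). In TensorFlow 2.x the shorthand was removed from the main namespace (it survives as tf.compat.v1.to_int64), so tf.cast is the modern replacement.

import tensorflow as tf  # TF 1.x assumed, matching the examples below

x = tf.constant([1.7, 2.3, 3.9])   # float32
y = tf.to_int64(x)                 # cast, truncating toward zero -> int64 [1, 2, 3]
y_equiv = tf.cast(x, tf.int64)     # equivalent spelling; the TF 2.x replacement

with tf.Session() as sess:
    print(sess.run(y))             # [1 2 3]
    print(sess.run(y_equiv))       # [1 2 3]

Note that several snippets below are quoted verbatim from projects written against pre-1.0 TensorFlow or Python 2, so they also use APIs and syntax that have since changed, e.g. tf.pack (now tf.stack), the old tf.concat(axis, values) argument order, tuple parameters, and dict.iteritems().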

Example 1: unpool_layer2x2

    def unpool_layer2x2(self, x, raveled_argmax, out_shape):
        argmax = self.unravel_argmax(raveled_argmax, tf.to_int64(out_shape))
        output = tf.zeros([out_shape[1], out_shape[2], out_shape[3]])

        height = tf.shape(output)[0]
        width = tf.shape(output)[1]
        channels = tf.shape(output)[2]

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [((width + 1) // 2) * ((height + 1) // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, (height + 1) // 2, (width + 1) // 2, 1])

        t2 = tf.squeeze(argmax)
        t2 = tf.pack((t2[0], t2[1]), axis=0)
        t2 = tf.transpose(t2, perm=[3, 1, 2, 0])

        t = tf.concat(3, [t2, t1])
        indices = tf.reshape(t, [((height + 1) // 2) * ((width + 1) // 2) * channels, 3])

        x1 = tf.squeeze(x)
        x1 = tf.reshape(x1, [-1, channels])
        x1 = tf.transpose(x1, perm=[1, 0])
        values = tf.reshape(x1, [-1])

        delta = tf.SparseTensor(indices, values, tf.to_int64(tf.shape(output)))
        return tf.expand_dims(tf.sparse_tensor_to_dense(tf.sparse_reorder(delta)), 0)
Author: BenJamesbabala, Project: Tensorflow-DeconvNet-Segmentation, Lines: 28, Source: DeconvNet.py
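In Example 1, tf.shape returns an int32 vector while tf.SparseTensor requires int64 indices and dense_shape, which is what the tf.to_int64 casts are for. A stripped-down sketch of the same pattern with toy values (the shapes and values are my assumptions, not taken from the project above):

import tensorflow as tf

output = tf.zeros([4, 4])
dense_shape = tf.to_int64(tf.shape(output))  # tf.shape is int32; SparseTensor wants int64
indices = tf.constant([[0, 0], [2, 3]], dtype=tf.int64)
values = tf.constant([1.0, 2.0])
delta = tf.SparseTensor(indices, values, dense_shape)
dense = tf.sparse_tensor_to_dense(tf.sparse_reorder(delta))

with tf.Session() as sess:
    print(sess.run(dense))  # 4x4 zeros except 1.0 at (0, 0) and 2.0 at (2, 3)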


Example 2: unpool_layer2x2_batch

    def unpool_layer2x2_batch(self, bottom, argmax):
        bottom_shape = tf.shape(bottom)
        top_shape = [bottom_shape[0], bottom_shape[1] * 2, bottom_shape[2] * 2, bottom_shape[3]]

        batch_size = top_shape[0]
        height = top_shape[1]
        width = top_shape[2]
        channels = top_shape[3]

        argmax_shape = tf.to_int64([batch_size, height, width, channels])
        argmax = self.unravel_argmax(argmax, argmax_shape)

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [batch_size * (width // 2) * (height // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, batch_size, height // 2, width // 2, 1])
        t1 = tf.transpose(t1, perm=[1, 0, 2, 3, 4])

        t2 = tf.to_int64(tf.range(batch_size))
        t2 = tf.tile(t2, [channels * (width // 2) * (height // 2)])
        t2 = tf.reshape(t2, [-1, batch_size])
        t2 = tf.transpose(t2, perm=[1, 0])
        t2 = tf.reshape(t2, [batch_size, channels, height // 2, width // 2, 1])

        t3 = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])

        t = tf.concat(4, [t2, t3, t1])
        indices = tf.reshape(t, [(height // 2) * (width // 2) * channels * batch_size, 4])

        x1 = tf.transpose(bottom, perm=[0, 3, 1, 2])
        values = tf.reshape(x1, [-1])
        return tf.scatter_nd(indices, values, tf.to_int64(top_shape))
Author: BenJamesbabala, Project: Tensorflow-DeconvNet-Segmentation, Lines: 33, Source: DeconvNetPipeline.py


Example 3: one_hot_matrix

def one_hot_matrix(tensor_in, num_classes, on_value=1.0, off_value=0.0):
    """Encodes indices from given tensor as one-hot tensor.

    TODO(ilblackdragon): Ideally implementation should be
    part of TensorFlow with Eigen-native operation.

    Args:
        tensor_in: Input tensor of shape [N1, N2].
        num_classes: Number of classes to expand index into.
        on_value: Tensor or float, value to fill-in given index.
        off_value: Tensor or float, value to fill-in everything else.
    Returns:
        Tensor of shape [N1, N2, num_classes] with 1.0 for each id in original
        tensor.
    """
    tensor_in = tf.convert_to_tensor(tensor_in)
    sparse_values = tf.to_int64(tf.reshape(tensor_in, [-1, 1]))
    size = tf.shape(sparse_values)[0]
    dims = tf.shape(tensor_in)
    indices = tf.to_int64(tf.reshape(tf.range(0, size), [-1, 1]))
    indices_values = tf.concat(1, [indices, sparse_values])
    outshape = tf.to_int64(expand_concat(0, [size, num_classes]))
    one_hot_vector = tf.sparse_to_dense(indices_values, outshape, on_value, off_value)
    ret = tf.reshape(one_hot_vector, tf.concat(0, [dims, [num_classes]]))
    ret.set_shape(tensor_in.get_shape().concatenate(num_classes))
    return ret
Author: twinklestar93, Project: skflow, Lines: 26, Source: array_ops.py
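As the TODO in the docstring anticipates, this encoding later became a native op. For comparison, a hedged sketch of the equivalent call with the built-in tf.one_hot (example values are mine, not from skflow):

import tensorflow as tf

tensor_in = tf.constant([[0, 2], [1, 0]])                          # shape [N1, N2]
ret = tf.one_hot(tensor_in, depth=3, on_value=1.0, off_value=0.0)  # shape [N1, N2, 3]

with tf.Session() as sess:
    print(sess.run(ret))  # one-hot rows for class ids 0, 2, 1, 0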


Example 4: _build_once

  def _build_once(self, dataset, feature_transformer):
    with tf.device(self._local_device):
      tr_batch = dataset()
      te_batch = dataset()
      num_classes = tr_batch.label_onehot.shape.as_list()[1]
      all_batch = utils.structure_map_multi(lambda x: tf.concat(x, 0),
                                            [tr_batch, te_batch])
      features = feature_transformer(all_batch)
      trX, teX = utils.structure_map_split(lambda x: tf.split(x, 2, axis=0),
                                           features)
      trY = tf.to_int64(tr_batch.label)
      trY_onehot = tf.to_int32(tr_batch.label_onehot)
      teY = tf.to_int64(te_batch.label)
      teY_shape = teY.shape.as_list()

      def blackbox((trX, trY, teX, teY)):
        trY = tf.to_int32(tf.rint(trY))
        teY = tf.to_int32(tf.rint(teY))
        tf_fn = build_fit(
            self._local_device,
            self._get_model,
            num_classes=num_classes,
            probs=self.probs)
        if self.probs:
          trP, teP, teP_probs = tf_fn(trX, trY, teX)
        else:
          trP, teP = tf_fn(trX, trY, teX)

        teY.set_shape(teY_shape)
        if self.probs:
          onehot = tf.one_hot(teY, num_classes)
          crossent = -tf.reduce_sum(onehot * teP_probs, [1])
          return tf.reduce_mean(crossent)
        else:
          # use error rate as the loss if no surrogate is available.
          return 1 - tf.reduce_mean(
              tf.to_float(tf.equal(teY, tf.to_int32(teP))))

      test_loss = blackbox((trX, tf.to_float(trY), teX, tf.to_float(teY)))

      stats = {}

      tf_fn = build_fit(
          self._local_device,
          self._get_model,
          num_classes=num_classes,
          probs=self.probs)
      if self.probs:
        trP, teP, teP_probs = tf_fn(trX, trY, teX)
      else:
        trP, teP = tf_fn(trX, trY, teX)
      stats["%s/accuracy_train" % self.name] = tf.reduce_mean(
          tf.to_float(tf.equal(tf.to_int32(trY), tf.to_int32(trP))))
      stats["%s/accuracy_test" % self.name] = tf.reduce_mean(
          tf.to_float(tf.equal(tf.to_int32(teY), tf.to_int32(teP))))
      stats["%s/test_loss" % self.name] = test_loss
      return test_loss, stats
Author: ALISCIFP, Project: models, Lines: 57, Source: sklearn.py


Example 5: preprocess_example

 def preprocess_example(self, example, mode, unused_hparams):
   # Just resize with area.
   if self._was_reversed:
     example["inputs"] = tf.to_int64(
         tf.image.resize_images(example["inputs"], [32, 32],
                                tf.image.ResizeMethod.AREA))
   else:
     example = imagenet_preprocess_example(example, mode)
     example["inputs"] = tf.to_int64(
         tf.image.resize_images(example["inputs"], [32, 32]))
   return example
Author: zeyu-h, Project: tensor2tensor, Lines: 11, Source: image.py


Example 6: build

    def build(self):
        print('Building model')
        self.x_embeddings = tf.Variable(
            tf.random_normal([self.alphabet_src_size, self.embedd_dims],
            stddev=0.1), name='x_embeddings')
        self.t_embeddings = tf.Variable(
            tf.random_normal([self.alphabet_tar_size, self.embedd_dims],
            stddev=0.1), name='t_embeddings')

        X_embedded = tf.gather(self.x_embeddings, self.Xs, name='embed_X')
        t_embedded = tf.gather(self.t_embeddings, self.ts_go, name='embed_t')

        with tf.variable_scope('dense_out'):
            W_out = tf.get_variable('W_out', [self.word_encoder_units*2, self.alphabet_tar_size])
            b_out = tf.get_variable('b_out', [self.alphabet_tar_size])

        # forward encoding
        char_enc_state, char_enc_out = encoder(X_embedded, self.X_len, 'char_encoder', self.char_encoder_units)
        char2word = _grid_gather(char_enc_out, self.X_spaces)
        char2word.set_shape([None, None, self.char_encoder_units])
        word_enc_state, word_enc_out = encoder(char2word, self.X_spaces_len, 'word_encoder', self.word_encoder_units)

        # backward encoding words
        char2word = tf.reverse_sequence(char2word, tf.to_int64(self.X_spaces_len), 1)
        char2word.set_shape([None, None, self.char_encoder_units])
        word_enc_state_bck, word_enc_out_bck = encoder(char2word, self.X_spaces_len, 'word_encoder_backwards', self.word_encoder_units)
        word_enc_out_bck = tf.reverse_sequence(word_enc_out_bck, tf.to_int64(self.X_spaces_len), 1)

        word_enc_state = tf.concat(1, [word_enc_state, word_enc_state_bck])
        word_enc_out = tf.concat(2, [word_enc_out, word_enc_out_bck])

        # decoding
        dec_state, dec_out, valid_dec_out, valid_attention_tracker = (
            attention_decoder(word_enc_out, self.X_spaces_len, word_enc_state,
                              t_embedded, self.t_len, self.attn_units,
                              self.t_embeddings, W_out, b_out))

        out_tensor = tf.reshape(dec_out, [-1, self.word_encoder_units*2])
        out_tensor = tf.matmul(out_tensor, W_out) + b_out
        out_shape = tf.concat(0, [tf.expand_dims(tf.shape(self.X_len)[0], 0),
                                  tf.expand_dims(tf.shape(t_embedded)[1], 0),
                                  tf.expand_dims(tf.constant(self.alphabet_tar_size), 0)])
        self.valid_attention_tracker = valid_attention_tracker.pack()
        self.out_tensor = tf.reshape(out_tensor, out_shape)
        self.out_tensor.set_shape([None, None, self.alphabet_tar_size])

        valid_out_tensor = tf.reshape(valid_dec_out, [-1, self.word_encoder_units*2])
        valid_out_tensor = tf.matmul(valid_out_tensor, W_out) + b_out
        self.valid_out_tensor = tf.reshape(valid_out_tensor, out_shape)

        self.out = None

        # add TensorBoard summaries for all variables
        tf.contrib.layers.summarize_variables()
Author: Styrke, Project: master-code, Lines: 54, Source: default.py


Example 7: preprocess_example

 def preprocess_example(self, example, mode, _):
   # Just resize with area.
   if self._was_reversed:
     example["inputs"] = tf.to_int64(
         tf.image.resize_images(example["inputs"], self.rescale_size,
                                tf.image.ResizeMethod.AREA))
   else:
     example = imagenet_preprocess_example(example, mode)
     example["inputs"] = tf.to_int64(
         tf.image.resize_images(example["inputs"], self.rescale_size))
   return example
Author: qixiuai, Project: tensor2tensor, Lines: 11, Source: imagenet.py


Example 8: model_single

def model_single(input_dims, output_dims, scale_frac, scales, nkNN):
    """
    Forms the knn model.

    Arguments:
    input_dims -- the dimension of the input data
    output_dims -- the number of classes
    scale_frac -- the fraction of events to use for finding widths
    scales -- list of distribution widths for each dimension
    nkNN -- the number of nearest neighbours to find

    Returns:
    A tensor with the number of neighbours in each class.
    """
    training = tf.placeholder(tf.float32, shape=(None, input_dims))
    one_hot = tf.placeholder(tf.float32, shape=(None, output_dims))
    test = tf.placeholder(tf.float32, shape=(1, input_dims))
    distances = metric_single(training, test, scale_frac, scales)

    remaining_training = tf.identity(training)
    remaining_one_hot = tf.identity(one_hot)
    remaining_distances = tf.identity(distances)

    for i in range(nkNN):
        # Gets the location of training entry currently closest to the test
        # entry.
        min_slice = tf.to_int64(tf.concat(0, [tf.argmin(remaining_distances, 0), [-1]]))

        # Cuts the nearest neighbour out of the training set.
        start = tf.slice(remaining_training, tf.to_int64([0, 0]), min_slice)
        end = tf.slice(remaining_training, min_slice + [1, 1], [-1, -1])
        remaining_training = tf.concat(0, [start, end])
        # Cuts the nearest neighbour out of the distances set.
        start = tf.slice(remaining_distances, tf.to_int64([0, 0]), min_slice)
        end = tf.slice(remaining_distances, min_slice + [1, 1], [-1, -1])
        remaining_distances = tf.concat(0, [start, end])

        # Cuts the nearest neighbour's class and records it.
        start = tf.slice(remaining_one_hot, tf.to_int64([0, 0]), min_slice)
        end = tf.slice(remaining_one_hot, min_slice + [1, 1], [-1, -1])
        class_slice = tf.slice(remaining_one_hot, min_slice + [0, 1], [1, -1])
        remaining_one_hot = tf.concat(0, [start, end])
        if i == 0:
            neighbour_one_hot = class_slice
        else:
            neighbour_one_hot = tf.concat(0, [neighbour_one_hot, class_slice])

    return training, one_hot, test, tf.reduce_sum(neighbour_one_hot, reduction_indices=0)
Author: AidanGG, Project: tensorflow_tmva, Lines: 48, Source: knn.py


Example 9: tensors_to_item

  def tensors_to_item(self, keys_to_tensors):
    """Maps the given dictionary of tensors to a concatenated list of bboxes.

    Args:
      keys_to_tensors: a mapping of TF-Example keys to parsed tensors.

    Returns:
      [time, num_boxes, 4] tensor of bounding box coordinates, in order
          [y_min, x_min, y_max, x_max]. Whether the tensor is a SparseTensor
          or a dense Tensor is determined by the return_dense parameter. Empty
          positions in the sparse tensor are filled with -1.0 values.
    """
    sides = []
    for key in self._full_keys:
      value = keys_to_tensors[key]
      expanded_dims = tf.concat(
          [tf.to_int64(tf.shape(value)),
           tf.constant([1], dtype=tf.int64)], 0)
      side = tf.sparse_reshape(value, expanded_dims)
      sides.append(side)
    bounding_boxes = tf.sparse_concat(2, sides)
    if self._return_dense:
      bounding_boxes = tf.sparse_tensor_to_dense(
          bounding_boxes, default_value=self._default_value)
    return bounding_boxes
Author: Exscotticus, Project: models, Lines: 25, Source: tf_sequence_example_decoder.py


Example 10: preprocess_example

 def preprocess_example(self, example, mode, unused_hparams):
   example["inputs"].set_shape([_CIFAR10_IMAGE_SIZE, _CIFAR10_IMAGE_SIZE, 3])
   example["inputs"] = tf.to_int64(example["inputs"])
   if mode == tf.estimator.ModeKeys.TRAIN:
     example["inputs"] = image_utils.random_shift(
         example["inputs"], wsr=0.1, hsr=0.1)
   return example
Author: qixiuai, Project: tensor2tensor, Lines: 7, Source: cifar.py


Example 11: mnist_training

def mnist_training(logits, labels, learning_rate):
    """Build the training graph.

    Args:
        logits: Logits tensor, float - [BATCH_SIZE, NUM_CLASSES].
        labels: Labels tensor, int32 - [BATCH_SIZE], with values in the
          range [0, NUM_CLASSES).
        learning_rate: The learning rate to use for gradient descent.
    Returns:
        train_op: The Op for training.
        loss: The Op for calculating loss.
    """
    # Create an operation that calculates loss.
    labels = tf.to_int64(labels)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits, labels, name='xentropy')
    loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    # Create the gradient descent optimizer with the given learning rate.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    # Create a variable to track the global step.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    # Use the optimizer to apply the gradients that minimize the loss
    # (and also increment the global step counter) as a single training step.
    train_op = optimizer.minimize(loss, global_step=global_step)

    # Uncomment the following line to see what we have constructed.
    # tf.train.write_graph(tf.get_default_graph().as_graph_def(),
    #                      "/tmp", "train.pbtxt", as_text=True)

    return train_op, loss
Author: ccortezb, Project: pipeline, Lines: 30, Source: mnist_hidden.py
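The tf.to_int64(labels) cast matters because tf.nn.sparse_softmax_cross_entropy_with_logits expects integer class ids (int32 or int64). A minimal runnable sketch with toy logits (the values are my assumptions; keyword arguments are used, as newer TF 1.x versions require):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 3.0, 0.4]])    # [BATCH_SIZE, NUM_CLASSES]
labels = tf.to_int64(tf.constant([0, 1]))  # class ids in [0, NUM_CLASSES)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    labels=labels, logits=logits, name='xentropy')
loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')

with tf.Session() as sess:
    print(sess.run(loss))  # mean cross-entropy over the batch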


Example 12: _compute_sparse_average_correct

def _compute_sparse_average_correct(input_, labels, per_example_weights, topk=1):
    """Returns the numerator and denominator of classifier accuracy."""
    labels = tf.to_int64(labels)
    labels.get_shape().assert_is_compatible_with([input_.get_shape()[0], None])
    if topk == 1:
        predictions = tf.reshape(tf.argmax(input_, 1), [-1, 1])
        in_topk = tf.reduce_any(tf.equal(labels, predictions), reduction_indices=[1])
    else:
        # Use broadcasting to check if ANY of the predictions are in the top k.
        # TODO(eiderman): For a multi-label top k, what does accuracy mean?
        predictions = tf.reshape(tf.nn.top_k(input_, topk)[1], [-1, 1, topk])
        labels = tf.expand_dims(labels, [-1])

        in_topk = tf.reduce_any(tf.equal(tf.cast(labels, predictions.dtype), predictions), reduction_indices=[1, 2])
    correct_predictions = tf.to_float(in_topk)

    # If individual examples are weighted, then we want to normalize by that.
    if per_example_weights is not None:
        per_example_weights = _convert_and_assert_per_example_weights_compatible(
            input_, per_example_weights, dtype=None
        )
        float_weights = tf.to_float(per_example_weights)
        # TODO(eiderman): This should use an op that doesn't support broadcasting.
        correct_predictions *= float_weights
        num_examples = tf.reduce_sum(float_weights)
    else:
        # shape only holds ints, but we want to always return the same type
        # for num_examples to make everything compatible.
        num_examples = tf.to_float(tf.gather(tf.shape(input_), 0))
    return tf.reduce_sum(correct_predictions), num_examples
Author: google, Project: prettytensor, Lines: 30, Source: pretty_tensor_loss_methods.py


Example 13: main

def main(args):
    # load the dataset
    dataset = mnist.get_split('test', FLAGS.data_dir)

    # load batch
    images, labels = load_batch(
        dataset,
        FLAGS.batch_size,
        is_training=False)

    # get the model prediction
    predictions = lenet(images)

    # convert prediction values for each class into single class prediction
    predictions = tf.to_int64(tf.argmax(predictions, 1))

    # streaming metrics to evaluate
    metrics_to_values, metrics_to_updates = metrics.aggregate_metric_map({
        'mse': metrics.streaming_mean_squared_error(predictions, labels),
        'accuracy': metrics.streaming_accuracy(predictions, labels),
    })

    # write the metrics as summaries
    for metric_name, metric_value in metrics_to_values.iteritems():
        tf.summary.scalar(metric_name, metric_value)

    # evaluate on the model saved at the checkpoint directory
    # evaluate every eval_interval_secs
    slim.evaluation.evaluation_loop(
        '',
        FLAGS.checkpoint_dir,
        FLAGS.log_dir,
        num_evals=FLAGS.num_evals,
        eval_op=metrics_to_updates.values(),
        eval_interval_secs=FLAGS.eval_interval_secs)
Author: maplewzx, Project: tf-slim-mnist, Lines: 35, Source: mnist_eval.py


Example 14: generate_single_output

def generate_single_output(encoder_state, attention_states, sequence_length, 
                           targets, num_classes, buckets, 
                           use_mean_attention=False,
                           softmax_loss_function=None, per_example_loss=False, 
                           name=None, use_attention=False):
  all_inputs = targets
  with tf.name_scope(name, "model_with_buckets", all_inputs):
    with tf.variable_scope(tf.get_variable_scope(),
                                       reuse=None):
      single_outputs = attention_single_output_decoder(encoder_state, 
                                                      attention_states, 
                                                      output_size=num_classes,
                                                      num_heads=1,
                                                      sequence_length=sequence_length,
                                                      use_attention=use_attention)
      _, _, _, bucket_outputs = single_outputs
        
      if softmax_loss_function is None:
        assert len(bucket_outputs) == len(targets) == 1
        # We need to make target and int64-tensor and set its shape.
        bucket_target = tf.reshape(tf.to_int64(targets[0]), [-1])
        crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=bucket_outputs[0], labels=bucket_target)
      else:
        assert len(bucket_outputs) == len(targets) == 1
        crossent = softmax_loss_function(bucket_outputs[0], targets[0])
       
      batch_size = tf.shape(targets[0])[0]
      loss = tf.reduce_sum(crossent) / tf.cast(batch_size, tf.float32)

  return bucket_outputs, loss
Author: HadoopIt, Project: rnn-nlu, Lines: 31, Source: seq_classification.py


Example 15: make_multiscale_dilated

def make_multiscale_dilated(image, resolutions, num_channels=3):
  """Returns list of scaled images, one for each resolution.

  Resizes by skipping every nth pixel.

  Args:
    image: Tensor of shape [height, height, num_channels].
    resolutions: List of heights that image's height is resized to. The function
      assumes VALID padding, so the original image's height must be divisible
      by each resolution's height to return the exact resolution size.
    num_channels: Number of channels in image.

  Returns:
    List of Tensors, one for each resolution with shape given by
    [resolutions[i], resolutions[i], num_channels] if resolutions properly
    divide the original image's height; otherwise shape height and width is up
    to valid skips.
  """
  image_height = common_layers.shape_list(image)[0]
  scaled_images = []
  for height in resolutions:
    dilation_rate = image_height // height  # assuming height = width
    scaled_image = image[::dilation_rate, ::dilation_rate]
    scaled_image = tf.to_int64(scaled_image)
    scaled_image.set_shape([None, None, num_channels])
    scaled_images.append(scaled_image)
  return scaled_images
Author: kltony, Project: tensor2tensor, Lines: 27, Source: image_utils.py


Example 16: _create_predictions

  def _create_predictions(self, decoder_output, features, labels, losses=None):
    """Creates the dictionary of predictions that is returned by the model.
    """
    predictions = {}

    # Add features and, if available, labels to predictions
    predictions.update(_flatten_dict({"features": features}))
    if labels is not None:
      predictions.update(_flatten_dict({"labels": labels}))

    if losses is not None:
      predictions["losses"] = _transpose_batch_time(losses)

    # Decoders returns output in time-major form [T, B, ...]
    # Here we transpose everything back to batch-major for the user
    output_dict = collections.OrderedDict(
        zip(decoder_output._fields, decoder_output))
    decoder_output_flat = _flatten_dict(output_dict)
    
    decoder_output_flat = {
        k: _transpose_batch_time(v)
        for k, v in decoder_output_flat.items()
    }
    predictions.update(decoder_output_flat)

    # If we predict the ids also map them back into the vocab and process them
    if "predicted_ids" in predictions.keys():
      vocab_tables = graph_utils.get_dict_from_collection("vocab_tables")
      target_id_to_vocab = vocab_tables["target_id_to_vocab"]
      predicted_tokens = target_id_to_vocab.lookup(
          tf.to_int64(predictions["predicted_ids"]))
      # Raw predicted tokens
      predictions["predicted_tokens"] = predicted_tokens

    return predictions
Author: clren, Project: conv_seq2seq, Lines: 35, Source: seq2seq_model.py


Example 17: ndlstm_base_dynamic

def ndlstm_base_dynamic(inputs, noutput, scope=None, reverse=False):
  """Run an LSTM, either forward or backward.

  This is a 1D LSTM implementation using dynamic_rnn and
  the TensorFlow LSTM op.

  Args:
    inputs: input sequence (length, batch_size, ninput)
    noutput: depth of output
    scope: optional scope name
    reverse: run LSTM in reverse

  Returns:
    Output sequence (length, batch_size, noutput)
  """
  with tf.variable_scope(scope, "SeqLstm", [inputs]):
    # TODO(tmb) make batch size, sequence_length dynamic
    # example: sequence_length = tf.shape(inputs)[0]
    _, batch_size, _ = _shape(inputs)
    lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(noutput, state_is_tuple=False)
    state = tf.zeros([batch_size, lstm_cell.state_size])
    sequence_length = int(inputs.get_shape()[0])
    sequence_lengths = tf.to_int64(tf.fill([batch_size], sequence_length))
    if reverse:
      inputs = tf.reverse(inputs, [True, False, False])
    outputs, _ = tf.nn.dynamic_rnn(lstm_cell,
                                   inputs,
                                   sequence_lengths,
                                   state,
                                   time_major=True)
    if reverse:
      outputs = tf.reverse(outputs, [True, False, False])
    return outputs
Author: brchiu, Project: tensorflow, Lines: 33, Source: lstm1d.py


Example 18: generate_top_k_scores_and_ids

def generate_top_k_scores_and_ids(logits, top_k):
  """This function computes top K ids and scores from logits tensor.

  Args:
    logits: logit tensor computed in the serving graph.
    top_k: number of top K elements to rank.

  Returns:
    predictions: scores of top K items.
    output_alternatives: ids of the top K items.
  """

  probabilities = tf.nn.softmax(
      logits, name=tf.contrib.learn.PredictionKey.PROBABILITIES)
  top_k_scores, top_k_ids = tf.nn.top_k(
      input=probabilities, k=top_k)
  top_k_ids = tf.contrib.lookup.index_to_string(
      tf.to_int64(top_k_ids),
      mapping=tf.constant([str(i) for i in xrange(MOVIE_VOCAB_SIZE)]))
  predictions = {
      # served as "scores" by Servo in the ClassificationResult
      tf.contrib.learn.PredictionKey.PROBABILITIES:
          top_k_scores,
      # served as "classes" by Servo in the ClassificationResult
      tf.contrib.learn.PredictionKey.CLASSES:
          top_k_ids
  }
  output_alternatives = {DEFAULT_OUTPUT_ALTERNATIVE: (
      tf.contrib.learn.ProblemType.CLASSIFICATION,
      predictions)}
  return predictions, output_alternatives
Author: cottrell, Project: notebooks, Lines: 31, Source: task.py


Example 19: resize_video_frames

def resize_video_frames(images, size):
  resized_images = []
  for image in images:
    resized_images.append(
        tf.to_int64(tf.image.resize_images(
            image, [size, size], tf.image.ResizeMethod.BILINEAR)))
  return resized_images
Author: kltony, Project: tensor2tensor, Lines: 7, Source: video_utils.py


Example 20: key_func

 def key_func(src, tgt):
     src_len = src[-1]
     tgt_len = tgt[-1]
     num_buckets = 4
     bucket_width = 5
     bucket_id = tf.maximum(src_len // bucket_width, tgt_len // bucket_width)
     return tf.to_int64(tf.minimum(num_buckets, bucket_id))
Author: piBrain, Project: aura-ml, Lines: 7, Source: driver.py
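The int64 return type here is not incidental: a key_func like this is meant for tf.data's group_by_window bucketing transform, which requires an int64 key. A sketch of the typical wiring (the dataset contents and window size are my assumptions; tf.data.experimental.group_by_window is the TF >= 1.13 spelling, earlier versions carried it under tf.contrib.data):

import tensorflow as tf

def key_func(src_len, tgt_len):
    # Same rule as above: buckets of width 5, with everything longer
    # than the last boundary collapsed into bucket 4.
    bucket_id = tf.maximum(src_len // 5, tgt_len // 5)
    return tf.to_int64(tf.minimum(4, bucket_id))  # group_by_window needs an int64 key

def reduce_func(unused_key, window):
    return window.batch(8)  # batch together elements that share a bucket

src_lens = tf.constant([3, 7, 12, 4])
tgt_lens = tf.constant([4, 9, 11, 2])
dataset = tf.data.Dataset.from_tensor_slices((src_lens, tgt_lens))
dataset = dataset.apply(
    tf.data.experimental.group_by_window(key_func, reduce_func, window_size=8))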



Note: the tensorflow.to_int64 examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects and the copyright of each belongs to its original author; consult the corresponding project's license before using or redistributing the code. Please do not reproduce this compilation without permission.

