Python tensorflow.round Function Code Examples


This article collects typical usage examples of the tensorflow.round function in Python. If you have been wondering what exactly tf.round does, how to use it, or what real code that calls it looks like, the curated examples below should help.



Twenty code examples of the round function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
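Before diving into the examples, here is a minimal, self-contained sketch of tf.round itself (written against the TF 2.x eager API; under TF 1.x you would evaluate the tensors in a session). Note that tf.round rounds halves to even (banker's rounding), which occasionally surprises people expecting 0.5 to round up:

import tensorflow as tf

# tf.round maps each element to the nearest integer; exact halves go to the
# nearest even integer: 0.5 -> 0.0, 1.5 -> 2.0, 2.5 -> 2.0.
x = tf.constant([0.5, 1.5, 2.5, -0.7, 2.3])
print(tf.round(x))  # [0. 2. 2. -1. 2.]

# The result keeps the floating dtype; cast explicitly when integer indices
# are needed, as many of the examples below do.
idx = tf.cast(tf.round(x), tf.int32)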

Example 1: compute_center_coords

    def compute_center_coords(self, y_true, y_pred):
        batch_size = tf.shape(y_pred)[0]
        h = tf.shape(y_pred)[1]
        w = tf.shape(y_pred)[2]
        n_chans = tf.shape(y_pred)[3]
        n_dims = 5

        # weighted center of mass
        x = tf.cast(tf.tile(tf.reshape(self.xs, [1, h, w]), [batch_size, 1, 1]), tf.float32)
        y = tf.cast(tf.tile(tf.reshape(self.ys, [1, h, w]), [batch_size, 1, 1]), tf.float32)

        eps = 1e-8
        # grayscale
        pred_gray = tf.reduce_mean(y_pred, axis=-1)  # should be batch_size x h x w
        # normalize
        pred_gray = pred_gray - tf.reduce_min(pred_gray, axis=[1, 2], keepdims=True)
        pred_gray = pred_gray / (eps + tf.reduce_max(pred_gray, axis=[1, 2], keepdims=True))
        pred_gray = tf.clip_by_value(pred_gray, 0., 1.)

        # make each of these (batch_size, 1)
        weighted_x = tf.round(tf.expand_dims(
            tf.reduce_sum(x * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        weighted_y = tf.round(tf.expand_dims(
            tf.reduce_sum(y * pred_gray, axis=[1, 2]) / (eps + tf.reduce_sum(pred_gray, axis=[1, 2])), axis=-1))
        batch_indices = tf.reshape(tf.linspace(0., tf.cast(batch_size, tf.float32) - 1., batch_size), [batch_size, 1])
        indices = tf.cast(tf.concat([batch_indices, weighted_y, weighted_x], axis=-1), tf.int32)
        #center_rgb = transform_network_utils.interpolate([y_true,  weighted_x, weighted_y], constant_vals=1.)
        center_rgb = tf.gather_nd(y_true, indices)
        center_rgb = tf.reshape(center_rgb, [batch_size, n_chans])

        center_point_xyrgb = tf.concat([
                        weighted_x, weighted_y, center_rgb
                    ], axis=-1)

        return pred_gray, center_point_xyrgb
Author: xamyzhao | Project: evolving_wilds | Lines of code: 35 | Source file: metrics.py
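The "weighted center of mass" above is a soft-argmax: each pixel coordinate is weighted by normalized image intensity, giving the brightness-weighted mean position, which tf.round then snaps to an integer pixel index for gather_nd. A hedged 1-D illustration of the same arithmetic in NumPy:

import numpy as np

intensity = np.array([0.0, 0.1, 0.8, 0.1])  # brightest near index 2
coords = np.arange(4, dtype=np.float32)
# Weighted mean position, then rounded to the nearest pixel index.
center = np.sum(coords * intensity) / (1e-8 + np.sum(intensity))
print(round(center))  # 2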


Example 2: _smallest_size_at_least

def _smallest_size_at_least(height, width, target_height, target_width):
    """Computes new shape with the smallest side equal to `smallest_side`.

    Computes new shape with the smallest side equal to `smallest_side` while
    preserving the original aspect ratio.

    Args:
      height: an int32 scalar tensor indicating the current height.
      width: an int32 scalar tensor indicating the current width.
      smallest_side: A python integer or scalar `Tensor` indicating the size of
        the smallest side after resize.

    Returns:
      new_height: an int32 scalar tensor indicating the new height.
      new_width: and int32 scalar tensor indicating the new width.
    """
    target_height = tf.convert_to_tensor(target_height, dtype=tf.int32)
    target_width = tf.convert_to_tensor(target_width, dtype=tf.int32)

    height = tf.to_float(height)
    width = tf.to_float(width)
    target_height = tf.to_float(target_height)
    target_width = tf.to_float(target_width)

    scale = tf.cond(tf.greater(target_height / height, target_width / width),
                    lambda: target_height / height,
                    lambda: target_width / width)
    new_height = tf.to_int32(tf.round(height * scale))
    new_width = tf.to_int32(tf.round(width * scale))
    return new_height, new_width
Author: Lagogoy | Project: Deep-Learning-21-Examples | Lines of code: 30 | Source file: vgg_preprocessing.py
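As a sanity check on the arithmetic, here is a hedged plain-Python sketch of what the helper computes: the scale is the larger of the two target/current ratios, so after resizing the image covers the target size in both dimensions while keeping its aspect ratio.

def smallest_size_at_least(height, width, target_height, target_width):
    # Pick the larger ratio so both sides end up at least the target size.
    scale = max(target_height / height, target_width / width)
    return round(height * scale), round(width * scale)

# e.g. a 480x640 image resized to cover 224x224:
print(smallest_size_at_least(480, 640, 224, 224))  # (224, 299)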


Example 3: make_tf_top

def make_tf_top(x_shape, loss='sigmoid_ce'):
  """
    builds the top layer, i.e. the loss layer. 
  """
  with tf.name_scope('top') as scope:
    x = tf.placeholder(tf.float32, shape=x_shape, name='input')
    y = tf.placeholder(tf.float32, shape=x_shape, name='output')

    if loss=='sigmoid_ce':
      L = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=x, labels=y))  # keyword args required since TF 1.0
      correct_prediction = tf.equal(tf.round( tf.sigmoid(x) ), tf.round( y ))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
      accuracy_summary = [tf.summary.scalar('accuracy', accuracy)]
    elif loss=='softmax_ce':
      L = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=x, labels=y))  # keyword args required since TF 1.0
      correct_prediction = tf.equal(tf.argmax( tf.nn.softmax(x), 1 ), tf.argmax( y, 1 ))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
      accuracy_summary = [tf.summary.scalar('accuracy', accuracy)]
    elif loss=='sigmoid_l2':
      L = tf.nn.l2_loss(tf.sigmoid(x) - y)
      accuracy = None
      accuracy_summary = []
    elif loss=='l2':
      L = tf.nn.l2_loss(x - y)
      accuracy = None
      accuracy_summary = []

    loss_summary = tf.summary.scalar('log_loss', tf.log(L))
    dx = tf.gradients(L, x)[0]

    return L, dx, tf.summary.merge([loss_summary] + accuracy_summary), accuracy
Author: jsseely | Project: tensorflow-target-prop | Lines of code: 31 | Source file: tprop_train_stable.py


Example 4: get_exemplar_images

def get_exemplar_images(images, exemplar_size, targets_pos=None):
  """Crop exemplar image from input images"""
  with tf.name_scope('get_exemplar_image'):
    batch_size, x_height, x_width = images.get_shape().as_list()[:3]
    z_height, z_width = exemplar_size

    if targets_pos is None:
      target_pos_single = [[get_center(x_height), get_center(x_width)]]
      targets_pos_ = tf.tile(target_pos_single, [batch_size, 1])
    else:
      targets_pos_ = targets_pos

    # convert to top-left corner based coordinates
    top = tf.to_int32(tf.round(targets_pos_[:, 0] - get_center(z_height)))
    bottom = tf.to_int32(top + z_height)
    left = tf.to_int32(tf.round(targets_pos_[:, 1] - get_center(z_width)))
    right = tf.to_int32(left + z_width)

    def _slice(x):
      f, t, l, b, r = x
      c = f[t:b, l:r]
      return c

    exemplar_img = tf.map_fn(_slice, (images, top, left, bottom, right), dtype=images.dtype)
    exemplar_img.set_shape([batch_size, z_height, z_width, 3])
    return exemplar_img
Author: fossabot | Project: SiamFC-TensorFlow | Lines of code: 26 | Source file: infer_utils.py


Example 5: blend_images

def blend_images(data_folder1, data_folder2, out_folder, alpha=.5):
    filename_queue = tf.placeholder(dtype=tf.string)
    label = tf.placeholder(dtype=tf.int32)
    tensor_image = tf.read_file(filename_queue)

    image = tf.image.decode_jpeg(tensor_image, channels=3)

    multiplier = tf.div(tf.constant(224, tf.float32),
                        tf.cast(tf.maximum(tf.shape(image)[0], tf.shape(image)[1]), tf.float32))
    x = tf.cast(tf.round(tf.multiply(tf.cast(tf.shape(image)[0], tf.float32), multiplier)), tf.int32)  # tf.mul was removed in TF 1.0
    y = tf.cast(tf.round(tf.multiply(tf.cast(tf.shape(image)[1], tf.float32), multiplier)), tf.int32)
    image = tf.image.resize_images(image, [x, y])

    image = tf.image.rot90(image, k=label)

    image = tf.image.resize_image_with_crop_or_pad(image, 224, 224)
    sess = tf.Session()
    sess.run(tf.local_variables_initializer())
    for root, folders, files in os.walk(data_folder1):
        for each in files:
            if each.find('.jpg') >= 0:
                img1 = Image.open(os.path.join(root, each))
                img2_path = os.path.join(root.replace(data_folder1, data_folder2), each.split("-")[-1])
                rotation = int(each.split("-")[1])
                img2 = sess.run(image, feed_dict={filename_queue: img2_path, label: rotation})
                imsave(os.path.join(os.getcwd(), "temp", "temp.jpg"), img2)
                img2 = Image.open(os.path.join(os.getcwd(), "temp", "temp.jpg"))
                out_image = Image.blend(img1, img2, alpha)
                outfile = os.path.join(root.replace(data_folder1, out_folder), each)
                if not os.path.exists(os.path.split(outfile)[0]):
                    os.makedirs(os.path.split(outfile)[0])
                out_image.save(outfile)
            else:
                print(each)
    sess.close()
Author: Sabrewarrior | Project: PhotoOrientation | Lines of code: 35 | Source file: misc.py


Example 6: get_adv_loss

def get_adv_loss(logits_r, logits_f, labels_r, labels_f):
    dis_r_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_r, labels=tf.cast(labels_r, tf.float32)))
    dis_f_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_f, labels=tf.cast(labels_f, tf.float32)))

    dis_loss = dis_r_loss + dis_f_loss

    dis_r_acc = slim.metrics.accuracy(tf.cast(tf.round(tf.nn.sigmoid(logits_r)), tf.int32), tf.round(labels_r))
    dis_f_acc = slim.metrics.accuracy(tf.cast(tf.round(tf.nn.sigmoid(logits_f)), tf.int32), tf.round(labels_f))

    dis_acc = (dis_r_acc + dis_f_acc) / 2

    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_f, labels=tf.cast(labels_r, tf.float32)))

    return dis_loss, gen_loss, dis_acc
Author: seindlut | Project: deep_p2s | Lines of code: 14 | Source file: build_subnet.py


Example 7: sample_img

def sample_img(img, n_samples):
    sx = tf.random_uniform((n_samples,), 0, 1) * 27
    sy = tf.random_uniform((n_samples,), 0, 1) * 27
    sx_lower = tf.cast(tf.floor(sx), tf.int32)
    sx_upper = tf.cast(tf.ceil(sx), tf.int32)

    sy_lower = tf.cast(tf.floor(sy), tf.int32)
    sy_upper = tf.cast(tf.ceil(sy), tf.int32)

    sx_nearest = tf.cast(tf.round(sx), tf.int32)
    sy_nearest = tf.cast(tf.round(sy), tf.int32)
    inds = tf.stack([sx_nearest, sy_nearest])  # tf.pack was renamed to tf.stack in TF 1.0 (inds is unused below)
    samples = tf.gather(tf.reshape(img, (-1,)), sx_nearest + sy_nearest*28)
    return sx/27, sy/27, samples
Author: lukemetz | Project: cppn | Lines of code: 14 | Source file: model.py


Example 8: __call__

  def __call__(self, img):
    scale = 1.0 + tf.random_uniform([], -self.max_stretch, self.max_stretch)
    img_shape = tf.shape(img)
    ts = tf.to_int32(tf.round(tf.to_float(img_shape[:2]) * scale))
    resize_method_map = {'bilinear': tf.image.ResizeMethod.BILINEAR,
                         'bicubic': tf.image.ResizeMethod.BICUBIC}
    return tf.image.resize_images(img, ts, method=resize_method_map[self.interpolation])
Author: fossabot | Project: SiamFC-TensorFlow | Lines of code: 7 | Source file: transforms.py


Example 9: get_scheduled_sample_inputs

  def get_scheduled_sample_inputs(
      self, done_warm_start, groundtruth_items, generated_items, batch_size):

    with tf.variable_scope("scheduled_sampling", reuse=tf.AUTO_REUSE):
      if self.hparams.mode != tf.estimator.ModeKeys.TRAIN:
        feedself = True
      else:
        # Scheduled sampling:
        # Calculate number of ground-truth frames to pass in.
        feedself = False
        iter_num = tf.train.get_global_step()
        # TODO(mbz): what should it be if it's undefined?
        if iter_num is None:
          iter_num = _LARGE_STEP_NUMBER
        k = self.hparams.scheduled_sampling_k
        num_ground_truth = tf.to_int32(
            tf.round(
                tf.to_float(batch_size) *
                (k / (k + tf.exp(tf.to_float(iter_num) / tf.to_float(k))))))

      if feedself and done_warm_start:
        # Feed in generated stuff.
        output_items = generated_items
      elif done_warm_start:
        output_items = []
        for item_gt, item_gen in zip(groundtruth_items, generated_items):
          # Scheduled sampling
          output_items.append(self.scheduled_sample(
              item_gt, item_gen, batch_size, num_ground_truth))
      else:
        # Feed in ground_truth
        output_items = groundtruth_items

      return output_items
Author: kltony | Project: tensor2tensor | Lines of code: 34 | Source file: next_frame.py
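The expression k / (k + exp(iter_num / k)) is an inverse-sigmoid decay: early in training nearly the whole batch is ground truth, and the ground-truth fraction decays smoothly toward zero as iter_num grows. A hedged plain-Python sketch (the values of k and batch_size here are made up for illustration):

import math

def num_ground_truth(batch_size, iter_num, k=900.0):
    # Fraction of ground-truth frames fed back into the model at this step.
    frac = k / (k + math.exp(iter_num / k))
    return int(round(batch_size * frac))

for step in (0, 1000, 5000, 10000):
    print(step, num_ground_truth(batch_size=32, iter_num=step))
# Counts decay from ~32 at step 0 toward 0 at large step numbers.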


Example 10: __init__

    def __init__(self, args):
        with tf.device(args.device):
            def circle(x):
                spherenet = tf.square(x)
                spherenet = tf.reduce_sum(spherenet, 1)
                lam = tf.sqrt(spherenet)
                return x/tf.reshape(lam,[int(lam.get_shape()[0]), 1])

            def modes(x):
                shape = x.get_shape()
                return tf.round(x * 2) / 2.0  # + tf.random_normal(shape, 0, 0.04)

            if args.distribution == 'circle':
                x = tf.random_normal([args.batch_size, 2])
                x = circle(x)
            elif args.distribution == 'modes':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                x = modes(x)
            elif args.distribution == 'modal-gaussian':
                x = tf.random_uniform([args.batch_size, 2], -1, 1)
                y = tf.random_normal([args.batch_size, 2], stddev=0.04, mean=0.15)
                x = tf.round(x) + y
            elif args.distribution == 'sin':
                x = tf.random_uniform((1, args.batch_size), -10.5, 10.5 )
                x = tf.transpose(x)
                r_data = tf.random_normal((args.batch_size,1), mean=0, stddev=0.1)
                xy = tf.sin(0.75*x)*7.0+x*0.5+r_data*1.0
                x = tf.concat([xy,x], 1)/16.0

            elif args.distribution == 'static-point':
                x = tf.ones([args.batch_size, 2])

            self.x = x
            self.xy = tf.zeros_like(self.x)
Author: 255BITS | Project: hyperchamber-gan | Lines of code: 34 | Source file: 2d-distribution.py


Example 11: toMnistCoordinates_tf

def toMnistCoordinates_tf(coordinate_tanhed, img_size):
    '''
    Transform a coordinate in [-1, 1] to MNIST pixel coordinates.
    :param coordinate_tanhed: vector in [-1, 1] x [-1, 1]
    :param img_size: side length of the (square) MNIST image in pixels
    :return: vector in the corresponding MNIST coordinates
    '''
    return tf.round(((coordinate_tanhed + 1) / 2.0) * img_size)
Author: jlindsey15 | Project: mathCognition_RAM | Lines of code: 7 | Source file: ram_touch.py
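For intuition: (coordinate + 1) / 2 rescales [-1, 1] to [0, 1], and multiplying by img_size yields pixel coordinates, so -1 maps to 0, 0 to the image center, and 1 to img_size (one past the last 0-based index, so callers may still need to clip). A quick numeric check in plain Python:

for c in (-1.0, 0.0, 0.5, 1.0):
    print(c, round(((c + 1) / 2.0) * 28))
# -1.0 -> 0, 0.0 -> 14, 0.5 -> 21, 1.0 -> 28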


Example 12: build_fcn_net

    def build_fcn_net(self, inp, use_dice=False):
        bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1', training=True)
        dnn1 = tf.layers.dense(
            bn1, 200, activation=None, kernel_initializer=get_tf_initializer(), name='f1')
        if use_dice:
            dnn1 = dice(dnn1, name='dice_1')
        else:
            dnn1 = prelu(dnn1)
        dnn2 = tf.layers.dense(
            dnn1, 80, activation=None, kernel_initializer=get_tf_initializer(), name='f2')
        if use_dice:
            dnn2 = dice(dnn2, name='dice_2')
        else:
            dnn2 = prelu(dnn2)
        dnn3 = tf.layers.dense(
            dnn2, 2, activation=None, kernel_initializer=get_tf_initializer(), name='f3')
        self.y_hat = tf.nn.softmax(dnn3) + 0.00000001

        with tf.name_scope('Metrics'):
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.tensors.target)
            self.loss = ctr_loss
            if self.use_negsampling:
                self.loss += self.aux_loss
            else:
                self.aux_loss = tf.constant(0.0)

            # Accuracy metric
            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.round(self.y_hat), self.tensors.target), tf.float32))
Author: q64545 | Project: x-deeplearning | Lines of code: 30 | Source file: model.py


Example 13: build_fcn_net

    def build_fcn_net(self,inp,use_dice=False):
        bn1 = tf.layers.batch_normalization(inputs=inp,name='bn1')
        dnn1 = tf.layers.dense(bn1,200,activation=None,name='f1')

        if use_dice:
            dnn1 = dice(dnn1,name='dice_1')
        else:
            dnn1 = prelu(dnn1,'prelu1')

        dnn2 = tf.layers.dense(dnn1,80,activation=None,name='f2')
        if use_dice:
            dnn2 = dice(dnn2,name='dice_2')
        else:
            dnn2 = prelu(dnn2,name='prelu2')

        dnn3 = tf.layers.dense(dnn2,2,activation=None,name='f3')
        self.y_hat = tf.nn.softmax(dnn3) + 0.00000001

        with tf.name_scope('Metrics'):
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
            self.loss = ctr_loss
            if self.use_negsampling:
                self.loss += self.aux_loss
            tf.summary.scalar('loss',self.loss)
            self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)

            self.accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.round(self.y_hat),self.target_ph),tf.float32))
            tf.summary.scalar('accuracy',self.accuracy)

        self.merged = tf.summary.merge_all()
Author: zeroToAll | Project: tensorflow_practice | Lines of code: 30 | Source file: model.py


Example 14: almost_equal

def almost_equal(a, b):
    """
    :param a: tensor :param b: tensor
    :returns equivalent to numpy: a == b, if a and b were ndarrays
    """
    not_almost_equal = tf.abs(tf.sign(tf.round(a - b)))
    return tf.abs(not_almost_equal - 1)
Author: lobachevzky | Project: movies | Lines of code: 7 | Source file: ops.py
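Why this works: round(a - b) is 0 whenever |a - b| <= 0.5 (halves round to even), so its sign is 0 for near-equal pairs and +/-1 otherwise; abs(... - 1) flips that into a 1/0 indicator. A hedged usage sketch:

import tensorflow as tf

a = tf.constant([1.0, 2.0, 3.4])
b = tf.constant([1.2, 2.9, 3.0])
# |a - b| = [0.2, 0.9, 0.4]; differences of at most 0.5 count as "equal".
print(almost_equal(a, b))  # [1. 0. 1.]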


Example 15: calc_smape_rounded

def calc_smape_rounded(true, predicted, weights):
    """
    Calculates SMAPE on rounded submission values. Should be close to official SMAPE in competition
    :param true:
    :param predicted:
    :param weights: Weights mask to exclude some values
    :return:
    """
    n_valid = tf.reduce_sum(weights)
    true_o = tf.round(tf.expm1(true))
    pred_o = tf.maximum(tf.round(tf.expm1(predicted)), 0.0)
    summ = tf.abs(true_o) + tf.abs(pred_o)
    zeros = summ < 0.01
    raw_smape = tf.abs(pred_o - true_o) / summ * 2.0
    smape = tf.where(zeros, tf.zeros_like(summ, dtype=tf.float32), raw_smape)
    return tf.reduce_sum(smape * weights) / n_valid
Author: JXieHao | Project: kaggle-web-traffic | Lines of code: 16 | Source file: model.py
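For reference, the metric per point is 2 * |pred - true| / (|true| + |pred|), with the term forced to 0 when both rounded values are 0 (the summ < 0.01 guard avoids 0/0); expm1 first undoes the log1p transform the model was trained in. A hedged NumPy equivalent:

import numpy as np

def smape_rounded(true_log1p, pred_log1p, weights):
    # Undo log1p, then round to integer counts as the competition scoring does.
    t = np.round(np.expm1(true_log1p))
    p = np.maximum(np.round(np.expm1(pred_log1p)), 0.0)
    summ = np.abs(t) + np.abs(p)
    # Rounded values are integers, so summ < 0.01 means both are exactly 0;
    # the max() guard only prevents a spurious divide-by-zero warning.
    smape = np.where(summ < 0.01, 0.0, 2.0 * np.abs(p - t) / np.maximum(summ, 0.01))
    return np.sum(smape * weights) / np.sum(weights)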


Example 16: _ProcessSingleScale

  def _ProcessSingleScale(scale_index,
                          boxes,
                          features,
                          scales,
                          scores,
                          reuse=True):
    """Resize the image and run feature extraction and keypoint selection.

       This function will be passed into tf.while_loop() and be called
       repeatedly. The input boxes are collected from the previous iteration
       [0: scale_index -1]. We get the current scale by
       image_scales[scale_index], and run image resizing, feature extraction and
       keypoint selection. Then we will get a new set of selected_boxes for
       current scale. In the end, we concat the previous boxes with current
       selected_boxes as the output.

    Args:
      scale_index: A valid index in the image_scales.
      boxes: Box tensor with the shape of [N, 4].
      features: Feature tensor with the shape of [N, depth].
      scales: Scale tensor with the shape of [N].
      scores: Attention score tensor with the shape of [N].
      reuse: Whether or not the layer and its variables should be reused.

    Returns:
      scale_index: The next scale index for processing.
      boxes: Concatenated box tensor with the shape of [K, 4]. K >= N.
      features: Concatenated feature tensor with the shape of [K, depth].
      scales: Concatenated scale tensor with the shape of [K].
      scores: Concatenated attention score tensor with the shape of [K].
    """
    scale = tf.gather(image_scales, scale_index)
    new_image_size = tf.to_int32(tf.round(original_image_shape_float * scale))
    resized_image = tf.image.resize_bilinear(image_tensor, new_image_size)

    attention, feature_map = model_fn(
        resized_image, normalized_image=True, reuse=reuse)

    rf_boxes = CalculateReceptiveBoxes(
        tf.shape(feature_map)[1],
        tf.shape(feature_map)[2], rf, stride, padding)
    # Re-project back to the original image space.
    rf_boxes = tf.divide(rf_boxes, scale)
    attention = tf.reshape(attention, [-1])
    feature_map = tf.reshape(feature_map, [-1, feature_depth])

    # Use attention score to select feature vectors.
    indices = tf.reshape(tf.where(attention >= abs_thres), [-1])
    selected_boxes = tf.gather(rf_boxes, indices)
    selected_features = tf.gather(feature_map, indices)
    selected_scores = tf.gather(attention, indices)
    selected_scales = tf.ones_like(selected_scores, tf.float32) / scale

    # Concat with the previous result from different scales.
    boxes = tf.concat([boxes, selected_boxes], 0)
    features = tf.concat([features, selected_features], 0)
    scales = tf.concat([scales, selected_scales], 0)
    scores = tf.concat([scores, selected_scores], 0)

    return scale_index + 1, boxes, features, scales, scores
Author: 812864539 | Project: models | Lines of code: 60 | Source file: feature_extractor.py


Example 17: build_graph

    def build_graph(self, nn_im_w, nn_im_h, num_colour_channels=3, weights=None, biases=None):
        num_outputs = 1  # a single binary output
        self.nn_im_w = nn_im_w
        self.nn_im_h = nn_im_h

        if weights is None:
            weights = [None, None, None, None, None]
        if biases is None:
            biases = [None, None, None, None, None]

        with tf.device('/cpu:0'):
            # Placeholder variables for the input image and output images
            self.x = tf.placeholder(tf.float32, shape=[None, nn_im_w*nn_im_h*3])
            self.y_ = tf.placeholder(tf.float32, shape=[None, num_outputs])
            self.threshold = tf.placeholder(tf.float32)

            # Build the convolutional and pooling layers
            conv1_output_channels = 32
            conv2_output_channels = 16
            conv3_output_channels = 8

            conv_layer_1_input = tf.reshape(self.x, [-1, nn_im_h, nn_im_w, num_colour_channels]) #The resized input image
            self.build_conv_layer(conv_layer_1_input, num_colour_channels, conv1_output_channels, initial_weights=weights[0], initial_biases=biases[0]) # layer 1
            self.build_conv_layer(self.layers[0][0], conv1_output_channels, conv2_output_channels, initial_weights=weights[1], initial_biases=biases[1])# layer 2
            self.build_conv_layer(self.layers[1][0], conv2_output_channels, conv3_output_channels, initial_weights=weights[2], initial_biases=biases[2])# layer 3

            # Build the fully connected layer
            convnet_output_w = nn_im_w//8
            convnet_output_h = nn_im_h//8

            fully_connected_layer_input = tf.reshape(self.layers[2][0], [-1, convnet_output_w * convnet_output_h * conv3_output_channels])
            self.build_fully_connected_layer(fully_connected_layer_input, convnet_output_w, convnet_output_h, conv3_output_channels, initial_weights=weights[3], initial_biases=biases[3])

            # The dropout stage and readout layer
            self.keep_prob, self.h_drop = self.dropout(self.layers[3][0])
            self.y_conv,_,_ = self.build_readout_layer(self.h_drop, num_outputs, initial_weights=weights[4], initial_biases=biases[4])

            self.mean_error =  tf.sqrt(tf.reduce_mean(tf.square(self.y_ - self.y_conv)))
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.mean_error)

            self.accuracy = (1.0 - tf.reduce_mean(tf.abs(self.y_ - tf.round(self.y_conv))))


            positive_examples = tf.greater_equal(self.y_, 0.5)
            negative_examples = tf.logical_not(positive_examples)
            positive_classifications = tf.greater_equal(self.y_conv, self.threshold)
            negative_classifications = tf.logical_not(positive_classifications)

            self.true_positive = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications),tf.int32)) # count the examples that are positive and classified as positive
            self.false_positive = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications),tf.int32)) # count the examples that are negative but classified as positive

            self.true_negative = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications),tf.int32)) # count the examples that are negative and classified as negative
            self.false_negative = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications),tf.int32)) # count the examples that are positive but classified as negative

            self.positive_count = tf.reduce_sum(tf.cast(positive_examples, tf.int32)) # count the examples that are positive
            self.negative_count = tf.reduce_sum(tf.cast(negative_examples, tf.int32)) # count the examples that are negative

            self.confusion_matrix = tf.reshape(tf.stack([self.true_positive, self.false_positive, self.false_negative, self.true_negative]), [2, 2])  # tf.pack was renamed to tf.stack in TF 1.0

        self.sess.run(tf.global_variables_initializer())  # tf.initialize_all_variables was deprecated in TF 1.0
Author: JTKBowers | Project: CNN-people-detect | Lines of code: 60 | Source file: Model.py


Example 18: add_evaluation_step

def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data
    into.

  Returns:
    The accuracy evaluation step tensor.
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      # tf.argmax(result_tensor, 1) = return index of maximal value (= 1 in a 1-of-N encoding vector) in each row (axis = 1)
      # But we have more ones (indicating multiple labels) in one row of result_tensor due to the multi-label classification
      # correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \
      #   tf.argmax(ground_truth_tensor, 1))

      # ground_truth is not a binary tensor, it contains the probabilities of each label = we need to tf.round() it
      # to acquire a binary tensor allowing comparison by tf.equal()
      # See: http://stackoverflow.com/questions/39219414/in-tensorflow-how-can-i-get-nonzero-values-and-their-indices-from-a-tensor-with

      correct_prediction = tf.equal(tf.round(result_tensor), ground_truth_tensor)
    with tf.name_scope('accuracy'):
      # Mean accuracy over all labels:
      # http://stackoverflow.com/questions/37746670/tensorflow-multi-label-accuracy-calculation
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('accuracy', evaluation_step)  # tf.scalar_summary was renamed in TF 1.0
  return evaluation_step
Author: samhains | Project: Multi-label-Inception-net | Lines of code: 29 | Source file: retrain.py
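The point of the comments above: for multi-label outputs, tf.round turns per-label probabilities into a binary tensor that tf.equal can compare against the binary ground truth, and the mean runs over every (example, label) pair. A hedged illustration with made-up tensors:

import tensorflow as tf

probs = tf.constant([[0.9, 0.2, 0.7],
                     [0.4, 0.8, 0.1]])
truth = tf.constant([[1.0, 0.0, 1.0],
                     [1.0, 1.0, 0.0]])
correct = tf.equal(tf.round(probs), truth)           # per-label hits
print(tf.reduce_mean(tf.cast(correct, tf.float32)))  # 5/6 ~ 0.833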


Example 19: get_sampled_frame

  def get_sampled_frame(self, pred_frame):
    """Samples the frame based on modality.

      if the modality is L2/L1 then the next predicted frame is the
      next frame and there is no sampling but in case of Softmax loss
      the next actual frame should be sampled from predicted frame.

      This enables multi-frame target prediction with Softmax loss.

    Args:
      pred_frame: predicted frame.

    Returns:
      sampled frame.

    """
    # TODO(lukaszkaiser): the logic below heavily depend on the current
    # (a bit strange) video modalities - we should change that.

    if self.is_per_pixel_softmax:
      frame_shape = common_layers.shape_list(pred_frame)
      target_shape = frame_shape[:-1] + [self.hparams.problem.num_channels]
      sampled_frame = tf.reshape(pred_frame, target_shape + [256])
      sampled_frame = pixels_from_softmax(
          sampled_frame, temperature=self.hparams.pixel_sampling_temperature)
      # TODO(lukaszkaiser): this should be consistent with modality.bottom()
      sampled_frame = common_layers.standardize_images(sampled_frame)
    else:
      x = common_layers.convert_real_to_rgb(pred_frame)
      x = x - tf.stop_gradient(x - tf.round(x))  # straight-through rounding: forward pass rounds, gradient passes through unchanged
      x = common_layers.convert_rgb_to_real(x)
      return x
    return sampled_frame
Author: qixiuai | Project: tensor2tensor | Lines of code: 33 | Source file: base.py
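The corrected line x - tf.stop_gradient(x - tf.round(x)) is a straight-through estimator: the forward value equals tf.round(x), but gradients flow as if the op were the identity (tf.round alone has zero gradient almost everywhere, which would stall training). A minimal sketch in TF 2.x eager mode:

import tensorflow as tf

x = tf.Variable([0.3, 1.7])
with tf.GradientTape() as tape:
    y = x - tf.stop_gradient(x - tf.round(x))  # forward value: rounded x
    loss = tf.reduce_sum(y)

print(y.numpy())               # [0. 2.]
print(tape.gradient(loss, x))  # [1. 1.] -- identity gradient, not zero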


Example 20: testEvaluatePerfectModel

  def testEvaluatePerfectModel(self):
    checkpoint_dir = os.path.join(self.get_temp_dir(),
                                  'evaluate_perfect_model_repeated')

    # Train a Model to completion:
    self._train_model(checkpoint_dir, num_steps=300)

    # Run
    inputs = tf.constant(self._inputs, dtype=tf.float32)
    labels = tf.constant(self._labels, dtype=tf.float32)
    logits = logistic_classifier(inputs)
    predictions = tf.round(logits)

    accuracy, update_op = tf.contrib.metrics.streaming_accuracy(
        predictions, labels)

    final_values = tf.contrib.training.evaluate_repeatedly(
        checkpoint_dir=checkpoint_dir,
        eval_ops=update_op,
        final_ops={'accuracy': accuracy},
        hooks=[
            tf.contrib.training.StopAfterNEvalsHook(1),
        ],
        max_number_of_evaluations=1)
    self.assertTrue(final_values['accuracy'] > .99)
Author: ComeOnGetMe | Project: tensorflow | Lines of code: 25 | Source file: evaluation_test.py



Note: The tensorflow.round examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.

