
Python tensorflow.scalar_mul Function Code Examples


This article collects typical usage examples of the Python function tensorflow.scalar_mul. If you have been wondering what exactly scalar_mul does, how to call it, or what it looks like in real code, the curated examples below may help.



Below are 20 code examples of the scalar_mul function, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
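
To set the stage, here is a minimal sketch of what tf.scalar_mul itself does, written in the TensorFlow 1.x graph style used throughout the examples below; the tensor values are illustrative only:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.scalar_mul(0.5, x)  # multiply every element of x by the scalar 0.5

with tf.Session() as sess:
    print(sess.run(y))  # [[0.5 1. ] [1.5 2. ]]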

Example 1: create_cost

def create_cost(c, w, b, nb, length, weight_spacing=1.0, weight_bending=1.0, gamma=1.0, kappa=2.0):
  # tangents
  t  = create_tangent(c)
  tn = create_normalize_tangent(t)
  nl = create_normal(tn)
  nr = tf.scalar_mul(-1.0, nl)

  l, r = create_left_right(c, w, nl)

  cost_left  = create_cost_soft_min_aligned_distance(l, b, nl, nb, k=kappa, gamma=gamma)
  cost_right = create_cost_soft_min_aligned_distance(r, b, nr, nb, k=kappa, gamma=gamma)
  cost = tf.add(cost_left, cost_right)

  # spacing and bending penalties
  if weight_spacing != 0:
    cost_spacing = tf.scalar_mul(weight_spacing, create_cost_spacing(t, length))
    cost = tf.add(cost, cost_spacing)
  else:
    cost_spacing = tf.constant(0)

  if weight_bending != 0:
    cost_bending = tf.scalar_mul(weight_bending, create_cost_bending(tn))
    cost = tf.add(cost, cost_bending)
  else:
    cost_bending = tf.constant(0)

  return (cost, cost_left, cost_right, cost_spacing, cost_bending, nl, l, r)
Developer: ChristophKirst, Project: CElegansBehaviour, Lines: 27, Source: machine_vision_c.py


Example 2: build_graph

    def build_graph(self,test_decoder_logits):
        print('starting building graph [sentiment-discriminator]')
        with tf.variable_scope("sentiment") as scope:
            self.inputs = tf.slice(test_decoder_logits,[0,0,0],[self.batch_size,self.max_length,self.vocab_size])
            # variable
            weights = {
                'w2v' : tf.get_variable(initializer = tf.random_uniform_initializer(-0.1, 0.1, dtype=tf.float32),shape = [self.vocab_size, self.embedding_dim], name='w2v'),
                'out_1' : tf.get_variable(initializer = tf.random_normal_initializer(), shape = [self.unit_size*2, 1], name='w_out_1'),
            }
            biases = {
                'out_1' : tf.get_variable(initializer = tf.random_normal_initializer(), shape=[1], name='b_out_1'),
            }
            # structure
            def BiRNN(x):
                x = tf.unstack(x, self.max_length, 1)
                lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.unit_size, forget_bias=1.0)
                lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.unit_size,forget_bias=1.0)
                outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype = tf.float32 )
                return outputs[-1]

            self.inputs_softmax = tf.nn.softmax(tf.scalar_mul(tf.constant(5.0, shape=[]),self.inputs))
            y_list=[]
            for i in range(self.inputs.get_shape().as_list()[0]):
                y = tf.matmul(self.inputs_softmax[i], weights['w2v'])
                y = tf.reshape(y, [1, self.max_length, self.embedding_dim])
                y_list.append(y)
            embbed_layer = tf.concat(y_list,0)
            layer_1 = BiRNN(embbed_layer)
            pred = tf.matmul(layer_1, weights['out_1']) + biases['out_1'] 
            # get score
            self.score = tf.sigmoid(pred)
Developer: HTY886, Project: PPGN, Lines: 31, Source: discrim.py


Example 3: predict_slim

def predict_slim(sample_images, print_func=print):
    """
    Code modified from here: [https://github.com/tensorflow/models/issues/429]
    """
    # Setup preprocessing
    input_tensor = tf.placeholder(tf.float32, shape=(None, 299, 299, 3), name='input_image')
    scaled_input_tensor = tf.scalar_mul((1.0 / 255), input_tensor)
    scaled_input_tensor = tf.subtract(scaled_input_tensor, 0.5)
    scaled_input_tensor = tf.multiply(scaled_input_tensor, 2.0)

    # Setup session
    sess = tf.Session()
    arg_scope = slim_irv2.inception_resnet_v2_arg_scope()
    with slim.arg_scope(arg_scope):
        _, end_points = slim_irv2.inception_resnet_v2(scaled_input_tensor, is_training=False)

    # Load the model
    print_func("Loading TF-slim checkpoint...")
    saver = tf.train.Saver()
    saver.restore(sess, SLIM_CKPT)

    # Make prediction
    predict_values = []
    for image in sample_images:
        im = Image.open(image).resize((299, 299))
        arr = np.expand_dims(np.array(im), axis=0)
        y_pred = sess.run([end_points['Predictions']], feed_dict={input_tensor: arr})
        y_pred = y_pred[0].ravel()

        y_pred = y_pred[1:] / y_pred[1:].sum()  # remove background class and renormalize
        print_func("{} class={} prob={}".format(image, np.argmax(y_pred), np.max(y_pred)))
        predict_values.append(y_pred)

    return predict_values
Developer: StevenLOL, Project: keras-inception-resnet-v2, Lines: 34, Source: test_inception_resnet_v2.py
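
As a side note, the three scaling ops in Example 3 implement the standard Inception preprocessing, mapping pixel values from [0, 255] to [-1, 1]. A minimal equivalent sketch (the placeholder name here is illustrative, not from the project):

import tensorflow as tf

input_tensor = tf.placeholder(tf.float32, shape=(None, 299, 299, 3))
# (x / 255 - 0.5) * 2 simplifies to x / 127.5 - 1
scaled = tf.scalar_mul(1.0 / 127.5, input_tensor) - 1.0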


Example 4: thresholding

def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, dtype=tf.float32)
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # set up per-example lower/upper bounds to clamp the scaled means
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)   #0.4
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [128, 1, 1])

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
Developer: nearlyeveryone, Project: bpm, Lines: 29, Source: bpm_estimator.py


Example 5: focal_loss

  def focal_loss(onehot_labels, cls_preds,
                 alpha=0.25, gamma=2.0, name=None, scope=None):
    """Compute softmax focal loss between logits and onehot labels
    logits and onehot_labels must have same shape [batchsize, num_classes] and
    the same data type (float16, 32, 64)
    Args:
      onehot_labels: Each row labels[i] must be a valid probability distribution
      cls_preds: Unscaled log probabilities
      alpha: The hyperparameter for adjusting biased samples, default is 0.25
      gamma: The hyperparameter for penalizing the easy labeled samples
      name: A name for the operation (optional)
    Returns:
      A 1-D tensor of length batch_size of same type as logits with softmax focal loss
    """
    with tf.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc:
      logits = tf.convert_to_tensor(cls_preds)
      onehot_labels = tf.convert_to_tensor(onehot_labels)

      precise_logits = tf.cast(logits, tf.float32) if (
        logits.dtype == tf.float16) else logits
      onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
      predictions = tf.nn.sigmoid(logits)
      predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1. - predictions)
      # add small value to avoid 0
      epsilon = 1e-8
      alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
      alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1 - alpha_t)
      losses = tf.reduce_sum(
        -alpha_t * tf.pow(1. - predictions_pt, gamma) * onehot_labels * tf.log(predictions_pt + epsilon),
        name=name, axis=1)
      return losses
Developer: jacke121, Project: tf_rfcn, Lines: 31, Source: network.py
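
A hedged usage sketch for the function above (my addition, not from the original repository; the labels and logits are dummy values, and focal_loss is assumed to be in scope as defined in Example 5):

import tensorflow as tf

onehot_labels = tf.one_hot([0, 2], depth=3)                   # two examples, three classes
cls_preds = tf.constant([[2.0, -1.0, 0.5], [0.1, 0.2, 3.0]])  # unscaled logits
losses = focal_loss(onehot_labels, cls_preds)                 # shape [2], one loss per example

with tf.Session() as sess:
    print(sess.run(losses))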


Example 6: clip_norm

def clip_norm(g, c, n):
    if c <= 0:  # if clipnorm == 0 no need to add ops to the graph
        return g

    # TF requires a special op to multiply an IndexedSlices by a scalar
    if K.backend() == 'tensorflow':
        condition = n >= c
        then_expression = tf.scalar_mul(c / n, g)
        else_expression = g

        # saving the shape to avoid converting sparse tensor to dense
        if isinstance(then_expression, tf.Tensor):
            g_shape = copy.copy(then_expression.get_shape())
        elif isinstance(then_expression, tf.IndexedSlices):
            g_shape = copy.copy(then_expression.dense_shape)
        if condition.dtype != tf.bool:
            condition = tf.cast(condition, 'bool')
        g = tf.cond(condition,
                    lambda: then_expression,
                    lambda: else_expression)
        if isinstance(then_expression, tf.Tensor):
            g.set_shape(g_shape)
        elif isinstance(then_expression, tf.IndexedSlices):
            g._dense_shape = g_shape
    else:
        g = K.switch(K.greater_equal(n, c), g * c / n, g)
    return g
Developer: cbentes, Project: keras, Lines: 27, Source: optimizers.py
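
A hedged usage sketch for clip_norm (my own, not from the Keras sources; it assumes the function above is in scope with TensorFlow as the active backend, and the norm threshold 2.0 is illustrative):

import tensorflow as tf

g = tf.constant([3.0, 4.0])               # a gradient with L2 norm 5.0
n = tf.sqrt(tf.reduce_sum(tf.square(g)))  # the norm the optimizer would pass in
clipped = clip_norm(g, 2.0, n)            # 5.0 >= 2.0, so g is rescaled to norm 2.0

with tf.Session() as sess:
    print(sess.run(clipped))  # [1.2 1.6]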


Example 7: test_decoder_loop

 def test_decoder_loop(prev,i):
     factor = tf.constant(5,shape=(),dtype=tf.float32)
     prev = tf.scalar_mul(factor,tf.add(tf.matmul(prev,weight_output),bias_output))
     prev_index = tf.nn.softmax(prev) 
     pred_prev = tf.matmul(prev_index,word_embedding_matrix)
     next_input = pred_prev
     return next_input
Developer: HTY886, Project: PPGN, Lines: 7, Source: decoder.py


Example 8: init_training_graph

    def init_training_graph(self):

        with tf.name_scope('Evaluation'):
            logits = self.last
            prob_b = tf.squeeze(logits, squeeze_dims=[1,2])
            self.predictions = tf.argmax(prob_b, axis=1)
            
            with tf.name_scope('Loss'):
                
                self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prob_b,
                                                                          labels=tf.cast(self.train_labels_node, tf.int32),
                                                                          name="entropy")))
                tf.summary.scalar("entropy", self.loss)

            with tf.name_scope('Accuracy'):

                LabelInt = tf.cast(self.train_labels_node, tf.int64)
                CorrectPrediction = tf.equal(self.predictions, LabelInt)
                self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
                tf.summary.scalar("accuracy", self.accuracy)

            with tf.name_scope('Prediction'):

                self.TP = tf.count_nonzero(self.predictions * LabelInt)
                self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
                self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
                self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)

            with tf.name_scope('Precision'):

                self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
                tf.summary.scalar('Precision', self.precision)

            with tf.name_scope('Recall'):

                self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
                tf.summary.scalar('Recall', self.recall)

            with tf.name_scope('F1'):

                num = tf.multiply(self.precision, self.recall)
                dem = tf.add(self.precision, self.recall)
                self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
                tf.summary.scalar('F1', self.F1)

            with tf.name_scope('MeanAccuracy'):
                
                Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
                self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision) ,2)

            #self.batch = tf.Variable(0, name = "batch_iterator")

            self.train_prediction = tf.nn.softmax(logits)

            self.test_prediction = tf.nn.softmax(logits)

        tf.global_variables_initializer().run()

        print('Computational graph initialised')
Developer: PeterJackNaylor, Project: PhD_Fabien, Lines: 59, Source: vgg16.py


Example 9: create_left_right

def create_left_right(c, w, nrm):
  left  = tf.transpose(tf.mul(tf.transpose(nrm), w))
  right = tf.scalar_mul(-1.0, left)

  left  = tf.add(left, c)
  right = tf.add(right, c)

  return left, right
Developer: ChristophKirst, Project: CElegansBehaviour, Lines: 8, Source: machine_vision_d.py


Example 10: main

def main(_):
  print('loading word embeddings from %s' % FLAGS.embedding_file)
  weight_matrix, word_idx = sentiment.load_embeddings(FLAGS.embedding_file)

  train_file = os.path.join(FLAGS.tree_dir, 'train.txt')
  print('loading training trees from %s' % train_file)
  train_trees = sentiment.load_trees(train_file)

  dev_file = os.path.join(FLAGS.tree_dir, 'dev.txt')
  print('loading dev trees from %s' % dev_file)
  dev_trees = sentiment.load_trees(dev_file)

  with tf.Session() as sess:
    print('creating the model')
    keep_prob = tf.placeholder_with_default(1.0, [])
    train_feed_dict = {keep_prob: FLAGS.keep_prob}
    word_embedding = sentiment.create_embedding(weight_matrix)
    compiler, metrics = sentiment.create_model(
        word_embedding, word_idx, FLAGS.lstm_num_units, keep_prob)
    loss = tf.reduce_sum(compiler.metric_tensors['all_loss'])
    opt = tf.train.AdagradOptimizer(FLAGS.learning_rate)
    grads_and_vars = opt.compute_gradients(loss)
    found = 0
    for i, (grad, var) in enumerate(grads_and_vars):
      if var == word_embedding.weights:
        found += 1
        grad = tf.scalar_mul(FLAGS.embedding_learning_rate_factor, grad)
        grads_and_vars[i] = (grad, var)
    assert found == 1  # internal consistency check
    train = opt.apply_gradients(grads_and_vars)
    saver = tf.train.Saver()

    print('initializing tensorflow')
    sess.run(tf.global_variables_initializer())

    with compiler.multiprocessing_pool():
      print('training the model')
      train_set = compiler.build_loom_inputs(train_trees)
      dev_feed_dict = compiler.build_feed_dict(dev_trees)
      dev_hits_best = 0.0
      for epoch, shuffled in enumerate(td.epochs(train_set, FLAGS.epochs), 1):
        train_loss = 0.0
        for batch in td.group_by_batches(shuffled, FLAGS.batch_size):
          train_feed_dict[compiler.loom_input_tensor] = batch
          _, batch_loss = sess.run([train, loss], train_feed_dict)
          train_loss += batch_loss
        dev_metrics = sess.run(metrics, dev_feed_dict)
        dev_loss = dev_metrics['all_loss']
        dev_accuracy = ['%s: %.2f' % (k, v * 100) for k, v in
                        sorted(dev_metrics.items()) if k.endswith('hits')]
        print('epoch:%4d, train_loss: %.3e, dev_loss: %.3e, dev_accuracy: [%s]'
              % (epoch, train_loss, dev_loss, ' '.join(dev_accuracy)))
        dev_hits = dev_metrics['root_hits']
        if dev_hits > dev_hits_best:
          dev_hits_best = dev_hits
          save_path = saver.save(sess, FLAGS.checkpoint_base, global_step=epoch)
          print('model saved in file: %s' % save_path)
Developer: wangbosdqd, Project: fold, Lines: 57, Source: train.py


Example 11: _add_focal_losses

  def _add_focal_losses(self, sigma_rpn=3.0):
    with tf.variable_scope('loss_' + self._tag) as scope:
      # RPN, class loss
      rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
      rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
      rpn_select = tf.where(tf.not_equal(rpn_label, -1))
      rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
      rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
      rpn_cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

      # RPN, bbox loss
      rpn_bbox_pred = self._predictions['rpn_bbox_pred']
      rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
      rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
      rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']

      rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                          rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])

      # RCNN, class loss
      alpha_scale = 0.25
      gamma = 2
      epsilon = 1e-8
      cls_score = self._predictions["cls_score"]
      label = tf.reshape(self._proposal_targets["labels"], [-1])
      label = tf.one_hot(label, depth=self._num_classes)
      cls_pred = tf.nn.sigmoid(cls_score)
      predictions_pt = tf.where(tf.equal(label, 1), cls_pred, 1-cls_pred)
      alpha_t = tf.ones_like(label, dtype=tf.float32)
      alpha_t = tf.scalar_mul(alpha_scale, alpha_t)
      alpha_t = tf.where(tf.equal(label, 1.0), alpha_t, 1. - alpha_t)
      cross_entropy = tf.reduce_mean(-alpha_t*tf.pow(1 - predictions_pt, gamma)*tf.log(predictions_pt + epsilon))

      # RCNN, bbox loss
      bbox_pred = self._predictions['bbox_pred']
      bbox_targets = self._proposal_targets['bbox_targets']
      bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
      bbox_outside_weights = self._proposal_targets['bbox_outside_weights']

      loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)

      self._losses['cross_entropy'] = cross_entropy
      self._losses['loss_box'] = loss_box
      self._losses['rpn_cross_entropy'] = rpn_cross_entropy
      self._losses['rpn_loss_box'] = rpn_loss_box

      self._losses['rpn_loss'] = rpn_loss_box + rpn_cross_entropy
      self._losses['class_loss'] = cross_entropy + loss_box
      loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box
      self._losses['total_loss'] = loss

      self._event_summaries.update(self._losses)

    return loss
Developer: jacke121, Project: tf_rfcn, Lines: 55, Source: network.py


Example 12: create_cost

def create_cost(center, xy, width, peristaltic, bend, bend_profiles, contour, contour_normals, length, weight_spacing=1.0, weight_bending=1.0, gamma=1.0, kappa=2.0):

  c = tf.add(center, xy)

  # tangent and normals
  t  = create_tangent(c)
  tn = create_normalize_tangent(t)
  ta = create_average_tangent(tn)
  nl = create_normal(ta)

  # bend the center
  c = create_bend(center, nl, bend, bend_profiles)

  # peristaltically move the center
  c = create_peristaltic(c, ta, peristaltic)

  # recompute tangents and normals for the moved center
  t  = create_tangent(c)
  tn = create_normalize_tangent(t)
  ta = create_average_tangent(tn)
  nl = create_normal(ta)
  nr = tf.scalar_mul(-1.0, nl)
  l, r = create_left_right(c, width, nl)

  cost_left  = create_cost_soft_min_aligned_distance(l, contour, nl, contour_normals, k=kappa, gamma=gamma)
  cost_right = create_cost_soft_min_aligned_distance(r, contour, nr, contour_normals, k=kappa, gamma=gamma)
  cost = tf.add(cost_left, cost_right)

  # spacing and bending penalties
  if weight_spacing != 0:
    cost_spacing = tf.scalar_mul(weight_spacing, create_cost_spacing(t, length))
    cost = tf.add(cost, cost_spacing)
  else:
    cost_spacing = tf.constant(0)

  if weight_bending != 0:
    cost_bending = tf.scalar_mul(weight_bending, create_cost_bending(tn))
    cost = tf.add(cost, cost_bending)
  else:
    cost_bending = tf.constant(0)

  return (cost, cost_left, cost_right, cost_spacing, cost_bending, c, l, r, nl)
Developer: ChristophKirst, Project: CElegansBehaviour, Lines: 42, Source: machine_vision_d.py


Example 13: focal_loss3

def focal_loss3(cls_score, label, num_classes):
    alpha_scale = 0.25
    gamma = 2
    epsilon = 1e-8
    label = tf.one_hot(label, depth=num_classes)
    cls_pred = tf.nn.sigmoid(cls_score)
    predictions_pt = tf.where(tf.equal(label, 1), cls_pred, 1 - cls_pred)
    alpha_t = tf.ones_like(label, dtype=tf.float32)
    alpha_t = tf.scalar_mul(alpha_scale, alpha_t)
    alpha_t = tf.where(tf.equal(label, 1.0), alpha_t, 1. - alpha_t)
    losses = tf.reduce_mean(-alpha_t * tf.pow(1 - predictions_pt, gamma) * tf.log(predictions_pt + epsilon), axis=1)
    return losses
Developer: jacke121, Project: tf_rfcn, Lines: 12, Source: focal_loss_test.py


Example 14: _compute_update

  def _compute_update(self, param, grad, state):
    """Compute updates of parameters."""

    # get the learning rate at the current index, if the index
    # is greater than the number of available learning rates,
    # use the last one
    index = tf.minimum(state["itr"], self.max_index)
    learning_rate = tf.gather(self.learning_rates, index)

    # update the parameters: parameter - learning_rate * gradient
    updated_param = param - tf.scalar_mul(learning_rate, grad)

    return updated_param, {"itr": state["itr"] + 1}
Developer: ALISCIFP, Project: models, Lines: 13, Source: learning_rate_schedule.py
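
The tf.minimum/tf.gather pair above clamps the step index, so once training runs past the end of the schedule the last learning rate keeps being used. A minimal standalone sketch of the same update rule (all values illustrative, not from the original project):

import tensorflow as tf

learning_rates = tf.constant([0.1, 0.01, 0.001])
max_index = 2
itr = tf.constant(5)                                        # step counter already past the schedule
lr = tf.gather(learning_rates, tf.minimum(itr, max_index))  # clamps to the last rate, 0.001
updated = tf.constant([1.0, 2.0]) - tf.scalar_mul(lr, tf.constant([0.5, 0.5]))

with tf.Session() as sess:
    print(sess.run(updated))  # [0.9995 1.9995]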


Example 15: entropy_matrix

def entropy_matrix(graph, P):
    """Row-wise Shannon entropy of a probability matrix.

    :param graph: the tf.Graph in which to build the ops
    :param P: square probability matrix, assumed to have a zero diagonal
    :return: vector H with H[i] = -sum_j P[i, j] * log(P[i, j])
    """
    with graph.as_default():
        shape = P.get_shape().as_list()
        # add ones on the diagonal so the log stays finite where P is zero
        one_diagonal = tf.diag(diagonal=[1.0 for _ in range(shape[0])])
        P_mod = tf.add(P, one_diagonal)
        H = tf.reduce_sum(tf.scalar_mul(-1.0, tf.mul(P, tf.log(P_mod))), 1)
    return H
Developer: shkr, Project: tensorflow_examples, Lines: 13, Source: tSNE.py


Example 16: build_model

    def build_model(self):

        self.images = tf.placeholder(tf.float32, [self.batch_size] + [self.output_size, self.output_size, self.c_dim], name='real_images')
        self.sample_images= tf.placeholder(tf.float32, [self.sample_size] + [self.output_size, self.output_size, self.c_dim], name='sample_images')
        self.z = tf.placeholder(tf.float32, [None, self.z_dim], name='z')

        self.z_sum = tf.summary.histogram("z", self.z)

        self.G = self.generator(self.z)
        self.D = self.discriminator(self.images)
        self.D_ = self.discriminator(self.G, reuse=True)
        self.sampler = self.sampler(self.z)

        self.d_sum = tf.summary.histogram("d", self.D)
        self.d__sum = tf.summary.histogram("d_", self.D_)
        self.G_sum = tf.summary.image("G", self.G)

        self.d_loss_real = tf.reduce_mean(tf.scalar_mul(-1, self.D))
        self.d_loss_fake = tf.reduce_mean(self.D_)
        self.g_loss = tf.reduce_mean(tf.scalar_mul(-1, self.D_))

        self.d_loss_real_sum = tf.summary.scalar("d_loss_real", self.d_loss_real)
        self.d_loss_fake_sum = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
                                                    
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)

        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.epoch = tf.Variable(-1, name='epoch', trainable=False)
        self.increment_epoch = tf.assign(self.epoch, self.epoch+1)

        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        self.saver = tf.train.Saver(max_to_keep=1000)
Developer: yao-matrix, Project: WassersteinGAN-TensorFlow, Lines: 39, Source: model.py


Example 17: init_training_graph

    def init_training_graph(self):
        with tf.name_scope('Evaluation'):
            self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1,1,1,1], scope_name="logits/")
            self.predictions = tf.argmax(self.logits, axis=3)
            
            with tf.name_scope('Loss'):
                self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
                                                                          labels=tf.squeeze(tf.cast(self.train_labels_node, tf.int32), squeeze_dims=[3]),
                                                                          name="entropy")))
                tf.summary.scalar("entropy", self.loss)

            with tf.name_scope('Accuracy'):

                LabelInt = tf.squeeze(tf.cast(self.train_labels_node, tf.int64), squeeze_dims=[3])
                CorrectPrediction = tf.equal(self.predictions, LabelInt)
                self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
                tf.summary.scalar("accuracy", self.accuracy)

            with tf.name_scope('ClassPrediction'):
                flat_LabelInt = tf.reshape(LabelInt, [-1])
                flat_predictions = tf.reshape(self.predictions, [-1])
                self.cm = tf.confusion_matrix(flat_LabelInt, flat_predictions, self.NUM_LABELS)
                flatten_confusion_matrix = tf.reshape(self.cm, [-1])
                total = tf.reduce_sum(self.cm)
                for i in range(self.NUM_LABELS):
                    name = "Label_{}".format(i)
                    TP, TN, FP, FN = GetCMInfo_TF(self.cm, i, self.NUM_LABELS)

                    precision =  tf.divide(TP, tf.add(TP, FP))
                    recall = tf.divide(TP, tf.add(TP, FN))
                    num = tf.multiply(precision, recall)
                    dem = tf.add(precision, recall)
                    F1 = tf.scalar_mul(2, tf.divide(num, dem))
                    Nprecision = tf.divide(TN, tf.add(TN, FN))
                    MeanAcc = tf.divide(tf.add(precision, Nprecision) ,2)

                    tf.summary.scalar(name + '_Precision', precision)
                    tf.summary.scalar(name + '_Recall', recall)
                    tf.summary.scalar(name + '_F1', F1)
                    tf.summary.scalar(name + '_Performance', MeanAcc)
                confusion_image = tf.reshape( tf.cast( self.cm, tf.float32),
                                            [1, self.NUM_LABELS, self.NUM_LABELS, 1])
                tf.summary.image('confusion', confusion_image)

            self.train_prediction = tf.nn.softmax(self.logits)

            self.test_prediction = self.train_prediction

        tf.global_variables_initializer().run()

        print('Computational graph initialised')
Developer: PeterJackNaylor, Project: PhD_Fabien, Lines: 51, Source: UNetMultiClass_v2.py


Example 18: SimStep

def SimStep(x, In_Yr, In_Yp, In_k, In_dt):
    Yr = tf.constant(In_Yr, tf.float32)   # reactant ratios for each reaction
    Yp = tf.constant(In_Yp, tf.float32)   # product ratios for each reaction
    k = tf.constant(In_k, tf.float32)     # rate constants for each reaction
    dt = tf.constant(In_dt, tf.float32)   # time lapse for each simulation step
    s1 = tf.pow(x, Yr)
    s2 = tf.reduce_prod(s1, 1)
    r = k * s2                            # reaction rates
    s4 = tf.scalar_mul(dt, r)
    Yd = Yp - Yr                          # change in concentrations attributable to each reaction
    dxij = s4 * tf.transpose(Yd)          # concentration change due to each reaction in this step
    dx = tf.reduce_sum(dxij, 1)           # sum of concentration changes from all reactions in this step
    xp = x + dx                           # new concentrations after this step
    return xp
Developer: sagieppel, Project: Chemical-kinetics-rate-equation-with-tensorflow, Lines: 14, Source: Chemical+Kinetics+With+Tensorflow.py
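
A hedged usage sketch (my own, not from the original notebook): unrolling SimStep for a single A -> B reaction. The stoichiometry arrays follow the shapes the function's broadcasting expects, one row per reaction and one column per species; the rate constant and time step are illustrative.

import numpy as np
import tensorflow as tf

In_Yr = np.array([[1.0, 0.0]], dtype=np.float32)  # A -> B consumes one A
In_Yp = np.array([[0.0, 1.0]], dtype=np.float32)  # ... and produces one B
In_k = np.array([0.1], dtype=np.float32)          # rate constant
In_dt = 0.01                                      # time step

x = tf.constant([1.0, 0.0])   # initial concentrations [A, B]
for _ in range(100):          # unroll 100 simulation steps into the graph
    x = SimStep(x, In_Yr, In_Yp, In_k, In_dt)

with tf.Session() as sess:
    print(sess.run(x))        # A decays while B accumulates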


Example 19: probability_matrix

def probability_matrix(graph, beta, D):
    """Row-normalized affinities from pairwise distances, t-SNE style.

    :param graph: the tf.Graph in which to build the ops
    :param beta: per-row precision parameters, shape [n]
    :param D: square matrix of pairwise distances, shape [n, n]
    :return: matrix with entries proportional to exp(-beta[i] * sqrt(D[i, j])),
             zero diagonal, each row normalized to sum to one
    """
    with graph.as_default():
        shape = D.get_shape().as_list()
        beta_matrix = tf.tile(tf.expand_dims(beta, 1), multiples=[1, shape[1]])
        sqrt_matrix = tf.tile(tf.constant(0.5, dtype=tf.float32, shape=[1, 1]), multiples=[shape[0], shape[1]])
        # exp(diag(-inf)) is zero on the diagonal and one elsewhere
        zero_diagonal = tf.exp(tf.diag(diagonal=[-np.inf for _ in range(shape[0])]))
        exp = tf.mul(tf.exp(tf.mul(beta_matrix, tf.scalar_mul(-1.0, tf.pow(D, sqrt_matrix)))), zero_diagonal)
        sum_exp = tf.reduce_sum(exp, reduction_indices=1)
        sum_exp_matrix = tf.tile(tf.expand_dims(sum_exp, 1), multiples=[1, shape[1]])
        return tf.div(exp, sum_exp_matrix)
Developer: shkr, Project: tensorflow_examples, Lines: 14, Source: tSNE.py


Example 20: __init__

    def __init__(self, x, weights: Weights, b: float, hp: Hyperparameters) -> None:
        self.x = tf.transpose(x)
        self.weights = weights
        self.b = b

        with tf.variable_scope('Learning'):
            self.alpha_ma_w = tf.scalar_mul(scalar=hp.alpha_ma, x=self.weights.w)
            self.one_minus_alpha_ma_x = tf.scalar_mul(scalar=1. - hp.alpha_ma, x=self.x)
            self.new_w = self.alpha_ma_w + self.one_minus_alpha_ma_x
            self.learn_weights_op = self.weights.assign(self.new_w)

        with tf.variable_scope('Layer'):
            with tf.variable_scope('Wx_plus_b'):
                z = tf.add(tf.matmul(x, weights.w), self.b)
            activation = tf.nn.relu(z, name='Activation')
            excitatory, inhibitory = weights.split(activation)

            self.excitatory_y = excitatory
            self.inhibitory_y = tf.scalar_mul(scalar=-1., x=inhibitory)

            self.y = tf.concat([self.excitatory_y, self.inhibitory_y], axis=1)
            util.variable_summaries(self.y)
            print(self.y, self.y.shape)
Developer: DimanNe, Project: scripts, Lines: 23, Source: learning.py



Note: The tensorflow.scalar_mul examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from projects contributed by the open-source community; copyright remains with the original authors, and distribution or use should follow each project's license. Please do not reproduce without permission.

