
Python tensorflow.add_n Function Code Examples


This article collects typical usage examples of the Python tensorflow.add_n function. If you are wondering exactly how add_n is used, how to call it, or what real-world usage looks like, the curated code examples below may help.



A total of 20 code examples of the add_n function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
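As a quick orientation before the project excerpts: tf.add_n takes a list (or tuple) of tensors that all share the same shape and dtype and returns their element-wise sum in a single op. The snippet below is a minimal illustrative sketch written for TensorFlow 1.x graph mode to match the excerpts that follow; it is not taken from any of the listed projects.

import tensorflow as tf

# Three tensors with identical shape and dtype.
a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([4.0, 5.0, 6.0])
c = tf.constant([7.0, 8.0, 9.0])

# tf.add_n sums the whole list element-wise in one op, equivalent to a + b + c.
total = tf.add_n([a, b, c], name='total')

with tf.Session() as sess:
    print(sess.run(total))  # [12. 15. 18.]

A pattern that recurs throughout the examples below is summing a loss collection, e.g. tf.add_n(tf.get_collection('losses'), name='total_loss'), to combine task and regularization losses gathered during graph construction.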

Example 1: loss

def loss(logits, labels, lambs):
    # apply the sigmoid function (framwork.sig_func) to the logits, then transpose
    logits = tf.transpose(framwork.sig_func(logits))
    # according to the labels, erase rows that do not appear in labels
    labels_unique = tf.constant(range(NUM_CLASSES), dtype=tf.int32)
    labels_num = NUM_CLASSES
    # logits = tf.gather(logits, indices=labels_unique)
    # lambs = tf.gather(lambs, indices=labels_unique)
    # set the value of each row to True when it occurs in labels
    template = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, BATCH_SIZE])
    labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
    indict_logic = tf.equal(labels_expand, template)
    # split the tensor along rows
    logit_list = tf.split(0, labels_num, logits)
    indict_logic_list = tf.split(0, labels_num, indict_logic)
    lambda_list = tf.split(0, NUM_CLASSES, lambs)
    # loss_list = list()
    # for i in range(self.image_classes):
    #     loss_list.append(framwork.loss_func(logit_list[i], indict_logic_list[i], lambda_list[i]))
    loss_list = map(framwork.loss_func, logit_list, indict_logic_list, lambda_list)
    losses = tf.add_n(loss_list)
    tf.add_to_collection('losses', losses)
    # The total loss is defined as the cross entropy loss plus all of the weight
    # decay terms (L2 loss).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
Developer ID: chengyang317, Project: cifar10, Lines: 25, Source: infor.py


Example 2: _hourglass

	def _hourglass(self, inputs, n, numOut, name = 'hourglass'):
		""" Hourglass Module
		Args:
			inputs	: Input Tensor
			n		: Number of downsampling steps
			numOut	: Number of Output Features (channels)
			name	: Name of the block
		"""
		with tf.name_scope(name):
			# Upper Branch
			up_1 = self._residual(inputs, numOut, name = 'up_1')
			# Lower Branch
			low_ = tf.contrib.layers.max_pool2d(inputs, [2,2], [2,2], padding='VALID')
			low_1= self._residual(low_, numOut, name = 'low_1')
			
			if n > 0:
				low_2 = self._hourglass(low_1, n-1, numOut, name = 'low_2')
			else:
				low_2 = self._residual(low_1, numOut, name = 'low_2')
				
			low_3 = self._residual(low_2, numOut, name = 'low_3')
			up_2 = tf.image.resize_nearest_neighbor(low_3, tf.shape(low_3)[1:3]*2, name = 'upsampling')
			if self.modif:
				# Use of RELU
				return tf.nn.relu(tf.add_n([up_2,up_1]), name='out_hg')
			else:
				return tf.add_n([up_2,up_1], name='out_hg')
Developer ID: wjgaas, Project: FashionAI_keypoint, Lines: 27, Source: hourglass_tiny.py


Example 3: _tower_loss

def _tower_loss(iterator, num_of_classes, ignore_label, scope, reuse_variable):
  """Calculates the total loss on a single tower running the deeplab model.

  Args:
    iterator: An iterator of type tf.data.Iterator for images and labels.
    num_of_classes: Number of classes for the dataset.
    ignore_label: Ignore label for the dataset.
    scope: Unique prefix string identifying the deeplab tower.
    reuse_variable: If the variable should be reused.

  Returns:
     The total loss for a batch of data.
  """
  with tf.variable_scope(
      tf.get_variable_scope(), reuse=True if reuse_variable else None):
    _build_deeplab(iterator, {common.OUTPUT_TYPE: num_of_classes}, ignore_label)

  losses = tf.losses.get_losses(scope=scope)
  for loss in losses:
    tf.summary.scalar('Losses/%s' % loss.op.name, loss)

  regularization_loss = tf.losses.get_regularization_loss(scope=scope)
  tf.summary.scalar('Losses/%s' % regularization_loss.op.name,
                    regularization_loss)

  total_loss = tf.add_n([tf.add_n(losses), regularization_loss])
  return total_loss
Developer ID: Exscotticus, Project: models, Lines: 27, Source: train.py


Example 4: solve

def solve(global_step):
    """add solver to losses"""
    # learning rate
    lr = _configure_learning_rate(82783, global_step)
    optimizer = _configure_optimizer(lr)
    tf.summary.scalar('learning_rate', lr)

    # compute and apply gradient
    losses = tf.get_collection(tf.GraphKeys.LOSSES)
    regular_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    regular_loss = tf.add_n(regular_losses)
    out_loss = tf.add_n(losses)
    total_loss = tf.add_n(losses + regular_losses)

    tf.summary.scalar('total_loss', total_loss)
    tf.summary.scalar('out_loss', out_loss)
    tf.summary.scalar('regular_loss', regular_loss)

    update_ops = []
    variables_to_train = _get_variables_to_train()
    # update_op = optimizer.minimize(total_loss)
    gradients = optimizer.compute_gradients(total_loss, var_list=variables_to_train)
    grad_updates = optimizer.apply_gradients(gradients, 
            global_step=global_step)
    update_ops.append(grad_updates)
    
    # update moving mean and variance
    if FLAGS.update_bn:
        update_bns = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        update_bn = tf.group(*update_bns)
        update_ops.append(update_bn)

    return tf.group(*update_ops)
Developer ID: imyourm8, Project: FastMaskRCNN, Lines: 33, Source: train.py


Example 5: _shake_shake_block

def _shake_shake_block(x, output_filters, stride, is_training):
  """Builds a full shake-shake sub layer."""
  batch_size = tf.shape(x)[0]

  # Generate random numbers for scaling the branches
  rand_forward = [
      tf.random_uniform(
          [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
      for _ in range(2)
  ]
  rand_backward = [
      tf.random_uniform(
          [batch_size, 1, 1, 1], minval=0, maxval=1, dtype=tf.float32)
      for _ in range(2)
  ]
  # Normalize so that all sum to 1
  total_forward = tf.add_n(rand_forward)
  total_backward = tf.add_n(rand_backward)
  rand_forward = [samp / total_forward for samp in rand_forward]
  rand_backward = [samp / total_backward for samp in rand_backward]
  zipped_rand = zip(rand_forward, rand_backward)

  branches = []
  for branch, (r_forward, r_backward) in enumerate(zipped_rand):
    with tf.variable_scope('branch_{}'.format(branch)):
      b = _shake_shake_branch(x, output_filters, stride, r_forward, r_backward,
                              is_training)
      branches.append(b)
  res = _shake_shake_skip_connection(x, output_filters, stride)
  return res + tf.add_n(branches)
Developer ID: 812864539, Project: models, Lines: 30, Source: shake_shake.py


Example 6: _build

  def _build(self, dataset, feature_transformer):
    if self.samples_per_class is not None:
      if dataset not in self.dataset_map:
        # datasets are outside of frames from while loops
        with tf.control_dependencies(None):
          self.dataset_map[dataset] = utils.sample_n_per_class(
              dataset, self.samples_per_class)

      dataset = self.dataset_map[dataset]

    stats = collections.defaultdict(list)
    losses = []
    # TODO(lmetz) move this to ingraph control flow?
    for _ in xrange(self.averages):
      loss, stat = self._build_once(dataset, feature_transformer)
      losses.append(loss)
      for k, v in stat.items():
        stats[k].append(v)
    stats = {k: tf.add_n(v) / float(len(v)) for k, v in stats.items()}

    summary_updates = []
    for k, v in stats.items():
      tf.summary.scalar(k, v)

    with tf.control_dependencies(summary_updates):
      return tf.add_n(losses) / float(len(losses))
Developer ID: ALISCIFP, Project: models, Lines: 26, Source: linear_regression.py


Example 7: _create_gumbel_control_variate

  def _create_gumbel_control_variate(self, logQHard, temperature=None):
    '''Calculate gumbel control variate.
    '''
    if temperature is None:
      temperature = self.hparams.temperature

    logQ, softSamples = self._recognition_network(sampler=functools.partial(
        self._random_sample_soft, temperature=temperature))
    softELBO, _ = self._generator_network(softSamples, logQ)
    logQ = tf.add_n(logQ)

    # Generate the softELBO_v (should be the same value but different grads)
    logQ_v, softSamples_v = self._recognition_network(sampler=functools.partial(
        self._random_sample_soft_v, temperature=temperature))
    softELBO_v, _ = self._generator_network(softSamples_v, logQ_v)
    logQ_v = tf.add_n(logQ_v)

    # Compute losses
    learning_signal = tf.stop_gradient(softELBO_v)

    # Control variate
    h = (tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
          - softELBO + softELBO_v)

    extra = (softELBO_v, -softELBO + softELBO_v)

    return h, extra
Developer ID: ALISCIFP, Project: models, Lines: 27, Source: rebar.py


Example 8: _make_objectives

  def _make_objectives(self):
    # TODO: Hacky, will cause clashes if multiple DPG instances.
    policy_params = self._policy_params()
    critic_params = [var for var in tf.all_variables()
                     if "critic/" in var.name]
    self.policy_params = policy_params
    self.critic_params = critic_params

    # Policy objective: maximize on-policy critic activations
    mean_critic_over_time = tf.add_n(self.critic_on) / self.seq_length
    mean_critic = tf.reduce_mean(mean_critic_over_time)
    self.policy_objective = -mean_critic

    # DEV
    tf.scalar_summary("critic(a_pred).mean", mean_critic)

    # Critic objective: minimize MSE of off-policy Q-value predictions
    q_errors = [tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(critic_off_t, q_targets_t))#tf.square(critic_off_t - q_targets_t))
                for critic_off_t, q_targets_t
                in zip(self.critic_off_pre, self.q_targets)]
    self.critic_objective = tf.add_n(q_errors) / self.seq_length
    tf.scalar_summary("critic_objective", self.critic_objective)

    mean_critic_off = tf.reduce_mean(tf.add_n(self.critic_off)) / self.seq_length
    tf.scalar_summary("critic(a_explore).mean", mean_critic_off)

    tf.scalar_summary("a_pred.mean", tf.reduce_mean(tf.add_n(self.a_pred)) / self.seq_length)
    tf.scalar_summary("a_pred.maxabs", tf.reduce_max(tf.abs(tf.pack(self.a_pred))))
Developer ID: hans, Project: rlcomp, Lines: 28, Source: dpg.py


Example 9: after_apply

  def after_apply(self):
    self._moving_averager = tf.train.ExponentialMovingAverage(decay=self._beta, zero_debias=self._zero_debias)
    assert self._grads != None and len(self._grads) > 0
    after_apply_ops = []

    # get per var g**2 and norm**2
    self._grad_squared = []
    self._grad_norm_squared = []
    for v, g in zip(self._tvars, self._grads):
      with ops.colocate_with(v):
        self._grad_squared.append(tf.square(g) )
    self._grad_norm_squared = [tf.reduce_sum(grad_squared) for grad_squared in self._grad_squared]

    # the following running average on squared norm of gradient is shared by grad_var and dist_to_opt
    avg_op = self._moving_averager.apply(self._grad_norm_squared)
    with tf.control_dependencies([avg_op] ):
      self._grad_norm_squared_avg = [self._moving_averager.average(val) for val in self._grad_norm_squared]
      self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
      self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
    after_apply_ops.append(avg_op)

    with tf.control_dependencies([avg_op] ):
      curv_range_ops = self.curvature_range()
      after_apply_ops += curv_range_ops
      grad_var_ops = self.grad_variance()
      after_apply_ops += grad_var_ops
      dist_to_opt_ops = self.dist_to_opt() 
      after_apply_ops += dist_to_opt_ops

    return tf.group(*after_apply_ops)
Developer ID: tigercut, Project: MobileNet, Lines: 30, Source: yellowfin.py


Example 10: _read

 def _read(self, keys, redundant_states):
     read = _comp_mul(keys, redundant_states)
     if self._num_copies > 1:
         xs_real = tf.split(1, self._num_copies, _comp_real(read))
         xs_imag = tf.split(1, self._num_copies, _comp_imag(read))
         read = (tf.add_n(xs_real)/self._num_copies, tf.add_n(xs_imag)/self._num_copies)
     return read
Developer ID: BinbinBian, Project: dual_am_rnn, Lines: 7, Source: dual_assoc_rnn.py


Example 11: _full_batch_training_op

  def _full_batch_training_op(self, inputs, cluster_idx_list, cluster_centers):
    """Creates an op for training for full batch case.

    Args:
      inputs: list of input Tensors.
      cluster_idx_list: A vector (or list of vectors). Each element in the
        vector corresponds to an input row in 'inp' and specifies the cluster id
        corresponding to the input.
      cluster_centers: Tensor Ref of cluster centers.

    Returns:
      An op for doing an update of full-batch k-means.
    """
    cluster_sums = []
    cluster_counts = []
    epsilon = tf.constant(1e-6, dtype=inputs[0].dtype)
    for inp, cluster_idx in zip(inputs, cluster_idx_list):
      with ops.colocate_with(inp):
        cluster_sums.append(tf.unsorted_segment_sum(inp,
                                                    cluster_idx,
                                                    self._num_clusters))
        cluster_counts.append(tf.unsorted_segment_sum(
            tf.reshape(tf.ones(tf.reshape(tf.shape(inp)[0], [-1])), [-1, 1]),
            cluster_idx,
            self._num_clusters))
    with ops.colocate_with(cluster_centers):
      new_clusters_centers = tf.add_n(cluster_sums) / (
          tf.cast(tf.add_n(cluster_counts), cluster_sums[0].dtype) + epsilon)
      if self._clusters_l2_normalized():
        new_clusters_centers = tf.nn.l2_normalize(new_clusters_centers, dim=1)
    return tf.assign(cluster_centers, new_clusters_centers)
Developer ID: 2020zyc, Project: tensorflow, Lines: 31, Source: clustering_ops.py


Example 12: loss

    def loss(self, traindata):
        """build models, calculate losses.

        Args:
            traindata: 4-D Tensor of shape `[batch, height, width, channels]`.

        Returns:
            dict of each models' losses.
        """
        generated = self.g(self.z, training=True)
        g_outputs = self.d(generated, training=True, name='g')
        t_outputs = self.d(traindata, training=True, name='t')
        # add each losses to collection
        tf.add_to_collection(
            'g_losses',
            tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf.ones([self.batch_size], dtype=tf.int64),
                    logits=g_outputs)))
        tf.add_to_collection(
            'd_losses',
            tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf.ones([self.batch_size], dtype=tf.int64),
                    logits=t_outputs)))
        tf.add_to_collection(
            'd_losses',
            tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=tf.zeros([self.batch_size], dtype=tf.int64),
                    logits=g_outputs)))
        return {
            self.g: tf.add_n(tf.get_collection('g_losses'), name='total_g_loss'),
            self.d: tf.add_n(tf.get_collection('d_losses'), name='total_d_loss'),
        }
Developer ID: sugyan, Project: tf-dcgan, Lines: 35, Source: dcgan.py


Example 13: multilevel_rpn_losses

def multilevel_rpn_losses(
        multilevel_anchors, multilevel_label_logits, multilevel_box_logits):
    """
    Args:
        multilevel_anchors: #lvl RPNAnchors
        multilevel_label_logits: #lvl tensors of shape HxWxA
        multilevel_box_logits: #lvl tensors of shape HxWxAx4

    Returns:
        label_loss, box_loss
    """
    num_lvl = len(cfg.FPN.ANCHOR_STRIDES)
    assert len(multilevel_anchors) == num_lvl
    assert len(multilevel_label_logits) == num_lvl
    assert len(multilevel_box_logits) == num_lvl

    losses = []
    with tf.name_scope('rpn_losses'):
        for lvl in range(num_lvl):
            anchors = multilevel_anchors[lvl]
            label_loss, box_loss = rpn_losses(
                anchors.gt_labels, anchors.encoded_gt_boxes(),
                multilevel_label_logits[lvl], multilevel_box_logits[lvl],
                name_scope='level{}'.format(lvl + 2))
            losses.extend([label_loss, box_loss])

        total_label_loss = tf.add_n(losses[::2], name='label_loss')
        total_box_loss = tf.add_n(losses[1::2], name='box_loss')
        add_moving_summary(total_label_loss, total_box_loss)
    return total_label_loss, total_box_loss
Developer ID: quanlzheng, Project: tensorpack, Lines: 30, Source: model_fpn.py


Example 14: create

    def create(self):
        gan = self.gan
        config = self.config
        ops = self.gan.ops
        split = len(gan.generator.children)+len(gan.generator.parents)+1
        #generator structure: 
        # x, gp1, ..., gpn, gc1, ..., gcm
        d_real = self.d_real
        d_fake = self.d_fake

        net = gan.discriminator.sample

        ds = self.split_batch(net, split)
        d_real = ds[0]
        d_fake = tf.add_n(ds[1:len(gan.generator.parents)+1])/(len(gan.generator.parents))
        d_loss, _ = self._create(d_real, d_fake)

        ds = self.split_batch(net, split)
        d_real = ds[0]
        d_fake = tf.add_n(ds[1+len(gan.generator.parents):])/(len(gan.generator.children))
        _, g_loss = self._create(d_real, d_fake)
        self.children_losses = self.split_batch(g_loss, len(gan.generator.children))

        d_loss = ops.squash(d_loss, config.reduce or tf.reduce_mean) #linear doesn't work with this
        g_loss = ops.squash(g_loss, config.reduce or tf.reduce_mean)

        self.sample = [d_loss, g_loss]
        self.d_loss = d_loss
        self.g_loss = g_loss

        return self.sample
Developer ID: 255BITS, Project: hyperchamber-gan, Lines: 31, Source: evolution_loss.py


Example 15: __init__

    def __init__(self, nr_gpu, input, model):
        super(MultiGPUGANTrainer, self).__init__()
        assert nr_gpu > 1
        raw_devices = ['/gpu:{}'.format(k) for k in range(nr_gpu)]

        # Setup input
        input = StagingInput(input)
        cbs = input.setup(model.get_inputs_desc())
        self.register_callback(cbs)

        # Build the graph with multi-gpu replication
        def get_cost(*inputs):
            model.build_graph(*inputs)
            return [model.d_loss, model.g_loss]
        self.tower_func = TowerFuncWrapper(get_cost, model.get_inputs_desc())
        devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
        cost_list = DataParallelBuilder.build_on_towers(
            list(range(nr_gpu)),
            lambda: self.tower_func(*input.get_input_tensors()),
            devices)
        # Simply average the cost here. It might be faster to average the gradients
        with tf.name_scope('optimize'):
            d_loss = tf.add_n([x[0] for x in cost_list]) * (1.0 / nr_gpu)
            g_loss = tf.add_n([x[1] for x in cost_list]) * (1.0 / nr_gpu)

            opt = model.get_optimizer()
            # run one d_min after one g_min
            g_min = opt.minimize(g_loss, var_list=model.g_vars,
                                 colocate_gradients_with_ops=True, name='g_op')
            with tf.control_dependencies([g_min]):
                d_min = opt.minimize(d_loss, var_list=model.d_vars,
                                     colocate_gradients_with_ops=True, name='d_op')
        # Define the training iteration
        self.train_op = d_min
Developer ID: ahuirecome, Project: tensorpack, Lines: 34, Source: GAN.py


Example 16: top_sharded

  def top_sharded(self,
                  sharded_body_output,
                  sharded_targets,
                  data_parallelism,
                  weights_fn=common_layers.weights_nonzero):
    """Transform all shards of targets.

    Classes with cross-shard interaction will override this function.

    Args:
      sharded_body_output: A list of Tensors.
      sharded_targets: A list of Tensors.
      data_parallelism: an expert_utils.Parallelism object.
      weights_fn: function from targets to target weights.
    Returns:
      sharded_logits: A list of Tensors.
      training_loss: a Scalar.
    """
    sharded_logits = data_parallelism(self.top, sharded_body_output,
                                      sharded_targets)
    loss_num, loss_den = data_parallelism(
        common_layers.padded_cross_entropy,
        sharded_logits,
        sharded_targets,
        self._model_hparams.label_smoothing,
        weights_fn=weights_fn)
    loss = tf.add_n(loss_num) / tf.maximum(1.0, tf.add_n(loss_den))
    return sharded_logits, loss
Developer ID: TrunksLegendary, Project: tensor2tensor, Lines: 28, Source: modality.py


Example 17: build_model

  def build_model(self):
    self.x = tf.placeholder(tf.float32, [self.reader.vocab_size], name="input")
    self.x_idx = tf.placeholder(tf.int32, [None], name='x_idx')  # mask paddings

    self.build_encoder()
    self.build_generator()

    self.objective = self.kl +self.recons_loss
    
    # optimizer for alternative update
    optimizer1 = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    optimizer2 = tf.train.AdamOptimizer(learning_rate=0.1)
   
    fullvars = tf.GraphKeys.TRAINABLE_VARIABLES
    print 'fullvars:',fullvars

    enc_vars = tf.get_collection(fullvars,scope='encoder')
    print enc_vars
    
    dec_vars = tf.get_collection(fullvars,scope='generator')
    print dec_vars
    self.lossL2_enc = tf.add_n([ tf.nn.l2_loss(v) for v in enc_vars if 'bias' not in v.name]) * 0.0001
    self.lossL2_dec = tf.add_n([ tf.nn.l2_loss(v) for v in dec_vars if 'bias' not in v.name])
    print 'lossL2_enc:',self.lossL2_enc
    print 'lossL2_dec:',self.lossL2_dec          
    enc_grads = tf.gradients(self.kl+self.lossL2_enc, enc_vars)
    dec_grads = tf.gradients(self.recons_loss+self.lossL2_dec, dec_vars)
    
     
    self.optim_enc = optimizer1.apply_gradients(zip(enc_grads, enc_vars))
    self.optim_dec = optimizer2.apply_gradients(zip(dec_grads, dec_vars))  
Developer ID: wujsAct, Project: TeachingMachineReadAndComprehend, Lines: 31, Source: nvdm.py


Example 18: _build_graph

    def _build_graph(self, inputs):
        image, label = inputs
        image = tf.expand_dims(image, 3)

        image = image * 2 - 1   # center the pixels values at zero

        with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
            M = self._build_keras_model()
            logits = M(image)
        prob = tf.nn.softmax(logits, name='prob')   # a Bx10 tensor of class probabilities

        # a vector of length B with loss of each sample
        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')  # the average cross-entropy loss

        wrong = symbolic_functions.prediction_incorrect(logits, label, name='incorrect')
        train_error = tf.reduce_mean(wrong, name='train_error')
        summary.add_moving_summary(train_error)

        wd_cost = tf.add_n(M.losses, name='regularize_loss')    # this is how Keras manages regularizers
        self.cost = tf.add_n([wd_cost, cost], name='total_cost')
        summary.add_moving_summary(cost, wd_cost, self.cost)

        # this is the keras naming
        summary.add_param_summary(('conv2d.*/kernel', ['histogram', 'rms']))
Developer ID: j50888, Project: tensorpack, Lines: 25, Source: mnist-keras.py


Example 19: __init__

  def __init__(self, gan=None, config=None, trainer=None, name="SelfSupervisedTrainHook"):
    super().__init__(config=config, gan=gan, trainer=trainer, name=name)
    g_loss = []
    d_loss = []
    if hasattr(self.gan.inputs, 'frames'):
        x = gan.x0#gan.inputs.x
        g = gan.g0#gan.generator.sample
    else:
        x = gan.inputs.x
        g = gan.generator.sample
    reuse = False
    for i in range(4):
        if gan.width() != gan.height() and i % 2 == 0:
            continue
        _x = tf.image.rot90(x, i+1)
        _g = tf.image.rot90(g, i+1)
        stacked = tf.concat([_x, _g], axis=0)
        shared = gan.create_discriminator(stacked, reuse=True).named_layers['shared']
        r = gan.create_component(config["r"], input=shared, reuse=reuse)
        reuse=True
        gan.discriminator.add_variables(r)
        gan.generator.add_variables(r)
        labels = tf.one_hot(i, 4)
        _dl = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=r.sample[0])
        _gl = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=r.sample[1])
        d_loss.append(_dl)
        g_loss.append(_gl)

    self.g_loss = (self.config.alpha or 1.0) * tf.add_n(g_loss)
    self.d_loss = (self.config.beta or 1.0) * tf.add_n(d_loss)

    self.gan.add_metric('ssgl', self.g_loss)
    self.gan.add_metric('ssdl', self.d_loss)
Developer ID: 255BITS, Project: hyperchamber-gan, Lines: 33, Source: self_supervised_train_hook.py


Example 20: __call__

    def __call__(self, flow=None):
        """Constructs the Sequential and its inner pieces.

        Args:
            flow: Input `Tensor` object. (Default value = None)

        Returns:
            Output of this `Parallel`.

        """

        # build inner pieces.
        with tf.variable_op_scope([], self.name, 'Parallel', reuse=self.reuse):
            if not self.reuse:
                self.reuse = True

            outputs = []
            for i, piece in enumerate(self.child_pieces):
                outputs.append(piece(flow))

            if self.mode == 'concat':
                return tf.concat(self.along_dim, outputs)
            elif self.mode == 'mean':
                return tf.add_n(outputs) / len(self.child_pieces)
            elif self.mode == 'sum':
                return tf.add_n(outputs)
Developer ID: Anna-Jiang, Project: first-test, Lines: 26, Source: pieces.py



Note: The tensorflow.add_n examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not republish without permission.

