Python framework.get_global_step Function Code Examples


This article collects typical usage examples of the Python function tensorflow.contrib.framework.get_global_step. If you are unsure what get_global_step does, how to call it, or what it looks like in real code, the curated samples below should help.



The following presents 20 code examples of the get_global_step function, ordered roughly by popularity.
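Before the collected examples, here is a minimal, self-contained sketch of the typical pattern, written against the TensorFlow 1.x contrib API these examples use (the toy placeholder model and learning rate are illustrative assumptions, not taken from any project below): create the global step once, then let get_global_step() retrieve it from the default graph wherever a training op is built.

import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers

# Toy regression graph, purely for illustration.
x = tf.placeholder(tf.float32, shape=[None, 1])
y = tf.placeholder(tf.float32, shape=[None, 1])
w = tf.get_variable('w', shape=[1, 1])
loss = tf.reduce_mean(tf.square(tf.matmul(x, w) - y))

# Create the global step once; get_global_step() then finds the same
# variable in the default graph, so model code needs no extra plumbing.
contrib_framework.create_global_step()
global_step = contrib_framework.get_global_step()

# optimize_loss increments the global step on every update by default.
train_op = layers.optimize_loss(
    loss, global_step, learning_rate=0.01, optimizer='SGD')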

Example 1: _get_train_ops

  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    _, loss = self._model_fn(features, targets, ModeKeys.TRAIN)
    # TODO(ipolosukhin): Move this to TensorFlowEstimator when
    # moving out training.
    if isinstance(self.learning_rate, types.FunctionType):
      learning_rate = self.learning_rate(contrib_framework.get_global_step())
    else:
      learning_rate = self.learning_rate
    if isinstance(self.optimizer, types.FunctionType):
      optimizer = self.optimizer(learning_rate)
    else:
      optimizer = self.optimizer
    train_op = layers.optimize_loss(
        loss,
        contrib_framework.get_global_step(),
        learning_rate=learning_rate,
        optimizer=optimizer,
        clip_gradients=self.clip_gradients)
    # Add update ops.
    train_op = control_flow_ops.group(
        train_op, *ops.get_collection('update_ops'))
    return train_op, loss
Developer: Baaaaam, Project: tensorflow, Lines of code: 35, Source file: estimator.py


Example 2: before_run

 def before_run(self, run_context):
   loss = (self.loss_op if self.loss_op is not None else
           run_context.session.graph.get_operation_by_name(
               LOSS_NAME).outputs[0])
   return session_run_hook.SessionRunArgs(
       {'global_step': contrib_framework.get_global_step(),
        'current_loss': loss})
Developer: rmcguinness, Project: tensorflow, Lines of code: 7, Source file: random_forest.py


Example 3: model_fn

  def model_fn(features, labels, mode):
    """Builds generic graph for training or eval."""

    # TODO logits = A tensor representing the pre-softmax likelihood of
    # each digit.
    tensors = {}
    # Add to the Graph the Ops for loss calculation.
    if mode == ModeKeys.INFER:
      # TODO tensors['digit'] = Tensor representing the predicted digit for 'features'
      # Since 'labels' is None we can't calculate a loss
      loss_op = None
    else:
      # TODO loss_op = Operation to calculate loss
      tensors['loss'] = loss_op
      tf.scalar_summary('loss', loss_op)

    # Add to the Graph the Ops for accuracy calculation.
    if mode == ModeKeys.EVAL:
      # TODO accuracy_op = Calculate the accuracy of the inferred digits given 'labels'
      tensors['accuracy'] = accuracy_op
      tf.scalar_summary('training/hptuning/metric', accuracy_op)

    # Add to the Graph the Ops that calculate and apply gradients.
    if mode == ModeKeys.TRAIN:
      global_step = framework.get_global_step()
      # TODO train_op = the gradient descent optimizer with the given learning rate
      # that minimizes the loss
    else:
      train_op = None

    return tensors, loss_op, train_op
Developer: ccortezb, Project: pipeline, Lines of code: 31, Source file: model_skeleton.py


Example 4: softmax_model

def softmax_model(X, Y_, mode):
    Ylogits = layers.linear(X, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(Ylogits, tf.one_hot(Y_, 10)))*100
    train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.003, "Adam")
    return {"predictions":predict, "classes": classes}, loss, train_op
Developer: laventura, Project: tensorflow-mnist-tutorial, Lines of code: 7, Source file: task.py


Example 5: _get_train_ops

  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss()

    return train, self.training_loss
Developer: 363158858, Project: tensorflow, Lines of code: 30, Source file: random_forest.py


Example 6: auto_encoder

def auto_encoder(x_1, x_2, x_mask_1, x_mask_2, y, dropout, opt):
    x_1_emb, W_emb = embedding(x_1, opt)  # batch L emb
    x_2_emb = tf.nn.embedding_lookup(W_emb, x_2)

    x_1_emb = tf.nn.dropout(x_1_emb, dropout)  # batch L emb
    x_2_emb = tf.nn.dropout(x_2_emb, dropout)  # batch L emb

    biasInit = tf.constant_initializer(0.001, dtype=tf.float32)
    x_1_emb = layers.fully_connected(tf.squeeze(x_1_emb), num_outputs=opt.embed_size, biases_initializer=biasInit, activation_fn=tf.nn.relu, scope='trans', reuse=None)  # batch L emb
    x_2_emb = layers.fully_connected(tf.squeeze(x_2_emb), num_outputs=opt.embed_size, biases_initializer=biasInit, activation_fn=tf.nn.relu, scope='trans', reuse=True)

    x_1_emb = tf.expand_dims(x_1_emb, 3)  # batch L emb 1
    x_2_emb = tf.expand_dims(x_2_emb, 3)

    if opt.encoder == 'aver':
        H_enc_1 = aver_emb_encoder(x_1_emb, x_mask_1)
        H_enc_2 = aver_emb_encoder(x_2_emb, x_mask_2)

    elif opt.encoder == 'max':
        H_enc_1 = max_emb_encoder(x_1_emb, x_mask_1, opt)
        H_enc_2 = max_emb_encoder(x_2_emb, x_mask_2, opt)

    elif opt.encoder == 'concat':
        H_enc_1 = concat_emb_encoder(x_1_emb, x_mask_1, opt)
        H_enc_2 = concat_emb_encoder(x_2_emb, x_mask_2, opt)

    # discriminative loss term
    if opt.combine_enc == 'mult':
        H_enc = tf.multiply(H_enc_1, H_enc_2)  # batch * n_gan

    if opt.combine_enc == 'concat':
        H_enc = tf.concat([H_enc_1, H_enc_2], 1)

    if opt.combine_enc == 'sub':
        H_enc = tf.subtract(H_enc_1, H_enc_2)

    if opt.combine_enc == 'mix':
        H_1 = tf.multiply(H_enc_1, H_enc_2)
        H_2 = tf.concat([H_enc_1, H_enc_2], 1)
        H_3 = tf.subtract(H_enc_1, H_enc_2)
        H_enc = tf.concat([H_1, H_2, H_3], 1)

    # calculate the accuracy
    logits = discriminator_2layer(H_enc, opt, dropout, prefix='classify_', num_outputs=opt.category, is_reuse=None)
    prob = tf.nn.softmax(logits)

    correct_prediction = tf.equal(tf.argmax(prob, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))

    train_op = layers.optimize_loss(
        loss,
        framework.get_global_step(),
        optimizer='Adam',
        # variables=d_vars,
        learning_rate=opt.lr)

    return accuracy, loss, train_op, W_emb
Developer: niurouli, Project: SWEM, Lines of code: 58, Source file: eval_snli_emb.py


Example 7: _loss_to_train_op

 def _loss_to_train_op(self, loss):
   """Map `loss` to a training op."""
   with ops.name_scope('loss_to_train_op'):
     trainable_variables = ops.get_default_graph().get_collection(
         ops.GraphKeys.TRAINABLE_VARIABLES)
     global_step = contrib_framework.get_global_step()
     gradients = self._optimizer.compute_gradients(
         loss=loss, var_list=trainable_variables)
     processed_gradients = self._process_gradients(gradients)
     return self._optimizer.apply_gradients(
         processed_gradients, global_step=global_step)
Developer: 821760408-sp, Project: tensorflow, Lines of code: 11, Source file: dynamic_rnn_estimator.py


Example 8: _model_fn

 def _model_fn(features, targets, mode):
   ops.get_default_graph().add_to_collection('IS_TRAINING', mode == 'train')
   if self.class_weight is not None:
     constant_op.constant(self.class_weight, name='class_weight')
   predictions, loss = model_fn(features, targets)
   if isinstance(self.learning_rate, types.FunctionType):
     learning_rate = self.learning_rate(contrib_framework.get_global_step())
   else:
     learning_rate = self.learning_rate
   if isinstance(self.optimizer, types.FunctionType):
     optimizer = self.optimizer(learning_rate)
   else:
     optimizer = self.optimizer
   train_op = layers.optimize_loss(
       loss,
       contrib_framework.get_global_step(),
       learning_rate=learning_rate,
       optimizer=optimizer,
       clip_gradients=self.clip_gradients)
   return predictions, loss, train_op
Developer: AngleFork, Project: tensorflow, Lines of code: 20, Source file: base.py


Example 9: _build_model

 def _build_model(self, data, target):
     ids = tensorflow.split(1, self.n_ids, data)
     node_vectors = [
         learn.ops.categorical_variable(ids[i], self.vocabulary_sizes[i], self.layer_size, str(i))
         for i in range(self.n_ids)
     ]
     activation_in = tensorflow.squeeze(tensorflow.concat(2, node_vectors), [1])
     activation_out = layers.stack(activation_in, layers.fully_connected, self.hidden_units_formation)
     prediction, loss = learn.models.linear_regression(activation_out, target)
     train_op = layers.optimize_loss(loss, framework.get_global_step(), self.learning_rate, "SGD")
     return prediction, loss, train_op
Developer: yuchenhou, Project: elephant, Lines of code: 11, Source file: estimator.py


Example 10: _model_fn

  def _model_fn(features, labels, mode):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)

    graph_builder = graph_builder_class(params, device_assigner=device_assigner)
    inference = {}
    if (mode == model_fn_lib.ModeKeys.EVAL or
        mode == model_fn_lib.ModeKeys.INFER):
      inference[eval_metrics.INFERENCE_PROB_NAME] = (
          graph_builder.inference_graph(features))

      if not params.regression:
        inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
            inference[eval_metrics.INFERENCE_PROB_NAME], 1)

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    loss_deps = []
    training_graph = None
    if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
      loss_deps.append(training_graph)

    training_loss = None
    if (mode == model_fn_lib.ModeKeys.EVAL or
        mode == model_fn_lib.ModeKeys.TRAIN):
      with ops.control_dependencies(loss_deps):
        training_loss = graph_builder.training_loss(
            features, labels, name=LOSS_NAME)
      if report_feature_importances and mode == model_fn_lib.ModeKeys.EVAL:
        training_loss = logging_ops.Print(training_loss,
                                          [graph_builder.feature_importances()],
                                          summarize=1000)
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights

    training_hooks = []
    if early_stopping_rounds:
      training_hooks.append(TensorForestLossHook(early_stopping_rounds))

    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=inference,
        loss=training_loss,
        train_op=training_graph,
        training_hooks=training_hooks)
Developer: Immexxx, Project: tensorflow, Lines of code: 54, Source file: random_forest.py


Example 11: conv_model

def conv_model(X, Y_, mode):
    XX = tf.reshape(X, [-1, 28, 28, 1])
    biasInit = tf.constant_initializer(0.1, dtype=tf.float32)
    Y1 = layers.conv2d(XX,  num_outputs=6,  kernel_size=[6, 6], biases_initializer=biasInit)
    Y2 = layers.conv2d(Y1, num_outputs=12, kernel_size=[5, 5], stride=2, biases_initializer=biasInit)
    Y3 = layers.conv2d(Y2, num_outputs=24, kernel_size=[4, 4], stride=2, biases_initializer=biasInit)
    Y4 = layers.flatten(Y3)
    Y5 = layers.relu(Y4, 200, biases_initializer=biasInit)
    Ylogits = layers.linear(Y5, 10)
    predict = tf.nn.softmax(Ylogits)
    classes = tf.cast(tf.argmax(predict, 1), tf.uint8)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(Ylogits, tf.one_hot(Y_, 10)))*100
    train_op = layers.optimize_loss(loss, framework.get_global_step(), 0.001, "Adam")
    return {"predictions":predict, "classes": classes}, loss, train_op
Developer: laventura, Project: tensorflow-mnist-tutorial, Lines of code: 14, Source file: task.py


Example 12: _model_fn

  def _model_fn(features, labels, mode):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)

    graph_builder = graph_builder_class(params, device_assigner=device_assigner)
    inference = {}
    if (mode == model_fn_lib.ModeKeys.EVAL or
        mode == model_fn_lib.ModeKeys.INFER):
      inference[eval_metrics.INFERENCE_PROB_NAME] = (
          graph_builder.inference_graph(features))

      if not params.regression:
        inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
            inference[eval_metrics.INFERENCE_PROB_NAME], 1)
      if keys:
        inference[KEYS_NAME] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    loss_deps = []
    training_graph = None
    if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
      loss_deps.append(training_graph)

    training_loss = None
    if (mode == model_fn_lib.ModeKeys.EVAL or
        mode == model_fn_lib.ModeKeys.TRAIN):
      with ops.control_dependencies(loss_deps):
        training_loss = graph_builder.training_loss(
            features, labels, name=LOSS_NAME)
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
Developer: cancan101, Project: tensorflow, Lines of code: 45, Source file: random_forest.py


Example 13: _model_fn

  def _model_fn(features, labels):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    keys = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)
    processed_features, spec = data_ops.ParseDataTensorOrDict(features)
    _assert_float32(processed_features)
    if labels is not None:
      labels = data_ops.ParseLabelTensorOrDict(labels)
      _assert_float32(labels)

    graph_builder = graph_builder_class(params, device_assigner=device_assigner)
    inference = {eval_metrics.INFERENCE_PROB_NAME:
                 graph_builder.inference_graph(processed_features,
                                               data_spec=spec)}
    if not params.regression:
      inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
          inference[eval_metrics.INFERENCE_PROB_NAME], 1)
    if keys:
      inference[KEYS_NAME] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_loss = None
    training_graph = None
    if labels is not None:
      training_loss = graph_builder.training_loss(processed_features, labels,
                                                  data_spec=spec,
                                                  name=LOSS_NAME)
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              processed_features, labels, data_spec=spec,
              input_weights=weights),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # Put weights back in
    if weights is not None:
      features[weights_name] = weights
    return (inference, training_loss, training_graph)
Developer: Hwhitetooth, Project: tensorflow, Lines of code: 41, Source file: random_forest.py


Example 14: _get_train_ops

  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    _, loss = self._model_fn(features, targets, ModeKeys.TRAIN)
    train_op = layers.optimize_loss(
        loss,
        contrib_framework.get_global_step(),
        learning_rate=self.learning_rate,
        optimizer=self.optimizer,
        clip_gradients=self.clip_gradients)
    return train_op, loss
Developer: Absarvar, Project: tensorflow, Lines of code: 22, Source file: estimator.py


Example 15: _get_train_ops

  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    features, _, weights, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    features, labels = self._feature_engineering_fn(features, labels)
    _assert_float32(features)
    _assert_float32(labels)

    if weights is not None:
      if 'input_weights' in self.training_args:
        logging.warning('Replacing input_weights in training_args.')
      self.training_args['input_weights'] = weights

    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)

    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()

    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))

    self.training_loss = graph_builder.training_loss(features, targets)

    return train, self.training_loss
Developer: KalraA, Project: tensorflow, Lines of code: 38, Source file: random_forest.py


Example 16: model_fn

  def model_fn(features, labels, mode):
    """Builds generic graph for training or eval."""

    # Build a Graph that computes predictions from the inference model.
    logits = inference(features, args.hidden1, args.hidden2)

    tensors = {}
    # Add to the Graph the Ops for loss calculation.
    if mode == ModeKeys.INFER:
      tensors['digit'] = tf.argmax(logits, 1)
      loss_op = None
    else:
      loss_op = loss(logits, labels)
      tensors['loss'] = loss_op
      tf.scalar_summary('loss', loss_op)

    if mode == ModeKeys.EVAL:
      # Add to the Graph the Ops for accuracy calculation.
      accuracy_op = evaluation(logits, labels)
      tensors['accuracy'] = accuracy_op
      tf.scalar_summary('training/hptuning/metric', accuracy_op)

    # Add to the Graph the Ops that calculate and apply gradients.
    if mode == ModeKeys.TRAIN:
      global_step = framework.get_global_step()
      # Create the gradient descent optimizer with the given learning rate.
      optimizer = tf.train.GradientDescentOptimizer(args.learning_rate)
      # Create a variable to track the global step.
      # Use the optimizer to apply the gradients that minimize the loss
      # (and also increment the global step counter) as a single training step.
      train_op = optimizer.minimize(loss_op, global_step=global_step)
      # Add streaming means.
    else:
      train_op = None

    return tensors, loss_op, train_op
Developer: ccortezb, Project: pipeline, Lines of code: 36, Source file: model.py


Example 17: _model_fn

  def _model_fn(features, labels, mode):
    """Function that returns predictions, training loss, and training op."""
    weights = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)

    keys = None
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)

    # If we're doing eval, optionally ignore device_assigner.
    # Also ignore device assigner if we're exporting (mode == INFER)
    dev_assn = device_assigner
    if (mode == model_fn_lib.ModeKeys.INFER or
        (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
      dev_assn = None

    graph_builder = graph_builder_class(params,
                                        device_assigner=dev_assn)
    inference = {}
    output_alternatives = None
    if (mode == model_fn_lib.ModeKeys.EVAL or
        mode == model_fn_lib.ModeKeys.INFER):
      inference[eval_metrics.INFERENCE_PROB_NAME] = (
          graph_builder.inference_graph(features))

      if params.regression:
        predictions = {
            None: inference[eval_metrics.INFERENCE_PROB_NAME]}
        output_alternatives = {
            None: (constants.ProblemType.LINEAR_REGRESSION, predictions)}
      else:
        inference[eval_metrics.INFERENCE_PRED_NAME] = math_ops.argmax(
            inference[eval_metrics.INFERENCE_PROB_NAME], 1)

        predictions = {
            prediction_key.PredictionKey.PROBABILITIES:
                inference[eval_metrics.INFERENCE_PROB_NAME],
            prediction_key.PredictionKey.CLASSES:
                inference[eval_metrics.INFERENCE_PRED_NAME]}
        output_alternatives = {
            None: (constants.ProblemType.CLASSIFICATION, predictions)}

      if keys is not None:
        inference[keys_name] = keys

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    loss_deps = []
    training_graph = None
    training_hooks = []
    scaffold = None
    if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
      training_graph = control_flow_ops.group(
          graph_builder.training_graph(
              features, labels, input_weights=weights,
              num_trainers=num_trainers,
              trainer_id=trainer_id),
          state_ops.assign_add(contrib_framework.get_global_step(), 1))
      loss_deps.append(training_graph)

    training_loss = None
    if (mode == model_fn_lib.ModeKeys.EVAL or
        mode == model_fn_lib.ModeKeys.TRAIN):
      with ops.control_dependencies(loss_deps):
        training_loss = graph_builder.training_loss(
            features, labels, name=LOSS_NAME)

    # Put weights back in
    if weights is not None:
      features[weights_name] = weights

    if early_stopping_rounds:
      training_hooks.append(TensorForestLossHook(early_stopping_rounds,
                                                 loss_op=training_loss))

    if report_feature_importances:
      training_hooks.append(TensorForestRunOpAtEndHook(
          {'feature_importances': graph_builder.feature_importances()}))

    return model_fn_lib.ModelFnOps(
        mode=mode,
        predictions=inference,
        loss=training_loss,
        train_op=training_graph,
        training_hooks=training_hooks,
        scaffold=scaffold,
        output_alternatives=output_alternatives)
Developer: AutumnQYN, Project: tensorflow, Lines of code: 88, Source file: random_forest.py


Example 18: optimize_loss

def optimize_loss(loss,
                  global_step,
                  learning_rate,
                  optimizer,
                  gradient_noise_scale=None,
                  gradient_multipliers=None,
                  clip_gradients=None,
                  learning_rate_decay_fn=None,
                  update_ops=None,
                  variables=None,
                  name=None,
                  summaries=None,
                  colocate_gradients_with_ops=False,
                  increment_global_step=True):
  """Given loss and parameters for optimizer, returns a training op.

  Various ways of passing optimizers, include:

  - string, name of the optimizer like 'SGD', 'Adam', see OPTIMIZER_CLS_NAMES
      for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
  - function, takes learning rate `Tensor` as argument and must return
      `Optimizer` instance. E.g. `optimize_loss(...,
      optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
    Alternatively, if `learning_rate` is `None`, the function takes no
    arguments. E.g. `optimize_loss(..., learning_rate=None,
      optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
  - class, subclass of `Optimizer` that takes only one required argument -
      learning rate, such as AdamOptimizer, AdagradOptimizer.
      E.g. `optimize_loss(..., optimizer=tf.train.AdagradOptimizer)`.
  - object, instance of subclass of `Optimizer`.
      E.g., `optimizer_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.

  Args:
    loss: Scalar `Tensor`.
    global_step: Scalar int `Tensor`, step counter to update on each step
                 unless `increment_global_step` is `False`. If not supplied,
                 it will be fetched from the default graph (see
                 `tf.train.get_global_step` for details). If it's
                 not been created, no step will be incremented with each weight
                 update. `learning_rate_decay_fn` requires `global_step`.
    learning_rate: float or `Tensor`, magnitude of update per each training
                   step. Can be `None`.
    optimizer: string, class or optimizer instance, used as trainer.
               string should be name of optimizer, like 'SGD',
                 'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
               class should be sub-class of `tf.Optimizer` that implements
                 `compute_gradients` and `apply_gradients` functions.
               optimizer instance should be instantiation of `tf.Optimizer`
                 sub-class and have `compute_gradients` and `apply_gradients`
                 functions.
    gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
                          value.
    gradient_multipliers: dict of variables or variable names to floats.
                          If present, gradients for specified
                          variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If float, is provided, a global
      clipping is applied to prevent the norm of the gradient to exceed this
      value. Alternatively, a callable can be provided e.g.: adaptive_clipping.
      This callable takes a `list` of `(gradients, variables)` `tuple`s and
      returns the same thing with the gradients modified.
    learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
                            `Tensor`s, returns `Tensor`.
                            Can be used to implement any learning rate decay
                            functions.
                            For example: `tf.train.exponential_decay`.
                            Ignored if `learning_rate` is not supplied.
    update_ops: list of update `Operation`s to execute at each step. If `None`,
                uses elements of UPDATE_OPS collection. The order of execution
                between `update_ops` and `loss` is non-deterministic.
    variables: list of variables to optimize or
               `None` to use all trainable variables.
    name: The name for this operation is used to scope operations and summaries.
    summaries: List of internal quantities to visualize on tensorboard. If not
               set only the loss and the learning rate will be reported. The
               complete list is in OPTIMIZER_SUMMARIES.
    colocate_gradients_with_ops: If True, try colocating gradients with the
                                 corresponding op.
    increment_global_step: Whether to increment `global_step`. If your model
      calls `optimize_loss` multiple times per training step (e.g. to optimize
      different parts of the model), use this arg to avoid incrementing
      `global_step` more times than necessary.

  Returns:
    Training op.

  Raises:
    ValueError: if:
        * `loss` is an invalid type or shape.
        * `global_step` is an invalid type or shape.
        * `learning_rate` is an invalid type or value.
        * `optimizer` is wrong type.
        * `clip_gradients` is not float or callable.
        * `learning_rate` and `learning_rate_decay_fn` are supplied, but no
          `global_step` is available.
        * `gradients` is empty
  """
  loss = ops.convert_to_tensor(loss)
  contrib_framework.assert_scalar(loss)
  if global_step is None:
    global_step = contrib_framework.get_global_step()
#......... remainder of this function omitted .........
Developer: AlbertXiebnu, Project: tensorflow, Lines of code: 101, Source file: optimizers.py
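As a complement to the docstring above, here is a small sketch of the callable-optimizer form (the scalar loss, learning rate, momentum value, and clipping threshold are illustrative assumptions, not taken from the project above): optimize_loss passes the learning-rate tensor to the callable and expects a tf.train.Optimizer instance back, with the step counter fetched via get_global_step().

import tensorflow as tf
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib import layers

# Toy scalar loss, purely for illustration.
v = tf.get_variable('v', shape=[], initializer=tf.constant_initializer(5.0))
loss = tf.square(v)

contrib_framework.create_global_step()

# Passing a callable: optimize_loss calls it with the learning-rate tensor
# and uses the returned tf.train.Optimizer to apply gradients.
train_op = layers.optimize_loss(
    loss,
    contrib_framework.get_global_step(),
    learning_rate=0.1,
    optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.9),
    clip_gradients=5.0)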


Example 19: conv_model_train_op

def conv_model_train_op(loss, mode):
    return layers.optimize_loss(loss, framework.get_global_step(), learning_rate=0.003, optimizer="Adam",
        # to remove learning rate decay, comment the next line
        learning_rate_decay_fn=lambda lr, step: 0.0001 + tf.train.exponential_decay(lr, step, -2000, math.e)
        ) if mode == learn.ModeKeys.TRAIN else None
Developer: spwcd, Project: QTML, Lines of code: 5, Source file: task.py


Example 20: _model_fn

  def _model_fn(features, labels, mode):
    """Function that returns predictions, training loss, and training op."""
    if (isinstance(features, ops.Tensor) or
        isinstance(features, sparse_tensor.SparseTensor)):
      features = {'features': features}
    weights = None
    if weights_name and weights_name in features:
      weights = features.pop(weights_name)

    keys = None
    if keys_name and keys_name in features:
      keys = features.pop(keys_name)

    # If we're doing eval, optionally ignore device_assigner.
    # Also ignore device assigner if we're exporting (mode == INFER)
    dev_assn = device_assigner
    if (mode == model_fn_lib.ModeKeys.INFER or
        (local_eval and mode == model_fn_lib.ModeKeys.EVAL)):
      dev_assn = None

    graph_builder = graph_builder_class(params,
                                        device_assigner=dev_assn)

    logits, tree_paths, regression_variance = graph_builder.inference_graph(
        features)

    summary.scalar('average_tree_size', graph_builder.average_size())
    # For binary classification problems, convert probabilities to logits.
    # Includes hack to get around the fact that a probability might be 0 or 1.
    if not params.regression and params.num_classes == 2:
      class_1_probs = array_ops.slice(logits, [0, 1], [-1, 1])
      logits = math_ops.log(
          math_ops.maximum(class_1_probs / math_ops.maximum(
              1.0 - class_1_probs, EPSILON), EPSILON))

    # labels might be None if we're doing prediction (which brings up the
    # question of why we force everything to adhere to a single model_fn).
    training_graph = None
    training_hooks = []
    if labels is not None and mode == model_fn_lib.ModeKeys.TRAIN:
      with ops.control_dependencies([logits.op]):
        training_graph = control_flow_ops.group(
            graph_builder.training_graph(
                features, labels, input_weights=weights,
                num_trainers=num_trainers,
                trainer_id=trainer_id),
            state_ops.assign_add(contrib_framework.get_global_step(), 1))

    # Put weights back in
    if weights is not None:
      features[weights_name] = weights

    # TensorForest's training graph isn't calculated directly from the loss
    # like many other models.
    def _train_fn(unused_loss):
      return training_graph

    model_ops = model_head.create_model_fn_ops(
        features=features,
        labels=labels,
        mode=mode,
        train_op_fn=_train_fn,
        logits=logits,
        scope=head_scope)

    # Ops are run in lexicographical order of their keys. Run the resource
    # clean-up op last.
    all_handles = graph_builder.get_all_resource_handles()
    ops_at_end = {
        '9: clean up resources': control_flow_ops.group(
            *[resource_variable_ops.destroy_resource_op(handle)
              for handle in all_handles])}

    if report_feature_importances:
      ops_at_end['1: feature_importances'] = (
          graph_builder.feature_importances())

    training_hooks.append(TensorForestRunOpAtEndHook(ops_at_end))

    if early_stopping_rounds:
      training_hooks.append(
          TensorForestLossHook(
              early_stopping_rounds,
              early_stopping_loss_threshold=early_stopping_loss_threshold,
              loss_op=model_ops.loss))

    model_ops.training_hooks.extend(training_hooks)

    if keys is not None:
      model_ops.predictions[keys_name] = keys

    if params.inference_tree_paths:
      model_ops.predictions[TREE_PATHS_PREDICTION_KEY] = tree_paths

    if params.regression:
      model_ops.predictions[VARIANCE_PREDICTION_KEY] = regression_variance

    return model_ops
Developer: rmcguinness, Project: tensorflow, Lines of code: 98, Source file: random_forest.py



Note: The tensorflow.contrib.framework.get_global_step examples in this article were compiled by 纯净天空 from source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and use or redistribution should follow each project's license. Please do not republish without permission.

