
Python metrics.streaming_mean Function Code Examples


This article collects typical usage examples of the Python function tensorflow.contrib.metrics.streaming_mean. If you are wondering what streaming_mean does, how to call it, or want working examples, the curated snippets below should help.



Twenty code examples of the streaming_mean function are shown below, ordered by popularity by default.
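Before the examples, here is a minimal sketch of the contract every snippet below relies on: streaming_mean returns a pair of tensors, a value tensor and an update_op, and accumulates its running total/count in local variables. The session loop is an illustrative assumption about typical TF 1.x usage, not code taken from any of the examples.

import tensorflow as tf
from tensorflow.contrib import metrics as metrics_lib

# streaming_mean returns (value, update_op): running update_op folds the
# current batch into internal total/count local variables, while value
# reads the mean accumulated so far.
values = tf.placeholder(tf.float32, shape=[None])
mean, update_op = metrics_lib.streaming_mean(values)

with tf.Session() as sess:
  # Streaming metrics keep their state in local (not global) variables.
  sess.run(tf.local_variables_initializer())
  sess.run(update_op, feed_dict={values: [1.0, 2.0]})
  sess.run(update_op, feed_dict={values: [3.0]})
  print(sess.run(mean))  # 2.0 -- the mean of all three values seen so far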

Example 1: _make_logistic_eval_metric_ops

def _make_logistic_eval_metric_ops(labels, predictions, thresholds):
  """Returns a dictionary of evaluation metric ops for logistic regression.

  Args:
    labels: The labels `Tensor`, or a dict with only one `Tensor` keyed by name.
    predictions: The predictions `Tensor`.
    thresholds: List of floating point thresholds to use for accuracy,
      precision, and recall metrics.

  Returns:
    A dict of metric results keyed by name.
  """
  # If labels is a dict with a single key, unpack into a single tensor.
  labels_tensor = labels
  if isinstance(labels, dict) and len(labels) == 1:
    labels_tensor = list(labels.values())[0]

  metrics = {}
  metrics[metric_key.MetricKey.PREDICTION_MEAN] = metrics_lib.streaming_mean(
      predictions)
  metrics[metric_key.MetricKey.LABEL_MEAN] = metrics_lib.streaming_mean(
      labels_tensor)
  # Also include the streaming mean of the label as an accuracy baseline:
  # a classifier that always predicts the positive class achieves exactly
  # this accuracy, so a useful model should beat it.
  metrics[metric_key.MetricKey.ACCURACY_BASELINE] = metrics_lib.streaming_mean(
      labels_tensor)

  metrics[metric_key.MetricKey.AUC] = metrics_lib.streaming_auc(
      labels=labels_tensor, predictions=predictions)

  for threshold in thresholds:
    predictions_at_threshold = math_ops.cast(
        math_ops.greater_equal(predictions, threshold),
        dtypes.float32,
        name='predictions_at_threshold_%f' % threshold)
    metrics[metric_key.MetricKey.ACCURACY_MEAN % threshold] = (
        metrics_lib.streaming_accuracy(labels=labels_tensor,
                                       predictions=predictions_at_threshold))
    # Precision for positive examples.
    metrics[metric_key.MetricKey.PRECISION_MEAN % threshold] = (
        metrics_lib.streaming_precision(labels=labels_tensor,
                                        predictions=predictions_at_threshold))
    # Recall for positive examples.
    metrics[metric_key.MetricKey.RECALL_MEAN % threshold] = (
        metrics_lib.streaming_recall(labels=labels_tensor,
                                     predictions=predictions_at_threshold))

  return metrics
Author: Albert-Z-Guo | Project: tensorflow | Lines: 48 | Source: logistic_regressor.py
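Each value in the dict returned above is a (value_tensor, update_op) pair. Here is a hedged sketch of how such a dict might be consumed; the session loop and the num_batches count are illustrative assumptions, not part of the example.

# Illustrative consumption of the metrics dict built by the example above.
metric_values = {name: pair[0] for name, pair in metrics.items()}
metric_updates = [pair[1] for pair in metrics.values()]

num_batches = 10  # illustrative batch count, assumed by this sketch

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  for _ in range(num_batches):
    sess.run(metric_updates)       # accumulate one batch into every metric
  results = sess.run(metric_values)  # read the final streaming values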


Example 2: _get_eval_ops

  def _get_eval_ops(self, features, targets, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which targets with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      metrics: `dict` of `Tensor` objects.

    Raises:
      ValueError: if `metrics` doesn't match `targets`.
    """
    predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL)
    result = {'loss': metrics_lib.streaming_mean(loss)}
    result.update(_make_metrics_ops(metrics, features, targets, predictions))
    return result
Author: Nishant23 | Project: tensorflow | Lines: 29 | Source: estimator.py


Example 3: _get_eval_ops

  def _get_eval_ops(self, features, targets, metrics=None):
    logits = self._model.build_model(
        features, self._feature_columns, is_training=False)
    model_fn_ops = self._head.head_ops(features, targets,
                                       tf.contrib.learn.ModeKeys.TRAIN,
                                       _noop_training_fn, logits=logits)
    return {'loss': metrics_lib.streaming_mean(model_fn_ops.loss)}
Author: 821760408-sp | Project: tensorflow | Lines: 7 | Source: composable_model_test.py


Example 4: get_eval_ops

  def get_eval_ops(self, features, logits, targets, metrics=None):
    loss = self.loss(logits, targets, features)
    result = {"loss": metrics_lib.streaming_mean(loss)}
    if metrics:
      predictions = self.logits_to_predictions(logits, proba=False)
      result.update(_run_metrics(predictions, targets, metrics,
                                 self.get_weight_tensor(features)))
    return result
Author: sathishreddy | Project: tensorflow | Lines: 7 | Source: target_column.py


Example 5: _get_eval_ops

  def _get_eval_ops(self, features, targets, metrics=None):
    """See base class."""
    logits = self._logits(features)
    result = {"loss": metrics_lib.streaming_mean(self._loss(
        logits, targets, features))}

    # Adds default metrics.
    if metrics is None:
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}

    # Adds additional useful metrics for the special case of binary
    # classification.
    # TODO(zakaria): Move LogisticRegressor.get_default_metrics to metrics
    #   and handle eval metric from targetcolumn.
    if self._target_column.num_label_columns == 1:
      predictions = math_ops.sigmoid(logits)
      targets_float = math_ops.to_float(targets)
      default_metrics = (
          logistic_regressor.LogisticRegressor.get_default_metrics())
      for metric_name, metric_op in default_metrics.items():
        result[metric_name] = metric_op(predictions, targets_float)

    if metrics:
      class_metrics = {}
      proba_metrics = {}
      for name, metric_op in six.iteritems(metrics):
        if isinstance(name, tuple):
          if len(name) != 2:
            raise ValueError("Ignoring metric {}. It returned a tuple with "
                             "len {}, expected 2.".format(name, len(name)))
          else:
            if name[1] not in ["classes", "probabilities"]:
              raise ValueError("Ignoring metric {}. The 2nd element of its "
                               "name should be either 'classes' or "
                               "'probabilities'.".format(name))
            elif name[1] == "classes":
              class_metrics[name[0]] = metric_op
            else:
              proba_metrics[name[0]] = metric_op
        elif isinstance(name, str):
          class_metrics[name] = metric_op
        else:
          raise ValueError("Ignoring metric {}. Its name is not in the correct "
                           "form.".format(name))
      if class_metrics:
        predictions = self._target_column.logits_to_predictions(logits,
                                                                proba=False)
        result.update(self._run_metrics(predictions, targets, class_metrics,
                                        self._target_column.get_weight_tensor(
                                            features)))
      if proba_metrics:
        predictions = self._target_column.logits_to_predictions(logits,
                                                                proba=True)
        result.update(self._run_metrics(predictions, targets, proba_metrics,
                                        self._target_column.get_weight_tensor(
                                            features)))

    return result
Author: Brandon-Tai | Project: tensorflow | Lines: 60 | Source: dnn_linear_combined.py
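Examples 5, 10, and 11 all dispatch on tuple-valued metric keys: ('name', 'classes') routes a metric to class predictions, while ('name', 'probabilities') routes it to predicted probabilities. A hedged sketch of what a caller might pass is below; the evaluate call reflects the contrib.learn-era signature, and estimator / eval_input_fn are hypothetical placeholders assumed to be defined elsewhere.

# Hypothetical caller-side metrics dict using the tuple-key convention.
custom_metrics = {
    ("my_accuracy", "classes"): metrics_lib.streaming_accuracy,
    ("my_auc", "probabilities"): metrics_lib.streaming_auc,
}
results = estimator.evaluate(input_fn=eval_input_fn, metrics=custom_metrics)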


Example 6: _get_eval_ops

    def _get_eval_ops(self, features, targets, metrics):
        """Method that builds model graph and returns evaluation ops.

        Expected to be overridden by sub-classes that require custom support.
        This implementation uses `model_fn` passed as parameter to constructor to
        build model.

        Args:
          features: `Tensor` or `dict` of `Tensor` objects.
          targets: `Tensor` or `dict` of `Tensor` objects.
          metrics: Dict of metric ops to run. If None, the default metric
            functions are used; if {}, no metrics are used. If the model has one
            output (i.e., returns a single prediction), keys are `str`, e.g.
            `'accuracy'`, just the name under which the metric will show up in
            the logs / summaries. Otherwise, keys are tuples of two `str`, e.g.
            `('accuracy', 'classes')`: the name of the metric and the name of the
            `Tensor` in the predictions to run this metric on. Metric ops should
            support streaming, e.g., returning update_op and value tensors. See
            more details in
            ../../../../metrics/python/metrics/ops/streaming_metrics.py.

        Returns:
          metrics: `dict` of `Tensor` objects.

        Raises:
          ValueError: if `metrics` doesn't match `targets`.
        """
        predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL)
        result = {"loss": metrics_lib.streaming_mean(loss)}

        weights = self._get_weight_tensor(features)
        metrics = metrics or {}
        if isinstance(targets, dict) and len(targets) == 1:
            # Unpack single target into just tensor.
            targets = targets[list(targets.keys())[0]]
        for name, metric in six.iteritems(metrics):
            if isinstance(name, tuple):
                # Multi-head metrics.
                if not isinstance(predictions, dict):
                    raise ValueError(
                        "Metrics passed provide (name, prediction), "
                        "but predictions are not dict. "
                        "Metrics: %s, Predictions: %s." % (metrics, predictions)
                    )
                # Here are two options: targets are single Tensor or a dict.
                if isinstance(targets, dict) and name[1] in targets:
                    # If targets are dict and the prediction name is in it, apply metric.
                    result[name[0]] = metrics_lib.run_metric(metric, predictions[name[1]], targets[name[1]], weights)
                else:
                    # Otherwise pass the targets to the metric.
                    result[name[0]] = metrics_lib.run_metric(metric, predictions[name[1]], targets, weights)
            else:
                # Single head metrics.
                if isinstance(predictions, dict):
                    raise ValueError(
                        "Metrics passed provide only name, no prediction, "
                        "but predictions are dict. "
                        "Metrics: %s, Targets: %s." % (metrics, targets)
                    )
                result[name] = metrics_lib.run_metric(metric, predictions, targets, weights)
        return result
Author: MrRabbit0o0 | Project: tensorflow | Lines: 60 | Source: estimator.py


Example 7: _streaming_weighted_average_loss

  def _streaming_weighted_average_loss(predictions, target, weights=None):
    loss_unweighted = loss_fn(predictions, target)
    if weights is not None:
      weights = math_ops.to_float(weights)
    _, weighted_average_loss = _loss(loss_unweighted,
                                     weights,
                                     name="eval_loss")
    return metrics_lib.streaming_mean(weighted_average_loss)
Author: caikehe | Project: tensorflow | Lines: 8 | Source: head.py


Example 8: _class_predictions_streaming_mean

def _class_predictions_streaming_mean(
    predictions, labels, weights=None, class_id=None):
  del labels
  return metrics_lib.streaming_mean(
      array_ops.where(
          math_ops.equal(
              math_ops.to_int32(class_id),
              math_ops.to_int32(predictions)),
          array_ops.ones_like(predictions),
          array_ops.zeros_like(predictions)),
      weights=weights)
Author: Hwhitetooth | Project: tensorflow | Lines: 11 | Source: head.py
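The array_ops.where construction above builds a 0/1 indicator tensor, so the streaming mean is the fraction of examples predicted as class_id. A plain-Python rendering of one batch, with illustrative values only:

# Plain-Python equivalent of the indicator for a single batch.
predictions = [0, 2, 2, 1]
class_id = 2
indicator = [1.0 if p == class_id else 0.0 for p in predictions]
batch_fraction = sum(indicator) / len(indicator)  # 0.5: half predicted class 2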


Example 9: _get_eval_ops

    def _get_eval_ops(self, features, targets, metrics=None):
        """See base class."""
        logits = self._logits(features)
        result = {"loss": metrics_lib.streaming_mean(self._loss(logits, targets, features))}

        if metrics:
            predictions = self._target_column.logits_to_predictions(logits, proba=False)
            result.update(
                self._run_metrics(predictions, targets, metrics, self._target_column.get_weight_tensor(features))
            )

        return result
Author: 285219011 | Project: liuwenfeng | Lines: 12 | Source: dnn_linear_combined.py


Example 10: _get_eval_ops

  def _get_eval_ops(self, features, targets, metrics=None):
    """See base class."""
    logits = self._logits(features)
    result = {"loss": metrics_lib.streaming_mean(self._loss(
        logits, targets,
        weight_tensor=self._get_weight_tensor(features)))}

    # Adding default metrics
    if metrics is None:
      metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}

    if self._n_classes == 2:
      predictions = math_ops.sigmoid(logits)
      result["auc"] = metrics_lib.streaming_auc(predictions, targets)

    if metrics:
      class_metrics = {}
      proba_metrics = {}
      for name, metric_op in six.iteritems(metrics):
        if isinstance(name, tuple):
          if len(name) != 2:
            raise ValueError("Ignoring metric {}. It returned a tuple with "
                             "len {}, expected 2.".format(name, len(name)))
          else:
            if name[1] not in ["classes", "probabilities"]:
              raise ValueError("Ignoring metric {}. The 2nd element of its "
                               "name should be either 'classes' or "
                               "'probabilities'.".format(name))
            elif name[1] == "classes":
              class_metrics[name[0]] = metric_op
            else:
              proba_metrics[name[0]] = metric_op
        elif isinstance(name, str):
          class_metrics[name] = metric_op
        else:
          raise ValueError("Ignoring metric {}. Its name is not in the correct "
                           "form.".format(name))

      if class_metrics:
        predictions = self._logits_to_predictions(logits, proba=False)
        result.update(self._run_metrics(predictions, targets, class_metrics,
                                        self._get_weight_tensor(features)))
      if proba_metrics:
        predictions = self._logits_to_predictions(logits, proba=True)
        result.update(self._run_metrics(predictions, targets, proba_metrics,
                                        self._get_weight_tensor(features)))

    return result
Author: Ambier | Project: tensorflow | Lines: 48 | Source: dnn_linear_combined.py


Example 11: get_eval_ops

  def get_eval_ops(self, features, logits, targets, metrics=None):
    loss = self.loss(logits, targets, features)
    result = {"loss": metrics_lib.streaming_mean(loss)}

    # Adds default metrics.
    if metrics is None:
      # TODO(b/29366811): This currently results in both an "accuracy" and an
      # "accuracy/threshold_0.500000_mean" metric for binary classification.
      metrics = {("accuracy", "classes"): metrics_lib.streaming_accuracy}

    predictions = math_ops.sigmoid(logits)
    targets_float = math_ops.to_float(targets)

    default_metrics = self._default_eval_metrics()
    for metric_name, metric_op in default_metrics.items():
      result[metric_name] = metric_op(predictions, targets_float)

    class_metrics = {}
    proba_metrics = {}
    for name, metric_op in six.iteritems(metrics):
      if isinstance(name, tuple):
        if len(name) != 2:
          raise ValueError("Ignoring metric {}. It returned a tuple with "
                           "len {}, expected 2.".format(name, len(name)))
        else:
          if name[1] not in ["classes", "probabilities"]:
            raise ValueError("Ignoring metric {}. The 2nd element of its "
                             "name should be either 'classes' or "
                             "'probabilities'.".format(name))
          elif name[1] == "classes":
            class_metrics[name[0]] = metric_op
          else:
            proba_metrics[name[0]] = metric_op
      elif isinstance(name, str):
        class_metrics[name] = metric_op
      else:
        raise ValueError("Ignoring metric {}. Its name is not in the correct "
                         "form.".format(name))
    if class_metrics:
      class_predictions = self.logits_to_predictions(logits, proba=False)
      result.update(_run_metrics(class_predictions, targets,
                                 class_metrics,
                                 self.get_weight_tensor(features)))
    if proba_metrics:
      predictions = self.logits_to_predictions(logits, proba=True)
      result.update(_run_metrics(predictions, targets, proba_metrics,
                                 self.get_weight_tensor(features)))
    return result
Author: ckchow | Project: tensorflow | Lines: 48 | Source: target_column.py


Example 12: _evaluate_model

    def _evaluate_model(self, input_fn, hooks=None, checkpoint_path=None, name=''):
        # Check that model has been trained (if nothing has been set explicitly).
        if not checkpoint_path:
            latest_path = saver.latest_checkpoint(self._model_dir)
            if not latest_path:
                error_message = "Could not find trained model at {}.".format(self._model_dir)
                raise EstimatorNotTrainedError(error_message)
            checkpoint_path = latest_path

        # Setup output directory.
        eval_dir = os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name)

        with ops.Graph().as_default() as g:
            random_seed.set_random_seed(self._config.tf_random_seed)
            global_step = training.create_global_step(g)
            features, labels = input_fn()

            estimator_spec = self._call_model_fn(features, labels, Modes.EVAL)
            if MetricKeys.LOSS in estimator_spec.eval_metric_ops:
                raise ValueError("Metric with name `{}` is not allowed, because Estimator "
                                 "already defines a default metric "
                                 "with the same name.".format(MetricKeys.LOSS))
            estimator_spec.eval_metric_ops[
                MetricKeys.LOSS] = metrics_lib.streaming_mean(estimator_spec.loss)
            update_op, eval_dict = self._extract_metric_update_ops(estimator_spec.eval_metric_ops)

            if ops.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError("Metric with name `global_step` is not allowed, because "
                                 "Estimator already defines a default metric with the same name.")
            eval_dict[ops.GraphKeys.GLOBAL_STEP] = global_step

            eval_results = evaluation._evaluate_once(
                checkpoint_path=checkpoint_path,
                master=self._config.evaluation_master,
                scaffold=estimator_spec.scaffold,
                eval_ops=update_op,
                final_ops=eval_dict,
                hooks=hooks,
                config=self._session_config)

            self._write_dict_to_summary(
                output_dir=eval_dir,
                dictionary=eval_results,
                current_global_step=eval_results[ops.GraphKeys.GLOBAL_STEP])

            return eval_results
Author: AlexMikhalev | Project: polyaxon | Lines: 46 | Source: estimator.py


Example 13: _get_eval_ops

  def _get_eval_ops(self, features, targets, metrics=None):
    """See base class."""
    logits = self._logits(features)
    result = {"loss": metrics_lib.streaming_mean(self._loss(
        logits, targets,
        weight_tensor=self._get_weight_tensor(features)))}

    # Adding default metrics
    if metrics is None:
      metrics = {"accuracy": metrics_lib.streaming_accuracy}

    if self._n_classes == 2:
      predictions = math_ops.sigmoid(logits)
      result["eval_auc"] = metrics_lib.streaming_auc(predictions, targets)

    if metrics:
      predictions = self._logits_to_predictions(logits, proba=False)
      result.update(self._run_metrics(predictions, targets, metrics,
                                      self._get_weight_tensor(features)))

    return result
Author: MISingularity | Project: tensorflow | Lines: 21 | Source: dnn_linear_combined.py


Example 14: _get_eval_ops

  def _get_eval_ops(self, features, labels, metrics):
    """Method that builds model graph and returns evaluation ops.

    Expected to be overridden by sub-classes that require custom support.
    This implementation uses `model_fn` passed as parameter to constructor to
    build model.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      labels: `Tensor` or `dict` of `Tensor` objects.
      metrics: Dict of metrics to run. If None, the default metric functions
        are used; if {}, no metrics are used. Otherwise, `metrics` should map
        friendly names for the metric to a `MetricSpec` object defining which
        model outputs to evaluate against which labels with which metric
        function. Metric ops should support streaming, e.g., returning
        update_op and value tensors. See more details in
        `../../../../metrics/python/metrics/ops/streaming_metrics.py` and
        `../metric_spec.py`.

    Returns:
      `ModelFnOps` object.

    Raises:
      ValueError: if `metrics` doesn't match `labels`.
    """
    model_fn_ops = self._call_model_fn(
        features, labels, model_fn_lib.ModeKeys.EVAL)

    # Custom metrics should overwrite defaults.
    if metrics:
      model_fn_ops.eval_metric_ops.update(_make_metrics_ops(
          metrics, features, labels, model_fn_ops.predictions))

    if metric_key.MetricKey.LOSS not in model_fn_ops.eval_metric_ops:
      model_fn_ops.eval_metric_ops[metric_key.MetricKey.LOSS] = (
          metrics_lib.streaming_mean(model_fn_ops.loss))
    return model_fn_ops
Author: HKUST-SING | Project: tensorflow | Lines: 37 | Source: estimator.py


Example 15: _target_streaming_mean

def _target_streaming_mean(unused_predictions, target, weights=None):
  return metrics_lib.streaming_mean(target, weights=weights)
Author: caikehe | Project: tensorflow | Lines: 2 | Source: head.py


Example 16: _indicator_labels_streaming_mean

def _indicator_labels_streaming_mean(
    predictions, labels, weights=None, class_id=None):
  del predictions
  if class_id is not None:
    labels = labels[:, class_id]
  return metrics_lib.streaming_mean(labels, weights=weights)
Author: Hwhitetooth | Project: tensorflow | Lines: 6 | Source: head.py


Example 17: _get_eval_ops

  def _get_eval_ops(self, features, targets, metrics=None):
    logits = self._model.build_model(
        features, self._feature_columns, is_training=False)
    loss = self._target_column.loss(logits, targets, features)
    return {'loss': metrics_lib.streaming_mean(loss)}
Author: 2020zyc | Project: tensorflow | Lines: 5 | Source: composable_model_test.py


Example 18: _predictions_streaming_mean

def _predictions_streaming_mean(
    predictions, labels, weights=None, class_id=None):
  del labels
  if class_id is not None:
    predictions = predictions[:, class_id]
  return metrics_lib.streaming_mean(predictions, weights=weights)
Author: Hwhitetooth | Project: tensorflow | Lines: 6 | Source: head.py


Example 19: _predictions_streaming_mean

def _predictions_streaming_mean(predictions, unused_labels):
  return metrics_lib.streaming_mean(predictions)
Author: ComeOnGetMe | Project: tensorflow | Lines: 2 | Source: logistic_regressor.py


Example 20: _labels_streaming_mean

def _labels_streaming_mean(unused_predictions, labels):
  return metrics_lib.streaming_mean(labels)
Author: ComeOnGetMe | Project: tensorflow | Lines: 2 | Source: logistic_regressor.py
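Examples 15 through 20 are thin adapters: each deletes or ignores the argument it does not need, so every metric conforms to a common (predictions, labels) signature and can live in one dict. A hedged sketch of how such adapters might be wired together follows; the key strings and the function name are illustrative assumptions, not taken from the sources above.

# Hypothetical wiring of the adapter functions into one metrics dict.
def make_default_eval_ops(predictions, labels):
  default_metrics = {
      "labels/mean": _labels_streaming_mean,            # Example 20
      "predictions/mean": _predictions_streaming_mean,  # Example 19
  }
  # Each entry becomes a (value, update_op) pair from streaming_mean.
  return {name: fn(predictions, labels)
          for name, fn in default_metrics.items()}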



Note: The tensorflow.contrib.metrics.streaming_mean examples in this article were compiled by 纯净天空 from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors. Consult each project's license before distributing or using the code, and do not reproduce this article without permission.

