Python feature_column.numeric_column Function Code Examples


This article collects typical usage examples of the Python function tensorflow.python.feature_column.feature_column.numeric_column. If you have been wondering what numeric_column does, how to call it, or what it looks like in real code, the curated examples below should help.



Below are 20 code examples of the numeric_column function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
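
Before diving into the examples, here is a minimal, hedged sketch of how numeric_column is typically used. It calls the public alias tf.feature_column.numeric_column (the tests below reach the same function through internal module paths such as tensorflow.python.feature_column.feature_column); the feature keys 'age' and 'height' and the normalizer are illustrative assumptions, not taken from any example below.

import tensorflow as tf

# Scalar float32 feature keyed by 'age' in the input dict / tf.Example proto.
age = tf.feature_column.numeric_column('age')

# Multi-element feature with an optional normalizer applied to the parsed values.
height = tf.feature_column.numeric_column(
    'height', shape=(2,), dtype=tf.float32,
    normalizer_fn=lambda x: (x - 1.7) / 0.2)

# The columns describe how to parse serialized tf.Example protos ...
feature_spec = tf.feature_column.make_parse_example_spec([age, height])
# ... and can also be passed directly to a canned estimator, e.g.
# tf.estimator.DNNRegressor(hidden_units=[2, 2], feature_columns=[age, height]).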

Example 1: test_weight_column_should_not_be_used_as_feature

 def test_weight_column_should_not_be_used_as_feature(self):
   with self.assertRaisesRegexp(ValueError,
                                'weight_column should not be used as feature'):
     parsing_utils.classifier_parse_example_spec(
         feature_columns=[fc.numeric_column('a')],
         label_key='b',
         weight_column=fc.numeric_column('a'))
Author: 1000sprites | Project: tensorflow | Lines: 7 | Source: parsing_utils_test.py


Example 2: test_parse_features

  def test_parse_features(self):
    """Tests the various behaviours of kmeans._parse_features_if_necessary."""

    # No-op if a tensor is passed in.
    features = constant_op.constant(self.points)
    parsed_features = kmeans_lib._parse_features_if_necessary(features, None)
    self.assertAllEqual(features, parsed_features)

    # All values from a feature dict are transformed into a tensor.
    feature_dict = {
        'x': [[point[0]] for point in self.points],
        'y': [[point[1]] for point in self.points]
    }
    parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
        feature_dict, None)
    self._parse_feature_dict_helper(features, parsed_feature_dict)

    # Only the feature_columns of a feature dict are transformed into a tensor.
    feature_dict_with_extras = {
        'foo': 'bar',
        'x': [[point[0]] for point in self.points],
        'baz': {'fizz': 'buzz'},
        'y': [[point[1]] for point in self.points]
    }
    feature_columns = [fc.numeric_column(key='x'), fc.numeric_column(key='y')]
    parsed_feature_dict = kmeans_lib._parse_features_if_necessary(
        feature_dict_with_extras, feature_columns)
    self._parse_feature_dict_helper(features, parsed_feature_dict)
Author: AndrewTwinz | Project: tensorflow | Lines: 28 | Source: kmeans_test.py


Example 3: test_multi_feature_column

  def test_multi_feature_column(self):
    # Create checkpoint: num_inputs=2, hidden_units=(2, 2), num_outputs=1.
    global_step = 100
    _create_checkpoint((
        (((1., 2.), (3., 4.),), (5., 6.)),
        (((7., 8.), (9., 8.),), (7., 6.)),
        (((5.,), (4.,),), (3.,))
    ), global_step, self._model_dir)

    # Create DNNRegressor and evaluate.
    dnn_regressor = dnn.DNNRegressor(
        hidden_units=(2, 2),
        feature_columns=(feature_column.numeric_column('age'),
                         feature_column.numeric_column('height')),
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array(((20,), (40,))), 'height': np.array(((4,), (8,)))},
        y=np.array(((213.,), (421.,))),
        batch_size=2,
        shuffle=False)
    self.assertAllClose({
        # TODO(ptucker): Point to tool for calculating a neural net output?
        # predictions = 7315, 13771
        # loss = ((213-7315)^2 + (421-13771)^2) / 2 = 228660896
        metric_keys.MetricKeys.LOSS: 228660896.,
        # average_loss = loss / 2 = 114330452
        metric_keys.MetricKeys.LOSS_MEAN: 114330452.,
        ops.GraphKeys.GLOBAL_STEP: global_step
    }, dnn_regressor.evaluate(input_fn=input_fn, steps=1))
Author: lldavuull | Project: tensorflow | Lines: 29 | Source: dnn_test.py


Example 4: _get_estimator

  def _get_estimator(self,
                     train_distribute,
                     eval_distribute,
                     remote_cluster=None):
    input_dimension = LABEL_DIMENSION
    linear_feature_columns = [
        feature_column.numeric_column("x", shape=(input_dimension,))
    ]
    dnn_feature_columns = [
        feature_column.numeric_column("x", shape=(input_dimension,))
    ]

    return dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=linear_feature_columns,
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=dnn_feature_columns,
        label_dimension=LABEL_DIMENSION,
        model_dir=self._model_dir,
        dnn_optimizer=adagrad.AdagradOptimizer(0.001),
        linear_optimizer=adagrad.AdagradOptimizer(0.001),
        config=run_config_lib.RunConfig(
            experimental_distribute=DistributeConfig(
                train_distribute=train_distribute,
                eval_distribute=eval_distribute,
                remote_cluster=remote_cluster)))
Author: abhinav-upadhyay | Project: tensorflow | Lines: 25 | Source: estimator_training_test.py


Example 5: test_complete_flow_with_mode

  def test_complete_flow_with_mode(self, distribution):
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    train_input_fn = self.dataset_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size // len(distribution.worker_devices),
        shuffle=True)
    eval_input_fn = self.dataset_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size // len(distribution.worker_devices),
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data}, batch_size=batch_size, shuffle=False)

    linear_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    dnn_feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    feature_columns = linear_feature_columns + dnn_feature_columns
    estimator = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=linear_feature_columns,
        dnn_hidden_units=(2, 2),
        dnn_feature_columns=dnn_feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir,
        # TODO(isaprykin): Work around the colocate_with error.
        dnn_optimizer=adagrad.AdagradOptimizer(0.001),
        linear_optimizer=adagrad.AdagradOptimizer(0.001),
        config=run_config.RunConfig(
            train_distribute=distribution, eval_distribute=distribution))

    num_steps = 10
    estimator.train(train_input_fn, steps=num_steps)

    scores = estimator.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    predictions = np.array([
        x[prediction_keys.PredictionKeys.PREDICTIONS]
        for x in estimator.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, label_dimension), predictions.shape)

    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = estimator.export_savedmodel(tempfile.mkdtemp(),
                                             serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
Author: sonnyhu | Project: tensorflow | Lines: 57 | Source: estimator_integration_test.py


Example 6: test_weight_column_as_numeric_column

 def test_weight_column_as_numeric_column(self):
   parsing_spec = parsing_utils.classifier_parse_example_spec(
       feature_columns=[fc.numeric_column('a')],
       label_key='b',
       weight_column=fc.numeric_column('c'))
   expected_spec = {
       'a': parsing_ops.FixedLenFeature((1,), dtype=dtypes.float32),
       'b': parsing_ops.FixedLenFeature((1,), dtype=dtypes.int64),
       'c': parsing_ops.FixedLenFeature((1,), dtype=dtypes.float32),
   }
   self.assertDictEqual(expected_spec, parsing_spec)
Author: 1000sprites | Project: tensorflow | Lines: 11 | Source: parsing_utils_test.py


Example 7: test_multi_feature_column_multi_dim_logits

  def test_multi_feature_column_multi_dim_logits(self):
    """Tests multiple feature columns and multi-dimensional logits.

    All numbers are the same as test_multi_dim_input_multi_dim_logits. The only
    difference is that the input consists of two 1D feature columns, instead of
    one 2D feature column.
    """
    base_global_step = 100
    create_checkpoint((([[.6, .5], [-.6, -.5]],
                        [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
                       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
                      base_global_step, self._model_dir)
    hidden_units = (2, 2)
    logits_dimension = 3
    inputs = ([[10.]], [[8.]])
    expected_logits = [[-0.48, 0.48, 0.39]]

    for mode in [
        model_fn.ModeKeys.TRAIN, model_fn.ModeKeys.EVAL,
        model_fn.ModeKeys.PREDICT
    ]:
      with ops.Graph().as_default():
        training_util.create_global_step()
        head = mock_head(
            self,
            hidden_units=hidden_units,
            logits_dimension=logits_dimension,
            expected_logits=expected_logits)
        estimator_spec = self._dnn_model_fn(
            features={
                'age': constant_op.constant(inputs[0]),
                'height': constant_op.constant(inputs[1])
            },
            labels=constant_op.constant([[1]]),
            mode=mode,
            head=head,
            hidden_units=hidden_units,
            feature_columns=[
                feature_column.numeric_column('age'),
                feature_column.numeric_column('height')
            ],
            optimizer=mock_optimizer(self, hidden_units))
        with monitored_session.MonitoredTrainingSession(
            checkpoint_dir=self._model_dir) as sess:
          if mode == model_fn.ModeKeys.TRAIN:
            sess.run(estimator_spec.train_op)
          elif mode == model_fn.ModeKeys.EVAL:
            sess.run(estimator_spec.loss)
          elif mode == model_fn.ModeKeys.PREDICT:
            sess.run(estimator_spec.predictions)
          else:
            self.fail('Invalid mode: {}'.format(mode))
Author: ajaybhat | Project: tensorflow | Lines: 52 | Source: dnn_testing_utils.py


Example 8: test_multi_dim

  def test_multi_dim(self):
    """Asserts evaluation metrics for multi-dimensional input and logits."""
    global_step = 100
    dnn_testing_utils.create_checkpoint(
        (([[.6, .5], [-.6, -.5]], [.1, -.1]), ([[1., .8], [-.8, -1.]],
                                               [.2, -.2]),
         ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3,
                                           .0]),), global_step, self._model_dir)
    n_classes = 3

    dnn_classifier = dnn.DNNClassifier(
        hidden_units=(2, 2),
        feature_columns=[feature_column.numeric_column('age', shape=[2])],
        n_classes=n_classes,
        model_dir=self._model_dir)
    def _input_fn():
      # batch_size = 2, one false label, and one true.
      return {'age': [[10., 8.], [10., 8.]]}, [[1], [0]]
    # Uses identical numbers as
    # DNNModelFnTest.test_multi_dim_input_multi_dim_logits.
    # See that test for calculation of logits.
    # logits = [[-0.48, 0.48, 0.39], [-0.48, 0.48, 0.39]]
    # probabilities = exp(logits)/sum(exp(logits))
    #               = [[0.16670536, 0.43538380, 0.39791084],
    #                  [0.16670536, 0.43538380, 0.39791084]]
    # loss = -log(0.43538380) - log(0.16670536)
    expected_loss = 2.62305466
    self.assertAllClose({
        metric_keys.MetricKeys.LOSS: expected_loss,
        metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
        metric_keys.MetricKeys.ACCURACY: 0.5,
        ops.GraphKeys.GLOBAL_STEP: global_step
    }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
Author: ajaybhat | Project: tensorflow | Lines: 33 | Source: dnn_test.py


Example 9: test_one_dim

  def test_one_dim(self):
    """Asserts evaluation metrics for one-dimensional input and logits."""
    global_step = 100
    dnn_testing_utils.create_checkpoint(
        (([[.6, .5]], [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
         ([[-1.], [1.]], [.3]),), global_step, self._model_dir)

    dnn_classifier = dnn.DNNClassifier(
        hidden_units=(2, 2),
        feature_columns=[feature_column.numeric_column('age')],
        model_dir=self._model_dir)
    def _input_fn():
      # batch_size = 2, one false label, and one true.
      return {'age': [[10.], [10.]]}, [[1], [0]]
    # Uses identical numbers as DNNModelTest.test_one_dim_logits.
    # See that test for calculation of logits.
    # logits = [[-2.08], [-2.08]] =>
    # logistic = 1/(1 + exp(-logits)) = [[0.11105597], [0.11105597]]
    # loss = -1. * log(0.111) -1. * log(0.889) = 2.31544200
    expected_loss = 2.31544200
    self.assertAllClose({
        metric_keys.MetricKeys.LOSS: expected_loss,
        metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2.,
        metric_keys.MetricKeys.ACCURACY: 0.5,
        metric_keys.MetricKeys.PREDICTION_MEAN: 0.11105597,
        metric_keys.MetricKeys.LABEL_MEAN: 0.5,
        metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
        # There is no good way to calculate AUC for only two data points. But
        # that is what the algorithm returns.
        metric_keys.MetricKeys.AUC: 0.5,
        metric_keys.MetricKeys.AUC_PR: 0.75,
        ops.GraphKeys.GLOBAL_STEP: global_step
    }, dnn_classifier.evaluate(input_fn=_input_fn, steps=1))
Author: ajaybhat | Project: tensorflow | Lines: 33 | Source: dnn_test.py


Example 10: test_from_scratch_validate_summary

  def test_from_scratch_validate_summary(self):
    hidden_units = (2, 2)
    mock_optimizer = _mock_optimizer(self, hidden_units=hidden_units)
    dnn_classifier = dnn.DNNClassifier(
        hidden_units=hidden_units,
        feature_columns=(feature_column.numeric_column('age'),),
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)

    # Train for a few steps, then validate optimizer, summaries, and
    # checkpoint.
    num_steps = 5
    summary_hook = _SummaryHook()
    dnn_classifier.train(
        input_fn=lambda: ({'age': [[10.]]}, [[1]]), steps=num_steps,
        hooks=(summary_hook,))
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    _assert_checkpoint(
        self, num_steps, input_units=1, hidden_units=hidden_units,
        output_units=1, model_dir=self._model_dir)
    summaries = summary_hook.summaries()
    self.assertEqual(num_steps, len(summaries))
    for summary in summaries:
      summary_keys = [v.tag for v in summary.value]
      self.assertIn(metric_keys.MetricKeys.LOSS, summary_keys)
      self.assertIn(metric_keys.MetricKeys.LOSS_MEAN, summary_keys)
Author: cameronphchen | Project: tensorflow | Lines: 27 | Source: dnn_test.py


Example 11: test_multi_dim_weights

  def test_multi_dim_weights(self):
    """Asserts evaluation metrics for multi-dimensional input and logits."""
    # Same checkpoint as test_multi_dim.
    global_step = 100
    create_checkpoint((([[.6, .5], [-.6, -.5]],
                        [.1, -.1]), ([[1., .8], [-.8, -1.]], [.2, -.2]),
                       ([[-1., 1., .5], [-1., 1., .5]], [.3, -.3, .0]),),
                      global_step, self._model_dir)
    label_dimension = 3

    dnn_regressor = self._dnn_regressor_fn(
        hidden_units=(2, 2),
        feature_columns=[feature_column.numeric_column('age', shape=[2])],
        label_dimension=label_dimension,
        weight_column='w',
        model_dir=self._model_dir)

    def _input_fn():
      return {'age': [[10., 8.]], 'w': [10.]}, [[1., -1., 0.5]]

    # Uses identical numbers as test_multi_dim.
    # See that test for calculation of logits.
    # loss = 4.3929*10
    expected_loss = 43.929
    metrics = dnn_regressor.evaluate(input_fn=_input_fn, steps=1)
    self.assertAlmostEqual(
        expected_loss, metrics[metric_keys.MetricKeys.LOSS], places=3)
Author: AutumnQYN | Project: tensorflow | Lines: 27 | Source: dnn_testing_utils.py


Example 12: test_ar_lstm_regressor

 def test_ar_lstm_regressor(self):
   dtype = dtypes.float32
   model_dir = tempfile.mkdtemp(dir=self.get_temp_dir())
   exogenous_feature_columns = (
       feature_column.numeric_column("exogenous"),
   )
   estimator = estimators.LSTMAutoRegressor(
       periodicities=10,
       input_window_size=10,
       output_window_size=6,
       model_dir=model_dir,
       num_features=1,
       extra_feature_columns=exogenous_feature_columns,
       num_units=10,
       config=_SeedRunConfig())
   times = numpy.arange(20, dtype=numpy.int64)
   values = numpy.arange(20, dtype=dtype.as_numpy_dtype)
   exogenous = numpy.arange(20, dtype=dtype.as_numpy_dtype)
   features = {
       feature_keys.TrainEvalFeatures.TIMES: times,
       feature_keys.TrainEvalFeatures.VALUES: values,
       "exogenous": exogenous
   }
   train_input_fn = input_pipeline.RandomWindowInputFn(
       input_pipeline.NumpyReader(features), shuffle_seed=2, num_threads=1,
       batch_size=16, window_size=16)
   eval_input_fn = input_pipeline.RandomWindowInputFn(
       input_pipeline.NumpyReader(features), shuffle_seed=3, num_threads=1,
       batch_size=16, window_size=16)
   estimator.train(input_fn=train_input_fn, steps=1)
   evaluation = estimator.evaluate(
       input_fn=eval_input_fn, steps=1)
   self.assertAllEqual(evaluation["loss"], evaluation["average_loss"])
   self.assertAllEqual([], evaluation["loss"].shape)
Author: ThunderQi | Project: tensorflow | Lines: 34 | Source: estimators_test.py


Example 13: test_dnn_and_linear_logits_are_added

  def test_dnn_and_linear_logits_are_added(self):
    with ops.Graph().as_default():
      variables_lib.Variable([[1.0]], name='linear/linear_model/x/weights')
      variables_lib.Variable([2.0], name='linear/linear_model/bias_weights')
      variables_lib.Variable([[3.0]], name='dnn/hiddenlayer_0/kernel')
      variables_lib.Variable([4.0], name='dnn/hiddenlayer_0/bias')
      variables_lib.Variable([[5.0]], name='dnn/logits/kernel')
      variables_lib.Variable([6.0], name='dnn/logits/bias')
      variables_lib.Variable(1, name='global_step', dtype=dtypes.int64)
      linear_testing_utils.save_variables_to_ckpt(self._model_dir)

    x_column = feature_column.numeric_column('x')
    est = dnn_linear_combined.DNNLinearCombinedRegressor(
        linear_feature_columns=[x_column],
        dnn_hidden_units=[1],
        dnn_feature_columns=[x_column],
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[10.]])}, batch_size=1, shuffle=False)
    # linear logits = 10*1 + 2 = 12
    # dnn logits = (10*3 + 4)*5 + 6 = 176
    # logits = dnn + linear = 176 + 12 = 188
    self.assertAllClose(
        {
            prediction_keys.PredictionKeys.PREDICTIONS: [188.],
        },
        next(est.predict(input_fn=input_fn)))
Author: 1000sprites | Project: tensorflow | Lines: 27 | Source: dnn_linear_combined_test.py


Example 14: _test_parsed_sequence_example

  def _test_parsed_sequence_example(
      self, col_name, col_fn, col_arg, shape, values):
    """Helper function to check that each FeatureColumn parses correctly.

    Args:
      col_name: string, name to give to the feature column. Should match
        the name that the column will parse out of the features dict.
      col_fn: function used to create the feature column. For example,
        sequence_numeric_column.
      col_arg: second arg that the target feature column is expecting.
      shape: the expected dense_shape of the feature after parsing into
        a SparseTensor.
      values: the expected values at index [0, 2, 6] of the feature
        after parsing into a SparseTensor.
    """
    example = _make_sequence_example()
    columns = [
        fc.categorical_column_with_identity('int_ctx', num_buckets=100),
        fc.numeric_column('float_ctx'),
        col_fn(col_name, col_arg)
    ]
    context, seq_features = parsing_ops.parse_single_sequence_example(
        example.SerializeToString(),
        context_features=fc.make_parse_example_spec(columns[:2]),
        sequence_features=fc.make_parse_example_spec(columns[2:]))

    with self.cached_session() as sess:
      ctx_result, seq_result = sess.run([context, seq_features])
      self.assertEqual(list(seq_result[col_name].dense_shape), shape)
      self.assertEqual(
          list(seq_result[col_name].values[[0, 2, 6]]), values)
      self.assertEqual(list(ctx_result['int_ctx'].dense_shape), [1])
      self.assertEqual(ctx_result['int_ctx'].values[0], 5)
      self.assertEqual(list(ctx_result['float_ctx'].shape), [1])
      self.assertAlmostEqual(ctx_result['float_ctx'][0], 123.6, places=1)
Author: ThunderQi | Project: tensorflow | Lines: 35 | Source: sequence_feature_column_integration_test.py


Example 15: _test_complete_flow

  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    feature_columns = [
        feature_column_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = _baseline_estimator_fn(
        label_dimension=label_dimension,
        model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)

    # EXPORT
    feature_spec = feature_column_lib.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
Author: BhaskarNallani | Project: tensorflow | Lines: 30 | Source: baseline_test.py


Example 16: _testCheckpointCompatibleWithNonAnnotatedEstimator

  def _testCheckpointCompatibleWithNonAnnotatedEstimator(
      self, train_input_fn, predict_input_fn, non_annotated_class,
      annotated_class, prediction_key, estimator_args):
    input_dimension = 2
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))
    ]
    estimator = non_annotated_class(
        model_dir=self._model_dir,
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        **estimator_args)

    estimator.train(train_input_fn, steps=10)

    predictions = np.array(
        [x[prediction_key] for x in estimator.predict(predict_input_fn)])

    annotated_estimator = annotated_class(
        model_dir=self._model_dir,
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        warm_start_from=self._model_dir,
        **estimator_args)

    annotated_predictions = np.array([
        x[prediction_key] for x in annotated_estimator.predict(predict_input_fn)
    ])

    self.assertAllEqual(predictions.shape, annotated_predictions.shape)
    for i, (a, b) in enumerate(
        zip(predictions.flatten(), annotated_predictions.flatten())):
      self.assertAlmostEqual(a, b, msg='index=%d' % i)
Author: AnishShah | Project: tensorflow | Lines: 33 | Source: dnn_with_layer_annotations_test.py


Example 17: testWarmStart_BucketizedColumn

  def testWarmStart_BucketizedColumn(self):
    # Create feature column.
    real = fc.numeric_column("real")
    real_bucket = fc.bucketized_column(real, boundaries=[0., 1., 2., 3.])

    # Save checkpoint from which to warm-start.
    _, prev_bucket_val = self._create_prev_run_var(
        "linear_model/real_bucketized/weights",
        shape=[5, 1],
        initializer=norms())

    partitioner = lambda shape, dtype: [1] * len(shape)
    # New graph, new session WITHOUT warm-starting.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([real_bucket], partitioner)
        sess.run(variables.global_variables_initializer())
        # Without warm-starting, the weights should be initialized using default
        # initializer (which is init_ops.zeros_initializer).
        self._assert_cols_to_vars(cols_to_vars,
                                  {real_bucket: [np.zeros([5, 1])]}, sess)

    # New graph, new session with warm-starting.
    with ops.Graph().as_default() as g:
      with self.test_session(graph=g) as sess:
        cols_to_vars = self._create_linear_model([real_bucket], partitioner)
        ws_util._warm_start(
            ws_util.WarmStartSettings(
                self.get_temp_dir(), vars_to_warm_start=".*real_bucketized.*"))
        sess.run(variables.global_variables_initializer())
        # Verify weights were correctly warm-started.
        self._assert_cols_to_vars(cols_to_vars,
                                  {real_bucket: [prev_bucket_val]}, sess)
Author: ChengYuXiang | Project: tensorflow | Lines: 33 | Source: warm_starting_util_test.py


Example 18: test_train_op_calls_both_dnn_and_linear

 def test_train_op_calls_both_dnn_and_linear(self):
   opt = gradient_descent.GradientDescentOptimizer(1.)
   x_column = feature_column.numeric_column('x')
   input_fn = numpy_io.numpy_input_fn(
       x={'x': np.array([[0.], [1.]])},
       y=np.array([[0.], [1.]]),
       batch_size=1,
       shuffle=False)
   est = dnn_linear_combined.DNNLinearCombinedClassifier(
       linear_feature_columns=[x_column],
       # verifies linear_optimizer is used only for linear part.
       linear_optimizer=self._mock_optimizer(opt, 'linear'),
       dnn_hidden_units=(2, 2),
       dnn_feature_columns=[x_column],
       # verifies dnn_optimizer is used only for the dnn part.
       dnn_optimizer=self._mock_optimizer(opt, 'dnn'),
       model_dir=self._model_dir)
   est.train(input_fn, steps=1)
   # verifies train_op fires linear minimize op
   self.assertEqual(100.,
                    checkpoint_utils.load_variable(
                        self._model_dir, 'linear_called'))
   # verifies train_op fires dnn minimize op
   self.assertEqual(100.,
                    checkpoint_utils.load_variable(
                        self._model_dir, 'dnn_called'))
Author: 1000sprites | Project: tensorflow | Lines: 26 | Source: dnn_linear_combined_test.py


Example 19: _test_logits

 def _test_logits(
     self, mode, hidden_units, logits_dimension, inputs, expected_logits):
   """Tests that the expected logits are passed to mock head."""
   with ops.Graph().as_default():
     training_util.create_global_step()
     head = _mock_head(
         self,
         hidden_units=hidden_units,
         logits_dimension=logits_dimension,
         expected_logits=expected_logits)
     estimator_spec = dnn._dnn_model_fn(
         features={'age': constant_op.constant(inputs)},
         labels=constant_op.constant([[1]]),
         mode=mode,
         head=head,
         hidden_units=hidden_units,
         feature_columns=[
             feature_column.numeric_column('age',
                                           shape=np.array(inputs).shape[1:])],
         optimizer=_mock_optimizer(self, hidden_units))
     with monitored_session.MonitoredTrainingSession(
         checkpoint_dir=self._model_dir) as sess:
       if mode == model_fn.ModeKeys.TRAIN:
         sess.run(estimator_spec.train_op)
       elif mode == model_fn.ModeKeys.EVAL:
         sess.run(estimator_spec.loss)
       elif mode == model_fn.ModeKeys.PREDICT:
         sess.run(estimator_spec.predictions)
       else:
         self.fail('Invalid mode: {}'.format(mode))
Author: cameronphchen | Project: tensorflow | Lines: 30 | Source: dnn_test.py


Example 20: _test_complete_flow

  def _test_complete_flow(
      self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
      n_classes, batch_size):
    feature_columns = [
        feature_column.numeric_column('x', shape=(input_dimension,))]
    est = dnn.DNNClassifier(
        hidden_units=(2, 2),
        feature_columns=feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN
    num_steps = 10
    est.train(train_input_fn, steps=num_steps)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
    self.assertIn('loss', six.iterkeys(scores))

    # PREDICT
    predicted_proba = np.array([
        x[prediction_keys.PredictionKeys.PROBABILITIES]
        for x in est.predict(predict_input_fn)
    ])
    self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)

    # EXPORT
    feature_spec = feature_column.make_parse_example_spec(feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_savedmodel(tempfile.mkdtemp(),
                                       serving_input_receiver_fn)
    self.assertTrue(gfile.Exists(export_dir))
Author: AndrewTwinz | Project: tensorflow | Lines: 34 | Source: dnn_test.py



Note: The tensorflow.python.feature_column.feature_column.numeric_column examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other source-code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Refer to each project's license for distribution and use; do not reproduce without permission.

