Python sdca_ops.SdcaModel Class Code Examples


This article collects typical usage examples of the Python class tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel. If you have been wondering what exactly the SdcaModel class does, how to use it, or what working examples look like, the curated class examples below may help.



The following presents 20 code examples of the SdcaModel class, ordered by popularity by default.
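Most of the examples below follow the same pattern: build an examples dict and a variables dict describing the features, pick an options dict (the symmetric L1/L2 regularization strengths and a loss_type), construct the SdcaModel, run minimize() a number of times, and then read off predictions, regularized_loss/unregularized_loss, or approximate_duality_gap(). The sketch below illustrates that flow with dense features only. It is an illustration under assumptions, not code taken from any example: the dict keys follow the layout the dense-feature tests below manipulate (example_ids, dense_features, dense_features_weights, and so on), the constructor without a container argument matches the newer snippets, and update_weights() exists only in versions that expose it.

# A minimal usage sketch (an illustration, not taken from the examples below).
# Assumptions: the dict layout mirrors what the dense-feature tests manipulate
# (example_ids / sparse_features / dense_features / example_weights /
# example_labels and sparse_features_weights / dense_features_weights); the
# no-container constructor matches the newer snippets; update_weights() is
# only available in versions that provide it.
import tensorflow as tf
from tensorflow.contrib.linear_optimizer.python.ops.sdca_ops import SdcaModel

# Two examples with one dense feature group of two columns.
examples = dict(
    example_ids=['example_0', 'example_1'],
    sparse_features=[],
    dense_features=[tf.constant([[-2.0, 0.0], [0.0, 2.0]])],
    example_weights=tf.constant([1.0, 1.0]),
    example_labels=tf.constant([-10.0, 14.0]))
variables = dict(
    sparse_features_weights=[],
    dense_features_weights=[tf.Variable(tf.zeros([2], dtype=tf.float32))])
options = dict(symmetric_l2_regularization=1.0,
               symmetric_l1_regularization=0.0,
               loss_type='squared_loss')

with tf.Session():
  lr = SdcaModel(examples, variables, options)
  tf.global_variables_initializer().run()
  predictions = lr.predictions(examples)
  train_op = lr.minimize()
  for _ in range(20):                 # a handful of SDCA passes, as in the tests
    train_op.run()
  lr.update_weights(train_op).run()   # copy learned weights into the variables
  print(predictions.eval())
  print(lr.regularized_loss(examples).eval())

The data here mirrors Example 7 below, so the printed predictions should land near 4/5 of the labels, as that test's comment works out.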

Example 1: testLinearFeatureValues

  def testLinearFeatureValues(self):
    # Setup test data
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0, -2.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0, 2.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)

      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=0.5,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss',
                     prior=0.0)
      tf.initialize_all_variables().run()
      lr = SdcaModel(CONTAINER, examples, variables, options)
      prediction = lr.predictions(examples)

      lr.minimize().run()

      # Predictions should be 8/9 of label due to minimizing regularized loss:
      #   (label - 2 * 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose([-10.0 * 8 / 9, 14.0 * 8 / 9],
                          prediction.eval(),
                          rtol=0.07)
Author: 4colors, Project: tensorflow, Lines: 30, Source: sdca_ops_test.py
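The 8/9 factor claimed in the comment above follows from the per-example loss exactly as written there (feature value 2 on each of the two active features, so the prediction is 4w for a shared weight w, with L2 = 0.5):

\[
  f(w) = \tfrac{1}{2}(y - 4w)^2 + 0.5 \cdot 2\, w^2,
  \qquad
  f'(w) = -4(y - 4w) + 2w = 18w - 4y = 0
  \;\Rightarrow\; w^\ast = \tfrac{2y}{9},\quad \hat{y} = 4w^\ast = \tfrac{8}{9}\,y .
\]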


Example 2: testDenseFeaturesWeightedExamples

    def testDenseFeaturesWeightedExamples(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[[1.0], [1.0]], [[0.5], [-0.5]]], weights=[3.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)
            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Point (1.0, 0.5) has higher weight than (1.0, -0.5) so the model will
            # try to increase the margin from (1.0, 0.5). Due to regularization,
            # (1.0, -0.5) will be within the margin. For these points and example
            # weights, the optimal weights are w_1~=0.4 and w_2~=1.2 which give an L2
            # loss of 0.5 * 0.25 * 0.25 * 1.6 = 0.2. The binary predictions will be
            # correct, but the boundary will be much closer to the 2nd point than the
            # first one.
            self.assertAllClose([1.0, -0.2], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.2, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.4, regularized_loss.eval(), atol=0.02)
Author: apollos, Project: tensorflow, Lines: 27, Source: sdca_ops_test.py


Example 3: testSparseRandom

  def testSparseRandom(self):
    dim = 20
    num_examples = 1000
    # Number of non-zero features per example.
    non_zeros = 10
    # Setup test data.
    with self._single_threaded_test_session():
      examples, variables = make_random_examples_and_variables_dicts(
          num_examples, dim, non_zeros)
      options = dict(
          symmetric_l2_regularization=.1,
          symmetric_l1_regularization=0,
          num_table_shards=1,
          adaptive=False,
          loss_type='logistic_loss')

      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      train_op = lr.minimize()
      for _ in range(4):
        train_op.run()
      lr.update_weights(train_op).run()
      # Duality gap is 1.4e-5.
      # It would be 0.01 without shuffling and 0.02 with adaptive sampling.
      self.assertNear(0.0, lr.approximate_duality_gap().eval(), err=1e-3)
Author: AnishShah, Project: tensorflow, Lines: 25, Source: sdca_ops_test.py
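For context on the duality-gap assertion above: SDCA maintains both the primal regularized objective and a dual objective, and their gap upper-bounds how far the current weights are from the optimum, which is why a near-zero gap is used as the convergence check in these tests. In the standard SDCA formulation (the textbook definition, not necessarily the exact expression approximate_duality_gap computes; as another test here notes, the approximate gap can even dip slightly negative near convergence):

\[
  P(w) = \frac{1}{n}\sum_{i=1}^{n} \ell_i(w^\top x_i) + \frac{\lambda}{2}\lVert w\rVert^2,
  \qquad
  D(\alpha) = \frac{1}{n}\sum_{i=1}^{n} -\ell_i^\ast(-\alpha_i)
            - \frac{\lambda}{2}\Bigl\lVert \tfrac{1}{\lambda n}\textstyle\sum_i \alpha_i x_i \Bigr\rVert^2,
\]
\[
  \mathrm{gap}(\alpha) = P\bigl(w(\alpha)\bigr) - D(\alpha) \;\ge\; P\bigl(w(\alpha)\bigr) - P(w^\ast) \;\ge\; 0,
  \qquad w(\alpha) = \tfrac{1}{\lambda n}\textstyle\sum_i \alpha_i x_i ,
\]

where \(\ell_i^\ast\) denotes the convex conjugate of the per-example loss \(\ell_i\).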


Example 4: testSimple

    def testSimple(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)
            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be 2/3 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
            self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0], predictions.eval(), rtol=0.005)
            # Approximate gap should be very close to 0.0. (In fact, because the gap
            # is only approximate, it is likely that upon convergence the duality gap
            # can have a tiny negative value).
            self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), atol=1e-2)
Author: apollos, Project: tensorflow, Lines: 26, Source: sdca_ops_test.py


Example 5: testOutOfRangeSparseFeatures

  def testOutOfRangeSparseFeatures(self):
    # Setup test data
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(0, 0)
      options = dict(
          symmetric_l2_regularization=1,
          symmetric_l1_regularization=0,
          loss_type='logistic_loss')

      lr = SdcaModel(examples, variables, options)
      variables_lib.global_variables_initializer().run()
      train_op = lr.minimize()
      with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
                                   'indices.*'):
        train_op.run()
Author: Immexxx, Project: tensorflow, Lines: 27, Source: sdca_ops_test.py


Example 6: testNoWeightedExamples

 def testNoWeightedExamples(self):
   # Setup test data with 1 positive, and 1 negative example.
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [1]}, 1),
   ]
   # Zeroed out example weights.
   example_weights = [0.0, 0.0]
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.5,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss')
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
     with self.assertRaisesOpError(
         'No weighted examples in 2 training examples'):
       lr.minimize().run()
     self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
Author: 4colors, Project: tensorflow, Lines: 25, Source: sdca_ops_test.py


Example 7: testDenseFeatures

  def testDenseFeatures(self):
    with self._single_threaded_test_session():
      examples = make_dense_examples_dict(
          dense_feature_values=[[-2.0, 0.0], [0.0, 2.0]],
          weights=[1.0, 1.0],
          labels=[-10.0, 14.0])
      variables = make_dense_variable_dict(2, 2)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss')
      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      predictions = lr.predictions(examples)

      for _ in xrange(20):
        lr.minimize().run()

      # Predictions should be 4/5 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * weight^2
      self.assertAllClose([-10.0 * 4 / 5, 14.0 * 4 / 5],
                          predictions.eval(),
                          rtol=0.01)

      loss = lr.regularized_loss(examples)
      self.assertAllClose(148.0 / 10.0, loss.eval(), atol=0.01)
Author: CPostelnicu, Project: tensorflow, Lines: 25, Source: sdca_ops_test.py


Example 8: testDenseFeaturesWithDefaultWeights

  def testDenseFeaturesWithDefaultWeights(self):
    with self._single_threaded_test_session():
      examples, variables = make_dense_examples_and_variables_dicts(
          dense_features_values=[[[1.0], [0.0]], [0.0, 1.0]],
          weights=[1.0, 1.0],
          labels=[10.0, -5.0])
      options = dict(symmetric_l2_regularization=1.0,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss')
      lr = SdcaModel(examples, variables, options)
      tf.initialize_all_variables().run()
      predictions = lr.predictions(examples)

      train_op = lr.minimize()
      for _ in range(_MAX_ITERATIONS):
        train_op.run()

      # The loss function for these particular features is given by:
      # 1/2(label_1-w_1)^2 + 1/2(label_2-w_2)^2 + \lambda/2 (w_1^2 + w_2^2). So,
      # differentiating wrt to w_1, w_2 yields the following optimal values:
      # w_1* = label_1/(\lambda + 1)= 10/2, w_2* =label_2/(\lambda + 1)= -5/2.
      # In this case the (unnormalized regularized) loss will be:
      # 1/2(10-5)^2 + 1/2(5-5/2)^2 + 1/2(5^2 + (5/2)^2) = 125.0/4. The actual
      # loss should be further normalized by the sum of example weights.
      self.assertAllClose([5.0, -2.5],
                          predictions.eval(),
                          rtol=0.01)
      loss = lr.regularized_loss(examples)
      self.assertAllClose(125.0 / 8.0, loss.eval(), atol=0.01)
Author: jyegerlehner, Project: tensorflow, Lines: 29, Source: sdca_ops_test.py


Example 9: testL2Regularization

    def testL2Regularization(self):
        # Setup test data
        example_protos = [
            # 2 identical examples
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            # 2 more identical examples
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0, 1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=16, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be 1/5 of label due to minimizing regularized loss:
            #   (label - 2 * weight)^2 + L2 * 16 * weight^2
            optimal1 = -10.0 / 5.0
            optimal2 = 14.0 / 5.0
            self.assertAllClose([optimal1, optimal1, optimal2, optimal2], predictions.eval(), rtol=0.01)
Author: apollos, Project: tensorflow, Lines: 29, Source: sdca_ops_test.py


Example 10: testSimple

  def testSimple(self):
    # Setup test data
    example_protos = [
        make_example_proto(
            {'age': [0],
             'gender': [0]}, -10.0),
        make_example_proto(
            {'age': [1],
             'gender': [1]}, 14.0),
    ]
    example_weights = [1.0, 1.0]
    with self._single_threaded_test_session():
      examples = make_example_dict(example_protos, example_weights)
      variables = make_variable_dict(1, 1)
      options = dict(symmetric_l2_regularization=1,
                     symmetric_l1_regularization=0,
                     loss_type='squared_loss')

      lr = SdcaModel(CONTAINER, examples, variables, options)
      tf.initialize_all_variables().run()
      predictions = lr.predictions(examples)

      for _ in xrange(20):
        lr.minimize().run()

      # Predictions should be 2/3 of label due to minimizing regularized loss:
      #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2
      self.assertAllClose([-20.0 / 3.0, 28.0 / 3.0],
                          predictions.eval(),
                          rtol=0.005)
      self.assertAllClose(0.01,
                          lr.approximate_duality_gap().eval(),
                          rtol=1e-2,
                          atol=1e-2)
Author: CPostelnicu, Project: tensorflow, Lines: 34, Source: sdca_ops_test.py


Example 11: testFeatureValues

    def testFeatureValues(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0, -2.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0, 2.0),
        ]
        example_weights = [5.0, 3.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)

            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1, symmetric_l1_regularization=0, loss_type="squared_loss")

            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # There are 4 (sparse) variable weights to be learned. 2 for age and 2 for
            # gender. Let w_1, w_2 be age weights, w_3, w_4 be gender weights, y_1,
            # y_2 be the labels for examples 1 and 2 respectively and s_1, s_2 the
            # corresponding *example* weights. With the given feature values, the loss
            # function is given by:
            # s_1/2(y_1 + 2w_1 + 2w_3)^2 + s_2/2(y_2 - 2w_2 - 2w_4)^2
            # + \lambda/2 (w_1^2 + w_2^2 + w_3^2 + w_4^2). Solving for the optimal, it
            # can be verified that:
            # w_1* = w_3* = -2.0 s_1 y_1/(\lambda + 8 s_1) and
            # w_2* = w_4* = 2 \cdot s_2 y_2/(\lambda + 8 s_2). Equivalently, due to
            # regularization and example weights, the predictions are within:
            # 8 \cdot s_i /(\lambda + 8 \cdot s_i) of the labels.
            self.assertAllClose([-10 * 40.0 / 41.0, 14.0 * 24 / 25.0], predictions.eval(), atol=0.01)
Author: apollos, Project: tensorflow, Lines: 34, Source: sdca_ops_test.py


Example 12: testL1Regularization

    def testL1Regularization(self):
        # Setup test data
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, -10.0),
            make_example_proto({"age": [1], "gender": [1]}, 14.0),
        ]
        example_weights = [1.0, 1.0]
        with self._single_threaded_test_session():
            examples = make_example_dict(example_protos, example_weights)
            variables = make_variable_dict(1, 1)
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=4.0, loss_type="squared_loss")
            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            prediction = lr.predictions(examples)
            loss = lr.regularized_loss(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # Predictions should be -4.0, 48/5 due to minimizing regularized loss:
            #   (label - 2 * weight)^2 / 2 + L2 * 2 * weight^2 + L1 * 4 * weight
            self.assertAllClose([-4.0, 20.0 / 3.0], prediction.eval(), rtol=0.08)

            # Loss should be the sum of the regularized loss value from above per
            # example after plugging in the optimal weights.
            self.assertAllClose(308.0 / 6.0, loss.eval(), atol=0.01)
Author: apollos, Project: tensorflow, Lines: 27, Source: sdca_ops_test.py


Example 13: testDenseFeaturesWithArbitraryWeights

    def testDenseFeaturesWithArbitraryWeights(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]], weights=[20.0, 10.0], labels=[10.0, -5.0]
            )
            options = dict(symmetric_l2_regularization=5.0, symmetric_l1_regularization=0, loss_type="squared_loss")
            lr = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = lr.predictions(examples)

            train_op = lr.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # The loss function for these particular features is given by:
            # 1/2 s_1 (label_1-w_1)^2 + 1/2 s_2(label_2-w_2)^2 +
            # \lambda/2 (w_1^2 + w_2^2) where s_1, s_2 are the *example* weights. It
            # turns out that the optimal (variable) weights are given by:
            # w_1* = label_1 \cdot s_1/(\lambda + s_1)= 8.0 and
            # w_2* =label_2 \cdot s_2/(\lambda + s_2)= -10/3.
            # In this case the (unnormalized regularized) loss will be:
            # s_1/2(8-10)^2 + s_2/2(5-10/3)^2 + 5.0/2(8^2 + (10/3)^2) = 2175.0/9. The
            # actual loss should be further normalized by the sum of example weights.
            self.assertAllClose([8.0, -10.0 / 3], predictions.eval(), rtol=0.01)
            loss = lr.regularized_loss(examples)
            self.assertAllClose(2175.0 / 270.0, loss.eval(), atol=0.01)
Author: apollos, Project: tensorflow, Lines: 26, Source: sdca_ops_test.py


Example 14: testDenseFeaturesPerfectlySeparable

    def testDenseFeaturesPerfectlySeparable(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[1.0, 1.0], [1.0, -1.0]], weights=[1.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)

            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            self.assertAllClose([1.0, -1.0], predictions.eval(), atol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())

            # (1.0, 1.0) and (1.0, -1.0) are perfectly separable by x-axis (that is,
            # the SVM's functional margin >=1), so the unregularized loss is ~0.0.
            # There is only loss due to l2-regularization. For these datapoints, it
            # turns out that w_1~=0.0 and w_2~=1.0 which means that l2 loss is ~0.25.
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.0, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.25, regularized_loss.eval(), atol=0.02)
Author: apollos, Project: tensorflow, Lines: 26, Source: sdca_ops_test.py
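To see where the 0.25 above comes from: with the near-optimal weights w ≈ (0, 1) mentioned in the comment, λ = 1, essentially zero hinge loss on both examples, and regularized_loss apparently normalized by the sum of example weights s_1 + s_2 = 2 (the same normalization the squared-loss examples' comments describe):

\[
  \text{regularized loss} \approx \frac{0 + \tfrac{\lambda}{2}\lVert w\rVert^2}{s_1 + s_2}
  = \frac{\tfrac{1}{2}\,(0^2 + 1^2)}{1 + 1} = 0.25 .
\]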


Example 15: testDenseFeaturesSeparableWithinMargins

    def testDenseFeaturesSeparableWithinMargins(self):
        with self._single_threaded_test_session():
            examples, variables = make_dense_examples_and_variables_dicts(
                dense_features_values=[[[1.0, 0.5], [1.0, -0.5]]], weights=[1.0, 1.0], labels=[1.0, 0.0]
            )
            options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="hinge_loss")
            model = SdcaModel(examples, variables, options)
            tf.initialize_all_variables().run()
            predictions = model.predictions(examples)
            binary_predictions = get_binary_predictions_for_hinge(predictions)

            train_op = model.minimize()
            for _ in range(_MAX_ITERATIONS):
                train_op.run()

            # (1.0, 0.5) and (1.0, -0.5) are separable by x-axis but the datapoints
            # are within the margins so there is unregularized loss (1/2 per example).
            # For these datapoints, optimal weights are w_1~=0.0 and w_2~=1.0 which
            # gives an L2 loss of ~0.25.
            self.assertAllClose([0.5, -0.5], predictions.eval(), rtol=0.05)
            self.assertAllEqual([1, 0], binary_predictions.eval())
            unregularized_loss = model.unregularized_loss(examples)
            regularized_loss = model.regularized_loss(examples)
            self.assertAllClose(0.5, unregularized_loss.eval(), atol=0.02)
            self.assertAllClose(0.75, regularized_loss.eval(), atol=0.02)
Author: apollos, Project: tensorflow, Lines: 25, Source: sdca_ops_test.py


Example 16: testFractionalExampleLabel

  def testFractionalExampleLabel(self):
    # Setup test data with 1 positive, and 1 mostly-negative example.
    example_protos = [
        make_example_proto({
            'age': [0],
            'gender': [0]
        }, 0.1),
        make_example_proto({
            'age': [1],
            'gender': [1]
        }, 1),
    ]
    example_weights = [1.0, 1.0]
    for num_shards in _SHARD_NUMBERS:
      with self._single_threaded_test_session():
        examples = make_example_dict(example_protos, example_weights)
        variables = make_variable_dict(1, 1)
        options = dict(
            symmetric_l2_regularization=1,
            symmetric_l1_regularization=0,
            num_table_shards=num_shards,
            loss_type='logistic_loss')

        lr = SdcaModel(examples, variables, options)
        variables_lib.global_variables_initializer().run()
        with self.assertRaisesOpError(
            'Only labels of 0.0 or 1.0 are supported right now.'):
          lr.minimize().run()
Author: Immexxx, Project: tensorflow, Lines: 28, Source: sdca_ops_test.py


Example 17: testDuplicateExampleIds

 def testDuplicateExampleIds(self):
   # Setup test data with 1 positive, and 1 negative example.
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [1]}, 1),
   ]
   example_weights = [1.0, 1.0]
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     examples['example_ids'] = ['duplicate_id'
                                for x in examples['example_ids']]
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.5,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss')
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
     with self.assertRaisesOpError('Detected 1 duplicates in example_ids'):
       lr.minimize().run()
     self.assertAllClose([0.5, 0.5], lr.predictions(examples).eval())
Author: AlexCre, Project: tensorflow, Lines: 25, Source: sdca_ops_test.py


Example 18: testImbalancedWithExampleWeights

    def testImbalancedWithExampleWeights(self):
        # Setup test data with 1 positive, and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [1]}, 1),
        ]
        example_weights = [3.0, 1.0]
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.global_variables_initializer().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                lr.update_weights(train_op).run()

                self.assertAllClose(0.284860, unregularized_loss.eval(), atol=0.08)
                self.assertAllClose(0.408044, loss.eval(), atol=0.012)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 1], predicted_labels.eval())
                self.assertAllClose(0.0, lr.approximate_duality_gap().eval(), rtol=2e-2, atol=1e-2)
Author: brchiu, Project: tensorflow, Lines: 33, Source: sdca_ops_test.py


Example 19: testInstancesOfOneClassOnly

    def testInstancesOfOneClassOnly(self):
        # Setup test data with 1 positive (ignored), and 1 negative example.
        example_protos = [
            make_example_proto({"age": [0], "gender": [0]}, 0),
            make_example_proto({"age": [1], "gender": [0]}, 1),  # Shares gender with the instance above.
        ]
        example_weights = [1.0, 0.0]  # Second example "omitted" from training.
        for num_shards in _SHARD_NUMBERS:
            with self._single_threaded_test_session():
                examples = make_example_dict(example_protos, example_weights)
                variables = make_variable_dict(1, 1)
                options = dict(
                    symmetric_l2_regularization=1,
                    symmetric_l1_regularization=0,
                    num_table_shards=num_shards,
                    loss_type="logistic_loss",
                )

                lr = SdcaModel(examples, variables, options)
                tf.global_variables_initializer().run()
                unregularized_loss = lr.unregularized_loss(examples)
                loss = lr.regularized_loss(examples)
                predictions = lr.predictions(examples)
                train_op = lr.minimize()
                for _ in range(_MAX_ITERATIONS):
                    train_op.run()
                lr.update_weights(train_op).run()
                self.assertAllClose(0.411608, unregularized_loss.eval(), atol=0.05)
                self.assertAllClose(0.525457, loss.eval(), atol=0.01)
                predicted_labels = get_binary_predictions_for_logistic(predictions)
                self.assertAllEqual([0, 0], predicted_labels.eval())
                self.assertAllClose(0.01, lr.approximate_duality_gap().eval(), rtol=1e-2, atol=1e-2)
Author: brchiu, Project: tensorflow, Lines: 32, Source: sdca_ops_test.py


Example 20: testOutOfRangeDenseFeatures

 def testOutOfRangeDenseFeatures(self):
     with self._single_threaded_test_session():
         examples, variables = make_dense_examples_and_variables_dicts(
             dense_features_values=[[[1.0, 0.0], [0.0, 1.0]]], weights=[20.0, 10.0], labels=[1.0, 0.0]
         )
         # Replace with a variable of size 1 instead of 2.
         variables["dense_features_weights"] = [tf.Variable(tf.zeros([1], dtype=tf.float32))]
         options = dict(symmetric_l2_regularization=1.0, symmetric_l1_regularization=0, loss_type="logistic_loss")
         lr = SdcaModel(examples, variables, options)
         tf.initialize_all_variables().run()
         train_op = lr.minimize()
         with self.assertRaisesRegexp(
             tf.errors.InvalidArgumentError, "More dense features than we have parameters for.*"
         ):
             train_op.run()
Author: apollos, Project: tensorflow, Lines: 15, Source: sdca_ops_test.py



Note: The tensorflow.contrib.linear_optimizer.python.ops.sdca_ops.SdcaModel class examples in this article were compiled by 纯净天空 from source-code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to the corresponding project's License. Do not reproduce without permission.

