本文整理汇总了Python中mlxtend.classifier.LogisticRegression类的典型用法代码示例。如果您正苦于以下问题:Python LogisticRegression类的具体用法?Python LogisticRegression怎么用?Python LogisticRegression使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了LogisticRegression类的17个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于我们的系统推荐出更棒的Python代码示例。
示例1: test_logistic_regression_gd
def test_logistic_regression_gd():
    """Plain gradient descent should recover known weights and classify X perfectly."""
    expected = np.array([0.51, 1.18, 4.40])
    clf = LogisticRegression(epochs=100, eta=0.01, learning='gd', random_seed=0)
    clf.fit(X, y)  # binary problem: classes 0 and 1
    np.testing.assert_almost_equal(clf.w_, expected, 2)
    assert (clf.predict(X) == y).all()
开发者ID:beingzy,项目名称:mlxtend,代码行数:7,代码来源:test_logistic_regression.py
示例2: test_print_progress_3
def test_print_progress_3():
    """Smoke test: fitting with the most verbose print_progress level must not raise."""
    clf = LogisticRegression(epochs=100, eta=0.01, minibatches=1,
                             print_progress=3, random_seed=1)
    clf.fit(X, y)
开发者ID:JJLWHarrison,项目名称:mlxtend,代码行数:7,代码来源:test_logistic_regression.py
示例3: test_ary_persistency_in_shuffling
def test_ary_persistency_in_shuffling():
    """fit() must leave the caller's input array untouched despite internal shuffling."""
    before = X.copy()
    clf = LogisticRegression(eta=0.01, epochs=100, minibatches=len(y),
                             l2_lambda=1.0, random_seed=1)
    clf.fit(X, y)
    np.testing.assert_almost_equal(before, X, 6)
开发者ID:CandyPythonFlow,项目名称:mlxtend,代码行数:9,代码来源:test_logistic_regression.py
示例4: test_score_function
def test_score_function():
    """score() should report perfect accuracy on the training data."""
    clf = LogisticRegression(epochs=100, eta=0.01, minibatches=1,
                             random_seed=1)
    clf.fit(X, y)
    accuracy = clf.score(X, y)
    assert accuracy == 1.0, "Acc: %s" % accuracy
开发者ID:CandyPythonFlow,项目名称:mlxtend,代码行数:9,代码来源:test_logistic_regression.py
示例5: test_logistic_regression_sgd
def test_logistic_regression_sgd():
    """SGD (one sample per minibatch) should recover known weights and fit X perfectly."""
    expected = np.array([0.53, 1.2, 4.4])
    clf = LogisticRegression(epochs=100, eta=0.01,
                             minibatches=len(y), random_seed=1)
    clf.fit(X, y)  # binary problem: classes 0 and 1
    np.testing.assert_almost_equal(clf.w_, expected, 2)
    assert (clf.predict(X) == y).all()
开发者ID:datasci-co,项目名称:mlxtend,代码行数:10,代码来源:test_logistic_regression.py
示例6: test_l2_regularization_gd
def test_l2_regularization_gd():
    """GD with L2 regularization (older lambda_/regularization API) reaches known
    shrunken weights while still classifying X perfectly."""
    clf = LogisticRegression(eta=0.01, epochs=20, learning='gd',
                             regularization='l2', lambda_=1.0, random_seed=0)
    clf.fit(X, y)
    preds = clf.predict(X)
    np.testing.assert_almost_equal(clf.w_, np.array([0.252, 1.186, 2.296]), 3)
    acc = sum(preds == y) / len(y)
    assert acc == 1.0
开发者ID:arunsingh,项目名称:mlxtend,代码行数:11,代码来源:test_logistic_regression.py
示例7: test_predict_proba
def test_predict_proba():
    """predict_proba should give near-0 probabilities for class-0 samples and
    near-1 for a class-1 sample."""
    clf = LogisticRegression(epochs=100, eta=0.01, minibatches=1,
                             random_seed=1)
    clf.fit(X, y)
    sample_idx = [0, 48, 99]  # sample labels: 0, 0, 1
    probas = clf.predict_proba(X[sample_idx])
    np.testing.assert_almost_equal(probas, np.array([0.009, 0.012, 0.993]), 3)
开发者ID:CandyPythonFlow,项目名称:mlxtend,代码行数:11,代码来源:test_logistic_regression.py
示例8: test_score_function
def test_score_function():
    """Weights should match the known GD solution and score() should be perfect."""
    expected = np.array([0.52, 1.2, 4.4])
    clf = LogisticRegression(epochs=100, eta=0.01, minibatches=1,
                             random_seed=1)
    clf.fit(X, y)  # binary problem: classes 0 and 1
    np.testing.assert_almost_equal(clf.w_, expected, 2)
    accuracy = clf.score(X, y)
    assert accuracy == 1.0, "Acc: %s" % accuracy
开发者ID:GQiuQi,项目名称:mlxtend,代码行数:11,代码来源:test_logistic_regression.py
示例9: test_logistic_regression_sgd
def test_logistic_regression_sgd():
    """SGD should recover the known column-vector weights and classify X perfectly."""
    expected = np.array([[1.18], [4.38]])
    clf = LogisticRegression(epochs=100, eta=0.01,
                             minibatches=len(y), random_seed=1)
    clf.fit(X, y)  # binary problem: classes 0 and 1
    np.testing.assert_almost_equal(clf.w_, expected, 2)
    acc = (y == clf.predict(X)).sum(axis=0) / float(X.shape[0])
    assert acc == 1.0, "Acc: %s" % acc
开发者ID:CandyPythonFlow,项目名称:mlxtend,代码行数:12,代码来源:test_logistic_regression.py
示例10: test_logistic_regression_gd
def test_logistic_regression_gd():
    """Full-batch GD (minibatches=1) should recover known weights and fit X perfectly."""
    expected = np.array([0.52, 1.2, 4.4])
    clf = LogisticRegression(epochs=100, eta=0.01,
                             minibatches=1, random_seed=1)
    clf.fit(X, y)  # binary problem: classes 0 and 1
    np.testing.assert_almost_equal(clf.w_, expected, 2)
    acc = (y == clf.predict(X)).sum(axis=0) / float(X.shape[0])
    assert acc == 1.0, "Acc: %s" % acc
开发者ID:blahblueray,项目名称:mlxtend,代码行数:12,代码来源:test_logistic_regression.py
示例11: test_l2_regularization_sgd
def test_l2_regularization_sgd():
    """SGD with L2 regularization shrinks the weights; accuracy drops to 0.97.

    Fix: the original called ``lr.predict(X)`` twice and discarded the first
    result; the redundant call is removed.
    """
    lr = LogisticRegression(eta=0.01,
                            epochs=100,
                            minibatches=len(y),
                            l2_lambda=1.0,
                            random_seed=1)
    lr.fit(X, y)
    expect_weights = np.array([[0.24], [0.35]])
    np.testing.assert_almost_equal(lr.w_, expect_weights, 2)
    y_pred = lr.predict(X)
    acc = np.sum(y == y_pred, axis=0) / float(X.shape[0])
    assert acc == 0.97, "Acc: %s" % acc
开发者ID:CandyPythonFlow,项目名称:mlxtend,代码行数:14,代码来源:test_logistic_regression.py
示例12: test_l2_regularization_gd
def test_l2_regularization_gd():
    """Full-batch GD with L2 regularization reaches known shrunken weights
    while still classifying X perfectly.

    Fix: the original called ``lr.predict(X)`` twice and discarded the first
    result; the redundant call is removed.
    """
    lr = LogisticRegression(eta=0.01,
                            epochs=20,
                            minibatches=1,
                            l2_lambda=1.0,
                            random_seed=1)
    lr.fit(X, y)
    expect_weights = np.array([0.153, 1.055, 2.284])
    np.testing.assert_almost_equal(lr.w_, expect_weights, 3)
    y_pred = lr.predict(X)
    acc = np.sum(y == y_pred, axis=0) / float(X.shape[0])
    assert acc == 1.0, "Acc: %s" % acc
开发者ID:blahblueray,项目名称:mlxtend,代码行数:14,代码来源:test_logistic_regression.py
示例13: test_l2_regularization_gd
def test_l2_regularization_gd():
    """GD with l2_lambda plus explicit regularization='l2' reaches known weights
    and 100% training accuracy."""
    clf = LogisticRegression(eta=0.01, epochs=20, minibatches=1,
                             l2_lambda=1.0, regularization='l2',
                             random_seed=1)
    clf.fit(X, y)
    preds = clf.predict(X)
    np.testing.assert_almost_equal(clf.w_, np.array([0.303, 1.066, 2.329]), 3)
    acc = sum(preds == y) / len(y)
    assert acc == 1.0
开发者ID:datasci-co,项目名称:mlxtend,代码行数:14,代码来源:test_logistic_regression.py
示例14: test_l2_regularization_sgd
def test_l2_regularization_sgd():
    """SGD with L2 regularization shrinks weights toward zero; accuracy is 0.97."""
    clf = LogisticRegression(eta=0.01, epochs=100, minibatches=len(y),
                             l2_lambda=1.0, regularization='l2',
                             random_seed=1)
    clf.fit(X, y)
    preds = clf.predict(X)
    np.testing.assert_almost_equal(
        clf.w_, np.array([-2.73e-04, 2.40e-01, 3.53e-01]), 2)
    acc = sum(preds == y) / float(len(y))
    assert acc == 0.97
开发者ID:datasci-co,项目名称:mlxtend,代码行数:15,代码来源:test_logistic_regression.py
示例15: test_refit_weights
def test_refit_weights():
    """Refitting with init_params=False must continue from, and move, the
    previously learned weights, ending at the known solution.

    Fix: the original set ``w2 = lr.w_[0][0]`` (duplicating ``w1``), so the
    second assert compared unrelated weight entries; ``w2`` now captures
    ``lr.w_[1][0]`` as the paired assert expects.
    """
    w = np.array([[1.2], [4.4]])
    b = np.array([0.52])
    lr = LogisticRegression(epochs=50,
                            eta=0.01,
                            minibatches=1,
                            random_seed=1)
    lr.fit(X, y)
    w1 = lr.w_[0][0]
    w2 = lr.w_[1][0]  # was lr.w_[0][0] — clearly a copy-paste slip
    lr.fit(X, y, init_params=False)
    # Both weight entries must have moved during the warm-started second fit.
    assert w1 != lr.w_[0][0]
    assert w2 != lr.w_[1][0]
    np.testing.assert_almost_equal(lr.w_, w, 2)
    np.testing.assert_almost_equal(lr.b_, b, 2)
开发者ID:CandyPythonFlow,项目名称:mlxtend,代码行数:17,代码来源:test_logistic_regression.py
示例16: iris_data
from mlxtend.data import iris_data
from mlxtend.evaluate import plot_decision_regions
from mlxtend.classifier import LogisticRegression
import matplotlib.pyplot as plt

# Load the first two iris classes with two features only.
X, y = iris_data()
X = X[:, [0, 3]]  # sepal length and petal width
X, y = X[0:100], y[0:100]  # class 0 and class 1

# Standardize each feature in place (zero mean, unit variance).
for col in (0, 1):
    X[:, col] = (X[:, col] - X[:, col].mean()) / X[:, col].std()

model = LogisticRegression(eta=0.01, epochs=100, learning='sgd')
model.fit(X, y)

# Visualize the learned decision boundary.
plot_decision_regions(X, y, clf=model)
plt.title('Logistic Regression - Stochastic Gradient Descent')
plt.show()

print(model.w_)

# Plot the training-cost trajectory per epoch.
plt.plot(range(len(model.cost_)), model.cost_)
plt.xlabel('Iterations')
plt.ylabel('Missclassifications')
plt.show()
示例17: iris_data
import matplotlib.pyplot as plt

# First two iris classes, two features (sepal length, petal width).
X, y = iris_data()
X = X[:, [0, 3]]
X, y = X[0:100], y[0:100]

# Standardize each feature in place.
for col in (0, 1):
    X[:, col] = (X[:, col] - X[:, col].mean()) / X[:, col].std()

# minibatches=1 would be plain gradient descent; minibatches=len(y) would be
# SGD learning; 5 gives 100/5 = 20 samples per minibatch.
model = LogisticRegression(eta=0.1,
                           l2_lambda=0.0,
                           epochs=500,
                           minibatches=5,
                           random_seed=1,
                           print_progress=3)
model.fit(X, y)

labels = model.predict(X)
print('Last 3 Class Labels: %s' % labels[-3:])
probas = model.predict_proba(X)
print('Last 3 Class Labels: %s' % probas[-3:])

# Visualize the learned decision boundary.
plot_decision_regions(X, y, clf=model)
plt.title("Logistic regression - gd")
plt.show()
开发者ID:clover9gu,项目名称:simplemining,代码行数:31,代码来源:gd.py
注:本文中的mlxtend.classifier.LogisticRegression类示例由纯净天空整理自Github/MSDocs等源码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。
请发表评论