Created March 2, 2017 08:12
SAGAClassifier vs sklearn liblinear
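
This gist compares the SAGA solver from lightning (SAGAClassifier) against scikit-learn's liblinear-based LogisticRegression, on a dense binary iris problem and a sparse synthetic problem, over a grid of L1 and L2 regularization strengths. The two parameterizations are matched by setting C = 1 / (n_samples * alpha) on the scikit-learn side, so both solvers minimize the same penalized objective; the first four coefficients of each fit are printed for a side-by-side look.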
import numpy as np
from lightning.classification import SAGAClassifier
from scipy import sparse
from sklearn.datasets import load_iris, make_classification
from sklearn.linear_model import LogisticRegression


def test():
    # Dense binary problem: iris classes 0/1, replicated 10 times,
    # with labels mapped to {-1, 1}.
    iris = load_iris()
    X, y = iris.data, iris.target
    X = np.concatenate([X] * 10)
    y = np.concatenate([y] * 10)
    X_bin = X[y <= 1]
    y_bin = y[y <= 1] * 2 - 1

    # Sparse binary problem from a synthetic dataset.
    X_sparse, y_sparse = make_classification(n_samples=100, n_features=50,
                                             random_state=0)
    X_sparse = sparse.csr_matrix(X_sparse)

    for penalty in ['l2', 'l1']:
        for (X, y) in ((X_bin, y_bin), (X_sparse, y_sparse)):
            n_samples = X.shape[0]
            for alpha in [1e-4, 0.001, 0.01, 0.1, 1, 10, 100]:
                # liblinear parameterizes regularization through C;
                # C = 1 / (n_samples * alpha) makes both solvers
                # optimize the same penalized objective.
                liblinear = LogisticRegression(
                    C=1. / (n_samples * alpha),
                    solver='liblinear',
                    multi_class='ovr',
                    max_iter=500,
                    fit_intercept=False,
                    penalty=penalty, random_state=0, tol=1e-24)
                # In lightning, alpha is the L2 strength and beta the
                # strength of the named penalty (here L1).
                if penalty == 'l1':
                    lalpha = 0
                    lbeta = alpha
                    lpenalty = 'l1'
                else:
                    lalpha = alpha
                    lbeta = 0
                    lpenalty = None
                lsaga = SAGAClassifier(loss='log',
                                       beta=lbeta, penalty=lpenalty,
                                       alpha=lalpha,
                                       max_iter=5000,
                                       random_state=0)
                lsaga.fit(X, y)
                liblinear.fit(X, y)
                print('[penalty=%s, alpha=%s, solver=liblinear]' % (penalty, alpha),
                      liblinear.coef_[0, :4])
                print('[penalty=%s, alpha=%s, solver=lightning]' % (penalty, alpha),
                      lsaga.coef_[0, :4])
                print('-------------------------------')


test()
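
Beyond eyeballing the printed coefficients, agreement can be checked quantitatively. The snippet below is a minimal sketch, not part of the original gist: it refits both solvers on the dense binary problem for a single L2 strength and compares the full coefficient vectors; the chosen alpha and the 1e-3 tolerance are illustrative assumptions.

# Minimal sketch (not in the original gist): quantitative comparison of
# the two solvers' coefficients for one L2 regularization strength.
import numpy as np
from lightning.classification import SAGAClassifier
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression

iris = load_iris()
X, y = iris.data, iris.target
X_bin, y_bin = X[y <= 1], y[y <= 1] * 2 - 1

alpha = 0.01  # illustrative choice
liblinear = LogisticRegression(C=1. / (X_bin.shape[0] * alpha),
                               solver='liblinear', fit_intercept=False,
                               penalty='l2', max_iter=500,
                               tol=1e-24, random_state=0)
saga = SAGAClassifier(loss='log', alpha=alpha, beta=0, penalty=None,
                      max_iter=5000, random_state=0)
liblinear.fit(X_bin, y_bin)
saga.fit(X_bin, y_bin)

# Compare the whole coefficient vectors rather than the first entries.
print('max abs difference:',
      np.max(np.abs(liblinear.coef_ - saga.coef_)))
print('close within 1e-3?',
      np.allclose(liblinear.coef_, saga.coef_, atol=1e-3))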