# --> Import standard Python libraries.
import numpy as np
from scipy.special import expit
from scipy.linalg import norm

# --> Import sklearn utility functions.
from sklearn.base import BaseEstimator, ClassifierMixin

class LogisticRegression_Newton(BaseEstimator, ClassifierMixin):
    """
    Implementation of logistic regression. Minimization of the negative
    log-likelihood is performed with Newton's method. Note that a column
    of ones (bias term) is assumed to have been prepended to X for the
    sake of simplicity.
    """

    def __init__(self, maxiter=1000, tol=1e-8):
        # --> Maximum number of iterations.
        self.maxiter = maxiter
        # --> Tolerance for the optimizer.
        self.tol = tol

    def predict(self, X):
        # --> Round the predicted probabilities to the nearest label (0 or 1).
        return np.rint(self.predict_proba(X)).astype(int)

    def predict_proba(self, X):
        # --> Logistic (sigmoid) transform of the linear model.
        return expit(X @ self.weights)

    def fit(self, X, y):
        """
        Implementation of Newton's method.

        INPUT
        -----
        X : numpy 2D array. Each row corresponds to one training example.
            It is assumed that the first column is a column of ones (bias).

        y : numpy 1D array. Label (0 or 1) of each example.

        OUTPUT
        ------
        self : The trained logistic regression model.
        """
        # --> Number of examples and features.
        m, n = X.shape

        # --> Initialize the weights.
        self.weights = np.zeros((n,))

        # --> Training using Newton's method.
        for _ in range(self.maxiter):
            # --> Predicted probabilities with the current weights.
            p = self.predict_proba(X)

            # --> Compute the gradient.
            grad = X.T @ (p - y) / m

            # --> Compute the Hessian matrix. Scaling the columns of X.T by
            #     p * (1 - p) avoids forming the m-by-m diagonal matrix.
            hess = (X.T * (p * (1 - p))) @ X / m

            # --> Update the weights by solving the Newton system rather
            #     than forming the explicit inverse of the Hessian.
            self.weights -= np.linalg.solve(hess, grad)

            # --> Check for convergence.
            if norm(grad)**2 < self.tol:
                break

        return self
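
# --> Minimal usage sketch (not part of the original gist): generate a toy
#     binary classification problem, prepend the column of ones the class
#     expects, and fit the model. The dataset parameters below are arbitrary
#     assumptions chosen for illustration.
if __name__ == "__main__":
    from sklearn.datasets import make_classification
    from sklearn.metrics import accuracy_score

    # --> Toy two-feature dataset (assumed sizes, not from the gist).
    X, y = make_classification(n_samples=200, n_features=2, n_informative=2,
                               n_redundant=0, random_state=0)

    # --> Prepend the unit column assumed by LogisticRegression_Newton.
    X = np.hstack([np.ones((X.shape[0], 1)), X])

    # --> Fit the model and report accuracy on the training set.
    clf = LogisticRegression_Newton().fit(X, y)
    print("Training accuracy :", accuracy_score(y, clf.predict(X)))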