import numpy as np


# Linear Discriminant Analysis (LDA).
# Can be used for linear classification or supervised dimensionality reduction.
# Assumes that both classes share the same covariance matrix.
# Binary implementation only: labels must be {+1, -1}.
# Stores the transformation matrix with columns ordered by decreasing eigenvalue.
class LDA(object):
    def __init__(self, n_components=None):
        self._od = n_components        # output dimension used by transform()
        self._scalings = []            # eigenvalues of S_w^-1 S_b
        self._transform_matrix = []    # eigenvectors, sorted by eigenvalue
        self._mu_p = []                # positive-class mean
        self._mu_n = []                # negative-class mean
        self._mu = []                  # overall mean
        self._n_p = []                 # positive-class sample count
        self._n_n = []                 # negative-class sample count
        self._s_w = []                 # within-class scatter matrix
        self._s_b = []                 # between-class scatter matrix

    def fit(self, x, y):
        if set(y) != {1, -1}:
            raise ValueError('Only binary classes {+1, -1} are accepted.')
        # Class means and sample counts
        self._mu_p = np.mean(x[y == 1, :], axis=0)
        self._mu_n = np.mean(x[y == -1, :], axis=0)
        self._mu = np.mean(x, axis=0)
        self._n_p = np.sum(y == 1)
        self._n_n = np.sum(y == -1)
        # Within-class scatter (data variance around each class mean)
        self._s_w = np.dot((x[y == -1, :] - self._mu_n).T, (x[y == -1, :] - self._mu_n))
        self._s_w += np.dot((x[y == 1, :] - self._mu_p).T, (x[y == 1, :] - self._mu_p))
        # Between-class scatter (class means around the overall mean)
        self._s_b = self._n_n * np.outer(self._mu_n - self._mu, self._mu_n - self._mu)
        self._s_b += self._n_p * np.outer(self._mu_p - self._mu, self._mu_p - self._mu)
        # Eigendecomposition of S_w^-1 S_b; sort by decreasing eigenvalue so the
        # leading columns of the transformation matrix are the most discriminative.
        eigvals, eigvecs = np.linalg.eig(np.dot(np.linalg.inv(self._s_w), self._s_b))
        order = np.argsort(eigvals.real)[::-1]
        self._scalings = eigvals.real[order]
        self._transform_matrix = eigvecs.real[:, order]
        return self._scalings, self._transform_matrix

    def transform(self, x, n_components=None):
        if n_components is not None:
            self._od = n_components
        if not self._od:
            raise ValueError('Output dimension undefined, '
                             'please specify n_components in either the model or the transform')
        return np.dot(x, self._transform_matrix[:, :self._od])
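

# -----------------------------------------------------------------------------
# Usage example (not part of the original gist): a minimal sketch that fits the
# LDA on synthetic two-class Gaussian data and projects it onto the leading
# discriminant direction. The synthetic data and the nearest-projected-mean
# classification rule below are illustrative assumptions, not the author's code.
if __name__ == '__main__':
    rng = np.random.default_rng(0)

    # Two Gaussian clouds with shared covariance, labelled +1 / -1.
    x_pos = rng.normal(loc=[2.0, 0.0, 0.0], scale=1.0, size=(200, 3))
    x_neg = rng.normal(loc=[-2.0, 0.0, 0.0], scale=1.0, size=(200, 3))
    x = np.vstack([x_pos, x_neg])
    y = np.concatenate([np.ones(200), -np.ones(200)])

    lda = LDA(n_components=1)
    lda.fit(x, y)
    z = lda.transform(x)  # shape (400, 1): projection onto the top eigenvector

    # Classify each sample by the closer projected class mean (the sign of the
    # eigenvector is arbitrary, so distances are measured in the projected space).
    z_pos_mean = z[y == 1].mean()
    z_neg_mean = z[y == -1].mean()
    pred = np.where(np.abs(z[:, 0] - z_pos_mean) < np.abs(z[:, 0] - z_neg_mean), 1, -1)
    print('training accuracy: %.3f' % np.mean(pred == y))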