import numpy as np
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
## Base Models
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
## Meta Learner
from sklearn.linear_model import LogisticRegression
## Creating Sample Data
X, y = make_classification(n_samples=10000, n_features=20, n_informative=15, n_redundant=5, random_state=42)
## Training an Individual Logistic Regression Model as a Baseline
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
pred = logreg.predict(X_test)
score = accuracy_score(y_test, pred)
print('Base Model Accuracy: %.3f' % (score*100))
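## Blending in a nutshell: split the data into train, holdout (validation),
## and test sets; fit several base models on the train set; fit a meta
## learner on the base models' holdout predictions; at test time, feed the
## base models' test-set predictions to the meta learner. SVC is created
## with probability=True below so it exposes predict_proba like the other
## base models.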
## Defining Base Models
def base_models():
    models = list()
    models.append(('knn', KNeighborsClassifier()))
    models.append(('dt', DecisionTreeClassifier()))
    models.append(('svm', SVC(probability=True)))
    return models
## Fitting Ensemble Blending Model
## Step 1: Splitting Data Into Train, Holdout (Validation) and Test Sets
X_train_full, X_test, y_train_full, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train_full, y_train_full, test_size=0.33, random_state=1)
## Step 2: Train base models on the train set and make predictions on the validation set
models = base_models()
meta_X = list()
for name, model in models:
    # training base model on the train set
    model.fit(X_train, y_train)
    # predicting class probabilities on the holdout set
    yhat = model.predict_proba(X_val)
    # storing predictions
    meta_X.append(yhat)
# horizontally stacking predictions into one meta-feature matrix
meta_X = np.hstack(meta_X)
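## With 3 base models each emitting 2 class probabilities, meta_X here has
## shape (2310, 6): 2310 validation rows (10000 * 0.7 * 0.33) by 6 meta features.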
## Step 3: Creating Blending Meta Learner
blender = LogisticRegression()
## training the meta learner on base model predictions
blender.fit(meta_X, y_val)
## Step 4: Making predictions using blending meta learner
meta_X = list()
for name, model in models:
    yhat = model.predict_proba(X_test)
    meta_X.append(yhat)
meta_X = np.hstack(meta_X)
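## Test-time meta features are built exactly like the training ones: each base
## model's test-set class probabilities stacked side by side, shape (3000, 6).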
y_pred = blender.predict(meta_X)
# Evaluate predictions
score = accuracy_score(y_test, y_pred)
print('Blending Accuracy: %.3f' % (score*100))
---------------------------------
Base Model Accuracy: 82.367
Blending Accuracy: 96.733
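For comparison, scikit-learn ships a built-in stacking estimator that automates this pipeline. The sketch below is a minimal equivalent using StackingClassifier; it performs stacking (cross-validated out-of-fold predictions) rather than blending's single holdout split, so its score will differ somewhat. The cv=5 and stack_method='predict_proba' settings are illustrative choices, not taken from the code above.

from sklearn.ensemble import StackingClassifier
## same (name, model) pairs and meta learner as above
stack = StackingClassifier(
    estimators=base_models(),
    final_estimator=LogisticRegression(),
    stack_method='predict_proba',  # pass class probabilities to the meta learner
    cv=5,                          # out-of-fold predictions instead of a holdout split
)
stack.fit(X_train_full, y_train_full)
y_pred = stack.predict(X_test)
print('Stacking Accuracy: %.3f' % (accuracy_score(y_test, y_pred) * 100))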