Skip to content

Instantly share code, notes, and snippets.

@SoSeDiK
Last active May 25, 2023 20:48
Show Gist options
  • Save SoSeDiK/b01c23e8d67a79961939f4db7c98cb6d to your computer and use it in GitHub Desktop.
OI#2
import matplotlib.pyplot as plt
import numpy as np
import h5py
import os
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
cwd = os.getcwd() # current working directory
# HDF5 dataset files are expected in a 'data' subdirectory of the CWD.
path = os.path.join(cwd, 'data')
def load_dataset():
    """Load the cat/non-cat image dataset from HDF5 files under ``path``.

    Returns:
        x_train: training image array (4-D; see the shape unpack at the
            call site — presumably (m, num_px, num_px, channels)).
        y_train: training labels reshaped to a (m, 1) column vector.
        x_test, y_test: same layout for the test split.
        classes: class names indexed by label value (0 = non-cat, 1 = cat).
    """
    # Use context managers so the HDF5 file handles are closed even on
    # error (the original left both files open).
    file_name = os.path.join(path, 'train_catvnoncat.h5')
    with h5py.File(file_name, "r") as train_dataset:
        x_train = np.array(train_dataset["train_set_x"][:])  # train set features
        y_train = np.array(train_dataset["train_set_y"][:])  # train set labels
    file_name = os.path.join(path, 'test_catvnoncat.h5')
    with h5py.File(file_name, "r") as test_dataset:
        x_test = np.array(test_dataset["test_set_x"][:])  # test set features
        y_test = np.array(test_dataset["test_set_y"][:])  # test set labels
    classes = ['non-cat', 'cat']
    # Column-vector labels so callers can index rows as y[index, :].
    y_train = y_train.reshape(-1, 1)
    y_test = y_test.reshape(-1, 1)
    return x_train, y_train, x_test, y_test, classes
# Load the dataset and visualize one sample as a sanity check.
x_train, y_train, x_test, y_test, classes = load_dataset()
index = 27  # arbitrary training sample to display
plt.figure()
plt.imshow(x_train[index])
plt.show()
print("y = " + str(y_train[index, :]) + ", it's a '" + classes[np.squeeze(y_train[index, :])] + "' picture.")
# Normalize data
# x_train is 4-D: (m_train, num_px, num_px, _); the last axis is
# presumably the color channels — confirm against the dataset files.
m_train, num_px, _, _ = x_train.shape
m_test = x_test.shape[0]
# Flatten each image into a single feature row of length num_px*num_px*channels.
x_train_flatten = x_train.reshape(m_train, -1)
x_test_flatten = x_test.reshape(m_test, -1)
# Scale pixel values by 255 (assumes uint8 images in [0, 255] — TODO confirm).
x_train_scaled = x_train_flatten / 255.
x_test_scaled = x_test_flatten / 255.
# Collapse the (m, 1) label columns to flat (m,) vectors for sklearn fit/score.
y_train = np.squeeze(y_train)
y_test = np.squeeze(y_test)
print('Y_train.shape =', y_train.shape)
print('Y_test.shape =', y_test.shape)
print('X_train_scaled.shape =', x_train_scaled.shape)
# Train MLP classifiers of increasing depth, once per activation function,
# reporting train/test accuracy for each configuration.
for activation in ['logistic', 'relu']:
    # 1-layer NN classifier
    print('- NN 1,', activation)
    clf = MLPClassifier(
        hidden_layer_sizes=100,
        solver='sgd',
        activation=activation,
        max_iter=300,
        alpha=0.0001
    ).fit(x_train_scaled, y_train)
    print("train accuracy= {:.3%}".format(clf.score(x_train_scaled, y_train)))
    print("test accuracy= {:.3%}".format(clf.score(x_test_scaled, y_test)))
    # 2-layers MLP classifier
    print('- NN 2', activation)
    # BUG FIX: activation was hardcoded to 'logistic' although the header
    # line above prints the loop's activation — the 'relu' pass silently
    # retrained a logistic model. Use the loop variable instead.
    clf = MLPClassifier(
        hidden_layer_sizes=(3, 3),
        solver='sgd',
        activation=activation,
        max_iter=300,
        alpha=0.0001
    ).fit(x_train_scaled, y_train)
    print("train accuracy = {:.3%}".format(clf.score(x_train_scaled, y_train)))
    print("test accuracy = {:.3%}".format(clf.score(x_test_scaled, y_test)))
    # 3-layers MLP classifier
    print('- NN 3', activation)
    # BUG FIX: same hardcoded 'logistic' as above — use the loop variable.
    clf = MLPClassifier(
        hidden_layer_sizes=(20, 7, 10),
        solver='sgd',
        activation=activation,
        max_iter=300,
        alpha=0.0001
    ).fit(x_train_scaled, y_train)
    print("train accuracy = {:.3%}".format(clf.score(x_train_scaled, y_train)))
    print("test accuracy = {:.3%}".format(clf.score(x_test_scaled, y_test)))
# Compare the neural networks against an SVM baseline (RBF kernel).
print('SVM')
svm_clf = SVC(C=10, kernel='rbf', gamma=0.001).fit(x_train_scaled, y_train)
svm_train_acc = svm_clf.score(x_train_scaled, y_train)
svm_test_acc = svm_clf.score(x_test_scaled, y_test)
print("train accuracy = {:.3%}".format(svm_train_acc))
print("test accuracy = {:.3%}".format(svm_test_acc))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment