Predicting traffic signs by building a CNN model using Keras
import os
import pandas as pd
from imageio import imread  # scipy.misc.imread was removed in SciPy 1.2; imageio.imread is a drop-in replacement
import numpy as np
import cv2
import keras
import seaborn as sns
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import BatchNormalization
from keras.optimizers import Adam
from keras.models import Sequential
### LOADING DATASET
# expanduser is needed here; abspath alone does not expand '~'
data_dir = os.path.abspath(os.path.expanduser('~/GTSRB/Final_Training/Images'))
os.path.exists(data_dir)
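# The loader below expects one subdirectory per class inside data_dir, each holding its images
# plus a semicolon-separated annotation file named GT-<subdirectory>.csv.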
### Function to resize the images using OpenCV
def resize_cv(im):
    return cv2.resize(im, (64, 64), interpolation=cv2.INTER_LINEAR)
### Loading dataset
list_images = []
output = []
for class_dir in os.listdir(data_dir):
    if class_dir == '.DS_Store':
        continue
    inner_dir = os.path.join(data_dir, class_dir)
    csv_file = pd.read_csv(os.path.join(inner_dir, "GT-" + class_dir + '.csv'), sep=';')
    for _, row in csv_file.iterrows():
        img_path = os.path.join(inner_dir, row.Filename)
        img = imread(img_path)
        # NumPy images are indexed (row, column) = (y, x), so the ROI rows come from Roi.Y1/Y2
        # and the columns from Roi.X1/X2
        img = img[row['Roi.Y1']:row['Roi.Y2'], row['Roi.X1']:row['Roi.X2'], :]
        img = resize_cv(img)
        list_images.append(img)
        output.append(row.ClassId)
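# At this point list_images holds the cropped, 64x64 RGB images and output holds the matching integer class IDs.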
### Plotting the dataset
fig = sns.distplot(output, kde=False, bins=43, hist=True, hist_kws=dict(edgecolor="black", linewidth=2))
fig.set(title="Traffic signs frequency graph",
        xlabel="ClassId",
        ylabel="Frequency")
input_array = np.stack(list_images)
train_y = keras.utils.to_categorical(output)
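# output contains integer class IDs in the range 0-42, so to_categorical turns them into one-hot label vectors of length 43.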
### Randomizing the dataset
randomize = np.arange(len(input_array))
np.random.shuffle(randomize)
x = input_array[randomize]
y = train_y[randomize]
### Splitting the dataset in train, validation, test set
split_size = int(x.shape[0] * 0.6)
train_x, val_x = x[:split_size], x[split_size:]
train1_y, val_y = y[:split_size], y[split_size:]
split_size = int(val_x.shape[0] * 0.5)
val_x, test_x = val_x[:split_size], val_x[split_size:]
val_y, test_y = val_y[:split_size], val_y[split_size:]
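# The 60/40 split followed by a 50/50 split of the remainder gives roughly 60% train, 20% validation, 20% test.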
### Building the model
hidden_num_units = 2048
hidden_num_units1 = 1024
hidden_num_units2 = 128
output_num_units = 43
epochs = 10
batch_size = 16
pool_size = (2, 2)
model = Sequential([
    Conv2D(16, (3, 3), activation='relu', input_shape=(64, 64, 3), padding='same'),
    BatchNormalization(),
    Conv2D(16, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    MaxPooling2D(pool_size=pool_size),
    Dropout(0.2),

    Conv2D(32, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    MaxPooling2D(pool_size=pool_size),
    Dropout(0.2),

    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    MaxPooling2D(pool_size=pool_size),
    Dropout(0.2),

    Flatten(),
    Dense(units=hidden_num_units, activation='relu'),
    Dropout(0.3),
    Dense(units=hidden_num_units1, activation='relu'),
    Dropout(0.3),
    Dense(units=hidden_num_units2, activation='relu'),
    Dropout(0.3),
    Dense(units=output_num_units, activation='softmax'),
])

model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=1e-4), metrics=['accuracy'])
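# Architecture summary: three convolutional blocks (16 -> 32 -> 64 filters), each with batch normalization,
# 2x2 max-pooling and dropout, followed by three fully connected hidden layers and a 43-way softmax output.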
### Training the model
# train_x already has shape (N, 64, 64, 3) after np.stack, so no reshape is needed
trained_model_conv = model.fit(train_x, train1_y, epochs=epochs, batch_size=batch_size, validation_data=(val_x, val_y))
### Predicting the class
pred = model.predict_classes(test_x)  # in newer Keras/TF, use np.argmax(model.predict(test_x), axis=-1) instead
### Evaluating the model
model.evaluate(test_x, test_y)
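# A minimal single-image inference sketch, assuming the model above has already been trained;
# 'stop_sign.ppm' is a hypothetical image path, and imread()/resize_cv() are reused from above.
new_img = imread('stop_sign.ppm')                        # hypothetical example image
new_img = resize_cv(new_img)                             # same 64x64 preprocessing as training
probs = model.predict(np.expand_dims(new_img, axis=0))   # shape (1, 43): class probabilities
predicted_class = int(np.argmax(probs, axis=-1)[0])      # ClassId with the highest probability
print(predicted_class)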
Traceback (most recent call last):
File "signdetect.py", line 27, in
for dir in os.listdir(data_dir):
OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect: 'C:\Users\Vishal B\Anaconda3\x07nacondaproject\envs\lucky\Lib\site-packages\models\research\object_detection\GTSRB\Train'
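A likely cause (an assumption, not confirmed in this thread): in a regular Python string literal the sequence \a is the BEL control character \x07, which is why the reported path contains \x07nacondaproject (presumably ...\Anaconda3\anacondaproject\...) and why Windows rejects it with WinError 123. Writing the path as a raw string, or with forward slashes, avoids the unintended escape:

data_dir = r'C:\Users\Vishal B\Anaconda3\anacondaproject\envs\lucky\Lib\site-packages\models\research\object_detection\GTSRB\Train'
# or
data_dir = 'C:/Users/Vishal B/Anaconda3/anacondaproject/envs/lucky/Lib/site-packages/models/research/object_detection/GTSRB/Train'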