Skip to content

Instantly share code, notes, and snippets.

View Blaizzy's full-sized avatar
🏠
Working from home

Prince Canuma Blaizzy

🏠
Working from home
View GitHub Profile
# Helper function: display one sample image in a 1x4 subplot strip.
def plot(img, i):
    """Show `img` in subplot position i+1 of a 1x4 grid.

    Args:
        img: image array displayable by plt.imshow.
        i: zero-based subplot index (expected 0-3).
    """
    ax = plt.subplot(1, 4, i + 1)
    plt.tight_layout()
    ax.set_title('Sample#{}'.format(i))
    ax.axis('off')
    # BUG FIX: original line was ``plt.imshow(im)`` with stray backticks and
    # an undefined name `im` — the parameter is `img`.
    plt.imshow(img)
    plt.pause(1)  # 1 sec pause so the figure actually renders
# Instantiating our Dataset transform class (NOTE(review): this is a
# transform applied to samples, not the Dataset itself).
class Rescale(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_sz):
# Validate the argument type only. NOTE(review): this snippet never
# stores `output_sz` on self and no `__call__` is visible here —
# presumably `self.output_size = output_sz` and the resize logic
# follow in the full gist; confirm against the original.
assert isinstance(output_sz,(int,tuple))
# Compose the preprocessing pipeline.
# BUG FIX: the original instantiated `Reshape(256)`, but the transform class
# defined in this gist is `Rescale`. The pipeline is also bound to a new name
# instead of `transforms`, which shadowed the module being called.
composed = transforms.Compose([
    Rescale(256),
    RandomCrop(224),
    ToTensor(),
])
data = ImageDataset(root_dir='/images/', transform=composed)
# Creating dataloader: batches of 4, shuffled, loaded by 4 worker processes.
dataloader = DataLoader(data, batch_size=4, shuffle=True, num_workers=4)
# Helper function: render an entire batch of images as one tiled figure.
def plot_batch(image):
    """Tile the batch `image` into a single grid and display it."""
    batch_grid = utils.make_grid(image)
    # make_grid yields a CHW tensor; matplotlib expects HWC, hence the transpose.
    plt.imshow(batch_grid.numpy().transpose((1, 2, 0)))
# Walk the loader once, reporting each batch index and its tensor shape.
for idx, batch in enumerate(dataloader):
    print(idx, batch.size())
@Blaizzy
Blaizzy / CNN.py
Last active April 10, 2019 09:21
from keras import layers
from keras import models

# Small convnet for 28x28x1 MNIST digits, built with the Sequential list
# form (behaviorally identical to repeated model.add calls):
# conv -> pool -> conv -> pool -> conv -> flatten -> dense head.
model = models.Sequential([
    layers.Conv2D(32, kernel_size=(3, 3), activation='relu',
                  input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax'),  # 10 digit classes
])

# Getting Data
from keras.datasets import mnist
from keras.utils import to_categorical

(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Add the channel axis -> (n_imgs, h, w, channels), then scale pixels to [0, 1].
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255.
class BasicBlock(layers.Layer):
    """Conv -> BatchNorm -> ReLU building block.

    Args:
        filters: number of convolution filters.
        strides: convolution stride (default 1).
        kernel: convolution kernel size (default (3, 3)).
    """

    def __init__(self, filters, strides=1, kernel=(3, 3)):
        super(BasicBlock, self).__init__()
        self.conv = layers.Conv2D(filters, kernel, strides=strides, padding='valid')
        self.bn = layers.BatchNormalization()
        self.relu = layers.ReLU()

    def call(self, inputs):
        x = self.bn(self.conv(inputs))
        x = self.relu(x)
        # BUG FIX: the original `call` never returned, so the layer's output
        # was None; return the activated tensor.
        return x
# ResNet-34-style classifier assembled from residual-block helpers.
# NOTE(review): only a truncated view — no `call` method and no definition
# of `top_flow`/`mid_flow` is visible here, and the bare name `keras` is not
# imported in this snippet (only `from keras import layers/models` above);
# presumably these exist elsewhere in the gist — confirm.
class ResNet34(keras.Model):
def __init__(self, classes=2, act = 'softmax', filters = []):
# NOTE(review): mutable default `filters=[]` is shared across calls — a
# known Python pitfall; also `filters[:3]` vs `filters[:2]` below looks
# inconsistent with the "first 8 / last 8 residual blocks" comments.
super(ResNet34,self).__init__()
# Stem: 7x7 stride-2 convolution followed by 2x2 stride-2 max-pooling.
self.conv1 = layers.Conv2D(64, kernel_size = (7,7), strides = 2, padding = 'valid')
self.pool = layers.MaxPooling2D(pool_size = (2, 2), strides = 2,
padding='valid')
self.top = top_flow(filters[:3]) # first 8 residual blocks
self.mid, self.tail = mid_flow(filters[:2]) # last 8 residual block
# Classification head: global average pooling into a dense output layer.
self.avgpool = layers.GlobalAveragePooling2D()
self.linear = layers.Dense(classes, activation = act)
@Blaizzy
Blaizzy / LSTM.py
Last active September 12, 2019 16:09
# LSTM architecture Pseudocode
# Part 1/2
# Output gate: combines the current input, previous state, and carry C_t.
output_t = np.tanh(np.dot(input_t, self.Wo) + np.dot(state_t, self.Uo) + np.dot(C_t, self.Vo) + self.bo)
# BUG FIX: renamed `i_f` -> `i_t` to match the comment below ("combining
# i_t, f_t and k_t") and the standard input-gate-at-time-t notation.
i_t = sigmoid(np.dot(input_t, self.Wi) + np.dot(state_t, self.Ui) + self.bi)
f_t = sigmoid(np.dot(input_t, self.Wf) + np.dot(state_t, self.Uf) + self.bf)
k_t = sigmoid(np.dot(input_t, self.Wk) + np.dot(state_t, self.Uk) + self.bk)
# You obtain the new carry state by combining i_t, f_t and k_t.
# Part 2/2