AlessandroMondin (AI engineer, Berlin)
import logging

if __name__ == "__main__":
    logger = logging.getLogger(__name__)
    # "pt" selects the PyTorch code paths; "tf" selects TensorFlow
    lib = "pt"
    train_set, val_set = load_imagefolder(
        "../workspace_7/GTSRB/Final_Training/Images/", 0.1, lib
    )
    train_class = TrainModel(lib)
    epochs = 10
    lr = 0.1
    # number of classes in the GTSRB dataset
    num_outputs = 43
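The preview cuts the driver off here. A hedged continuation, assuming the 32x32x3 inputs used elsewhere in these snippets and the train_loop return value sketched below; the parameter initialisation is illustrative, not the gist's own code:

    # hypothetical parameter init (assumes torch and numpy are imported):
    # one weight row per flattened pixel value, one column per class
    W = torch.randn(32 * 32 * 3, num_outputs, requires_grad=True)
    b = torch.zeros(num_outputs, requires_grad=True)
    for epoch in range(epochs):
        losses, accuracies = train_class.train_loop(lr, train_set, W, b)
        logger.info(f"epoch {epoch}: mean loss {np.mean(losses):.4f}")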
import numpy as np
from tensorflow.keras.preprocessing import image_dataset_from_directory
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torch.utils.data.sampler import SubsetRandomSampler


# Takes three arguments: the directory of the image folder, the fraction of
# the data reserved for validation, and the library to use, either "tf"
# (TensorFlow) or "pt" (PyTorch).
def load_imagefolder(data_dir, val_size=0.1, library="tf"):
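    # (hedged sketch -- the preview ends at the signature; this body is an
    # assumption pieced together from the imports above, not the gist's code)
    if library == "tf":
        train_set = image_dataset_from_directory(
            data_dir, validation_split=val_size, subset="training",
            seed=0, image_size=(32, 32), batch_size=64)
        val_set = image_dataset_from_directory(
            data_dir, validation_split=val_size, subset="validation",
            seed=0, image_size=(32, 32), batch_size=64)
    else:
        dataset = datasets.ImageFolder(
            data_dir,
            transform=transforms.Compose(
                [transforms.Resize((32, 32)), transforms.ToTensor()]))
        # random train/validation split via index samplers
        indices = np.random.permutation(len(dataset))
        split = int(len(dataset) * val_size)
        train_set = DataLoader(dataset, batch_size=64,
                               sampler=SubsetRandomSampler(indices[split:]))
        val_set = DataLoader(dataset, batch_size=64,
                             sampler=SubsetRandomSampler(indices[:split]))
    return train_set, val_set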
import tensorflow as tf
import torch


def train_loop(self, lr, train_data, W, b):
    losses = []
    accuracies = []
    if self.library == "tf":
        for X, Y in train_data:
            with tf.GradientTape() as tape:
                # scale pixel values to [0, 1]
                X = X / 255.0
                # y_hat has shape (batch_size, num_of_classes)
                y_hat = self.logistic_regression(X, W, b)
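                    # (hedged continuation -- the preview ends above; assumes
                    # W and b are tf.Variable and Y holds integer labels)
                    one_hot = tf.cast(tf.one_hot(Y, y_hat.shape[1]), tf.bool)
                    loss = tf.reduce_mean(self.cross_entropy(y_hat, one_hot))
                # plain gradient-descent step
                dW, db = tape.gradient(loss, [W, b])
                W.assign_sub(lr * dW)
                b.assign_sub(lr * db)
                losses.append(loss.numpy())
                accuracies.append(self.accuracy(y_hat, Y).numpy())
        return losses, accuracies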
def logistic_regression(self, X, W, b):
    if self.library == "tf":
        # flatten_X has shape (batch_size, width*height*channels),
        # in our case (64, 32*32*3)
        flatten_X = tf.reshape(X, (-1, W.shape[0]))
        out = self.softmax(tf.matmul(flatten_X, W) + b)
    else:
        flatten_X = X.reshape((-1, W.shape[0]))
        out = self.softmax(torch.matmul(flatten_X, W) + b)
    return out
def cross_entropy(self, scaled_logits, one_hot):
    # one_hot must be a boolean mask: tf.boolean_mask and
    # torch.masked_select pick out the probability of the true class
    if self.library == "tf":
        masked_logits = tf.boolean_mask(scaled_logits, one_hot)
        ce = -tf.math.log(masked_logits)
    else:
        masked_logits = torch.masked_select(scaled_logits, one_hot)
        ce = -torch.log(masked_logits)
    return ce
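For reference, a minimal check of the PyTorch branch (the tensors are made up for illustration; note that cross_entropy returns per-sample losses, which are typically averaged):

import torch

scaled_logits = torch.tensor([[0.7, 0.2, 0.1],
                              [0.1, 0.8, 0.1]])
one_hot = torch.tensor([[True, False, False],
                        [False, True, False]])
ce = -torch.log(torch.masked_select(scaled_logits, one_hot))
print(ce)         # per-sample losses: -log(0.7), -log(0.8)
print(ce.mean())  # the scalar usually minimised during training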
def accuracy(self, y_hat, Y):
    if self.library == "tf":
        # index of the highest-scoring class for each sample
        argmax = tf.cast(tf.argmax(y_hat, axis=1), Y.dtype)
        acc = tf.math.reduce_sum(tf.cast(argmax == Y, tf.int32)) / Y.shape[0]
    else:
        argmax = torch.argmax(y_hat, dim=1)
        acc = torch.sum(torch.eq(argmax, Y)) / Y.shape[0]
    return acc
def softmax(self, logits):
    if self.library == "tf":
        exp = tf.exp(logits)
        denom = tf.math.reduce_sum(exp, 1, keepdims=True)
    else:
        exp = torch.exp(logits)
        denom = torch.sum(exp, dim=1, keepdim=True)
    # normalise so each row sums to 1 (the original preview
    # is missing this return)
    return exp / denom
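One caveat: exponentiating raw logits can overflow for large values. A common numerically stable variant subtracts the row-wise maximum first; a minimal PyTorch sketch (the TF branch would use tf.reduce_max the same way):

import torch

def stable_softmax(logits):
    # subtracting each row's max leaves the result unchanged
    # but keeps exp() from overflowing
    shifted = logits - torch.max(logits, dim=1, keepdim=True).values
    exp = torch.exp(shifted)
    return exp / torch.sum(exp, dim=1, keepdim=True)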
import torch.nn as nn


class CNNBlock(nn.Module):
    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=3,
                 stride=1,
                 padding=0):
        super(CNNBlock, self).__init__()
        self.seq_block = nn.Sequential(
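            # (hedged completion -- the preview stops at the Sequential call;
            # conv -> batch norm -> ReLU is assumed, not verbatim gist code)
            nn.Conv2d(in_channels, out_channels,
                      kernel_size=kernel_size,
                      stride=stride,
                      padding=padding),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.seq_block(x)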
class CNNBlocks(nn.Module):
    """
    Parameters:
        n_conv (int): creates a block of n_conv convolutions
        in_channels (int): number of in_channels of the block's first convolution
        out_channels (int): number of out_channels of the block's first convolution
        expand (bool): if True, the number of channels doubles after the
            first convolution of the block
    """
    def __init__(self,
                 n_conv,
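                 in_channels,
                 out_channels,
                 padding,
                 expand=True):
        super(CNNBlocks, self).__init__()
        # (hedged completion -- the preview ends at n_conv; the remaining
        # parameters and body below follow the docstring, not the gist)
        self.layers = nn.ModuleList()
        for i in range(n_conv):
            self.layers.append(
                CNNBlock(in_channels, out_channels, padding=padding))
            # each convolution consumes the previous output; with
            # expand=True the channels double after the first conv
            if expand and i == 0:
                in_channels, out_channels = out_channels, out_channels * 2
            else:
                in_channels = out_channels

    def forward(self, x):
        for layer in self.layers:
            x = layer(x)
        return x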
class Encoder(nn.Module):
    """
    Parameters:
        in_channels (int): number of in_channels of the first CNNBlocks
        out_channels (int): number of out_channels of the first CNNBlocks
        padding (int): padding applied in each convolution
        downhill (int): number of times a CNNBlocks + MaxPool2d pair is applied
    """
    def __init__(self,
                 in_channels,
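                 out_channels,
                 padding,
                 downhill=4):
        super(Encoder, self).__init__()
        # (hedged completion -- the preview stops at in_channels; a typical
        # UNet-style encoder per the docstring, with n_conv=2 as an assumption)
        self.enc_layers = nn.ModuleList()
        for _ in range(downhill):
            self.enc_layers.extend([
                CNNBlocks(n_conv=2, in_channels=in_channels,
                          out_channels=out_channels, padding=padding),
                nn.MaxPool2d(2, 2),
            ])
            in_channels = out_channels
            out_channels *= 2

    def forward(self, x):
        # keep pre-pooling activations as skip connections
        skip_connections = []
        for layer in self.enc_layers:
            x = layer(x)
            if isinstance(layer, CNNBlocks):
                skip_connections.append(x)
        return x, skip_connections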