Skip to content

Instantly share code, notes, and snippets.

View dvgodoy's full-sized avatar

Daniel Voigt Godoy dvgodoy

View GitHub Profile
@dvgodoy
dvgodoy / vae.py
Last active April 30, 2022 09:47
# NOTE(review): this gist capture is truncated — the nn.Sequential(...) call
# below is never closed in this excerpt; the remaining encoder layers are
# presumably defined past this point. Verify against the original gist.
set_seed(13)
# Latent dimension of the autoencoder's code.
z_size = 1
# Single-channel 28x28 images — presumably MNIST-style data; TODO confirm.
input_shape = (1, 28, 28)
base_model = nn.Sequential(
    # (C, H, W) -> C*H*W
    nn.Flatten(),
    # C*H*W -> 2048
    nn.Linear(np.prod(input_shape), 2048),
class EncoderVar(nn.Module):
    """Variational encoder head: runs ``base_model`` over the input and maps
    its features to the parameters of a ``z_size``-dimensional latent Gaussian.

    NOTE(review): this excerpt is truncated — ``get_output_size()`` and the
    ``forward()`` method are not visible here; confirm against the full gist.
    """
    def __init__(self, input_shape, z_size, base_model):
        super().__init__()
        self.z_size = z_size            # latent dimension
        self.input_shape = input_shape  # e.g. (C, H, W) of the inputs
        self.base_model = base_model    # feature-extractor backbone
        # Flattened feature size produced by base_model
        # (helper defined outside this view).
        output_size = self.get_output_size()
        # Two parallel linear heads: mean and variance of the latent
        # distribution (presumably log-variance, per VAE convention — TODO confirm).
        self.lin_mu = nn.Linear(output_size, z_size)
        self.lin_var = nn.Linear(output_size, z_size)
# Reseed so model initialization / training is reproducible from here on.
set_seed(13)
# Prefer GPU when available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# NOTE(review): `model_ae` is constructed outside this excerpt.
model_ae.to(device)
# Pixel-wise reconstruction loss for the autoencoder.
loss_fn = nn.MSELoss()
optim = torch.optim.Adam(model_ae.parameters(), 0.0003)  # lr = 3e-4
num_epochs = 10
train_losses = []  # losses collected during training — per epoch or per step, TODO confirm
class AutoEncoder(nn.Module):
    """Composes an encoder and a decoder into one reconstruction model.

    Both submodules are supplied by the caller and stored unchanged; the
    forward pass pipes the input through the encoder and hands the resulting
    code straight to the decoder.
    """

    def __init__(self, encoder, decoder):
        super().__init__()
        self.enc, self.dec = encoder, decoder

    def forward(self, x):
        """Return the decoder's reconstruction of the encoder's code for ``x``."""
        return self.dec(self.enc(x))
# Decoder: maps a z_size-dimensional code back to a flat image vector.
# NOTE(review): truncated in this capture — the nn.Sequential(...) call is
# never closed, and the final (C, H, W) reshape layer (presumably an
# nn.Unflatten, possibly preceded by an output activation) is not visible;
# check the full gist.
decoder = nn.Sequential(
    # z_size -> 2048
    nn.Linear(z_size, 2048),
    nn.LeakyReLU(),
    # 2048 -> 2048
    nn.Linear(2048, 2048),
    nn.LeakyReLU(),
    # 2048 -> C*H*W
    nn.Linear(2048, np.prod(input_shape)),
    # C*H*W -> (C, H, W)
import torch.nn as nn
def set_seed(self, seed=42):
    """Make runs reproducible: seed the NumPy and PyTorch RNGs with ``seed``
    and pin cuDNN to its deterministic, non-benchmarking code paths.

    ``self`` is unused in the body; the signature is kept exactly as in the
    original source, where this appears to be defined as a method.
    """
    # Seed both RNG families with the same value.
    np.random.seed(seed)
    torch.manual_seed(seed)
    # cuDNN's autotuner can select different algorithms run-to-run — turn it
    # off and force the deterministic implementations.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
class Encoder(nn.Module):
    """Deterministic encoder — presumably the non-variational counterpart to
    ``EncoderVar``, sharing the same constructor signature.

    NOTE(review): this excerpt is cut off immediately after the constructor
    signature; the body is not visible here — consult the full gist.
    """
    def __init__(self, input_shape, z_size, base_model):
        """Constructor — body truncated in this capture."""
import numpy as np
import matplotlib.pyplot as plt
import torch
from PIL import Image
from torch.utils.data import TensorDataset, DataLoader
def draw_circle(radius, center_x=0.5, center_y=0.5, size=28):
    """Render a circle outline (center and radius in axis coordinates) on a
    small matplotlib figure.

    NOTE(review): truncated — the remainder presumably rasterizes the figure
    into a ``size`` x ``size`` image, but it is not visible in this capture.
    """
    # draw a circle using coordinates for the center, and the radius
    circle = plt.Circle((center_x, center_y), radius, color='k', fill=False)
    fig, ax = plt.subplots(figsize=(1, 1))
# --- One forward pass + loss of batch gradient descent for y = b + w*x ---
# NOTE(review): `b`, `w`, `x_train`, and `y_train` are defined elsewhere in
# the original script; this excerpt shows only the prediction and the loss.

# Computes our model's predicted output - forward pass
yhat = b + w * x_train
# Computing the loss
# We are using ALL data points, so this is BATCH gradient
# descent. How wrong is our model? That's the error!
error = (yhat - y_train)
# It is a regression, so it computes mean squared error (MSE)
loss = (error ** 2).mean()
# Initializes parameters "b" and "w" randomly
np.random.seed(42)
b = np.random.randn(1)
w = np.random.randn(1)
# Shuffles the indices
# NOTE(review): `N` (number of data points) is defined outside this excerpt.
idx = np.arange(N)
np.random.shuffle(idx)
# Uses first 80 random indices for train
# (an 80/20 split — int(N*.8) points, literally 80 only when N == 100)
train_idx = idx[:int(N*.8)]
# Uses the remaining indices for validation
val_idx = idx[int(N*.8):]
# Generates train and validation sets
# NOTE(review): the excerpt ends here; the actual x/y slicing is not visible.