
# Imports assumed by all the snippets below
import torch
import matplotlib.pyplot as plt

# Plotting how the slope and the intercept evolve over training
plt.plot(slope_list)
plt.plot(intercept_list)
plt.title('Learnt Values of Slope and Intercept')
plt.legend(['slope', 'intercept'])
plt.xlabel('#Epochs')
plt.ylabel('Learnt Parameters')
plt.show()
# Plotting the synthetic data points along with the learnt regression line
plt.scatter(x_list, y_list, color='cyan')
plt.plot((x_min, x_max), (m_learnt*x_min + c_learnt, m_learnt*x_max + c_learnt), color='r')
plt.title('Synthetic Data Points, with m = {} and c = {}'.format(round(m_learnt, 2), round(c_learnt, 2)))
plt.xlabel("Independent Variable 'x'")
plt.ylabel("Dependent Variable 'y'")
plt.show()
# Plotting the epoch losses
plt.plot(losses)
plt.title('Loss VS Epoch')
plt.xlabel('#Epoch')
plt.ylabel('Loss')
plt.show()
# Let's see what the learnt parameters are after training the model for a few thousand epochs
m_learnt = model.linear.weight.item()
c_learnt = model.linear.bias.item()
print('\nCompare the learnt parameters with the original ones')
print('\nm_synthetic VS m_learnt')
print(' {} {}'.format(m_synthetic, m_learnt))
print('\nc_synthetic VS c_learnt')
print(' {} {}'.format(c_synthetic, c_learnt))
losses = [] # to keep track of the epoch losses
slope_list = [] # to keep track of the slope learnt by the model
intercept_list = [] # to keep track of the intercept learnt by the model
EPOCHS = 2500
print('\nTRAINING...')
# Convert the data into tensors before using them to train the model.
data_x = torch.tensor([[x] for x in x_list], dtype=torch.float)
data_y = torch.tensor([[y] for y in y_list], dtype=torch.float)
for epoch in range(EPOCHS):
    # Clear the gradients accumulated by the optimizer before running back-propagation in PyTorch.
    optimizer.zero_grad()
    y_pred = model(data_x)            # forward pass
    loss = criterion(y_pred, data_y)  # mean squared error over the whole dataset
    loss.backward()                   # back-propagate the loss
    optimizer.step()                  # update the trainable parameters
    losses.append(loss.item())
    slope_list.append(model.linear.weight.item())
    intercept_list.append(model.linear.bias.item())
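# Sanity check (an addition, not in the original): 1-D least squares has a
# closed form that gradient descent should approach:
#   m* = cov(x, y) / var(x),   c* = mean(y) - m* * mean(x)
x_mean, y_mean = data_x.mean(), data_y.mean()
m_star = ((data_x - x_mean) * (data_y - y_mean)).sum() / ((data_x - x_mean) ** 2).sum()
c_star = y_mean - m_star * x_mean
print('Closed-form solution: m = {}, c = {}'.format(m_star.item(), c_star.item()))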
# Defining the Loss Function
# Mean Squared Error is the most common choice of Loss Function for Linear Regression models.
criterion = torch.nn.MSELoss()
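# For intuition: MSELoss is simply the mean of the squared residuals,
# loss = mean((y_pred - y) ** 2). A quick check with made-up values
# (illustrative only, not taken from the training data):
_pred = torch.tensor([[1.0], [2.0], [3.0]])
_target = torch.tensor([[1.5], [2.0], [2.5]])
assert torch.isclose(criterion(_pred, _target), ((_pred - _target) ** 2).mean())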
# Defining the Optimizer, which updates the trainable parameters of the model so that it fits the data distribution better.
optimizer = torch.optim.Adam(model.parameters(), lr = 0.0005)
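# Design note: for a convex problem this small, plain SGD would converge as
# well; Adam just adapts the step size per parameter. An equivalent
# alternative (assumed, not from the original):
# optimizer = torch.optim.SGD(model.parameters(), lr = 0.01)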
# Inspecting the model's trainable parameters; for Linear(1, 1) these are 'linear.weight' and 'linear.bias'.
for name, parameter in model.named_parameters():
    print('name : {}'.format(name))
    print('parameter : {}'.format(parameter.item()))
    print('learnable : {}'.format(parameter.requires_grad))
    print('parameter.shape: {}'.format(parameter.shape))
    print('---------------------------------')
# Defining the model architecture.
class LinearRegressionModel(torch.nn.Module):
    def __init__(self):
        super(LinearRegressionModel, self).__init__()
        # A single neuron: takes one scalar input and gives one scalar output (y = w*x + b).
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_pred = self.linear(x)
        return y_pred
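# Instantiating the model (the other snippets refer to it as `model`) and
# running a dummy forward pass to confirm the input/output shapes:
model = LinearRegressionModel()
print(model(torch.tensor([[1.0]])).shape)  # expected: torch.Size([1, 1])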