import numpy as np
import tensorflow as tf
import torch


def train_loop(self, lr, train_data, W, b):
    losses = []
    accuracies = []
    if self.library == "tf":
        for X, Y in train_data:
            with tf.GradientTape() as tape:
                X = X / 255.0
                # y_hat has shape (batch_size, num_of_classes)
                y_hat = self.logistic_regression(X, W, b)
                one_hot = tf.one_hot(Y, 43)
                loss = self.cross_entropy(y_hat, one_hot)
            losses.append(tf.math.reduce_mean(loss).numpy())
            # tape.gradient sums the gradients of a non-scalar loss over its
            # elements: with targets [y1, y2] and sources [x1, x2], the result is
            # [dy1/dx1 + dy2/dx1, dy1/dx2 + dy2/dx2]
            grads = tape.gradient(loss, [W, b])
            self.sgd([W, b], grads, lr, X.shape[0])
            acc = self.accuracy(y_hat, Y)
            accuracies.append(acc)
    else:
        for X, Y in train_data:
            # X = X / 255
            y_hat = self.logistic_regression(X, W, b)
            one_hot = torch.nn.functional.one_hot(Y, 43).bool()
            loss = self.cross_entropy(y_hat, one_hot)
            losses.append(torch.mean(loss).item())
            # grad_outputs=torch.ones_like(loss) makes autograd.grad sum the
            # per-sample gradients, mirroring tape.gradient above
            grads = torch.autograd.grad(loss, [W, b],
                                        grad_outputs=torch.ones_like(loss))
            self.sgd([W, b], grads, lr, X.shape[0])
            acc = self.accuracy(y_hat, Y)
            accuracies.append(acc)
    return np.mean(losses), np.mean(accuracies)
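For context: the helpers referenced above (self.logistic_regression, self.cross_entropy, self.sgd, self.accuracy) are methods of the surrounding class and are not included in this gist. Below is a minimal standalone sketch of plausible torch versions, inferred only from how they are called; the bodies are assumptions, not the author's implementation.

import torch

# Hypothetical helpers matching the calls in train_loop (assumed, not from the gist).

def logistic_regression(X, W, b):
    # Flatten each input, apply the linear map, and softmax over the 43 classes.
    logits = X.reshape(X.shape[0], -1) @ W + b
    return torch.softmax(logits, dim=1)

def cross_entropy(y_hat, one_hot):
    # Per-sample negative log-likelihood. one_hot is a boolean mask (see the
    # .bool() call above), so y_hat[one_hot] selects each sample's probability
    # for its true class, giving a loss of shape (batch_size,).
    return -torch.log(y_hat[one_hot])

def sgd(params, grads, lr, batch_size):
    # Plain mini-batch SGD: the grads are summed over the batch, so dividing
    # by batch_size averages them before the update step.
    with torch.no_grad():
        for param, grad in zip(params, grads):
            param -= lr * grad / batch_size

def accuracy(y_hat, Y):
    # Fraction of samples whose argmax prediction matches the label.
    return (y_hat.argmax(dim=1) == Y).float().mean().item()

A non-scalar per-sample loss is what makes grad_outputs=torch.ones_like(loss) necessary in train_loop; with a scalar mean loss, a plain loss.backward() (and no batch_size division in sgd) would be the more common pattern.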