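A two-layer fully-connected network (500 → 50 → 5) with a ReLU hidden layer, trained on random data for 250 gradient-descent steps using hand-computed gradients in PyTorch: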
import torch

# Batch size = 32, input dim = 500, hidden dim = 50, output dim = 5
dtype = torch.float

# Create random tensors for data and weights
x = torch.randn(32, 500, dtype=dtype)
y = torch.randn(32, 5, dtype=dtype)
w1 = torch.randn(500, 50, dtype=dtype)
w2 = torch.randn(50, 5, dtype=dtype)

learning_rate = 1e-6
for t in range(250):
    # Forward pass: compute predictions and squared-error loss
    h = x.mm(w1)                  # hidden pre-activation, shape (32, 50)
    h_relu = h.clamp(min=0)       # ReLU
    y_pred = h_relu.mm(w2)        # predictions, shape (32, 5)
    loss = (y_pred - y).pow(2).sum()

    # Backward pass: compute gradients by hand via the chain rule
    grad_y_pred = 2.0 * (y_pred - y)          # dL/dy_pred
    grad_w2 = h_relu.t().mm(grad_y_pred)      # dL/dw2
    grad_h_relu = grad_y_pred.mm(w2.t())      # dL/dh_relu
    grad_h = grad_h_relu.clone()
    grad_h[h < 0] = 0                         # ReLU passes gradient only where h > 0
    grad_w1 = x.t().mm(grad_h)                # dL/dw1

    # Gradient descent step on the weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
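For comparison (not part of the original gist), the same loop can be written with PyTorch's autograd so the backward pass is derived automatically. This is a minimal sketch with the same shapes and hyperparameters: the weights are marked with requires_grad=True, and loss.backward() fills their .grad fields in place of the manual gradient code.

import torch

# Same shapes as above: batch 32, input 500, hidden 50, output 5
x = torch.randn(32, 500)
y = torch.randn(32, 5)
w1 = torch.randn(500, 50, requires_grad=True)
w2 = torch.randn(50, 5, requires_grad=True)

learning_rate = 1e-6
for t in range(250):
    # Forward pass: autograd records these operations
    y_pred = x.mm(w1).clamp(min=0).mm(w2)
    loss = (y_pred - y).pow(2).sum()

    # Backward pass: autograd computes w1.grad and w2.grad
    loss.backward()

    # Gradient descent step, kept out of the recorded graph
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad
        w1.grad.zero_()   # gradients accumulate, so clear them each step
        w2.grad.zero_()

The manual version makes the chain rule explicit; the autograd version is the form most PyTorch code takes in practice.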