```
graph(%alist.1 : Tensor):
  %26 : int = prim::Constant[value=1]()
  %18 : int = prim::Constant[value=0]()
  %16 : bool = prim::Constant[value=1]() # /home/wanchaol/test.py:194:4
  %4 : None = prim::Constant()
  %1 : int = prim::Constant[value=3]() # /home/wanchaol/test.py:193:22
  %2 : int = prim::Constant[value=4]() # /home/wanchaol/test.py:193:25
  %9 : int = prim::Constant[value=9223372036854775807]() # /home/wanchaol/test.py:194:23
  %3 : int[] = prim::ListConstruct(%1, %2)
  %res.1 : Tensor = aten::randn(%3, %4, %4, %4, %4) # /home/wanchaol/test.py:193:10
```
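A dump like this is what you get by printing the `.graph` attribute of a scripted function. A minimal sketch follows; the function body is hypothetical, chosen only to produce the same `prim::ListConstruct` and `aten::randn` nodes seen above:

```python
import torch

@torch.jit.script
def make_tensor():
    # torch.randn(3, 4) lowers to prim::ListConstruct(3, 4) followed by
    # aten::randn with None for dtype/layout/device/pin_memory
    res = torch.randn(3, 4)
    return res

print(make_tensor.graph)  # prints the TorchScript IR
```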
```
graph(%self : ClassType<ModelTrainer>,
      %data.1 : Float(*, *, *, *, *),
      %target.1 : Long(*, *)):
  %829 : int[] = prim::Constant[value=[0, 0]]()
  %828 : int[] = prim::Constant[value=[1, 1]]()
  %3 : str = prim::Constant[value="params"]() # test.py:140:38
  %4 : str = prim::Constant[value="step"]() # test.py:152:18
  %5 : str = prim::Constant[value="weight_decay"]() # test.py:155:29
  %6 : str = prim::Constant[value="lr"]() # test.py:166:32
  %7 : str = prim::Constant[value="lr_decay"]() # test.py:167:56
```
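The string constants in this second dump are the dictionary keys a scripted training step reads from the optimizer. A minimal sketch, assuming a stock Adagrad optimizer (the model and hyperparameters here are illustrative, not from the gist):

```python
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.Adagrad(model.parameters(), lr=0.01,
                                lr_decay=0.0, weight_decay=0.0)

# "params", "lr", "lr_decay", and "weight_decay" index into param_groups;
# "step" lives in the per-parameter state dict
for group in optimizer.param_groups:
    print(group['lr'], group['lr_decay'], group['weight_decay'])
    for p in group['params']:
        pass  # optimizer.state[p]['step'] is populated after the first step()
```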
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class eagerNet(nn.Module):
    def __init__(self):
        super(eagerNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # the forward body is cut off in the original snippet; this is the
        # standard MNIST-example body that matches the layers above
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 320)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
```
```python
class GreedySearchDecoder(nn.Module):
    def __init__(self, encoder, decoder, decoder_n_layers):
        super(GreedySearchDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self._device = device          # `device` is a module-level global here
        self._SOS_token = SOS_token    # likewise defined at module scope
        self._decoder_n_layers = decoder_n_layers

    # mark these attributes as compile-time constants for TorchScript
    __constants__ = ['_device', '_SOS_token', '_decoder_n_layers']
```
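For context, `__constants__` is how a module tells the script compiler to inline Python attributes as compile-time constants rather than treat them as mutable state. A minimal self-contained sketch (the `Scaler` module is illustrative, not from the gist):

```python
import torch
import torch.nn as nn

class Scaler(nn.Module):
    __constants__ = ['factor']

    def __init__(self, factor):
        super(Scaler, self).__init__()
        self.factor = factor

    def forward(self, x):
        # `factor` is baked into the compiled graph as a constant
        return x * self.factor

scripted = torch.jit.script(Scaler(2.0))
print(scripted(torch.ones(3)))  # tensor([2., 2., 2.])
```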
```python
# Luong attention layer
class Attn(torch.nn.Module):
    def __init__(self, method, hidden_size):
        super(Attn, self).__init__()
        self.method = method
        if self.method not in ['dot', 'general', 'concat']:
            raise ValueError(self.method, "is not an appropriate attention method.")
        self.hidden_size = hidden_size
        if self.method == 'general':
            self.attn = torch.nn.Linear(self.hidden_size, hidden_size)
```
```python
## first use case
import torch
import torch.nn.functional as F

inp = torch.randn(3, 3)
a = torch.jit.script(F.dropout)
a(inp)
## error:
# RuntimeError:
# undefined value _VF:
```
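The failure comes from `F.dropout`'s Python source referencing the internal `torch._VF` module, which the script compiler cannot resolve. A hedged workaround sketch, assuming you only need the op itself: call the underlying ATen op from your own scripted function instead of scripting `F.dropout` directly.

```python
import torch

@torch.jit.script
def dropout_fn(x: torch.Tensor) -> torch.Tensor:
    # torch.dropout is the ATen op that F.dropout ultimately dispatches to
    return torch.dropout(x, p=0.5, train=True)

print(dropout_fn(torch.randn(3, 3)))
```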
pip install git+https://github.com/arraiyopensource/kornia
====
```python
import torch
import torch.nn as nn
from torch.testing import assert_allclose
import kornia

@torch.jit.script
class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)

    def forward(self, x):
        x = x.view(-1, 784)
        h1 = self.fc1(x)
```
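Note that `@torch.jit.script` is not supported as a class decorator on an `nn.Module` subclass; it is meant for free functions and plain TorchScript classes. For modules, the working pattern is to script an instance. A minimal sketch, where the `forward` body is a hypothetical completion of the truncated snippet above:

```python
import torch
import torch.nn as nn

class VAE(nn.Module):
    def __init__(self):
        super(VAE, self).__init__()
        self.fc1 = nn.Linear(784, 400)
        self.fc21 = nn.Linear(400, 20)

    def forward(self, x):
        x = x.view(-1, 784)
        h1 = torch.relu(self.fc1(x))
        return self.fc21(h1)

scripted = torch.jit.script(VAE())           # compile an instance, not the class
print(scripted(torch.randn(2, 784)).shape)   # torch.Size([2, 20])
```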
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class MnistNet(nn.Module):
    def __init__(self):
        super(MnistNet, self).__init__()
        # self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        # self.conv2 = nn.Conv2d(1, 20, kernel_size=5)
        # self.conv2_drop = nn.Dropout2d()
```