(/scratch/rzou/pt/nt6-env) [1] rzou@devfair0317:/scratch/rzou/pt/nt6/benchmarks/operator_benchmark (nt9) $ python -m pt.add_test --list_tests
Traceback (most recent call last):
  File "/scratch/rzou/pt/nt6-env/lib/python3.7/runpy.py", line 193, in _run_module_as_main
    "__main__", mod_spec)
  File "/scratch/rzou/pt/nt6-env/lib/python3.7/runpy.py", line 85, in _run_code
    exec(code, run_globals)
  File "/scratch/rzou/pt/nt6/benchmarks/operator_benchmark/pt/add_test.py", line 6, in <module>
    import operator_benchmark as op_bench
  File "/scratch/rzou/pt/nt6/benchmarks/operator_benchmark/operator_benchmark.py", line 3, in <module>
    import benchmark_runner  # noqa
@zou3519
zou3519 / asdf.txt
Last active December 10, 2017 02:25
n people: 0, 1, ..., n-1
Each person i will receive, from Enlightened Santa Alter bot, a person j.
Encode this as an n-digit binary number, such that the jth element is 1 and everything else is 0.
For example, if person i receives person 3, their 'secret' number is (zero-indexed) 000100...0
Now, person i has some n-digit binary 'secret'. Call this x_i.
Construct n numbers (call them 'pieces'), {x_{i, 0}, x_{i, 1}, ..., x_{i, n-1}}
such that the XOR of all of them is equal to x_i.
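A minimal sketch of this splitting step, assuming each secret and piece is held as an n-bit Python integer; the helper names split_secret and recombine are illustrative, not from the gist:

import secrets

def split_secret(x_i, n):
    # Split the n-bit secret x_i into n pieces whose XOR equals x_i.
    pieces = [secrets.randbits(n) for _ in range(n - 1)]
    last = x_i
    for p in pieces:
        last ^= p  # choose the final piece so the XOR of all pieces comes out to x_i
    return pieces + [last]

def recombine(pieces):
    # XOR every piece back together to recover the secret.
    out = 0
    for p in pieces:
        out ^= p
    return out

# Example: with n = 8 people, person i received person 3 (bit 3 set).
n = 8
x_i = 1 << 3
pieces = split_secret(x_i, n)
assert recombine(pieces) == x_i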
@zou3519
zou3519 / warpshfl.txt
Created November 22, 2017 21:00
Before/after numbers from changing cuda varInnermostDim to use warp shuffle reductions
import torch

# 1-D, 100 elements
tensor = torch.randn(100).cuda()
%timeit tensor.var(0); torch.cuda.synchronize()

# 1-D, 10000 elements
tensor = torch.randn(10000).cuda()
%timeit tensor.var(0); torch.cuda.synchronize()

# 3-D, variance over the innermost dimension
tensor = torch.randn(1000, 2, 10).cuda()
%timeit tensor.var(2); torch.cuda.synchronize()
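Outside IPython, roughly the same numbers can be collected with CUDA events; this is a sketch under assumed warmup and iteration counts, not the measurement script from the gist:

import torch

def time_var(tensor, dim, iters=1000):
    # Time tensor.var(dim) on the GPU with CUDA events (a %timeit substitute).
    for _ in range(10):          # warm up so launch overhead is excluded
        tensor.var(dim)
    torch.cuda.synchronize()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        tensor.var(dim)
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / iters  # milliseconds per call

print(time_var(torch.randn(1000, 2, 10).cuda(), dim=2))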
import torch
from torch.autograd import Variable
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
v_in = Variable(torch.Tensor([0.1, 0.1]).view(2, 1), requires_grad=True)
def forward(v_in):
    f1 = lambda x: x * 2
# add this to TestAutograd in test/test_autograd.py
def test_convbackwardbackward(self):
    v_in = Variable(torch.Tensor([0.1, 0.1]).view(1, 2, 1, 1), requires_grad=True)

    def forward(v_in):
        f1 = lambda x: x * 0.0001
        f2 = torch.nn.Conv2d(2, 1, 1, 1)
        grad_out = Variable(torch.ones(1, 1, 1, 1))
        gradient = torch.autograd.grad(outputs=f2(f1(v_in)), inputs=v_in,
                                       grad_outputs=grad_out,
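For reference, a self-contained sketch of a conv double-backward check along these lines, using the same old-style Variable API; the create_graph flag and differentiating the gradient with respect to the conv weights are assumptions for illustration, not the original test's continuation:

import torch
import torch.nn as nn
from torch.autograd import Variable

v_in = Variable(torch.Tensor([0.1, 0.1]).view(1, 2, 1, 1), requires_grad=True)
f1 = lambda x: x * 0.0001
f2 = nn.Conv2d(2, 1, 1, 1)
grad_out = Variable(torch.ones(1, 1, 1, 1))

# First backward: keep the graph so the gradient itself can be differentiated.
gradient, = torch.autograd.grad(outputs=f2(f1(v_in)), inputs=v_in,
                                grad_outputs=grad_out, create_graph=True)

# Second backward: the input-gradient of a conv depends on the conv weights,
# so differentiating it again w.r.t. the weights exercises double backward.
double_grad, = torch.autograd.grad(outputs=gradient.sum(), inputs=f2.weight)
print(double_grad)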
import torch
from torch.autograd import Variable
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
v_in = Variable(torch.Tensor([0.1, 0.1]).view(1, 2, 1, 1), requires_grad=True)
f1 = lambda x: x * 2
f2 = nn.Conv2d(2, 1, 1, 1)
import re
import subprocess
import sys
PY3 = sys.version_info >= (3, 0)
reinforce_cmd = 'python examples/reinforcement_learning/reinforce.py'
actor_critic_cmd = 'python examples/reinforcement_learning/actor_critic.py'
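A sketch, under assumptions, of how output from these example runs could be captured and parsed; run_and_capture, last_reward, and the 'reward' log pattern are illustrative guesses, not code from the gist:

import re
import subprocess
import sys

PY3 = sys.version_info >= (3, 0)
reinforce_cmd = 'python examples/reinforcement_learning/reinforce.py'

def run_and_capture(cmd):
    # Run the example script and return its combined stdout/stderr as text.
    out = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    return out.decode('utf-8') if PY3 else out

def last_reward(output):
    # Grab the last number following 'reward' in the output.
    # The exact log format of the example scripts is an assumption here.
    matches = re.findall(r'reward[:=]?\s*([-+]?\d+\.?\d*)', output, flags=re.IGNORECASE)
    return float(matches[-1]) if matches else None

if __name__ == '__main__':
    print(last_reward(run_and_capture(reinforce_cmd)))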
import numpy as np
import torch
from torch.autograd import Variable
def set_seed(seed):
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

def main():
@zou3519
zou3519 / test.out
Created November 10, 2017 14:46
allennlp test run
----------------------------- Captured stderr call -----------------------------
2017-11-09 22:23:15,113 - INFO - allennlp.common.checks - Pytorch version: 0.3.0b0+1f694e9
_________________________ EntropyTest.test_masked_case _________________________
self = <tests.training.metrics.entropy_test.EntropyTest testMethod=test_masked_case>
def test_masked_case(self):
    metric = Entropy()
    # This would have non-zero entropy without the mask.
    logits = torch.Tensor([[1, 1, 1, 1],