Running pyro tests
=================================== FAILURES ===================================
________________________ test_mean_and_variance[Normal] ________________________
[gw50] linux -- Python 3.6.3 /opt/conda/envs/pytorch-py3.6/bin/python

dist = <tests.distributions.dist_fixture.Fixture object at 0x7f3d68010fd0>

    def test_mean_and_variance(dist):
        for idx in dist.get_test_data_indices():
            num_samples = dist.get_num_samples(idx)
            dist_params = dist.get_dist_params(idx)
            torch_samples = dist.get_samples(num_samples, **dist_params)
            sample_mean = torch_samples.float().mean(0)
            sample_var = torch_samples.float().var(0)
            try:
                analytic_mean = dist.pyro_dist.analytic_mean(**dist_params)
                analytic_var = dist.pyro_dist.analytic_var(**dist_params)
                assert_equal(sample_mean, analytic_mean, prec=dist.prec)
>               assert_equal(sample_var, analytic_var, prec=dist.prec)

tests/distributions/test_distributions.py:106:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/common.py:193: in assert_equal
    assert_tensors_equal(x, y, prec, msg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

a =
 16.1697
[torch.FloatTensor of size 1]
b =
 16
[torch.FloatTensor of size 1]
, prec = 0.07, msg = ''

    def assert_tensors_equal(a, b, prec=1e-5, msg=''):
        assert a.size() == b.size(), msg
        if prec == 0:
            assert (a == b).all(), msg
        elif a.numel() > 0:
            b = b.type_as(a)
            b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
            # check that NaNs are in the same locations
            nan_mask = a != a
            assert torch.equal(nan_mask, b != b), msg
            diff = a - b
            diff[nan_mask] = 0
            if diff.is_signed():
                diff = diff.abs()
            max_err = diff.max()
>           assert max_err < prec, msg
E           AssertionError

tests/common.py:153: AssertionError
!!!!!!!!!!!! xdist.dsession.Interrupted: stopping after 1 failures !!!!!!!!!!!!!
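
Note on the failure above: this looks statistical rather than a logic bug. The sample variance 16.1697 misses the analytic value 16 by about 0.17, while the fixture tolerance is prec = 0.07. For n i.i.d. Normal draws with variance sigma^2, the sample variance itself has standard deviation sigma^2 * sqrt(2 / (n - 1)), so a fixed absolute tolerance is only safe when num_samples is large. A minimal sketch of the arithmetic (plain Python; the sample counts are hypothetical, not the fixture's actual num_samples):

    import math

    # sd of the sample variance of n i.i.d. Normal draws with variance
    # sigma^2 is sigma^2 * sqrt(2 / (n - 1)); sigma^2 = 16 matches the
    # analytic value in the failure above.
    sigma2 = 16.0
    for n in (10_000, 100_000, 1_000_000):
        sd = sigma2 * math.sqrt(2.0 / (n - 1))
        print('n = {:>9}: sd(sample_var) ~ {:.4f} vs prec = 0.07'.format(n, sd))

If num_samples were on the order of 100,000, the spread (~0.072) already exceeds the 0.07 tolerance and the observed 0.17 error would be only a ~2.4-sigma event; loosening prec or raising num_samples would deflake this.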
=================================== FAILURES ===================================
________________ test_subsample_gradient[TraceGraph-nonreparam] ________________
[gw7] linux -- Python 3.6.3 /opt/conda/envs/pytorch-py3.6/bin/python

trace_graph = True, reparameterized = False

    @pytest.mark.parametrize("reparameterized", [True, False], ids=["reparam", "nonreparam"])
    @pytest.mark.parametrize("trace_graph", [False, True], ids=["Trace", "TraceGraph"])
    def test_subsample_gradient(trace_graph, reparameterized):
        pyro.clear_param_store()
        data_size = 2
        subsample_size = 1
        num_particles = 1000
        precision = 0.333
        data = dist.normal(ng_zeros(data_size), ng_ones(data_size))

        def model(subsample_size):
            with pyro.iarange("data", len(data), subsample_size) as ind:
                x = data[ind]
                z = pyro.sample("z", dist.Normal(ng_zeros(len(x)), ng_ones(len(x)),
                                                 reparameterized=reparameterized))
                pyro.observe("x", dist.Normal(z, ng_ones(len(x)), reparameterized=reparameterized), x)

        def guide(subsample_size):
            mu = pyro.param("mu", lambda: Variable(torch.zeros(len(data)), requires_grad=True))
            sigma = pyro.param("sigma", lambda: Variable(torch.ones(1), requires_grad=True))
            with pyro.iarange("data", len(data), subsample_size) as ind:
                mu = mu[ind]
                sigma = sigma.expand(subsample_size)
                pyro.sample("z", dist.Normal(mu, sigma, reparameterized=reparameterized))

        optim = Adam({"lr": 0.1})
        inference = SVI(model, guide, optim, loss="ELBO",
                        trace_graph=trace_graph, num_particles=num_particles)

        # Compute gradients without subsampling.
        inference.loss_and_grads(model, guide, subsample_size=data_size)
        params = dict(pyro.get_param_store().named_parameters())
        expected_grads = {name: param.grad.data.clone() for name, param in params.items()}
        zero_grads(params.values())

        # Compute gradients with subsampling.
        inference.loss_and_grads(model, guide, subsample_size=subsample_size)
        actual_grads = {name: param.grad.data.clone() for name, param in params.items()}

        for name in sorted(params):
            print('\nexpected {} = {}'.format(name, expected_grads[name].cpu().numpy()))
            print('actual {} = {}'.format(name, actual_grads[name].cpu().numpy()))
>       assert_equal(actual_grads, expected_grads, prec=precision)

tests/infer/test_gradient.py:60:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
tests/common.py:211: in assert_equal
    assert_equal(x_val, y[key], prec, msg='{} {}'.format(key, msg))
tests/common.py:193: in assert_equal
    assert_tensors_equal(x, y, prec, msg)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

a =
 1.3598
[torch.cuda.DoubleTensor of size 1 (GPU 0)]
b =
 1.7634
[torch.cuda.DoubleTensor of size 1 (GPU 0)]
, prec = 0.333
msg = 'sigma '

    def assert_tensors_equal(a, b, prec=1e-5, msg=''):
        assert a.size() == b.size(), msg
        if prec == 0:
            assert (a == b).all(), msg
        elif a.numel() > 0:
            b = b.type_as(a)
            b = b.cuda(device=a.get_device()) if a.is_cuda else b.cpu()
            # check that NaNs are in the same locations
            nan_mask = a != a
            assert torch.equal(nan_mask, b != b), msg
            diff = a - b
            diff[nan_mask] = 0
            if diff.is_signed():
                diff = diff.abs()
            max_err = diff.max()
>           assert max_err < prec, msg
E           AssertionError: sigma

tests/common.py:153: AssertionError
----------------------------- Captured stdout call -----------------------------

expected mu = [ 1.69177696  0.77558258]
actual mu = [ 1.5125116   0.87605821]

expected sigma = [ 1.76336247]
actual sigma = [ 1.35984357]
=============================== warnings summary ===============================
tests/infer/test_valid_models.py::test_iarange_wrong_size_error
  source:354: DeprecationWarning: invalid escape sequence \
-- Docs: http://doc.pytest.org/en/latest/warnings.html
!!!!!!!!!!!! xdist.dsession.Interrupted: stopping after 1 failures !!!!!!!!!!!!!
1 failed, 460 passed, 16 skipped, 10 xfailed, 1 xpassed, 1 warnings in 124.61 seconds
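
Note on the failure above: both expected_grads and actual_grads are Monte Carlo estimates over num_particles = 1000, and with reparameterized=False the ELBO gradient is estimated with the score-function (REINFORCE) estimator, which has much higher variance than the pathwise one. The observed sigma gap (|1.7634 - 1.3598| = 0.40 vs prec = 0.333) is therefore plausibly estimator noise rather than a subsampling bug. A hypothetical NumPy illustration (not Pyro's actual estimator; it just shows the variance gap for d/dmu E[z^2] with z ~ Normal(mu, 1), whose true gradient is 2*mu):

    import numpy as np

    rng = np.random.RandomState(0)
    mu, n = 1.0, 1000
    z = mu + rng.randn(n)
    score_grad = z ** 2 * (z - mu)   # f(z) * d/dmu log N(z; mu, 1)
    reparam_grad = 2 * z             # d/dmu f(mu + eps), pathwise
    for name, g in [('score', score_grad), ('reparam', reparam_grad)]:
        print('{:7s} grad = {:.3f} +/- {:.3f} (true 2.0)'.format(
            name, g.mean(), g.std() / np.sqrt(n)))

Both estimators are unbiased, but at 1000 particles the score-function standard error comes out roughly three times the pathwise one here, consistent with this test needing the loose prec = 0.333 and still occasionally tripping it.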
=================================== FAILURES ===================================
____________________________ test_cuda[dmm/dmm.py] _____________________________
[gw8] linux -- Python 3.6.3 /opt/conda/envs/pytorch-py3.6/bin/python

example = '/tmp/2587/examples/dmm/dmm.py', args = [['--num-epochs=1'], '--cuda']

    @requires_cuda
    @pytest.mark.stage("test_examples")
    @pytest.mark.parametrize('example,args', CUDA_EXAMPLES.items(), ids=list(CUDA_EXAMPLES))
    def test_cuda(example, args):
        example = os.path.join(EXAMPLES_DIR, example)
>       check_call([sys.executable, example] + args)

tests/test_examples.py:52:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
/opt/conda/envs/pytorch-py3.6/lib/python3.6/subprocess.py:286: in check_call
    retcode = call(*popenargs, **kwargs)
/opt/conda/envs/pytorch-py3.6/lib/python3.6/subprocess.py:267: in call
    with Popen(*popenargs, **kwargs) as p:
/opt/conda/envs/pytorch-py3.6/lib/python3.6/subprocess.py:709: in __init__
    restore_signals, start_new_session)
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <subprocess.Popen object at 0x7fc32f6971d0>
args = ['/opt/conda/envs/pytorch-py3.6/bin/python', '/tmp/2587/examples/dmm/dmm.py', ['--num-epochs=1'], '--cuda']
executable = b'/opt/conda/envs/pytorch-py3.6/bin/python', preexec_fn = None
close_fds = True, pass_fds = (), cwd = None, env = None, startupinfo = None
creationflags = 0, shell = False, p2cread = -1, p2cwrite = -1, c2pread = -1
c2pwrite = -1, errread = -1, errwrite = -1, restore_signals = True
start_new_session = False

    def _execute_child(self, args, executable, preexec_fn, close_fds,
                       pass_fds, cwd, env,
                       startupinfo, creationflags, shell,
                       p2cread, p2cwrite,
                       c2pread, c2pwrite,
                       errread, errwrite,
                       restore_signals, start_new_session):
        """Execute program (POSIX version)"""
        if isinstance(args, (str, bytes)):
            args = [args]
        else:
            args = list(args)
        if shell:
            args = ["/bin/sh", "-c"] + args
            if executable:
                args[0] = executable
        if executable is None:
            executable = args[0]
        orig_executable = executable

        # For transferring possible exec failure from child to parent.
        # Data format: "exception name:hex errno:description"
        # Pickle is not used; it is complex and involves memory allocation.
        errpipe_read, errpipe_write = os.pipe()
        # errpipe_write must not be in the standard io 0, 1, or 2 fd range.
        low_fds_to_close = []
        while errpipe_write < 3:
            low_fds_to_close.append(errpipe_write)
            errpipe_write = os.dup(errpipe_write)
        for low_fd in low_fds_to_close:
            os.close(low_fd)
        try:
            try:
                # We must avoid complex work that could involve
                # malloc or free in the child process to avoid
                # potential deadlocks, thus we do all this here.
                # and pass it to fork_exec()
                if env is not None:
                    env_list = []
                    for k, v in env.items():
                        k = os.fsencode(k)
                        if b'=' in k:
                            raise ValueError("illegal environment variable name")
                        env_list.append(k + b'=' + os.fsencode(v))
                else:
                    env_list = None  # Use execv instead of execve.
                executable = os.fsencode(executable)
                if os.path.dirname(executable):
                    executable_list = (executable,)
                else:
                    # This matches the behavior of os._execvpe().
                    executable_list = tuple(
                        os.path.join(os.fsencode(dir), executable)
                        for dir in os.get_exec_path(env))
                fds_to_keep = set(pass_fds)
                fds_to_keep.add(errpipe_write)
                self.pid = _posixsubprocess.fork_exec(
                        args, executable_list,
                        close_fds, tuple(sorted(map(int, fds_to_keep))),
                        cwd, env_list,
                        p2cread, p2cwrite, c2pread, c2pwrite,
                        errread, errwrite,
                        errpipe_read, errpipe_write,
>                       restore_signals, start_new_session, preexec_fn)
E                       TypeError: expected str, bytes or os.PathLike object, not list

/opt/conda/envs/pytorch-py3.6/lib/python3.6/subprocess.py:1275: TypeError
!!!!!!!!!!!! xdist.dsession.Interrupted: stopping after 1 failures !!!!!!!!!!!!!
========================== 1 failed in 12.60 seconds ===========================
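
Note on the failure above: unlike the first two, this is a genuine bug in the test parametrization, not flakiness. The fixture value is args = [['--num-epochs=1'], '--cuda'], so a nested list survives into the argv handed to Popen, and _posixsubprocess rejects the non-string element with the TypeError shown. A hypothetical fix sketch (the flatten helper and run_example wrapper are illustrative, not the actual test code; the real fix likely belongs where CUDA_EXAMPLES is assembled):

    import os
    import subprocess
    import sys

    def flatten(args):
        # Flatten one level of nesting so every argv element is a string.
        flat = []
        for a in args:
            if isinstance(a, (list, tuple)):
                flat.extend(a)
            else:
                flat.append(a)
        return flat

    def run_example(example, args, examples_dir='examples'):
        script = os.path.join(examples_dir, example)
        # e.g. run_example('dmm/dmm.py', [['--num-epochs=1'], '--cuda'])
        # now builds ['python', '.../dmm.py', '--num-epochs=1', '--cuda']
        subprocess.check_call([sys.executable, script] + flatten(args))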