Skip to content

Instantly share code, notes, and snippets.

@davipatti
Created October 20, 2022 19:18
Show Gist options
  • Save davipatti/1f1858c9353fb7fbdba6ae7f584b096c to your computer and use it in GitHub Desktop.
[aesara error] Error when trying to use aesara.scan with aesara==2.8.7 pymc==4.2.2 #aesara #pymc
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/basic.py:111, in Container.__set__(self, value)
109 try:
110 # Use in-place filtering when/if possible
--> 111 self.storage[0] = self.type.filter_inplace(
112 value, self.storage[0], **kwargs
113 )
114 except NotImplementedError:
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/graph/type.py:130, in Type.filter_inplace(self, value, storage, strict, allow_downcast)
111 """Return data or an appropriately wrapped/converted data by converting it in-place.
112
113 This method allows one to reuse old allocated memory. If this method
(...)
128 NotImplementedError
129 """
--> 130 raise NotImplementedError()
NotImplementedError:
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/vm.py:1246, in VMLinker.make_all(self, profiler, input_storage, output_storage, storage_map)
1242 # no-recycling is done at each VM.__call__ So there is
1243 # no need to cause duplicate c code by passing
1244 # no_recycling here.
1245 thunks.append(
-> 1246 node.op.make_thunk(node, storage_map, compute_map, [], impl=impl)
1247 )
1248 linker_make_thunk_time[node] = time.time() - thunk_start
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/scan/op.py:1534, in Scan.make_thunk(self, node, storage_map, compute_map, no_recycling, impl)
1531 # Analyse the compile inner function to determine which inputs and
1532 # outputs are on the gpu and speed up some checks during the execution
1533 outs_is_tensor = [
-> 1534 isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.outputs
1535 ]
1537 try:
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/scan/op.py:1466, in Scan.fn(self)
1464 profile = self.profile
-> 1466 self._fn = pfunc(
1467 wrapped_inputs,
1468 wrapped_outputs,
1469 mode=self.mode_instance,
1470 accept_inplace=False,
1471 profile=profile,
1472 on_unused_input="ignore",
1473 fgraph=self.fgraph,
1474 )
1476 return self._fn
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/pfunc.py:371, in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys, fgraph)
359 inputs, cloned_outputs = construct_pfunc_ins_and_outs(
360 params,
361 outputs,
(...)
368 fgraph=fgraph,
369 )
--> 371 return orig_function(
372 inputs,
373 cloned_outputs,
374 mode,
375 accept_inplace=accept_inplace,
376 name=name,
377 profile=profile,
378 on_unused_input=on_unused_input,
379 output_keys=output_keys,
380 fgraph=fgraph,
381 )
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:1759, in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys, fgraph)
1758 with config.change_flags(compute_test_value="off"):
-> 1759 fn = m.create(defaults)
1760 finally:
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:1667, in FunctionMaker.create(self, input_storage, trustme, storage_map)
1665 self.profile.import_time += import_time
-> 1667 fn = self.function_builder(
1668 _fn,
1669 _i,
1670 _o,
1671 self.indices,
1672 self.outputs,
1673 defaults,
1674 self.unpack_single,
1675 self.return_none,
1676 self.output_keys,
1677 self,
1678 name=self.name,
1679 )
1681 fn.profile = self.profile
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:448, in Function.__init__(self, vm, input_storage, output_storage, indices, outputs, defaults, unpack_single, return_none, output_keys, maker, name)
447 else:
--> 448 c.value = value
449 c.required = required
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/basic.py:115, in Container.__set__(self, value)
114 except NotImplementedError:
--> 115 self.storage[0] = self.type.filter(value, **kwargs)
117 except Exception as e:
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/tensor/type.py:244, in TensorType.filter(self, data, strict, allow_downcast)
240 if not all(
241 ds == ts if ts is not None else True
242 for ds, ts in zip(data.shape, self.shape)
243 ):
--> 244 raise TypeError(
245 f"The type's shape ({self.shape}) is not compatible with the data's ({data.shape})"
246 )
248 if self.filter_checks_isfinite and not np.all(np.isfinite(data)):
TypeError: ("The type's shape ((844,)) is not compatible with the data's ((0,))", 'Container name "None"')
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
Cell In [40], line 30
21 mu, _ = aesara.scan(
22 f,
23 sequences=[i.T, gaps.T],
24 outputs_info=y0,
25 non_sequences=[y_max, b, rho],
26 )
28 pm.Normal("y_hat", mu=mu.T, sigma=pm.Exponential("sigma", 1), observed=responses)
---> 30 trace = pm.sample(random_seed=194965)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/sampling.py:527, in sample(draws, step, init, n_init, initvals, trace, chains, cores, tune, progressbar, model, random_seed, discard_tuned_samples, compute_convergence_checks, callback, jitter_max_retries, return_inferencedata, idata_kwargs, mp_ctx, **kwargs)
524 auto_nuts_init = False
526 initial_points = None
--> 527 step = assign_step_methods(model, step, methods=pm.STEP_METHODS, step_kwargs=kwargs)
529 if isinstance(step, list):
530 step = CompoundStep(step)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/sampling.py:229, in assign_step_methods(model, step, methods, step_kwargs)
221 selected = max(
222 methods,
223 key=lambda method, var=rv_var, has_gradient=has_gradient: method._competence(
224 var, has_gradient
225 ),
226 )
227 selected_steps[selected].append(var)
--> 229 return instantiate_steppers(model, steps, selected_steps, step_kwargs)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/sampling.py:147, in instantiate_steppers(model, steps, selected_steps, step_kwargs)
145 args = step_kwargs.get(step_class.name, {})
146 used_keys.add(step_class.name)
--> 147 step = step_class(vars=vars, model=model, **args)
148 steps.append(step)
150 unused_args = set(step_kwargs).difference(used_keys)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/step_methods/hmc/nuts.py:178, in NUTS.__init__(self, vars, max_treedepth, early_max_treedepth, **kwargs)
120 def __init__(self, vars=None, max_treedepth=10, early_max_treedepth=8, **kwargs):
121 r"""Set up the No-U-Turn sampler.
122
123 Parameters
(...)
176 `pm.sample` to the desired number of tuning steps.
177 """
--> 178 super().__init__(vars, **kwargs)
180 self.max_treedepth = max_treedepth
181 self.early_max_treedepth = early_max_treedepth
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/step_methods/hmc/base_hmc.py:95, in BaseHMC.__init__(self, vars, scaling, step_scale, is_cov, model, blocked, potential, dtype, Emax, target_accept, gamma, k, t0, adapt_step_size, step_rand, **aesara_kwargs)
92 else:
93 vars = [self._model.rvs_to_values.get(var, var) for var in vars]
---> 95 super().__init__(vars, blocked=blocked, model=self._model, dtype=dtype, **aesara_kwargs)
97 self.adapt_step_size = adapt_step_size
98 self.Emax = Emax
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/step_methods/arraystep.py:276, in GradientSharedStep.__init__(self, vars, model, blocked, dtype, logp_dlogp_func, **aesara_kwargs)
273 model = modelcontext(model)
275 if logp_dlogp_func is None:
--> 276 func = model.logp_dlogp_function(vars, dtype=dtype, **aesara_kwargs)
277 else:
278 func = logp_dlogp_func
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/model.py:642, in Model.logp_dlogp_function(self, grad_vars, tempered, **kwargs)
638 ip = self.initial_point(0)
639 extra_vars_and_values = {
640 var: ip[var.name] for var in extra_vars if var in input_vars and var not in grad_vars
641 }
--> 642 return ValueGradFunction(costs, grad_vars, extra_vars_and_values, **kwargs)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/model.py:382, in ValueGradFunction.__init__(self, costs, grad_vars, extra_vars_and_values, dtype, casting, compute_grads, **kwargs)
378 outputs = [cost]
380 inputs = grad_vars
--> 382 self._aesara_function = compile_pymc(inputs, outputs, givens=givens, **kwargs)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/pymc/aesaraf.py:970, in compile_pymc(inputs, outputs, random_seed, mode, **kwargs)
968 opt_qry = mode.provided_optimizer.including("random_make_inplace", check_parameter_opt)
969 mode = Mode(linker=mode.linker, optimizer=opt_qry)
--> 970 aesara_function = aesara.function(
971 inputs,
972 outputs,
973 updates={**rng_updates, **kwargs.pop("updates", {})},
974 mode=mode,
975 **kwargs,
976 )
977 return aesara_function
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/__init__.py:317, in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
311 fn = orig_function(
312 inputs, outputs, mode=mode, accept_inplace=accept_inplace, name=name
313 )
314 else:
315 # note: pfunc will also call orig_function -- orig_function is
316 # a choke point that all compilation must pass through
--> 317 fn = pfunc(
318 params=inputs,
319 outputs=outputs,
320 mode=mode,
321 updates=updates,
322 givens=givens,
323 no_default_updates=no_default_updates,
324 accept_inplace=accept_inplace,
325 name=name,
326 rebuild_strict=rebuild_strict,
327 allow_input_downcast=allow_input_downcast,
328 on_unused_input=on_unused_input,
329 profile=profile,
330 output_keys=output_keys,
331 )
332 return fn
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/pfunc.py:371, in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys, fgraph)
357 profile = ProfileStats(message=profile)
359 inputs, cloned_outputs = construct_pfunc_ins_and_outs(
360 params,
361 outputs,
(...)
368 fgraph=fgraph,
369 )
--> 371 return orig_function(
372 inputs,
373 cloned_outputs,
374 mode,
375 accept_inplace=accept_inplace,
376 name=name,
377 profile=profile,
378 on_unused_input=on_unused_input,
379 output_keys=output_keys,
380 fgraph=fgraph,
381 )
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:1759, in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys, fgraph)
1747 m = Maker(
1748 inputs,
1749 outputs,
(...)
1756 fgraph=fgraph,
1757 )
1758 with config.change_flags(compute_test_value="off"):
-> 1759 fn = m.create(defaults)
1760 finally:
1761 t2 = time.time()
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:1652, in FunctionMaker.create(self, input_storage, trustme, storage_map)
1649 start_import_time = aesara.link.c.cmodule.import_time
1651 with config.change_flags(traceback__limit=config.traceback__compile_limit):
-> 1652 _fn, _i, _o = self.linker.make_thunk(
1653 input_storage=input_storage_lists, storage_map=storage_map
1654 )
1656 end_linker = time.time()
1658 linker_time = end_linker - start_linker
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/basic.py:254, in LocalLinker.make_thunk(self, input_storage, output_storage, storage_map, **kwargs)
247 def make_thunk(
248 self,
249 input_storage: Optional["InputStorageType"] = None,
(...)
252 **kwargs,
253 ) -> Tuple["BasicThunkType", "InputStorageType", "OutputStorageType"]:
--> 254 return self.make_all(
255 input_storage=input_storage,
256 output_storage=output_storage,
257 storage_map=storage_map,
258 )[:3]
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/vm.py:1255, in VMLinker.make_all(self, profiler, input_storage, output_storage, storage_map)
1253 thunks[-1].lazy = False
1254 except Exception:
-> 1255 raise_with_op(fgraph, node)
1257 t1 = time.time()
1259 if self.profile:
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/utils.py:534, in raise_with_op(fgraph, node, thunk, exc_info, storage_map)
529 warnings.warn(
530 f"{exc_type} error does not allow us to add an extra error message"
531 )
532 # Some exception need extra parameter in inputs. So forget the
533 # extra long error message in that case.
--> 534 raise exc_value.with_traceback(exc_trace)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/vm.py:1246, in VMLinker.make_all(self, profiler, input_storage, output_storage, storage_map)
1241 thunk_start = time.time()
1242 # no-recycling is done at each VM.__call__ So there is
1243 # no need to cause duplicate c code by passing
1244 # no_recycling here.
1245 thunks.append(
-> 1246 node.op.make_thunk(node, storage_map, compute_map, [], impl=impl)
1247 )
1248 linker_make_thunk_time[node] = time.time() - thunk_start
1249 if not hasattr(thunks[-1], "lazy"):
1250 # We don't want all ops maker to think about lazy Ops.
1251 # So if they didn't specify that its lazy or not, it isn't.
1252 # If this member isn't present, it will crash later.
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/scan/op.py:1534, in Scan.make_thunk(self, node, storage_map, compute_map, no_recycling, impl)
1529 node_output_storage = [storage_map[r] for r in node.outputs]
1531 # Analyse the compile inner function to determine which inputs and
1532 # outputs are on the gpu and speed up some checks during the execution
1533 outs_is_tensor = [
-> 1534 isinstance(out, TensorVariable) for out in self.fn.maker.fgraph.outputs
1535 ]
1537 try:
1538 if impl == "py":
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/scan/op.py:1466, in Scan.fn(self)
1463 elif self.profile:
1464 profile = self.profile
-> 1466 self._fn = pfunc(
1467 wrapped_inputs,
1468 wrapped_outputs,
1469 mode=self.mode_instance,
1470 accept_inplace=False,
1471 profile=profile,
1472 on_unused_input="ignore",
1473 fgraph=self.fgraph,
1474 )
1476 return self._fn
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/pfunc.py:371, in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys, fgraph)
357 profile = ProfileStats(message=profile)
359 inputs, cloned_outputs = construct_pfunc_ins_and_outs(
360 params,
361 outputs,
(...)
368 fgraph=fgraph,
369 )
--> 371 return orig_function(
372 inputs,
373 cloned_outputs,
374 mode,
375 accept_inplace=accept_inplace,
376 name=name,
377 profile=profile,
378 on_unused_input=on_unused_input,
379 output_keys=output_keys,
380 fgraph=fgraph,
381 )
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:1759, in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys, fgraph)
1747 m = Maker(
1748 inputs,
1749 outputs,
(...)
1756 fgraph=fgraph,
1757 )
1758 with config.change_flags(compute_test_value="off"):
-> 1759 fn = m.create(defaults)
1760 finally:
1761 t2 = time.time()
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:1667, in FunctionMaker.create(self, input_storage, trustme, storage_map)
1664 import_time = aesara.link.c.cmodule.import_time - start_import_time
1665 self.profile.import_time += import_time
-> 1667 fn = self.function_builder(
1668 _fn,
1669 _i,
1670 _o,
1671 self.indices,
1672 self.outputs,
1673 defaults,
1674 self.unpack_single,
1675 self.return_none,
1676 self.output_keys,
1677 self,
1678 name=self.name,
1679 )
1681 fn.profile = self.profile
1682 return fn
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/compile/function/types.py:448, in Function.__init__(self, vm, input_storage, output_storage, indices, outputs, defaults, unpack_single, return_none, output_keys, maker, name)
446 assert not refeed
447 else:
--> 448 c.value = value
449 c.required = required
450 c.implicit = input.implicit
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/link/basic.py:115, in Container.__set__(self, value)
111 self.storage[0] = self.type.filter_inplace(
112 value, self.storage[0], **kwargs
113 )
114 except NotImplementedError:
--> 115 self.storage[0] = self.type.filter(value, **kwargs)
117 except Exception as e:
118 e.args = e.args + (f'Container name "{self.name}"',)
File ~/.virtualenvs/mfsera-env/lib/python3.10/site-packages/aesara/tensor/type.py:244, in TensorType.filter(self, data, strict, allow_downcast)
235 raise TypeError(
236 "The numpy.ndarray object is not aligned."
237 " Aesara C code does not support that.",
238 )
240 if not all(
241 ds == ts if ts is not None else True
242 for ds, ts in zip(data.shape, self.shape)
243 ):
--> 244 raise TypeError(
245 f"The type's shape ({self.shape}) is not compatible with the data's ({data.shape})"
246 )
248 if self.filter_checks_isfinite and not np.all(np.isfinite(data)):
249 raise ValueError("Non-finite elements not allowed")
TypeError: ("The type's shape ((844,)) is not compatible with the data's ((0,))", 'Container name "None"')
Apply node that caused the error: for{inplace{0,2,3,},cpu,grad_of_scan_fn}(TensorConstant{4}, Elemwise{pow,no_inplace}.0, Elemwise{mul,no_inplace}.0, Elemwise{Composite{(i0 - (i1 / i2))}}.0, Elemwise{pow}.0, Subtensor{int64:int64:int64}.0, TensorConstant{[[12. 13. .. 11. 10.]]}, Subtensor{int64:int64:int64}.0, Subtensor{::int64}.0, TensorConstant{(2,) of 0.0}, DeepCopyOp.0, DeepCopyOp.0, InplaceDimShuffle{x}.0, InplaceDimShuffle{x}.0, Elemwise{sqr,no_inplace}.0, Elemwise{log}.0, Elemwise{EQ}.0)
Toposort index: 79
Inputs types: [TensorType(int64, ()), TensorType(float64, (None, None)), TensorType(float64, (None, None)), TensorType(float64, (None, None)), TensorType(float64, (None, None)), TensorType(float64, (None, None)), TensorType(float64, (4, 844)), TensorType(float64, (None, None)), TensorType(float64, (None, None)), TensorType(float64, (2,)), TensorType(float64, (2,)), TensorType(float64, (2,)), TensorType(float64, (1,)), TensorType(float64, (1,)), TensorType(float64, (1,)), TensorType(float64, (1,)), TensorType(bool, (1,))]
HINT: Use a linker other than the C linker to print the inputs' shapes and strides.
HINT: Re-running with most Aesara optimizations disabled could provide a back-trace showing when this node was created. This can be done by setting the Aesara flag 'optimizer=fast_compile'. If that does not work, Aesara optimizations can be disabled with 'optimizer=None'.
HINT: Use the Aesara flag `exception_verbosity=high` for a debug print-out and storage map footprint of this Apply node.
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment