import numpy as np

# Reverse unpacking nightmares
def f(a, b, c=3, *d, **e):
    print(a, b, c, d, e)

f(*(1, 2), **{'c': 3, 'foo': 'bar'})
f(1, 2, 3)
f(1, 2, 3, 4)
f(*(1, 2, 3, 4))
f(1, 2, **{'foo': 'bar'})
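# One way to recover named arguments from arbitrarily packed calls (my own
# suggestion, not something this gist proposes) is inspect.signature().bind(),
# which resolves *args/**kwargs unpacking back to the parameter names that a
# gradient-by-name API would need:
import inspect

bound = inspect.signature(f).bind(*(1, 2), **{'c': 3, 'foo': 'bar'})
bound.apply_defaults()
print(bound.arguments)  # -> {'a': 1, 'b': 2, 'c': 3, 'd': (), 'e': {'foo': 'bar'}}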
# Dynamic typing nightmares
def f(x, y):
    if x == 'foo':
        return y * 2
    else:
        return x * y

grad(f, wrt='x')(2, 3)      # == 3
grad(f, wrt='y')('foo', 3)  # == 2
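# Hypothetical illustration of what grad(f, wrt=...) above could desugar to:
# a runtime-tracing autodiff (here the `autograd` package) handles the dynamic
# branch, and inspect maps the parameter name to a positional argnum. The
# `grad_wrt` helper is my own name, not part of this gist.
import inspect
from autograd import grad as ag_grad

def grad_wrt(fun, wrt):
    argnum = list(inspect.signature(fun).parameters).index(wrt)
    return ag_grad(fun, argnum)

grad_wrt(f, 'x')(2.0, 3.0)    # == 3.0, d(x * y)/dx with y == 3
grad_wrt(f, 'y')('foo', 3.0)  # == 2.0, d(y * 2)/dy on the 'foo' branch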
# Example where (a) some arguments aren't differentiable and
# (b) a higher-order function where the adjoint of functional
# arguments is needed
y = np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
y = np.piecewise(x, [x < 0, x >= 0], [lambda x, c: -x * c, lambda x, c: x * c], 2)
conds = [x < 0, x >= 0]
y = np.piecewise(x, conds, [lambda x, c: -x * c, lambda x, c: x * c], 2)
in_ = (x, [x < 0, x >= 0])
y = np.piecewise(*in_, [lambda x, c: -x * c, lambda x, c: x * c], 2)
kwargs = {}
y = np.piecewise(*in_, [lambda x, c: -x * c, lambda x, c: x * c], 2, **kwargs)
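# For the simple cases above, the higher-order-function problem can be
# sidestepped by rewriting piecewise as np.where, which runtime tracers such
# as autograd already know how to differentiate (my rewrite, not the gist's):
import autograd.numpy as anp
from autograd import elementwise_grad

def piecewise_via_where(x, c=1.0):
    return anp.where(x < 0, -x * c, x * c)

elementwise_grad(piecewise_via_where)(anp.array([-2.0, 3.0]), 2.0)  # -> [-2., 2.]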
# Number of unpacked arguments is variable
funcs = [lambda x, *c: -x, lambda x, *c: x]
foo = [0] * np.random.randint(1, 4)
y = np.piecewise(x, conds, funcs, *foo)
# Example with (a) a variable number of arguments,
# (b) unpacking of return values, and (c) keyword arguments
x, y = np.broadcast_arrays([1, 2], [[1, 2], [3, 4]], subok=False)
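# The usual adjoint of broadcasting sums the upstream gradient over the axes
# that broadcasting added or stretched; a minimal sketch (the `unbroadcast`
# name is hypothetical, not an API this gist defines):
def unbroadcast(dy, shape):
    # Sum out leading axes that broadcasting prepended.
    while dy.ndim > len(shape):
        dy = dy.sum(axis=0)
    # Sum over axes that were stretched from size 1.
    for axis, size in enumerate(shape):
        if size == 1:
            dy = dy.sum(axis=axis, keepdims=True)
    return dy

unbroadcast(np.ones((2, 2)), np.shape([1, 2]))            # dy for the first input, shape (2,)
unbroadcast(np.ones((2, 2)), np.shape([[1, 2], [3, 4]]))  # dy for the second input, shape (2, 2)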
### Gradient specifications

@adjoint(np.piecewise)
def piecewise(x, y, dy, condlist, funclist, args, kw):
    # We have to construct an adjoint for each function in funclist.
    # Each of these adjoints will take elements from x, the output y and its gradient dy.
    # Each of these will return a dx element, but also gradients w.r.t. args and kwargs,
    # which must be summed.
    dx = np.zeros_like(x)
    ...
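# A rough sketch of how the elided body above could proceed (my reading, under
# the assumption that each piece's derivative is taken elementwise with
# autograd; gradients w.r.t. args/kw are omitted here):
from autograd import elementwise_grad

def piecewise_adjoint_sketch(x, dy, condlist, funclist, *args):
    dx = np.zeros_like(x)
    for cond, func in zip(condlist, funclist):
        if callable(func):
            # Derivative of this piece on the elements it applies to,
            # chained with the incoming gradient dy.
            dx[cond] = elementwise_grad(func)(x[cond], *args) * dy[cond]
        # Constant pieces (non-callable entries) contribute zero gradient.
    return dx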