- Site: https://graphite.dev
- Docs: https://docs.graphite.dev/
- Stacked PRs allow you to work like so:
  - Work on branch ACQ-000-part-A (which is based on master)
  - Work on branch ACQ-000-part-B (which is based on part-A)
  - ...and so on, one small branch per reviewable change (see the git sketch below)
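
A minimal plain-git sketch of that stack (branch names taken from the bullets above; Graphite's gt CLI automates the branch creation, restacking, and PR submission):

```bash
# Build the stack by hand; each branch becomes its own small PR.
git checkout master
git checkout -b ACQ-000-part-A   # part A, based on master
# ...commit part A...
git checkout -b ACQ-000-part-B   # part B, based on part A
# ...commit part B...
```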
```go
package main

import (
	"bufio"
	"flag"
	"fmt"
	"io"
	"math/rand"
	"os"
	"slices"
)
```
```bash
#!/usr/bin/env bash
set -e

COLOR='\033[1;32m'
NC='\033[0m' # No Color

# Print a highlighted log line, e.g.: log "building image"
log() {
  echo -e "${COLOR}# $*${NC}"
}
```
- Subset-of-ImageNet snippet, now available at: https://github.com/y0ast/pytorch-snippets/tree/main/subset_of_imagenet
- Minimal CIFAR snippet, now available at: https://github.com/y0ast/pytorch-snippets/tree/main/minimal_cifar
```python
import torch

def get_jacobian(net, x, noutputs):
    # Repeat the input noutputs times so that a single backward pass,
    # seeded with the identity matrix, recovers every row of the Jacobian.
    x = x.squeeze()
    x = x.repeat(noutputs, 1)
    x.requires_grad_(True)
    y = net(x)
    y.backward(torch.eye(noutputs))
    return x.grad.data  # row i holds d y_i / d x
```
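
A quick usage sketch (the `nn.Linear` net and the shapes here are illustrative, not from the original note):

```python
net = torch.nn.Linear(3, 2)            # 3 inputs, 2 outputs
x = torch.randn(3)
J = get_jacobian(net, x, noutputs=2)
print(J.shape)                          # torch.Size([2, 3]): J[i, j] = d y_i / d x_j
```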
```python
import torch

def jacobian(y, x, create_graph=False):
    # Loop over the flattened outputs, taking one vector-Jacobian
    # product per output element to build the full Jacobian row by row.
    jac = []
    flat_y = y.reshape(-1)
    grad_y = torch.zeros_like(flat_y)
    for i in range(len(flat_y)):
        grad_y[i] = 1.
        grad_x, = torch.autograd.grad(flat_y, x, grad_y, retain_graph=True, create_graph=create_graph)
        jac.append(grad_x.reshape(x.shape))
        grad_y[i] = 0.  # reset the seed before the next row
    return torch.stack(jac).reshape(y.shape + x.shape)
```
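
A small sanity check, assuming an elementwise square as the test function: the Jacobian of `y = x**2` should be `diag(2x)`.

```python
x = torch.randn(4, requires_grad=True)
y = x ** 2
J = jacobian(y, x)                           # shape (4, 4)
print(torch.allclose(J, torch.diag(2 * x)))  # True
```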
```applescript
(*
----------------------------------------------------------------------------------
ABOUT THIS SCRIPT

Written by: William Smith
Professional Services Engineer
Jamf
[email protected]
*)
```
```python
import torch

def Rop(y, x, v):
    """Computes an Rop (Jacobian-vector product) via two backward passes.

    Arguments:
        y (Tensor): output of differentiated function
        x (Tensor): differentiated input
        v (Tensor): vector to be multiplied with Jacobian from the right
    """
    # The inner grad w.r.t. x is linear in the dummy seed w, so
    # differentiating it w.r.t. w against v yields J v. create_graph=True
    # is required so the outer grad can differentiate through the inner one.
    w = torch.ones_like(y, requires_grad=True)
    return torch.autograd.grad(torch.autograd.grad(y, x, w, create_graph=True), w, v)
```
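
A sanity-check sketch, assuming `y = x**3` as the test function: the JVP along `v` should equal `3 * x**2 * v`.

```python
x = torch.randn(5, requires_grad=True)
v = torch.randn(5)
y = x ** 3
jvp, = Rop(y, x, v)
print(torch.allclose(jvp, 3 * x**2 * v))  # True
```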