def reflect_pad(x, width, batch_ndim=1):
    """
    Pad a tensor using reflection padding.

    Parameters
    ----------
    x : tensor
    width : int, iterable of int, or iterable of tuple
        Padding width. If an int, pads each axis symmetrically with the same
        amount in the beginning and end. If an iterable of int, defines the
        symmetric padding width separately for each axis. If an iterable of
        tuple, defines the padding width separately for the beginning and end
        of each axis.
    """
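A hedged sketch of how reflection padding like this can be built in Theano (my own simplified version, padding only the two trailing spatial axes of a 4D tensor by a single integer width; not the gist's implementation):

import theano.tensor as T

def reflect_pad_sketch(x, width):
    # Mirror the first/last `width` columns (excluding the border pixel,
    # matching numpy's 'reflect' mode), then do the same for rows.
    left = x[:, :, :, width:0:-1]
    right = x[:, :, :, -2:-2 - width:-1]
    x = T.concatenate([left, x, right], axis=3)
    top = x[:, :, width:0:-1, :]
    bottom = x[:, :, -2:-2 - width:-1, :]
    return T.concatenate([top, x, bottom], axis=2)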
import theano
import lasagne.layers
from lasagne.layers import Conv2DLayer as C2D
from lasagne.nonlinearities import rectify as relu
from lasagne.layers import NonlinearityLayer as NL
from lasagne.layers import ElemwiseSumLayer as ESL
from lasagne.layers import batch_norm as BN
l_in = lasagne.layers.InputLayer(shape=(None, 3, 64, 64))  # Assume the incoming shape is a batch x RGB x H x W image
encoder_stem = C2D(
    incoming=l_in, num_filters=64, filter_size=(3, 3), pad='same',
    nonlinearity=relu)  # illustrative arguments: the gist's actual values are not shown here
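A hedged sketch of how these aliases might compose (hyperparameters here are my own assumptions, not the gist's): a simple residual block built from C2D, BN, ESL, and NL.

def residual_block(incoming, num_filters):
    # Two batch-normalized 3x3 convolutions summed with the identity path,
    # then a ReLU. Assumes `incoming` already has `num_filters` channels so
    # the elementwise sum shapes match.
    conv = BN(C2D(incoming, num_filters, (3, 3), pad='same', nonlinearity=relu))
    conv = BN(C2D(conv, num_filters, (3, 3), pad='same', nonlinearity=None))
    return NL(ESL([conv, incoming]), nonlinearity=relu)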
import theano
import theano.tensor as T
import lasagne
import numpy as np
import time
# Subpixel Upsample Layer using Set_subtensor
# This layer uses a set of r^2 inc_subtensor calls to reorganize the tensor in a subpixel-layer upscaling style
# as done in the ESPCN magic pony paper for super-resolution. There is almost certainly a more efficient way to do this,
# but I haven't figured it out at the moment and this seems to be fast enough.
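A hedged sketch of the reorganization the comment describes (my own simplified version reusing the theano.tensor import above, not the gist's layer): each of the r^2 channel groups of a (batch, r*r*c, h, w) tensor is written into one interleaved sub-grid of a (batch, c, r*h, r*w) output with inc_subtensor.

def subpixel_upsample_sketch(x, r, c):
    # x: (batch, r*r*c, h, w) -> (batch, c, r*h, r*w).
    # The channel-group-to-offset mapping below is one plausible ordering;
    # the exact ordering in the gist may differ.
    shp = x.shape
    out = T.zeros((shp[0], c, shp[2] * r, shp[3] * r), dtype=x.dtype)
    for i in range(r):
        for j in range(r):
            out = T.inc_subtensor(
                out[:, :, i::r, j::r],
                x[:, (i * r + j) * c:(i * r + j + 1) * c])
    return out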
###
# Situationally faster dilated convolutions through subpixel reshapes
# A Brock, 2016
#
# Script adapted from https://github.com/soumith/convnet-benchmarks/blob/master/theano/pylearn2_benchmark.py by Jan Schluter.
#
# Outputs of this script from my tests on a GTX 980 are available here: http://pastebin.com/JRBY4Qnf
#
# Outputs of this script from my tests on a Titan X are available here: http://pastebin.com/0zJ8Uvg0
#
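To illustrate the idea in the header (a hedged numpy sketch of my own, not the benchmark code): a convolution with dilation d on a (b, c, h, w) input can be computed by splitting the input into d*d interleaved sub-grids, running an ordinary undilated convolution on the stacked sub-grids, and interleaving the results back, ignoring border handling.

import numpy as np

def space_to_batch(x, d):
    # (b, c, h, w) -> (b*d*d, c, h//d, w//d); assumes h and w are divisible by d.
    b, c, h, w = x.shape
    x = x.reshape(b, c, h // d, d, w // d, d)
    x = x.transpose(0, 3, 5, 1, 2, 4)              # (b, d, d, c, h//d, w//d)
    return x.reshape(b * d * d, c, h // d, w // d)

def batch_to_space(x, d, b):
    # Inverse of space_to_batch.
    _, c, h, w = x.shape
    x = x.reshape(b, d, d, c, h, w).transpose(0, 3, 4, 1, 5, 2)
    return x.reshape(b, c, h * d, w * d)

# Up to border effects, a dilation-d convolution of x is then
#     batch_to_space(ordinary_conv(space_to_batch(x, d)), d, b)
# where ordinary_conv is a placeholder for any undilated convolution.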
%% Benchmark Analysis Script
% A Brock, 11.16.2016
%
% This quick script runs through and determines the fastest method from a
% given set of benchmarks.
%
% Note that this script is really only set up to work well with a single
% benchmark file, as the indexing isn't quite perfect for the multiple
% inputs case. Extending it should be easy enough if desired.
%% Clear the playing field
import theano
from theano.sandbox import cuda  # old Theano GPU backend providing CudaNdarrayType

class MyOp(theano.Op):
    # Properties attribute
    # The itypes and otypes attributes are
    # compulsory if the make_node method is not defined.
    # They're the types of the inputs and outputs, respectively.
    itypes = [cuda.CudaNdarrayType([False, False, False, False])]
    otypes = [cuda.CudaNdarrayType([False] * 4)]
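For comparison, a minimal complete Op of the same shape on the CPU (mirroring the standard Theano docs pattern; not part of the gist): itypes/otypes declare the variable types, and perform() computes the output on numpy arrays.

import theano
import theano.tensor as T

class DoubleOp(theano.Op):
    # Minimal illustrative Op: doubles a float32 matrix.
    __props__ = ()
    itypes = [T.fmatrix]
    otypes = [T.fmatrix]

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = 2 * x

# Usage: build a graph with the Op and compile it like any other operation.
x = T.fmatrix('x')
f = theano.function([x], DoubleOp()(x))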
## Unrolled GAN
# A Brock, 2016
# This code implements the toy experiment for unrolled GANs.
# TODO: Make shared variables and reduce the memory transfer overhead
# Imports
import numpy as np
import torch
import math
import torch.optim
from torch.optim.optimizer import Optimizer, required
class AdamHD(Optimizer):
    """Implements the Adam algorithm with hypergradient-descent adaptation of the learning rate.
    It has been proposed in `Adam: A Method for Stochastic Optimization`_.
    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
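The hypergradient-descent idea (as in "Online Learning Rate Adaptation with Hypergradient Descent") shown as a hedged sketch of my own for plain SGD rather than Adam; all names here are illustrative: the learning rate itself is updated each step using the dot product of the current gradient with the previous update direction.

import torch

def sgd_hd_step(params, state, lr0=1e-3, beta=1e-4):
    # state maps each parameter to {'lr': float, 'prev_grad': Tensor}.
    for p in params:
        if p.grad is None:
            continue
        st = state.setdefault(p, {'lr': lr0, 'prev_grad': torch.zeros_like(p.data)})
        # Hypergradient: d(loss)/d(lr) approximated by grad_t . (-grad_{t-1}),
        # since the previous SGD update direction was -lr * grad_{t-1}.
        h = torch.sum(p.grad.data * (-st['prev_grad'])).item()
        st['lr'] = st['lr'] - beta * h           # gradient step on the learning rate
        st['prev_grad'] = p.grad.data.clone()
        p.data -= st['lr'] * p.grad.data         # ordinary SGD step with the adapted lr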
import math
from torch.optim.optimizer import Optimizer
# This version of Adam keeps an fp32 copy of the parameters and
# does all of the parameter updates in fp32, while still doing the
# forwards and backwards passes using fp16 (i.e. fp16 copies of the
# parameters and fp16 activations).
#
# Note that this calls .float().cuda() on the params such that it
# moves them to gpu 0--if you're using a different GPU or want to
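A hedged sketch of the fp32-master-copy pattern the comment describes (my own simplification using a plain SGD update instead of Adam; names are illustrative): gradients computed on the fp16 parameters are cast to fp32, the update is applied to the fp32 copies, and the result is written back into the fp16 parameters.

import torch

def make_fp32_copies(fp16_params):
    # One-time setup: detached fp32 master copies of the fp16 parameters.
    return [p.detach().clone().float() for p in fp16_params]

def fp32_master_step(fp16_params, fp32_params, lr=1e-3):
    for p16, p32 in zip(fp16_params, fp32_params):
        if p16.grad is None:
            continue
        grad32 = p16.grad.data.float()   # fp16 gradient -> fp32
        p32.data -= lr * grad32          # update in fp32
        p16.data.copy_(p32.data)         # write back (cast down to fp16)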
# A simple Layer Norm implementation
# Andy Brock, March 2017
#
# Andy's Notes:
# -This is sort of hacky but it seems to work.
# -You may also want an affine transform in there.
# -Note the .cuda() call on the dummies!
import torch
import torch.nn as nn

class LayerNorm(nn.Module):
    def __init__(self):
        super(LayerNorm, self).__init__()
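A hedged sketch of a parameter-free layer norm forward pass (my own illustration, not necessarily what the gist does): each sample is normalized over all of its non-batch dimensions.

class LayerNormSketch(nn.Module):
    # Illustrative only: no affine transform, and eps is chosen arbitrarily.
    def forward(self, x):
        flat = x.view(x.size(0), -1)
        mean = flat.mean(1).view(-1, *([1] * (x.dim() - 1)))
        std = flat.std(1).view(-1, *([1] * (x.dim() - 1)))
        return (x - mean) / (std + 1e-6)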