Results on MNIST
Feed-forward model with two hidden layers (300 and 60 units).
| l2_lambda | Accuracy@1 (%) after 80k iters, run 1 | Accuracy@1 (%), run 2 |
|---|---|---|
| 0.00 | 98.15 | 98.04 |
| 0.01 | 98.31 | 98.19 |
| 0.02 | 98.19 | 98.15 |
| 0.04 | 97.93 | 97.92 |
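
For reference, a minimal PyTorch sketch of this setup: the (300, 60) layer sizes and the l2_lambda values match the table above, while the optimizer choice (SGD), the learning rate, and applying the L2 penalty through the optimizer's weight_decay argument are assumptions, not necessarily the exact configuration that produced these numbers.

```python
import torch
import torch.nn as nn
import torch.optim as optim

class FeedForward(nn.Module):
    """Two-hidden-layer MLP for 28x28 MNIST digits: 784 -> 300 -> 60 -> 10."""
    def __init__(self):
        super(FeedForward, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 300)
        self.fc2 = nn.Linear(300, 60)
        self.fc3 = nn.Linear(60, 10)

    def forward(self, x):
        x = x.view(x.size(0), -1)      # flatten images to vectors
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)             # raw logits; CrossEntropyLoss applies log-softmax

model = FeedForward()
l2_lambda = 0.01                       # one of the values swept in the table
# weight_decay adds an L2 penalty on the parameters to each gradient update.
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=l2_lambda)
criterion = nn.CrossEntropyLoss()
```

Note that weight_decay penalizes every parameter passed to the optimizer, including biases, which can differ slightly from adding an explicit l2_lambda term to the loss over weights only.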
Transfer Learning
Investigates how much information can be transferred between networks trained on different datasets.
Quantifies transferability layer by layer (see the sketch below).
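
To make the layer-wise notion concrete, here is a hedged sketch (an illustration under assumed details, not the experiment's actual protocol): the first hidden layer of a network trained on one dataset is copied into a fresh network and frozen before training on a second dataset. The (300, 60) MLP shape is reused from the MNIST section above; varying how many layers are copied and frozen is what quantifies transferability per layer.

```python
import copy
import torch.nn as nn
import torch.optim as optim

def build_mlp():
    # Same (300, 60) architecture as the MNIST model above.
    return nn.Sequential(
        nn.Flatten(),
        nn.Linear(28 * 28, 300), nn.ReLU(),
        nn.Linear(300, 60), nn.ReLU(),
        nn.Linear(60, 10),
    )

source_net = build_mlp()   # assume this has already been trained on dataset A
target_net = build_mlp()   # to be trained on dataset B

# Transfer the first hidden layer: copy its weights into the new network and freeze them.
target_net[1] = copy.deepcopy(source_net[1])
for p in target_net[1].parameters():
    p.requires_grad = False

# Only the still-trainable parameters go to the optimizer; comparing accuracy as more
# layers are transferred (or left frozen) measures how transferable each layer is.
trainable = [p for p in target_net.parameters() if p.requires_grad]
optimizer = optim.SGD(trainable, lr=0.01)
```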
Hypothesis:
```cpp
#include <iostream>
using namespace std;

// Plain aggregate struct; initialized with brace syntax below.
struct Foo {
    int a;
    double b;
};

int main() {
    const Foo f = {1, 2.5};  // initializer values are illustrative (assumption)
    cout << f.a << " " << f.b << endl;
    return 0;
}
```
```python
import gym
import logging
import sys
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
try:
    import cPickle as pickle  # Python 2
except ImportError:
    import pickle             # Python 3
import os
```
```python
import gym
import logging
import sys
import numpy as np
from gym import wrappers

SEED = 0
NUM_EPISODES = 3000

# Hyperparams
```
```python
import os
import sys
import urllib2  # Python 2 standard library; urllib.request in Python 3


def normalize_path(path):
    # Drop a trailing slash so downstream path handling is consistent.
    if path[-1] == '/':
        path = path[:-1]
    return path


def get_dir_name(path):
    # Minimal completion (assumption): return the last component of the normalized path.
    return os.path.basename(normalize_path(path))
```
```python
import gym
import logging
import sys
import numpy as np
from gym import wrappers
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
```
```python
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
from torch.autograd import Variable  # deprecated since PyTorch 0.4; tensors track gradients directly
```