Skip to content

Instantly share code, notes, and snippets.

@cancan101
Last active August 29, 2015 14:11
Show Gist options
  • Save cancan101/7a23f365fc02856f2433 to your computer and use it in GitHub Desktop.
import os
os.environ['PYLEARN2_DATA_PATH'] = '/home/ubuntu/data'
import numpy as np
import cv2
from pylearn2.config import yaml_parse
from pylearn2.datasets.mnist import MNIST
from pylearn2.datasets import preprocessing
class Foo(preprocessing.Preprocessor):
    """Preprocessor that resizes every example in a dataset.

    Each example in the dataset's topological view is resized with
    OpenCV's Lanczos interpolation. ``cv2.resize`` drops the channel
    axis for single-channel images, so a trailing singleton axis is
    restored to keep the topological view 4-D.
    """

    def __init__(self, shape=(150, 150)):
        # Target (width, height) passed to cv2.resize; the default
        # preserves the original hard-coded 150x150 behaviour.
        self.shape = tuple(shape)

    def apply(self, dataset, can_fit=False):
        """Resize every example of `dataset` in place.

        Parameters
        ----------
        dataset : object
            A pylearn2 dataset exposing ``get_topological_view`` /
            ``set_topological_view`` and a ``view_converter``.
        can_fit : bool, optional
            Unused; present (with a default) to match the pylearn2
            ``Preprocessor.apply`` interface.
        """
        topo = dataset.get_topological_view()
        axes = dataset.view_converter.axes
        # Resize each example, then restore the channel axis that
        # cv2.resize drops for single-channel input.
        resized = np.array(
            [cv2.resize(example, self.shape,
                        interpolation=cv2.INTER_LANCZOS4)
             for example in topo]
        )[..., np.newaxis]
        dataset.set_topological_view(resized, axes=list(axes))
# Build the training and validation sets; the Foo preprocessor resizes
# every example to 150x150. start/stop restrict training to the first
# 5000 MNIST examples. NOTE: these names are referenced by the YAML
# below via !import '__main__.train_set' / '__main__.valid'.
train_set = MNIST(which_set='train', preprocessor=Foo(), start=0, stop=5000)
valid = MNIST(which_set='test', preprocessor=Foo(),)
model_yml = """
!obj:pylearn2.models.mlp.MLP {
input_space: !obj:pylearn2.space.Conv2DSpace {
shape: [150, 150],
num_channels: 1
},
layers: [ !obj:pylearn2.models.mlp.ConvRectifiedLinear {
layer_name: 'h0',
output_channels: 8,
irange: .02,
kernel_shape: [9, 9],
pool_shape: [7, 7],
pool_stride: [7, 7],
max_kernel_norm: 1.9365,
init_bias: 1,
}, !obj:pylearn2.models.mlp.ConvRectifiedLinear {
layer_name: 'h1',
output_channels: 16,
irange: .02,
kernel_shape: [5, 5],
pool_shape: [2, 2],
pool_stride: [2, 2],
max_kernel_norm: 1.9365,
init_bias: 1,
}, !obj:pylearn2.models.mlp.ConvRectifiedLinear {
layer_name: 'h2',
output_channels: 32,
irange: .02,
kernel_shape: [5, 5],
pool_shape: [2, 2],
pool_stride: [2, 2],
max_kernel_norm: 1.9365,
init_bias: 1,
}, !obj:pylearn2.models.mlp.RectifiedLinear {
layer_name: 'h6',
dim: 500,
irange: 0.005,
max_col_norm: 40.,
init_bias: 1.
}, !obj:pylearn2.models.mlp.Softmax {
max_col_norm: 3.9365,
layer_name: 'y',
n_classes: 10,
irange: 0.005000
}
],
}
"""
# YAML spec for the training loop: SGD (batch 64, lr 0.001) with
# momentum warm-up (MomentumAdjustor, epochs 2-5) and linear learning
# rate decay (epochs 20-100). The cost is a SumOfCosts whose weight
# decay coefficients are all zero and whose Dropout include
# probabilities/scales are all 1, i.e. both are effectively disabled
# but left in place as tuning knobs. Training stops on either 150
# epochs or no 1% improvement of valid_y_misclass within 100 epochs.
# `model: null` is a placeholder filled in after parsing (see below).
train_yml = """
!obj:pylearn2.train.Train {
dataset: &train !import '__main__.train_set',
model: null,
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
batch_size: 64,
learning_rate: 0.001,
learning_rule: !obj:pylearn2.training_algorithms.learning_rule.Momentum {
init_momentum: 0.0
},
cost: !obj:pylearn2.costs.cost.SumOfCosts {
costs:[
!obj:pylearn2.costs.mlp.WeightDecay {
coeffs: [ 0., 0., 0., 0., 0., ],
},
!obj:pylearn2.costs.mlp.dropout.Dropout {
input_include_probs: { 'h0' : 1, 'h1' : 1, 'h2' : 1, },
input_scales: { 'h0' : 1, 'h1' : 1, 'h2' : 1, }
},
],
},
monitor_iteration_mode : "even_shuffled_sequential",
train_iteration_mode : "even_shuffled_sequential",
monitoring_dataset: {
'valid' : !import '__main__.valid',
},
termination_criterion: !obj:pylearn2.termination_criteria.And {
criteria: [
!obj:pylearn2.termination_criteria.MonitorBased {
channel_name: "valid_y_misclass",
prop_decrease: 0.01,
N: 100
},
!obj:pylearn2.termination_criteria.EpochCounter {
max_epochs: 150,
new_epochs: false
},
]
},
},
extensions: [
!obj:pylearn2.training_algorithms.learning_rule.MomentumAdjustor {
start: 2,
saturate: 5,
final_momentum: 0.90
},
!obj:pylearn2.training_algorithms.sgd.LinearDecayOverEpoch {
start: 20,
saturate: 100,
decay_factor: 0.1
},
]
}
"""
# Parse the two YAML specs and wire them together: the Train object
# was declared with `model: null`, so the model is attached here.
model = yaml_parse.load(model_yml)
train = yaml_parse.load(train_yml)
train.model = model
# Clear any fixed batch size on the model so the SGD algorithm's
# batch_size (64) takes effect.
train.model.batch_size = None
train.model.force_batch_size = None
train.main_loop()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment