import os
import gzip
import pickle
from urllib import urlretrieve  # Python 2; on Python 3 use urllib.request.urlretrieve

def load_dataset():
    # Download the pickled MNIST dataset if it is not already on disk
    url = 'http://deeplearning.net/data/mnist/mnist.pkl.gz'
    filename = 'mnist.pkl.gz'
    if not os.path.exists(filename):
        print("Downloading MNIST dataset...")
        urlretrieve(url, filename)
    # The pickle holds (train, validation, test) splits of (images, labels)
    with gzip.open(filename, 'rb') as f:
        data = pickle.load(f)
    X_train, y_train = data[0]
    X_val, y_val = data[1]
    X_test, y_test = data[2]
    return X_train, y_train, X_val, y_val, X_test, y_test
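The images in mnist.pkl.gz come as flat 784-dimensional vectors, while the convolutional input layer defined below expects 4D tensors of shape (batch, 1, 28, 28), and the integer targets must be int32 to match a T.ivector. A minimal sketch of how the loaded arrays could be prepared (the reshaping/casting is an assumption about how the arrays are used later, not part of the original snippet):

X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
# Reshape flat vectors into (examples, channels, rows, cols) for the conv net
X_train = X_train.reshape((-1, 1, 28, 28))
X_val = X_val.reshape((-1, 1, 28, 28))
X_test = X_test.reshape((-1, 1, 28, 28))
# Cast labels to int32 so they match the T.ivector target variable used below
y_train = y_train.astype('int32')
y_val = y_val.astype('int32')
y_test = y_test.astype('int32')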
import theano
import theano.tensor as T
import lasagne

# Conv Net Structure
batch_size = 100
output_size = 10                    # 10 digit classes
data_size = (None, 1, 28, 28)       # (batch, channels, rows, cols); batch size left flexible
input_var = T.tensor4(name='inputs')     # symbolic input images
target_var = T.ivector(name='targets')   # symbolic integer class labels
net = {}                                 # dictionary holding the layers by name
# Input layer:
net['data'] = lasagne.layers.InputLayer(data_size, input_var=input_var)
# Convolution + pooling blocks
net['conv1'] = lasagne.layers.Conv2DLayer(net['data'], num_filters=6, filter_size=3)
net['pool1'] = lasagne.layers.Pool2DLayer(net['conv1'], pool_size=2)
net['conv2'] = lasagne.layers.Conv2DLayer(net['pool1'], num_filters=10, filter_size=4)
net['pool2'] = lasagne.layers.Pool2DLayer(net['conv2'], pool_size=2)
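The loss and update code further down refers to net['out'], which the snippet as shown never defines. A minimal sketch of a plausible classification head, assuming a fully-connected hidden layer followed by a softmax output sized to output_size (the layer names 'fc1' and 'out' and the 50 hidden units are assumptions, not taken from the original):

# Assumed classification head: dense hidden layer + softmax output (hypothetical sizes)
net['fc1'] = lasagne.layers.DenseLayer(net['pool2'], num_units=50,
                                       nonlinearity=lasagne.nonlinearities.rectify)
net['out'] = lasagne.layers.DenseLayer(net['fc1'], num_units=output_size,
                                       nonlinearity=lasagne.nonlinearities.softmax)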
### Defining the cost function and the update rule
# Define hyperparameters. These could also be symbolic variables
lr = 1e-2
weight_decay = 1e-5
# Loss function: mean cross-entropy over the mini-batch
prediction = lasagne.layers.get_output(net['out'])
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
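The weight_decay hyperparameter defined above is never applied in the snippet shown. A hedged sketch of how an L2 penalty could be folded into the loss with Lasagne's regularization helpers, scaled by weight_decay:

from lasagne.regularization import regularize_network_params, l2
# Add an L2 weight penalty scaled by weight_decay (not applied in the original snippet)
loss = loss + weight_decay * regularize_network_params(net['out'], l2)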
# Get the update rule; here the Adam optimizer is used
params = lasagne.layers.get_all_params(net['out'], trainable=True)
updates = lasagne.updates.adam(loss, params)
# Note: lr defined above is not passed here, so Adam's default learning rate applies
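If plain stochastic gradient descent is preferred (as the original comment suggests), a hedged sketch using the lr hyperparameter defined earlier:

# Plain SGD alternative, driven by the lr hyperparameter defined above
updates = lasagne.updates.sgd(loss, params, learning_rate=lr)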
### Compiling the training and testing functions
train_fn = theano.function([input_var, target_var], loss, updates=updates)
test_prediction = lasagne.layers.get_output(net['out'], deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                        target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)
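The snippet compiles train_fn but no evaluation function for the deterministic loss and accuracy it just defined. A minimal sketch of one (the name val_fn is an assumption):

# Compile an evaluation function returning the deterministic loss and accuracy
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])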
### Training the model
# Run the training function over mini-batches
n_examples = X_train.shape[0]
n_batches = n_examples // batch_size   # integer division: number of full batches
epochs = 50
for epoch in xrange(epochs):
    for batch in xrange(n_batches):
        x_batch = X_train[batch * batch_size: (batch + 1) * batch_size]
        y_batch = y_train[batch * batch_size: (batch + 1) * batch_size]
        train_fn(x_batch, y_batch)     # one gradient update on this mini-batch
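Once training has finished, the validation function sketched earlier can report how well the model generalizes. A hedged sketch, assuming val_fn, X_val and y_val as defined above:

# Evaluate on the validation split after training
val_loss, val_acc = val_fn(X_val, y_val)
print("Validation loss: %.4f, accuracy: %.4f" % (val_loss, val_acc))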
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 04 17:59:48 2017
@author: Tathagat Dasgupta
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

df = pd.read_csv("cal_housing_clean.csv")
print(df.describe())  # to understand the dataset
y_val = df["medianHouseValue"]
x_data = df.drop("medianHouseValue", axis=1)
X_train, X_eval, y_train, y_eval = train_test_split(x_data, y_val, test_size=0.3, random_state=101)
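The MinMaxScaler imported above is not used in the snippet shown. A hedged sketch of scaling the feature columns to [0, 1], fitting the scaler only on the training split so no information leaks from the evaluation set:

# Fit the scaler on the training features only, then apply it to both splits
scaler = MinMaxScaler()
X_train = pd.DataFrame(scaler.fit_transform(X_train),
                       columns=X_train.columns, index=X_train.index)
X_eval = pd.DataFrame(scaler.transform(X_eval),
                      columns=X_eval.columns, index=X_eval.index)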