import os, gzip, pickle
from urllib import urlretrieve   #Python 2; on Python 3 use urllib.request.urlretrieve
import numpy as np
import theano
import theano.tensor as T
import lasagne

def load_dataset():
    url = 'http://deeplearning.net/data/mnist/mnist.pkl.gz'
    filename = 'mnist.pkl.gz'
    if not os.path.exists(filename):
        print("Downloading MNIST dataset...")
        urlretrieve(url, filename)
    with gzip.open(filename, 'rb') as f:
        data = pickle.load(f)   #(train, val, test) tuples of (X, y)
    X_train, y_train = data[0]
    X_val, y_val = data[1]
    return X_train, y_train, X_val, y_val
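#Usage sketch (an addition, not in the original snippet): the pickled MNIST arrays are
#flat 784-dim float vectors, so they are reshaped to (N, 1, 28, 28) for the Conv2D input
#below, and the labels are cast to int32 to match the ivector target variable.
X_train, y_train, X_val, y_val = load_dataset()
X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
y_train = y_train.astype(np.int32)
y_val = y_val.astype(np.int32)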
#Conv Net Structure
batch_size = 100
output_size = 10
data_size = (None, 1, 28, 28)
input_var = T.tensor4(name='inputs')
target_var = T.ivector(name='targets')
net = {}
#Input layer:
net['data'] = lasagne.layers.InputLayer(data_size, input_var=input_var)
#Convolution + Pooling
net['conv1'] = lasagne.layers.Conv2DLayer(net['data'], num_filters=6, filter_size=3)
net['pool1'] = lasagne.layers.Pool2DLayer(net['conv1'], pool_size=2)
net['conv2'] = lasagne.layers.Conv2DLayer(net['pool1'], num_filters=10, filter_size=4)
net['pool2'] = lasagne.layers.Pool2DLayer(net['conv2'], pool_size=2)
#Softmax output layer. The code below references net['out'], but this layer was missing
#from the snippet, so it is assumed here to be a dense softmax over output_size classes.
net['out'] = lasagne.layers.DenseLayer(net['pool2'], num_units=output_size,
                                       nonlinearity=lasagne.nonlinearities.softmax)
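#Optional sanity check (an addition, not in the original): print each layer's output shape
#to confirm the architecture before compiling anything.
for name in ['data', 'conv1', 'pool1', 'conv2', 'pool2', 'out']:
    print(name, lasagne.layers.get_output_shape(net[name]))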
###Defining the cost function and the update rule
#Define hyperparameters. These could also be symbolic variables
lr = 1e-2
weight_decay = 1e-5
#Loss function: mean cross-entropy, plus an L2 penalty so the weight_decay
#hyperparameter above is actually used
prediction = lasagne.layers.get_output(net['out'])
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
loss += weight_decay * lasagne.regularization.regularize_network_params(
    net['out'], lasagne.regularization.l2)
#Get the update rule (Adam) for the trainable parameters
params = lasagne.layers.get_all_params(net['out'], trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=lr)
###Compiling the training and testing functions
train_fn = theano.function([input_var, target_var], loss, updates=updates)
test_prediction = lasagne.layers.get_output(net['out'], deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                  dtype=theano.config.floatX)
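#Validation function (an addition; the original snippet defines test_loss and test_acc
#but never compiles a function for them). The name val_fn is assumed; it is used in the
#per-epoch validation sketch after the training loop below.
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])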
###Training the model
#Run the training function per mini-batches.
n_examples = X_train.shape[0]
n_batches = n_examples // batch_size
epochs = 50
for epoch in xrange(epochs):
    for batch in xrange(n_batches):
        x_batch = X_train[batch * batch_size: (batch + 1) * batch_size]
        y_batch = y_train[batch * batch_size: (batch + 1) * batch_size]
        train_fn(x_batch, y_batch)   #the actual update step was missing from the snippet
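    #Per-epoch validation (an addition; assumes the val_fn compiled above and the
    #reshaped X_val/y_val from the loading step)
    val_loss, val_acc = val_fn(X_val, y_val)
    print("Epoch %d: val loss = %.4f, val acc = %.4f" % (epoch, val_loss, val_acc))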
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 04 17:59:48 2017
@author: Tathagat Dasgupta
"""
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 02 23:56:30 2017
"""
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
df = pd.read_csv("cal_housing_clean.csv")
print(df.describe())   #to understand the dataset
y_val = df["medianHouseValue"]
x_data = df.drop("medianHouseValue", axis=1)
X_train, X_eval, y_train, y_eval = train_test_split(x_data, y_val, test_size=0.3, random_state=101)
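#A possible continuation (an addition, not in the original snippet): scale the features
#with the imported MinMaxScaler and fit a TF 1.x DNNRegressor on the scaled training split.
#The layer sizes, batch size and step count here are assumptions for illustration only.
scaler = MinMaxScaler()
X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns, index=X_train.index)
X_eval = pd.DataFrame(scaler.transform(X_eval), columns=X_eval.columns, index=X_eval.index)

#One numeric feature column per input feature
feat_cols = [tf.feature_column.numeric_column(col) for col in X_train.columns]

#Feed the DataFrames to the estimator through a pandas input function
input_fn = tf.estimator.inputs.pandas_input_fn(x=X_train, y=y_train, batch_size=10,
                                               num_epochs=1000, shuffle=True)
model = tf.estimator.DNNRegressor(hidden_units=[6, 6, 6], feature_columns=feat_cols)
model.train(input_fn=input_fn, steps=20000)

#Evaluate on the held-out 30% split
eval_input_fn = tf.estimator.inputs.pandas_input_fn(x=X_eval, y=y_eval, batch_size=10,
                                                    num_epochs=1, shuffle=False)
print(model.evaluate(input_fn=eval_input_fn))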