Skip to content

Instantly share code, notes, and snippets.

@securetorobert
Created July 12, 2018 23:04
Show Gist options
  • Save securetorobert/0e14397b350f4747ac3bb553ed5b5cb8 to your computer and use it in GitHub Desktop.
Save securetorobert/0e14397b350f4747ac3bb553ed5b5cb8 to your computer and use it in GitHub Desktop.
Loss optimization in scientific Python
import pandas as pd
import numpy as np
from scipy.optimize import fmin, minimize
#load training data
#NOTE(review): the columns (crim, zn, ..., lstat, medv) look like the Boston
#housing dataset with 'medv' (median home value) as the target — confirm
#against the actual train.csv
train_df = pd.read_csv('./train.csv', index_col='ID')
#target as an (n, 1) column vector so the matrix arithmetic below lines up
y = train_df['medv'].values
y = y.reshape(-1, 1)
#all-ones column so the intercept is learned as an ordinary weight
train_df['constant'] = 1
columns = ['constant', 'crim', 'zn', 'indus', 'chas', 'nox', 'rm', 'age', 'dis', 'rad', 'tax', 'ptratio', 'black', 'lstat']
#design matrix: one row per sample, one column per feature (incl. constant)
x = train_df[columns].values
#initialize vector of weights
#one weight per feature column, all starting at zero — shape (14, 1)
w = np.zeros([x.shape[1], 1])
#define our linear regression function
def pred(x, w):
    """Predict targets for the feature matrix ``x`` with weights ``w``.

    This is a plain linear model: the matrix product ``x . w``.  With
    ``x`` of shape (n, k) and ``w`` of shape (k, 1) the result is the
    (n, 1) column of predictions; a flat (k,) ``w`` yields a flat (n,)
    result (numpy's dot-product broadcasting).

    Note: the pasted original had lost the body's indentation, which made
    it a SyntaxError — restored here.
    """
    return np.dot(x, w)
#make a prediction
#with the all-zero initial weights this baseline predicts 0.0 for every row;
#it is kept as the 'y_pred' comparison column in the DataFrames below
y_pred = pred(x, w)
#define our loss function
def loss(_w, features=None, targets=None):
    """Root-mean-square error of a linear model with weights ``_w``.

    _w: weight vector.  scipy's optimizers (``fmin``/``minimize``) pass
        this as a FLAT 1-D array, and the original code then computed
        ``y - pred(x, _w)`` as (n, 1) - (n,), which numpy broadcasts to
        an (n, n) matrix — so the quantity being minimized was not the
        per-sample error at all.  Reshaping ``_w`` to a column fixes that.
    features, targets: optional design matrix / target column; they
        default to the module-level ``x`` and ``y`` loaded above, so the
        existing ``fmin(loss, w)`` call sites are unchanged.

    Returns the scalar RMSE.  The original returned
    ``sqrt(sum(e**2)) / n`` — same minimizer (a monotonic transform of
    the sum of squares) but not actually the RMSE, which is
    ``sqrt(mean(e**2))``.
    """
    if features is None:
        features = x
    if targets is None:
        targets = y
    column_w = np.asarray(_w).reshape(-1, 1)
    residuals = targets - np.dot(features, column_w)
    return np.sqrt(np.mean(np.square(residuals)))
#minimize our loss function
#renamed from `min`, which shadowed the builtin of the same name.
#fmin is gradient-free Nelder-Mead; with 14 parameters, 1000 iterations
#may hit maxiter before converging — watch the printed warning.
w_opt = fmin(loss, w, maxiter=1000)
#make our prediction with the optimized weights
y_min = pred(x, w_opt)
#side-by-side comparison: actual target, zero-weight baseline, optimized fit
#(reuse y_min instead of recomputing pred(x, w_opt) a second time)
out = pd.DataFrame({'y': y[:, 0], 'y_pred': y_pred[:, 0], 'y_min': y_min})
out.head(n=15)
#use minimize() — the modern scipy entry point — with the same
#gradient-free Nelder-Mead simplex method as fmin above
nms = minimize(loss, w, method='nelder-mead')
#make another prediction, this time from the OptimizeResult's solution
#vector (nms.x), and compare it against the earlier columns
y_nms = pred(x, nms.x)
out_2 = pd.DataFrame({'y': y[:, 0], 'y_pred': y_pred[:, 0], 'y_min': y_nms})
out_2.head()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment