Skip to content

Instantly share code, notes, and snippets.

View amaarora's full-sized avatar
🎯
Focusing

Aman Arora amaarora

🎯
Focusing
View GitHub Profile
#Cell
class TfmdList(FilteredBase, L, GetAttr):
    "A `Pipeline` of `tfms` applied to a collection of `items`"
    # GetAttr convention: attribute lookups not found on this object are
    # forwarded to `self.tfms`.
    _default='tfms'
    def __init__(self, items, tfms, use_list=None, do_setup=True, as_item=True, split_idx=None, train_setup=True, splits=None):
        super().__init__(items, use_list=use_list)
        # Default splits: one split covering all items plus an empty second
        # split; `mask2idxs` presumably normalizes masks/slices to index
        # collections — confirm against its definition.
        self.splits = L([slice(None),[]] if splits is None else splits).map(mask2idxs)
        # Unwrap another TfmdList to reuse its transforms rather than nest it.
        if isinstance(tfms,TfmdList): tfms = tfms.tfms
        # A prebuilt Pipeline is taken as already set up, so skip setup.
        # NOTE(review): `do_setup` and `train_setup` are never read in this
        # snippet — the setup call is presumably below the visible excerpt.
        if isinstance(tfms,Pipeline): do_setup=False
        self.tfms = Pipeline(tfms, as_item=as_item, split_idx=split_idx)
#Cell
@docs
@delegates(TfmdList)
class DataSource(FilteredBase):
    "A dataset that creates a tuple from each `tfms`, passed thru `item_tfms`"
    def __init__(self, items=None, tfms=None, tls=None, n_inp=None, dl_type=None, **kwargs):
        super().__init__(dl_type=dl_type)
        # Build one TfmdList per transform pipeline, unless the caller
        # supplies prebuilt `tls`. A missing `tfms` means a single
        # pass-through (None) pipeline.
        if tls:
            self.tls = L(tls)
        else:
            pipelines = L(ifnone(tfms, [None]))
            self.tls = L([TfmdList(items, tp, **kwargs) for tp in pipelines])
        # By convention every element of the tuple except the last is an
        # input; a single-element tuple is all input.
        if n_inp is not None:
            self.n_inp = n_inp
        else:
            self.n_inp = 1 if len(self.tls) == 1 else len(self.tls) - 1
def reward_function(params):
    '''
    Example of rewarding the agent to follow center line
    '''
    import math  # kept for parity with the original snippet (unused here)

    # Pull the relevant observations out of the simulator's params dict.
    track_width, distance_from_center = (params['track_width'],
                                         params['distance_from_center'])
    steering = abs(params['steering_angle'])  # magnitude only, sign irrelevant
    all_wheels_on_track = params['all_wheels_on_track']
    # NOTE(review): the reward computation is not present in this snippet,
    # so the function implicitly returns None.
def reward_function(params):
    '''
    Example of rewarding the agent to stay within boundary
    '''
    import math  # kept for parity with the original snippet (unused here)

    # Read the simulator observations this reward example works from.
    track_width = params['track_width']
    distance_from_center = params['distance_from_center']
    all_wheels_on_track = params['all_wheels_on_track']
    steering = abs(params['steering_angle'])  # steer magnitude, ignoring direction
    # NOTE(review): the reward computation is not present in this snippet,
    # so the function implicitly returns None.
%%time
# Permutation feature importance: shuffle one column at a time and measure
# how much the validation RMSE degrades relative to `base` (presumably the
# unshuffled validation score — confirm against earlier cells). A larger
# increase means the model leaned on that feature more.
# NOTE(review): `%%time` is an IPython cell magic — this only runs inside a
# notebook. Relies on X_valid, y_valid, m, rmse and base defined earlier.
scores={}
for col in X_valid.columns:
    X = X_valid.copy()
    # Replace the column with a bootstrap resample of itself: breaks its
    # relationship to the target while keeping its marginal distribution.
    X[col] = np.random.choice(X[col], len(X))
    scores[col]=rmse(m.predict(X), y_valid)-base
def get_weights(*dims):
    "Trainable Parameter of shape `dims`: standard-normal draws scaled by 1/dims[0]."
    tensor = torch.randn(dims) / dims[0]
    return nn.Parameter(tensor)
def softmax(x):
    """Row-wise softmax of a 2-D tensor (rows sum to 1).

    Fix: subtract the per-row max before exponentiating. Mathematically
    identical (exp(x-c)/sum(exp(x-c)) == exp(x)/sum(exp(x))), but the
    original `torch.exp(x)` overflows to inf for logits around 89+,
    producing nan rows; the shifted form is the standard stable softmax.
    """
    e = torch.exp(x - x.max(dim=1, keepdim=True)[0])
    return e / e.sum(dim=1, keepdim=True)
class LogReg(nn.Module):
    """Logistic regression over flattened 28x28 inputs into 10 classes,
    built from raw Parameters via `get_weights` instead of nn.Linear."""
    def __init__(self):
        super().__init__()
        self.l1_w = get_weights(28*28, 10) # Layer 1 weights
        self.l1_b = get_weights(10) # Layer 1 bias
    def forward(self, x):
        """NOTE(review): the forward body is truncated in this snippet —
        presumably softmax(x @ l1_w + l1_b); confirm against the full source."""
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.ensemble import RandomForestRegressor
class TreeEnsemble():
    """Bagged ensemble of decision trees: each tree is fit on a random
    sample (without replacement) of `sample_sz` rows."""
    def __init__(self, x, y, n_trees, sample_sz, min_leaf=5):
        # Fixed seed so the row samples (and hence the forest) are reproducible.
        np.random.seed(42)
        self.x, self.y, self.sample_sz, self.min_leaf = x, y, sample_sz, min_leaf
        self.trees = [self.create_tree() for _ in range(n_trees)]
    def create_tree(self):
        # Draw `sample_sz` distinct row positions for this tree's subsample.
        sample = np.random.permutation(len(self.y))[:self.sample_sz]
        return DecisionTree(self.x.iloc[sample], self.y[sample],
                            idxs=np.arange(self.sample_sz), min_leaf=self.min_leaf)
def find_varsplit(self):
    """Find the best split across all columns, then recursively build children.

    NOTE(review): this reads like a method of `DecisionTree` (class header is
    not visible in this snippet); it relies on self.c, self.is_leaf,
    self.split_col, self.split and self.idxs being defined elsewhere on the
    class — confirm against the full source.
    """
    # Evaluate every candidate column; find_better_split records the best
    # split found so far as state on self.
    for i in range(self.c): self.find_better_split(i)
    # No improving split was found: this node stays a leaf.
    if self.is_leaf: return
    x = self.split_col
    # Row positions (within this node) falling on each side of the threshold.
    lhs = np.nonzero(x<=self.split)[0]
    rhs = np.nonzero(x>self.split)[0]
    self.lhs = DecisionTree(self.x, self.y, self.idxs[lhs])
    self.rhs = DecisionTree(self.x, self.y, self.idxs[rhs])
def find_better_split(self, var_idx):
    """Scan every value of column `var_idx` as a candidate threshold and score
    each split by the size-weighted sum of child standard deviations
    (lower = purer children = better split).

    NOTE(review): the method is truncated in this snippet — the original
    presumably compares curr_score against self.score and records the best
    threshold; as shown, the computed score is discarded each iteration.
    """
    # Column values and targets restricted to this node's rows.
    x,y = self.x.values[self.idxs,var_idx], self.y[self.idxs]
    for i in range(self.n):
        # Use the value at row i as the threshold: <= goes left, > goes right.
        lhs = x<=x[i]
        rhs = x>x[i]
        # Reject splits leaving fewer than min_leaf samples on either side.
        if rhs.sum()<self.min_leaf or lhs.sum()<self.min_leaf: continue
        lhs_std = y[lhs].std()
        rhs_std = y[rhs].std()
        curr_score = lhs_std*lhs.sum() + rhs_std*rhs.sum()