import numpy as np

class AdaBoost:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        # start with uniform weights over the N training examples
        self.weights = np.ones(self.N) / self.N
        self.RULES = []
        self.ALPHA = []

    def set_rule(self, func, test=False):
        # weighted error of the candidate weak learner
        errors = np.array([t[1] != func(t[0]) for t in self.training_set])
        e = (errors * self.weights).sum()
        if test:
            return e
        alpha = 0.5 * np.log((1 - e) / e)
        print('e=%.2f a=%.2f' % (e, alpha))
        w = np.zeros(self.N)
        for i in range(self.N):
            # up-weight misclassified examples, down-weight correct ones
            if errors[i]:
                w[i] = self.weights[i] * np.exp(alpha)
            else:
                w[i] = self.weights[i] * np.exp(-alpha)
        self.weights = w / w.sum()
        self.RULES.append(func)
        self.ALPHA.append(alpha)

    def evaluate(self):
        for (x, l) in self.training_set:
            # weighted vote of all weak learners
            hx = [alpha * rule(x) for alpha, rule in zip(self.ALPHA, self.RULES)]
            print(x, np.sign(l) == np.sign(sum(hx)))

if __name__ == '__main__':
    examples = []
    examples.append(((1,   2  ),  1))
    examples.append(((1,   4  ),  1))
    examples.append(((2.5, 5.5),  1))
    examples.append(((3.5, 6.5),  1))
    examples.append(((4,   5.4),  1))
    examples.append(((2,   1  ), -1))
    examples.append(((2,   4  ), -1))
    examples.append(((3.5, 3.5), -1))
    examples.append(((5,   2  ), -1))
    examples.append(((5,   5.5), -1))

    m = AdaBoost(examples)
    # each rule is a decision stump mapping a boolean test to +1/-1
    m.set_rule(lambda x: 2 * (x[0] < 1.5) - 1)
    m.set_rule(lambda x: 2 * (x[0] < 4.5) - 1)
    m.set_rule(lambda x: 2 * (x[1] > 5) - 1)
    m.evaluate()
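One detail worth noting in the class above: calling set_rule(func, test=True) returns the weighted error of a candidate rule without committing it, which makes it easy to pick the best stump from a pool. A minimal sketch, assuming the model m from the demo above (the candidates list is purely illustrative):

# pick the lowest-weighted-error stump from a pool of candidates
candidates = [lambda x: 2 * (x[0] < 2.0) - 1,
              lambda x: 2 * (x[0] < 3.0) - 1,
              lambda x: 2 * (x[1] > 4.0) - 1]
best = min(candidates, key=lambda f: m.set_rule(f, test=True))
m.set_rule(best)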
import numpy as np

class AdaBoostHalf:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        self.weights = np.ones(self.N) / self.N
        self.RULES = []
        self.ALPHA = []

    def set_rule(self, func, test=False):
        errors = np.array([t[1] != func(t[0]) for t in self.training_set])
        e = (errors * self.weights).sum()
        if test:
            return e
        # alpha is still used by evaluate's weighted vote, but the
        # 1/2-trick reweighting below never touches it
        alpha = 0.5 * np.log((1 - e) / e)
        print('e={} a={}'.format(e, alpha))
        w = np.zeros(self.N)
        sWrong = np.sum(self.weights[errors])    # total weight of misclassified examples
        sRight = np.sum(self.weights[~errors])   # total weight of correct examples
        for i in range(self.N):
            # rescale so each group sums to exactly 1/2
            if errors[i]:
                w[i] = self.weights[i] / (2.0 * sWrong)
            else:
                w[i] = self.weights[i] / (2.0 * sRight)
        self.weights = w / w.sum()
        print(self.weights)
        self.RULES.append(func)
        self.ALPHA.append(alpha)

    def evaluate(self):
        for (x, l) in self.training_set:
            hx = [alpha * rule(x) for alpha, rule in zip(self.ALPHA, self.RULES)]
            print(x, np.sign(l) == np.sign(sum(hx)))
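The class above omits a driver; assuming the same examples list defined in the original gist, it can be exercised the same way (a sketch):

m = AdaBoostHalf(examples)
m.set_rule(lambda x: 2 * (x[0] < 1.5) - 1)
m.set_rule(lambda x: 2 * (x[0] < 4.5) - 1)
m.set_rule(lambda x: 2 * (x[1] > 5) - 1)
m.evaluate()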
This version is based on Patrick Winston's lecture on boosting (https://www.youtube.com/watch?v=UHBmv7qCey4). It turns out you do not have to exponentiate alpha to update the weights: it is mathematically equivalent to rescale the weights so that the correct predictions add up to 1/2 and the incorrect predictions add up to 1/2. (Alpha is still needed for the final weighted vote in evaluate, just not for the reweighting.) I've included the alpha and error printouts above just to show that the resulting weights match the original code, but feel free to remove them and save some time.
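To see why the two updates agree: with weighted error $e$ and $\alpha = \tfrac{1}{2}\ln\frac{1-e}{e}$, the exponential update multiplies each misclassified weight by $e^{\alpha} = \sqrt{(1-e)/e}$ and each correct weight by $e^{-\alpha} = \sqrt{e/(1-e)}$. The misclassified group then sums to $e \cdot \sqrt{(1-e)/e} = \sqrt{e(1-e)}$ and the correct group to $(1-e) \cdot \sqrt{e/(1-e)} = \sqrt{e(1-e)}$, so the normalizer is $Z = 2\sqrt{e(1-e)}$ and each group ends up with total weight exactly $1/2$. Equivalently, $w_i \mapsto w_i/(2e)$ for misclassified examples and $w_i \mapsto w_i/(2(1-e))$ for correct ones, which is exactly what the loop in AdaBoostHalf computes.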
-Tony
I'm totally new to AdaBoost and still haven't found a way to make much sense of it. I'm not following what the set_rule function does, what the RULES and ALPHA attributes are for, or what the examples themselves represent. Are they just coordinates on a graph whose values at those locations are either 1 or -1?
I wonder what theory the rules m.set_rule(lambda x: 2 * (x[0] < 1.5) - 1), m.set_rule(lambda x: 2 * (x[0] < 4.5) - 1), and m.set_rule(lambda x: 2 * (x[1] > 5) - 1) are based on.
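For what it's worth, those rules are one-dimensional decision stumps: axis-aligned threshold tests, where the expression 2 * (condition) - 1 maps True/False to +1/-1. They are hand-picked here rather than learned. A minimal sketch of the same idea as a generic factory (make_stump, feature, threshold, and sign are illustrative names, not part of the gist):

def make_stump(feature, threshold, sign=1):
    # returns a weak learner predicting +1 when x[feature] < threshold
    # (or when x[feature] > threshold if sign == -1)
    if sign == 1:
        return lambda x: 2 * (x[feature] < threshold) - 1
    return lambda x: 2 * (x[feature] > threshold) - 1

# equivalent to the three hand-written rules above:
# m.set_rule(make_stump(0, 1.5))
# m.set_rule(make_stump(0, 4.5))
# m.set_rule(make_stump(1, 5, sign=-1))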