from __future__ import division
from numpy import *

class AdaBoost:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        self.weights = ones(self.N)/self.N
        self.RULES = []
        self.ALPHA = []

    def set_rule(self, func, test=False):
        errors = array([t[1]!=func(t[0]) for t in self.training_set])
        e = (errors*self.weights).sum()
        if test: return e
        alpha = 0.5 * log((1-e)/e)
        print 'e=%.2f a=%.2f'%(e, alpha)
        w = zeros(self.N)
        for i in range(self.N):
            if errors[i] == 1: w[i] = self.weights[i] * exp(alpha)
            else: w[i] = self.weights[i] * exp(-alpha)
        self.weights = w / w.sum()
        self.RULES.append(func)
        self.ALPHA.append(alpha)

    def evaluate(self):
        NR = len(self.RULES)
        for (x,l) in self.training_set:
            hx = [self.ALPHA[i]*self.RULES[i](x) for i in range(NR)]
            print x, sign(l) == sign(sum(hx))

if __name__ == '__main__':

    examples = []
    examples.append(((1,   2  ),  1))
    examples.append(((1,   4  ),  1))
    examples.append(((2.5, 5.5),  1))
    examples.append(((3.5, 6.5),  1))
    examples.append(((4,   5.4),  1))
    examples.append(((2,   1  ), -1))
    examples.append(((2,   4  ), -1))
    examples.append(((3.5, 3.5), -1))
    examples.append(((5,   2  ), -1))
    examples.append(((5,   5.5), -1))

    m = AdaBoost(examples)
    m.set_rule(lambda x: 2*(x[0] < 1.5)-1)
    m.set_rule(lambda x: 2*(x[0] < 4.5)-1)
    m.set_rule(lambda x: 2*(x[1] > 5)-1)
    m.evaluate()
I have commented it a bit:
from __future__ import division
from numpy import *

class AdaBoost:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        self.weights = ones(self.N)/self.N  # array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
        self.RULES = []
        self.ALPHA = []

    def set_rule(self, func, test=False):
        # True where the rule's prediction disagrees with the label, i.e. an error, e.g.
        # array([False, False, True, True, True, False, False, False, False, False], dtype=bool)
        errors = array([t[1]!=func(t[0]) for t in self.training_set])
        # print self.training_set, errors
        e = (errors*self.weights).sum()
        if test: return e
        alpha = 0.5 * log((1-e)/e)  # alpha shrinks as the weighted error e grows (and goes negative for e > 0.5)
        print 'e=%.2f a=%.2f'%(e, alpha)
        w = zeros(self.N)  # array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
        for i in range(self.N):
            # Increase or decrease each weight depending on whether the point was misclassified
            if errors[i] == 1: w[i] = self.weights[i] * exp(alpha)   # increased if misclassified
            else: w[i] = self.weights[i] * exp(-alpha)               # decreased if correctly classified
        self.weights = w / w.sum()  # renormalized weights; these change e, and thus alpha, for the next rule
        self.RULES.append(func)
        self.ALPHA.append(alpha)

    def evaluate(self):
        NR = len(self.RULES)
        for (x,l) in self.training_set:
            hx = [self.ALPHA[i]*self.RULES[i](x) for i in range(NR)]
            print x, sign(l) == sign(sum(hx))

if __name__ == '__main__':

    examples = []
    examples.append(((1,   2  ),  1))
    examples.append(((1,   4  ),  1))
    examples.append(((2.5, 5.5),  1))
    examples.append(((3.5, 6.5),  1))
    examples.append(((4,   5.4),  1))
    examples.append(((2,   1  ), -1))
    examples.append(((2,   4  ), -1))
    examples.append(((3.5, 3.5), -1))
    examples.append(((5,   2  ), -1))
    examples.append(((5,   5.5), -1))

    m = AdaBoost(examples)
    m.set_rule(lambda x: 2*(x[0] < 1.5)-1)  # each rule is a simple threshold test (a decision stump)
    m.set_rule(lambda x: 2*(x[0] < 4.5)-1)
    m.set_rule(lambda x: 2*(x[1] > 5)-1)
    m.evaluate()
I ported your code to modern C++ to help me learn about AdaBoost:
#include <vector>
#include <functional>
#include <algorithm>
#include <numeric>   // inner_product, accumulate
#include <cmath>
#include <cstdio>    // printf
using namespace std;
typedef vector<float> data_point;
typedef pair<data_point, int> train_point;
typedef vector<train_point> training_set;
typedef function<int(const data_point&)> wc;
template<typename T>
int sign(T v) { return (v < 0) ? -1 : (v > 0) ? 1 : 0; }
class adaboost
{
public:
adaboost( training_set ts ) :
_ts( ts ),
_N( _ts.size() ),
_weights(_N, 1.0f/_N),
_rules(),
_alpha()
{
}
void set_rule( wc func )
{
vector<float> errors( _N );
transform( _ts.begin(), _ts.end(), errors.begin(),
[func](const train_point& tp){ return (func(tp.first) != tp.second)?1.0f:0.0f; } );
float e = inner_product( errors.begin(), errors.end(), _weights.begin(), 0.0f );
float alpha = 0.5f * log((1.0f-e)/e);
printf("e=%.2f a=%.2f\n",e,alpha);
vector<float> w(_N, 0.0f);
for( size_t i = 0; i < _N; ++i )
{
if( errors[i] == 1.0f )
w[i] = _weights[i] * exp(alpha);
else w[i] = _weights[i] * exp(-alpha);
}
float sum = accumulate( w.begin(), w.end(), 0.0f );
transform( w.begin(), w.end(), _weights.begin(), [sum]( float v ){ return v / sum; } );
_rules.push_back(func);
_alpha.push_back(alpha);
}
void eval()
{
size_t NR = _rules.size();
for( auto tp : _ts )
{
vector<float> hx(NR);
for( size_t i = 0; i < NR; ++i )
hx[i] = _alpha[i] * _rules[i](tp.first);
float sumHX = accumulate( hx.begin(), hx.end(), 0.0f );
// printing
printf("(");
for_each( tp.first.begin(), tp.first.end(), [](float v){printf("%.2f, ",v);} );
printf(") ");
printf("%s\n",(sign(tp.second)==sign(sumHX))?"True":"False");
}
}
private:
training_set _ts;
size_t _N;
vector<float> _weights;
vector<wc> _rules;
vector<float> _alpha;
};
int main( int argc, char* argv[] )
{
training_set ts;
ts.push_back( { {1.0f, 2.0f}, 1 } );
ts.push_back( { {1.0f, 4.0f}, 1 } );
ts.push_back( { {2.5f, 5.5f}, 1 } );
ts.push_back( { {3.5f, 6.5f}, 1 } );
ts.push_back( { {4.0f, 5.4f}, 1 } );
ts.push_back( { {2.0f, 1.0f}, -1 } );
ts.push_back( { {2.0f, 4.0f}, -1 } );
ts.push_back( { {3.5f, 3.5f}, -1 } );
ts.push_back( { {5.0f, 2.0f}, -1 } );
ts.push_back( { {5.0f, 5.5f}, -1 } );
adaboost m(ts);
m.set_rule( [](const data_point& dp){ return 2.0f * (dp[0] < 1.5f)-1.0f; } );
m.set_rule( [](const data_point& dp){ return 2.0f * (dp[0] < 4.5f)-1.0f; } );
m.set_rule( [](const data_point& dp){ return 2.0f * (dp[1] > 5.0f)-1.0f; } );
m.eval();
}
I wonder which theory the rules
m.set_rule(lambda x: 2 * (x[0] < 1.5) - 1)
m.set_rule(lambda x: 2 * (x[0] < 4.5) - 1)
m.set_rule(lambda x: 2 * (x[1] > 5) - 1)
are based on.
import numpy as np

class AdaBoostHalf:

    def __init__(self, training_set):
        self.training_set = training_set
        self.N = len(self.training_set)
        self.weights = np.ones(self.N)/self.N
        self.RULES = []
        self.ALPHA = []

    def set_rule(self, func, test=False):
        errors = np.array([t[1]!=func(t[0]) for t in self.training_set])
        e = (errors*self.weights).sum()
        if test: return e
        alpha = 0.5 * np.log((1-e)/e)  # kept only for evaluate(); the 1/2 trick below updates the weights without it
        print('e={} a={}'.format(e, alpha))
        w = np.zeros(self.N)
        sWrong = np.sum(self.weights[errors])   # total weight of the misclassified points
        sRight = np.sum(self.weights[~errors])  # total weight of the correctly classified points
        for i in range(self.N):
            if errors[i] == 1: w[i] = self.weights[i]/(2.0*sWrong)
            else: w[i] = self.weights[i]/(2.0*sRight)
        self.weights = w / w.sum()
        print(self.weights)
        self.RULES.append(func)
        self.ALPHA.append(alpha)
        print('alpha = {}'.format(alpha))

    def evaluate(self):
        for (x,l) in self.training_set:
            hx = [alpha * rule(x) for alpha, rule in zip(self.ALPHA, self.RULES)]
            print(x, np.sign(l) == np.sign(sum(hx)))
This version is based on Patrick Winston's lecture on boosting (https://www.youtube.com/watch?v=UHBmv7qCey4). It turns out you do not have to compute the error and exponentiate alpha to update the weights. It is mathematically equivalent to simply rescaling so that the weights of the correct predictions add up to 1/2 and the weights of the incorrect predictions add up to 1/2. I've included the alpha and error calculations above just to show that the resulting weights match the original code, but feel free to remove them and save some time.
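A quick numerical check of that equivalence (a small standalone sketch, not part of the gist, using made-up weights and a made-up error mask): apply both updates to the same weight vector and compare.

import numpy as np

# Made-up example weights (summing to 1) and a boolean mask of misclassified points.
weights = np.array([0.1, 0.1, 0.1, 0.2, 0.2, 0.3])
errors = np.array([True, False, True, False, False, False])

e = weights[errors].sum()               # weighted error of the rule
alpha = 0.5 * np.log((1 - e) / e)

# Original update: scale by exp(+alpha) for wrong points, exp(-alpha) for right ones, then renormalize.
w_exp = np.where(errors, weights * np.exp(alpha), weights * np.exp(-alpha))
w_exp /= w_exp.sum()

# 1/2 trick: rescale each group so the wrong and the right weights both sum to 1/2.
w_half = np.where(errors,
                  weights / (2 * weights[errors].sum()),
                  weights / (2 * weights[~errors].sum()))

print(np.allclose(w_exp, w_half))       # True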
-Tony
I'm totally new to AdaBoost and still haven't found a way to make much sense of it. I'm not following what the set_rule function does, what the RULES and ALPHA attributes are for, or what the examples themselves represent. Are they just coordinates on a graph, with the value at each location being either 1 or -1?
Thank you, this made it easy to understand what AdaBoost does.
If I may suggest though, instead of
hx = [self.ALPHA[i]*self.RULES[i](x) for i in range(NR)]
it'll be more pythonic to use
hx = [alpha * rules(x) for alpha, rules in zip(self.ALPHA, self.RULES)]
Also, for the sake of readability, I think
import numpy as np
is better than
from numpy import *.
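Putting the two suggestions together, the zip-based combination reads like this on its own (a small standalone sketch with made-up rules and alpha values, not taken from the gist's output):

import numpy as np

# Hypothetical decision stumps and their alpha weights.
rules = [lambda x: 1 if x[0] < 1.5 else -1,
         lambda x: 1 if x[1] > 5.0 else -1]
alphas = [0.8, 0.3]

x = (1.0, 2.0)
hx = [alpha * rule(x) for alpha, rule in zip(alphas, rules)]
print(int(np.sign(sum(hx))))  # combined prediction: +1 here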