#ifndef _POLYNOMIAL_REGRESSION_H
#define _POLYNOMIAL_REGRESSION_H __POLYNOMIAL_REGRESSION_H
/**
 * PURPOSE:
 *
 * Polynomial Regression aims to fit a non-linear relationship to a set of
 * points. It approximates this by solving a series of linear equations using
 * a least-squares approach.
 *
 * We can model the expected value y as an nth degree polynomial, yielding
 * the general polynomial regression model:
 *
 *   y = a0 + a1 * x + a2 * x^2 + ... + an * x^n
 *
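 * The coefficients a0..an are found by forming and solving the normal
 * equations of the least-squares problem:
 *
 *   [ S0   S1   ...  Sn   ] [ a0 ]   [ T0 ]
 *   [ S1   S2   ...  Sn+1 ] [ a1 ] = [ T1 ]
 *   [ ...                 ] [ .. ]   [ .. ]
 *   [ Sn   Sn+1 ...  S2n  ] [ an ]   [ Tn ]
 *
 * where Sk = sum(xi^k) and Tk = sum(xi^k * yi) over all data points. The
 * augmented system is solved below by row pivoting, Gaussian elimination
 * and back substitution.
 *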
 * LICENSE:
 *
 * MIT License
 *
 * Copyright (c) 2020 Chris Engelsma
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * @author Chris Engelsma
 */
#include <vector>
#include <stdlib.h>
#include <cmath>
#include <stdexcept>

template <class TYPE>
class PolynomialRegression {
 public:
  PolynomialRegression();
  virtual ~PolynomialRegression(){};

  bool fitIt(
    const std::vector<TYPE> & x,
    const std::vector<TYPE> & y,
    const int & order,
    std::vector<TYPE> & coeffs);
};

template <class TYPE>
PolynomialRegression<TYPE>::PolynomialRegression() {};

template <class TYPE>
bool PolynomialRegression<TYPE>::fitIt(
  const std::vector<TYPE> & x,
  const std::vector<TYPE> & y,
  const int & order,
  std::vector<TYPE> & coeffs)
{
  // The size of xValues and yValues should be same
  if (x.size() != y.size()) {
    throw std::runtime_error( "The size of x & y arrays are different" );
    return false;
  }
  // The size of xValues and yValues cannot be 0, should not happen
  if (x.size() == 0 || y.size() == 0) {
    throw std::runtime_error( "The size of x or y arrays is 0" );
    return false;
  }

  size_t N = x.size();
  int n = order;
  int np1 = n + 1;
  int np2 = n + 2;
  int tnp1 = 2 * n + 1;
  TYPE tmp;

  // X = vector that stores values of sigma(xi^2n)
  std::vector<TYPE> X(tnp1);
  for (int i = 0; i < tnp1; ++i) {
    X[i] = 0;
    for (int j = 0; j < N; ++j)
      X[i] += (TYPE)pow(x[j], i);
  }

  // a = vector to store final coefficients.
  std::vector<TYPE> a(np1);

  // B = normal augmented matrix that stores the equations.
  std::vector<std::vector<TYPE> > B(np1, std::vector<TYPE> (np2, 0));

  for (int i = 0; i <= n; ++i)
    for (int j = 0; j <= n; ++j)
      B[i][j] = X[i + j];

  // Y = vector to store values of sigma(xi^n * yi)
  std::vector<TYPE> Y(np1);
  for (int i = 0; i < np1; ++i) {
    Y[i] = (TYPE)0;
    for (int j = 0; j < N; ++j) {
      Y[i] += (TYPE)pow(x[j], i)*y[j];
    }
  }

  // Load values of Y as last column of B
  for (int i = 0; i <= n; ++i)
    B[i][np1] = Y[i];

  n += 1;
  int nm1 = n-1;

  // Pivotisation of the B matrix.
  for (int i = 0; i < n; ++i)
    for (int k = i+1; k < n; ++k)
      if (B[i][i] < B[k][i])
        for (int j = 0; j <= n; ++j) {
          tmp = B[i][j];
          B[i][j] = B[k][j];
          B[k][j] = tmp;
        }

  // Performs the Gaussian elimination.
  // (1) Make all elements below the pivot equals to zero
  //     or eliminate the variable.
  for (int i = 0; i < nm1; ++i)
    for (int k = i+1; k < n; ++k) {
      TYPE t = B[k][i] / B[i][i];
      for (int j = 0; j <= n; ++j)
        B[k][j] -= t*B[i][j];       // (1)
    }

  // Back substitution.
  // (1) Set the variable as the rhs of last equation
  // (2) Subtract all lhs values except the target coefficient.
  // (3) Divide rhs by coefficient of variable being calculated.
  for (int i = nm1; i >= 0; --i) {
    a[i] = B[i][n];                 // (1)
    for (int j = 0; j < n; ++j)
      if (j != i)
        a[i] -= B[i][j] * a[j];     // (2)
    a[i] /= B[i][i];                // (3)
  }

  coeffs.resize(a.size());
  for (size_t i = 0; i < a.size(); ++i)
    coeffs[i] = a[i];

  return true;
}
#endif
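For quick reference, a minimal usage sketch (assuming the header above is saved as `PolynomialRegression.h`, as in the test programs further down): it fits a quadratic to five points taken from y = 1 + 2x + 3x^2, so the recovered coefficients should come out close to 1, 2 and 3.

```cpp
#include <iostream>
#include <vector>
#include "PolynomialRegression.h"

int main() {
  // Five samples of y = 1 + 2x + 3x^2.
  std::vector<double> x = {0.0, 1.0, 2.0, 3.0, 4.0};
  std::vector<double> y = {1.0, 6.0, 17.0, 34.0, 57.0};
  std::vector<double> coeffs;

  PolynomialRegression<double> poly;
  poly.fitIt(x, y, 2, coeffs);

  // Expected output (up to rounding): 1 2 3
  for (double c : coeffs) std::cout << c << ' ';
  std::cout << '\n';
  return 0;
}
```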
Hey this is like the fifth google result for "c++ quadratic fit". Can you specify a license for this file?
I've added an MIT license to this, cheers!
FYI This current revision (108f7ab0a746323beaaf7d6634cf4add) doesn't compile:

- Line 62: `SpPolynomialRegression::` should be `PolynomialRegression::`
- Line 91: `get` is not a method of `std::vector`.
- Line 154: `put` is not a method of `std::vector`.

Once I fixed those it seems to pass my unit tests. Thank you!
Thanks! I realized I had some extra abstractions on my end - oops!

I made the following updates:

- Line 62 now has `PolynomialRegression::`
- Line 91 went from `x.get(j)` to `x[j]`
- Line 154 went from `coeffs.put(i, a[i])` to `coeffs.insert(i, a[i])`
Make Line 154 `coeffs[i] = a[i];` and you will be good. `std::vector<TYPE>::insert` increases the array size (inserting, not replacing) so you end up with an array of `a.size() * 2` elements.
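A quick sketch of the difference, assuming `TYPE` is `double` (the iterator form of `insert` is used here just for illustration):

```cpp
#include <iostream>
#include <vector>

int main() {
  std::vector<double> a = {1.0, 2.0, 3.0};

  // Roughly what the previous revision did: insert() grows the vector,
  // leaving the original resized elements behind the inserted ones.
  std::vector<double> withInsert(a.size());
  for (size_t i = 0; i < a.size(); ++i)
    withInsert.insert(withInsert.begin() + i, a[i]);
  std::cout << withInsert.size() << '\n';  // prints 6, i.e. a.size() * 2

  // The fix: operator[] overwrites in place, so the size stays a.size().
  std::vector<double> withIndex(a.size());
  for (size_t i = 0; i < a.size(); ++i)
    withIndex[i] = a[i];
  std::cout << withIndex.size() << '\n';   // prints 3
  return 0;
}
```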
Good call! Updated 👍
Last one! You need to add `#include <cmath>` for your `std::pow()` calls to work. I was including it elsewhere in my unit tests and didn't catch it before.
Done!
Thanks for this code!

I had to `#include <stdexcept>` to get this script to work. Otherwise `std::runtime_error` is not found.
I added it into the gist, thanks!
I've found a bug. `TYPE` is `double`.

`fitIt( {16383.0, 16384.0, 16385.0}, {-6955950.0, -6958320.0, -6960690.0}, 2, coeffs );`

`coeffs` ends up being full of `nan`s because of a divide by zero at `a[i] /= B[i][i];`, since `B[2]` ends up full of `0`s.

This happens because at `B[k][j] -= t*B[i][j];`:

- `i = 0`
- `j = 1`
- `k = 2`
- `t = 3.7252902892100557e-09`
- `B[k][j] = 49152.0`
- `B[i][j] = 13194139631616.0`

`t*B[i][j]` evaluates to `49152.0` rather than the expected `49152.000244140625`, causing `B[k][j] -= t*B[i][j]` to be `49152.0 - 49152.0 = 0.0`.

I'm still trying to figure out exactly why it's happening and how to fix it.
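In case it helps, here is a self-contained driver for the call above (just a sketch; it prints whatever ends up in `coeffs`):

```cpp
#include <iostream>
#include <vector>
#include "PolynomialRegression.h"

int main() {
  std::vector<double> x = {16383.0, 16384.0, 16385.0};
  std::vector<double> y = {-6955950.0, -6958320.0, -6960690.0};
  std::vector<double> coeffs;

  PolynomialRegression<double> poly;
  poly.fitIt(x, y, 2, coeffs);

  // With the current revision the coefficients come out as nan,
  // as described above, because B[2] ends up all zeros before the division.
  for (double c : coeffs) std::cout << c << ' ';
  std::cout << '\n';
  return 0;
}
```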
Okay! This "Losing My Precision" article suggested employing `log`s to convert multiplication/division to addition/subtraction.

I appear to have been able to fix the problems I was having by using `betterMult(t, B[i][j])` when the standard multiplication result `v` ends up testing `true` for `std::isnan(v) || std::fmod(v, 1.0) == 0.0`.

`betterMult` in this case is a new method I created for the class that ultimately returns:

`std::pow(TYPE(10), (std::log10(std::abs(lhs)) + std::log10(std::abs(rhs)))) * lhsSign * rhsSign`

...where `lhsSign` and `rhsSign` are `1` if their respective value is positive, `-1` if negative, and `0` if zero (see https://stackoverflow.com/questions/1903954 ).
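Roughly, the helper ends up looking like this (a sketch of the idea only; the `sign` helper name is made up here, following the Stack Overflow approach linked above):

```cpp
#include <cmath>

// Sign helper along the lines of the Stack Overflow answer linked above:
// 1 for positive, -1 for negative, 0 for zero.
template <typename TYPE>
int sign(TYPE v) {
  return (TYPE(0) < v) - (v < TYPE(0));
}

// Multiply by adding logarithms instead of multiplying directly, as described
// in the comment above. Zero operands are handled up front since log10(0)
// is -infinity.
template <typename TYPE>
TYPE betterMult(TYPE lhs, TYPE rhs) {
  if (lhs == TYPE(0) || rhs == TYPE(0)) return TYPE(0);
  return std::pow(TYPE(10), (std::log10(std::abs(lhs)) + std::log10(std::abs(rhs))))
         * sign(lhs) * sign(rhs);
}
```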
code correction - removal of a critical error

After the declaration of the `a` array, all its elements should be assigned the value of zero:

`for (i = 0; i < n + 1; i++) a[i] = 0;`

In the previous version, the program might sometimes work properly, and sometimes not.

Krzysztof N. from Gdynia
There is either a flaw in the code or in my understanding of how to use the code. With the following code and data, the coefficients seem to be increasingly erroneous. With the data, a 6th-order polynomial should be a near exact fit, but it is very off.

The code is:

```cpp
#include <iostream>
#include <vector>
#include "PolynomialRegression.h"

int main( int argc, const char* argv[] ) {
  const int order = atoi(argv[1]);
  std::cout << "order: " << order << '\n';
  int samples;
  std::cin >> samples;
  std::cout << "samples: " << samples << '\n';
  std::vector<double> x;
  for ( int i = 0; i < samples; ++i ) {
    double tmp;
    std::cin >> tmp;
    x.push_back(tmp);
  }
  std::vector<double> y;
  for ( int i = 0; i < samples; ++i ) {
    double tmp;
    std::cin >> tmp;
    y.push_back(tmp);
  }
  PolynomialRegression<double> polyreg;
  std::vector<double> coeffs;
  polyreg.fitIt( x, y, order, coeffs );
  std::cout << "coefficients:";
  for ( int j = 0; j <= order; ++j ) {
    std::cout << ' ' << coeffs[j];
  }
  std::cout << '\n';
  std::cout << "entered :";
  for ( int k = 0; k < samples; ++k ) {
    std::cout << ' ' << y[k];
  }
  std::cout << '\n';
  std::cout << "computed:";
  for ( int l = 0; l < samples; ++l ) {
    std::cout << ' ' << coeffs[0] + coeffs[1]*x[l] + coeffs[2]*x[l]*x[l];
  }
  std::cout << '\n';
}
```
The data is:
7
0.365616 1.854900 3.709790 7.419590 14.839200 29.678300 59.356700
0.246729 0.204710 0.174115 0.355591 0.878094 1.728551 2.897817
The problem above is actually in the test program that I provided. The evaluation polynomial wasn't growing with the order. The updated program follows.

```cpp
#include <iostream>
#include <vector>
#include "PolynomialRegression.h"

int main( int argc, const char* argv[] ) {
  // Get the order from the command line.
  const int order = atoi(argv[1]);
  std::cout << "order: " << order << '\n';
  // Read the number of samples.
  int samples;
  std::cin >> samples;
  std::cout << "samples: " << samples << '\n';
  // Read the x coordinates.
  std::vector<double> x;
  for ( int i = 0; i < samples; ++i ) {
    double tmp;
    std::cin >> tmp;
    x.push_back(tmp);
  }
  // Read the y coordinates.
  std::vector<double> y;
  for ( int i = 0; i < samples; ++i ) {
    double tmp;
    std::cin >> tmp;
    y.push_back(tmp);
  }
  // Do the regression.
  PolynomialRegression<double> polyreg;
  std::vector<double> coeffs;
  polyreg.fitIt( x, y, order, coeffs );
  // Print the coefficients.
  std::cout << "coefficients:";
  for ( int j = 0; j <= order; ++j ) {
    std::cout << ' ' << coeffs[j];
  }
  std::cout << '\n';
  // Print the entered y coordinates.
  std::cout << "entered :";
  for ( int k = 0; k < samples; ++k ) {
    std::cout << ' ' << y[k];
  }
  std::cout << '\n';
  // Print the computed y coordinates.
  std::cout << "computed:";
  for ( int l = 0; l < samples; ++l ) {
    double value = coeffs[0];
    for ( int m = 1; m <= order; ++m ) {
      value += coeffs[m]*pow(x[l],m);
    }
    std::cout << ' ' << value;
  }
  std::cout << '\n';
}
```