main.cpp
#include <iostream>
#include <iomanip>
#include <fstream>
#include <sstream>
#include <vector>
#include <Eigen/Eigen>
#include <unsupported/Eigen/NonLinearOptimization>

struct LMFunctor
{
    // 'm' pairs of (x, f(x))
    Eigen::MatrixXf measuredValues;

    // Compute 'm' errors, one for each data point, for the given parameter values in 'x'
    int operator()(const Eigen::VectorXf &x, Eigen::VectorXf &fvec) const
    {
        // 'x' has dimensions n x 1
        // It contains the current estimates for the parameters.

        // 'fvec' has dimensions m x 1
        // It will contain the error for each data point.

        float aParam = x(0);
        float bParam = x(1);
        float cParam = x(2);

        for (int i = 0; i < values(); i++) {
            float xValue = measuredValues(i, 0);
            float yValue = measuredValues(i, 1);

            fvec(i) = yValue - (aParam * xValue * xValue + bParam * xValue + cParam);
        }
        return 0;
    }

    // Compute the jacobian of the errors
    int df(const Eigen::VectorXf &x, Eigen::MatrixXf &fjac) const
    {
        // 'x' has dimensions n x 1
        // It contains the current estimates for the parameters.

        // 'fjac' has dimensions m x n
        // It will contain the jacobian of the errors, calculated numerically in this case.
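        // Column 'i' of 'fjac' holds the partial derivatives of the errors with respect
        // to parameter x(i), approximated below by a central difference:
        //     d(fvec)/d(x_i) ≈ (fvec(x + eps*e_i) - fvec(x - eps*e_i)) / (2 * eps)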

        float epsilon = 1e-5f;

        for (int i = 0; i < x.size(); i++) {
            Eigen::VectorXf xPlus(x);
            xPlus(i) += epsilon;

            Eigen::VectorXf xMinus(x);
            xMinus(i) -= epsilon;

            Eigen::VectorXf fvecPlus(values());
            operator()(xPlus, fvecPlus);

            Eigen::VectorXf fvecMinus(values());
            operator()(xMinus, fvecMinus);

            Eigen::VectorXf fvecDiff(values());
            fvecDiff = (fvecPlus - fvecMinus) / (2.0f * epsilon);

            fjac.block(0, i, values(), 1) = fvecDiff;
        }

        return 0;
    }

    // Number of data points, i.e. values.
    int m;

    // Returns 'm', the number of values.
    int values() const { return m; }

    // The number of parameters, i.e. inputs.
    int n;

    // Returns 'n', the number of inputs.
    int inputs() const { return n; }
};

int main(int argc, char *argv[])
{
    //
    // Goal
    //
    // Given a non-linear equation: f(x) = a(x^2) + b(x) + c
    // and 'm' data points (x1, f(x1)), (x2, f(x2)), ..., (xm, f(xm)),
    // our goal is to estimate 'n' parameters (3 in this case: a, b, c)
    // using LM optimization.
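    //
    // LM does this by minimizing the sum of squared residuals:
    //     S(a, b, c) = sum_i [ f(x_i) - (a*x_i^2 + b*x_i + c) ]^2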
    //

    //
    // Read values from file.
    // Each row has two numbers, for example: 5.50 223.70
    // The first number is the input value (5.50) i.e. the value of 'x'.
    // The second number is the observed output value (223.70),
    // i.e. the measured value of 'f(x)'.
    //
    std::ifstream infile("measured_data.txt");
    if (!infile) {
        std::cout << "Unable to read file." << std::endl;
        return -1;
    }

    std::vector<float> x_values;
    std::vector<float> y_values;

    std::string line;
    while (std::getline(infile, line)) {
        std::istringstream ss(line);
        float x, y;
        // Skip lines that do not contain two numbers (e.g. a trailing blank line).
        if (!(ss >> x >> y)) {
            continue;
        }
        x_values.push_back(x);
        y_values.push_back(y);
    }

    // 'm' is the number of data points.
    int m = x_values.size();

    // Move the data into an Eigen Matrix.
    // The first column has the input values, x. The second column is the f(x) values.
    Eigen::MatrixXf measuredValues(m, 2);
    for (int i = 0; i < m; i++) {
        measuredValues(i, 0) = x_values[i];
        measuredValues(i, 1) = y_values[i];
    }

    // 'n' is the number of parameters in the function.
    // f(x) = a(x^2) + b(x) + c has 3 parameters: a, b, c
    int n = 3;

    // 'x' is a vector of length 'n' containing the initial values for the parameters.
    // The parameters 'x' are also referred to as the 'inputs' in the context of LM optimization.
    // The LM optimization inputs should not be confused with the x input values.
    Eigen::VectorXf x(n);
    x(0) = 0.0; // initial value for 'a'
    x(1) = 0.0; // initial value for 'b'
    x(2) = 0.0; // initial value for 'c'
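
    // Note: LM is a local optimizer, so in general the result can depend on this initial
    // guess. Here the model is linear in a, b and c, so starting from zero is fine.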

    //
    // Run the LM optimization
    // Create a LevenbergMarquardt object and pass it the functor.
    //
    LMFunctor functor;
    functor.measuredValues = measuredValues;
    functor.m = m;
    functor.n = n;

    Eigen::LevenbergMarquardt<LMFunctor, float> lm(functor);
    int status = lm.minimize(x);
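    // 'status' is the Eigen::LevenbergMarquardtSpace::Status value returned by minimize(),
    // describing why the solver stopped (e.g. the relative reduction or the relative error
    // fell below its tolerance).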
std::cout << "LM optimization status: " << status << std::endl;
//
// Results
// The 'x' vector also contains the results of the optimization.
//
std::cout << "Optimization results" << std::endl;
std::cout << "\ta: " << x(0) << std::endl;
std::cout << "\tb: " << x(1) << std::endl;
std::cout << "\tc: " << x(2) << std::endl;
return 0;
}