Diffstat (limited to 'controller/fw/levmarq')
-rw-r--r--  controller/fw/levmarq/levmarq.c  272
1 file changed, 141 insertions(+), 131 deletions(-)
diff --git a/controller/fw/levmarq/levmarq.c b/controller/fw/levmarq/levmarq.c
index bbf00c0..a3a77b5 100644
--- a/controller/fw/levmarq/levmarq.c
+++ b/controller/fw/levmarq/levmarq.c
@@ -20,6 +20,7 @@
#include <arm_math.h>
#include "levmarq.h"
+#include "simulation.h"
#define TOL 1e-20f /* smallest value allowed in cholesky_decomp() */
@@ -28,18 +29,20 @@
/* set parameters required by levmarq() to default values */
void levmarq_init(LMstat *lmstat)
{
- lmstat->max_it = 10000;
- lmstat->init_lambda = 0.0001f;
- lmstat->up_factor = 10.0f;
- lmstat->down_factor = 10.0f;
- lmstat->target_derr = 1e-12f;
+ lmstat->max_it = 10000;
+ lmstat->init_lambda = 0.0001f;
+ lmstat->up_factor = 10.0f;
+ lmstat->down_factor = 10.0f;
+ lmstat->target_derr = 1e-12f;
}
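As a minimal sketch of how these defaults are typically consumed (the helper and the overridden values below are illustrative, not part of this diff): take the defaults from levmarq_init() and adjust individual fields before the fit.

static void configure_fit(LMstat *lmstat)
{
    levmarq_init(lmstat);             /* load the defaults above */
    lmstat->max_it      = 500;        /* hypothetical: cap the iteration count */
    lmstat->target_derr = 1e-9f;      /* hypothetical: declare convergence on a larger per-step improvement */
}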
+#ifndef SIMULATION
float sqrtf(float arg) {
float out=NAN;
arm_sqrt_f32(arg, &out);
return out;
}
+#endif
/* perform least-squares minimization using the Levenberg-Marquardt
algorithm. The arguments are as follows:
@@ -49,140 +52,147 @@ float sqrtf(float arg) {
ny number of measurements to be fit
y array of measurements
dysq array of error in measurements, squared
- (set dysq=NULL for unweighted least-squares)
+ (set dysq=NULL for unweighted least-squares)
func function to be fit
grad gradient of "func" with respect to the input parameters
fdata pointer to any additional data required by the function
lmstat pointer to the "status" structure, where minimization parameters
- are set and the final status is returned.
+ are set and the final status is returned.
Before calling levmarq, several of the parameters in lmstat must be set.
For default values, call levmarq_init(lmstat).
- */
+ */
int levmarq(int npar, float *par, int ny, float *y, float *dysq,
- float (*func)(float *, int, void *),
- void (*grad)(float *, float *, int, void *),
- void *fdata, LMstat *lmstat)
+ float (*func)(float *, int, void *),
+ void (*grad)(float *, float *, int, void *),
+ void *fdata, LMstat *lmstat)
{
- int x,i,j,it,nit,ill;
- float lambda,up,down,mult,weight,err,newerr,derr,target_derr;
- float h[npar][npar],ch[npar][npar];
- float g[npar],d[npar],delta[npar],newpar[npar];
-
- nit = lmstat->max_it;
- lambda = lmstat->init_lambda;
- up = lmstat->up_factor;
- down = 1/lmstat->down_factor;
- target_derr = lmstat->target_derr;
- weight = 1;
- derr = newerr = 0; /* to avoid compiler warnings */
-
- /* calculate the initial error ("chi-squared") */
- err = error_func(par, ny, y, dysq, func, fdata);
-
- /* main iteration */
- for (it=0; it<nit; it++) {
-
- /* calculate the approximation to the Hessian and the "derivative" d */
- for (i=0; i<npar; i++) {
- d[i] = 0;
- for (j=0; j<=i; j++)
- h[i][j] = 0;
- }
- for (x=0; x<ny; x++) {
- if (dysq) weight = 1/dysq[x]; /* for weighted least-squares */
- grad(g, par, x, fdata);
- for (i=0; i<npar; i++) {
- d[i] += (y[x] - func(par, x, fdata))*g[i]*weight;
- for (j=0; j<=i; j++)
- h[i][j] += g[i]*g[j]*weight;
- }
- }
-
- /* make a step "delta." If the step is rejected, increase
- lambda and try again */
- mult = 1 + lambda;
- ill = 1; /* ill-conditioned? */
- while (ill && (it<nit)) {
- for (i=0; i<npar; i++)
- h[i][i] = h[i][i]*mult;
-
- ill = cholesky_decomp(npar, ch, h);
-
- if (!ill) {
- solve_axb_cholesky(npar, ch, delta, d);
- for (i=0; i<npar; i++)
- newpar[i] = par[i] + delta[i];
- newerr = error_func(newpar, ny, y, dysq, func, fdata);
- derr = newerr - err;
- ill = (derr > 0);
- }
- if (ill) {
- mult = (1 + lambda*up)/(1 + lambda);
- lambda *= up;
- it++;
- }
+ int x,i,j,it,nit,ill;
+ float lambda,up,down,mult,weight,err,newerr,derr,target_derr;
+ float h[npar][npar],ch[npar][npar];
+ float g[npar],d[npar],delta[npar],newpar[npar];
+
+ nit = lmstat->max_it;
+ lambda = lmstat->init_lambda;
+ up = lmstat->up_factor;
+ down = 1/lmstat->down_factor;
+ target_derr = lmstat->target_derr;
+ weight = 1;
+ derr = newerr = 0; /* to avoid compiler warnings */
+
+ /* calculate the initial error ("chi-squared") */
+ err = error_func(par, ny, y, dysq, func, fdata);
+
+ /* main iteration */
+ for (it=0; it<nit; it++) {
+ //DEBUG_PRINT("iteration %d", it);
+
+ /* calculate the approximation to the Hessian and the "derivative" d */
+ for (i=0; i<npar; i++) {
+ d[i] = 0;
+ for (j=0; j<=i; j++)
+ h[i][j] = 0;
+ }
+
+ for (x=0; x<ny; x++) {
+ if (dysq) weight = 1/dysq[x]; /* for weighted least-squares */
+ grad(g, par, x, fdata);
+ for (i=0; i<npar; i++) {
+ d[i] += (y[x] - func(par, x, fdata))*g[i]*weight;
+ for (j=0; j<=i; j++)
+ h[i][j] += g[i]*g[j]*weight;
+ }
+ }
+
+ /* make a step "delta." If the step is rejected, increase
+ lambda and try again */
+ mult = 1 + lambda;
+ ill = 1; /* ill-conditioned? */
+ while (ill && (it<nit)) {
+ for (i=0; i<npar; i++)
+ h[i][i] = h[i][i]*mult;
+
+ ill = cholesky_decomp(npar, ch, h);
+
+ if (!ill) {
+ solve_axb_cholesky(npar, ch, delta, d);
+ for (i=0; i<npar; i++)
+ newpar[i] = par[i] + delta[i];
+ newerr = error_func(newpar, ny, y, dysq, func, fdata);
+ derr = newerr - err;
+ ill = (derr > 0);
+ }
+ if (ill) {
+ mult = (1 + lambda*up)/(1 + lambda);
+ lambda *= up;
+ it++;
+ }
+ }
+ for (i=0; i<npar; i++)
+ par[i] = newpar[i];
+ err = newerr;
+ lambda *= down;
+
+ if ((!ill)&&(-derr<target_derr)) break;
}
- for (i=0; i<npar; i++)
- par[i] = newpar[i];
- err = newerr;
- lambda *= down;
- if ((!ill)&&(-derr<target_derr)) break;
- }
+ lmstat->final_it = it;
+ lmstat->final_err = err;
+ lmstat->final_derr = derr;
- lmstat->final_it = it;
- lmstat->final_err = err;
- lmstat->final_derr = derr;
+ if (it == nit) {
+ DEBUG_PRINT("did not converge");
+ return -1;
+ }
- return (it==nit);
+ return it;
}
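A usage sketch of the interface documented above (the model, gradient, and fit_line helper are assumed for illustration, not part of this commit): fit a line a + b*x to ny samples, treating the measurement index x as the abscissa, and use the return convention introduced here (-1 on non-convergence, otherwise the iteration count).

#include "levmarq.h"

/* Hypothetical model: a straight line a + b*x evaluated at measurement index x. */
static float line_func(float *par, int x, void *fdata)
{
    (void)fdata;
    return par[0] + par[1] * (float)x;
}

/* Gradient of the model with respect to par[0] (= a) and par[1] (= b). */
static void line_grad(float *g, float *par, int x, void *fdata)
{
    (void)par; (void)fdata;
    g[0] = 1.0f;
    g[1] = (float)x;
}

/* Returns the iteration count on convergence, -1 otherwise. */
static int fit_line(float par[2], float *y, int ny)
{
    LMstat lmstat;
    levmarq_init(&lmstat);
    return levmarq(2, par, ny, y, NULL, line_func, line_grad, NULL, &lmstat);
}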
/* calculate the error function (chi-squared) */
float error_func(float *par, int ny, float *y, float *dysq,
- float (*func)(float *, int, void *), void *fdata)
+ float (*func)(float *, int, void *), void *fdata)
{
- int x;
- float res,e=0;
-
- for (x=0; x<ny; x++) {
- res = func(par, x, fdata) - y[x];
- if (dysq) /* weighted least-squares */
- e += res*res/dysq[x];
- else
- e += res*res;
- }
- return e;
+ int x;
+ float res,e=0;
+
+ for (x=0; x<ny; x++) {
+ res = func(par, x, fdata) - y[x];
+ if (dysq) /* weighted least-squares */
+ e += res*res/dysq[x];
+ else
+ e += res*res;
+ }
+ return e;
}
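In other words, error_func() returns chi^2 = sum_x (func(par, x, fdata) - y[x])^2 / dysq[x], or the plain sum of squared residuals when dysq is NULL. A small sketch with a hypothetical constant model:

/* Hypothetical one-parameter model: f(x) = par[0], independent of x. */
static float const_model(float *par, int x, void *fdata)
{
    (void)x; (void)fdata;
    return par[0];
}

static void error_func_example(void)
{
    float y[3] = {1.0f, 2.0f, 3.0f};
    float p[1] = {2.0f};
    /* residuals are {1, 0, -1}, so the unweighted chi-squared is 2 */
    float chisq = error_func(p, 3, y, NULL, const_model, NULL);
    (void)chisq;
}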
/* solve the equation Ax=b for a symmetric positive-definite matrix A,
using the Cholesky decomposition A=LL^T. The matrix L is passed in "l".
Elements above the diagonal are ignored.
-*/
+ */
void solve_axb_cholesky(int n, float l[n][n], float x[n], float b[n])
{
- int i,j;
- float sum;
-
- /* solve L*y = b for y (where x[] is used to store y) */
-
- for (i=0; i<n; i++) {
- sum = 0;
- for (j=0; j<i; j++)
- sum += l[i][j] * x[j];
- x[i] = (b[i] - sum)/l[i][i];
- }
-
- /* solve L^T*x = y for x (where x[] is used to store both y and x) */
-
- for (i=n-1; i>=0; i--) {
- sum = 0;
- for (j=i+1; j<n; j++)
- sum += l[j][i] * x[j];
- x[i] = (x[i] - sum)/l[i][i];
- }
+ int i,j;
+ float sum;
+
+ /* solve L*y = b for y (where x[] is used to store y) */
+
+ for (i=0; i<n; i++) {
+ sum = 0;
+ for (j=0; j<i; j++)
+ sum += l[i][j] * x[j];
+ x[i] = (b[i] - sum)/l[i][i];
+ }
+
+ /* solve L^T*x = y for x (where x[] is used to store both y and x) */
+
+ for (i=n-1; i>=0; i--) {
+ sum = 0;
+ for (j=i+1; j<n; j++)
+ sum += l[j][i] * x[j];
+ x[i] = (x[i] - sum)/l[i][i];
+ }
}
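A small numeric sketch (values and the wrapper function are illustrative): with L = {{2,0},{1,3}}, A = L*L^T = {{4,2},{2,10}}, and b = {2,5}, the forward/back substitution above yields x = {5/18, 4/9}.

static void solve_example(void)
{
    float l[2][2] = {{2.0f, 0.0f}, {1.0f, 3.0f}};  /* lower-triangular factor */
    float b[2]    = {2.0f, 5.0f};
    float x[2];
    solve_axb_cholesky(2, l, x, b);   /* x[0] = 5/18 ~ 0.278, x[1] = 4/9 ~ 0.444 */
}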
@@ -190,26 +200,26 @@ void solve_axb_cholesky(int n, float l[n][n], float x[n], float b[n])
its (lower-triangular) Cholesky factor in "l". Elements above the
diagonal are neither used nor modified. The same array may be passed
as both l and a, in which case the decomposition is performed in place.
-*/
+ */
int cholesky_decomp(int n, float l[n][n], float a[n][n])
{
- int i,j,k;
- float sum;
-
- for (i=0; i<n; i++) {
- for (j=0; j<i; j++) {
- sum = 0;
- for (k=0; k<j; k++)
- sum += l[i][k] * l[j][k];
- l[i][j] = (a[i][j] - sum)/l[j][j];
+ int i,j,k;
+ float sum;
+
+ for (i=0; i<n; i++) {
+ for (j=0; j<i; j++) {
+ sum = 0;
+ for (k=0; k<j; k++)
+ sum += l[i][k] * l[j][k];
+ l[i][j] = (a[i][j] - sum)/l[j][j];
+ }
+
+ sum = 0;
+ for (k=0; k<i; k++)
+ sum += l[i][k] * l[i][k];
+ sum = a[i][i] - sum;
+ if (sum<TOL) return 1; /* not positive-definite */
+ l[i][i] = sqrtf(sum);
}
-
- sum = 0;
- for (k=0; k<i; k++)
- sum += l[i][k] * l[i][k];
- sum = a[i][i] - sum;
- if (sum<TOL) return 1; /* not positive-definite */
- l[i][i] = sqrtf(sum);
- }
- return 0;
+ return 0;
}
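Tying the two helpers together, a sketch of the in-place decompose-then-solve pattern described in the comments above (matrix values and the wrapper function are illustrative): cholesky_decomp() returns 0 on success and 1 if the matrix is not positive-definite.

static void cholesky_example(void)
{
    /* symmetric positive-definite A; the upper triangle is ignored */
    float a[2][2] = {{4.0f, 2.0f}, {2.0f, 10.0f}};
    float b[2]    = {2.0f, 5.0f};
    float x[2];

    if (cholesky_decomp(2, a, a) == 0) {       /* in-place: a now holds L in its lower triangle */
        solve_axb_cholesky(2, a, x, b);        /* solves the original A*x = b, giving x = {5/18, 4/9} */
    }
}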