#ifndef math_methods_H__
#define math_methods_H__
#include "Matrix.h"
#include "Vector.h"
#include "utility.h"
#include <cmath>
#include <cstdlib>
#include <vector>
namespace meow {
/*!
 * @brief Run the \b RANSAC method to approximate the best solution.
*
* \b RANdom \b SAmple \b Consensus is an iterative method to estimate
* parameters of a mathematical model from a set of observed data which
* contains \c outliers. \n
 * Each iteration chooses from the data set a random subset of elements,
 * the smallest set that can form valid parameters, and then counts how
 * many elements of the whole data set are inliers. After iterating many
 * times, we take as the best solution the parameters with the most
 * inliers over all iterations.
*
* Assume:
 * - We need at least \f$ N \f$ elements to form valid parameters.
* - The probability of choosing a right element from data set each time is
* \f$ p_0 \f$.
 * - We want the probability that our solution is actually the best
 *   solution to be \f$ P \f$.
 * - We iterate \f$ M \f$ times.
* .
 * Then we can estimate the number of iterations \f$ M \f$ by requiring
 * that the probability that all \f$ M \f$ iterations fail to draw an
 * all-inlier sample be at most \f$ 1 - P \f$ :
* \f[
* \begin{aligned}
* & (1 - p_0^N)^M \leq(1 - P) \\
* \Rightarrow & M \log(1 - p_0^N) \leq \log(1 - P) \\
 *      \Rightarrow & M \geq  \frac{\log(1 - P)}{\log(1 - p_0^N)},~~
* \because (1-p_0^N<1 \Rightarrow \log(1-p_0^N)<0)
* \end{aligned}
* \f]
*
* So in this function we choose
* \f$ M = \lceil \frac{\log(1 - P)}{\log(1 - p_0^N)} \rceil \f$
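 *
 * For example, with \f$ N = 2,~p_0 = 0.5,~P = 0.99 \f$ this gives
 * \f$ M = \lceil \log(0.01) / \log(0.75) \rceil = 17 \f$ iterations.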
*
 * @param [in] data The whole data set
 * @param [in] w    Weighting function that returns a floating-point number
 *                  for a given set of parameters, measuring how good that
 *                  solution is. A negative number means the parameters are
 *                  invalid.
* @param [in] N \f$ N \f$, defined above
* @param [in] p0 \f$ p_0 \f$, defined above
* @param [in] P \f$ P \f$, defined above
 * @return The best solution found, or an empty vector if \c data has
 *         fewer than \f$ N \f$ elements.
*
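 * Example usage (a minimal sketch; \c Point2D and the line-fitting
 * weighting class are illustrative, not part of this header):
 * \code
 * struct Point2D { double x, y; };
 *
 * // Weight a 2-point sample by its inlier count: the number of data
 * // points within distance 0.1 of the line through the two sampled
 * // points. A negative return value marks a degenerate (invalid) sample.
 * struct LineWeight {
 *   double operator()(std::vector<Point2D> const& sample,
 *                     std::vector<Point2D> const& data) const {
 *     double dx = sample[1].x - sample[0].x;
 *     double dy = sample[1].y - sample[0].y;
 *     double len = std::sqrt(dx * dx + dy * dy);
 *     if (len < 1e-12) return -1.0;  // the two points coincide
 *     double inliers = 0.0;
 *     for (size_t i = 0; i < data.size(); i++) {
 *       double dist = std::fabs(dy * (data[i].x - sample[0].x) -
 *                               dx * (data[i].y - sample[0].y)) / len;
 *       if (dist < 0.1) inliers += 1.0;
 *     }
 *     return inliers;
 *   }
 * };
 *
 * // A line needs N = 2 points; assume an inlier ratio p0 = 0.5 and ask
 * // for confidence P = 0.99.
 * std::vector<Point2D> line = meow::ransac(data, LineWeight(), 2, 0.5, 0.99);
 * \endcode
 *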
* @author cat_leopard
*/
template<class Data, class WeightingClass>
inline std::vector<Data> ransac(std::vector<Data> const& data,
WeightingClass const& w,
size_t N,
double p0, double P) {
if (data.size() < N) {
return std::vector<Data>();
}
double ww = -1.0;
std::vector<Data> ret;
for (double count = ceil(log(1.0 - P) / log(1.0 - pow(p0, N)));
count > 0.0; count -= 1.0) {
    // Randomly pick N pairwise-distinct elements as the sample.
    std::vector<Data> sample;
    std::vector<int> index(N);
    for (size_t i = 0; i < N; i++) {
for (bool ok = false; !ok; ) {
index[i] = rand() % data.size();
ok = true;
for (size_t j = 0; ok && j < i; j++)
if (index[i] == index[j])
ok = false;
}
sample.push_back(data[index[i]]);
}
    double w_now = w(sample, data);
    if (w_now < 0) {
      // Invalid parameters: retry, but only refund half an iteration so
      // that repeatedly invalid samples cannot loop forever.
      count += 0.5;
      continue;
    }
    if (ww < w_now) {  // keep the best-weighted sample so far
      ret = sample;
      ww  = w_now;
    }
}
return ret;
}
/*!
* @brief Run the \b Levenberg-Marquardt method to solve a non-linear
* least squares problem.
*
* Assume:
* - The function we want to optimize is
* \f$ F: \mathbb{R} ^N \mapsto \mathbb{R}^M \f$
 * - We want to find the best solution \f$ v \f$ such that
 *   \f$ F(v)^T F(v) = 0 \f$. But given a threshold
 *   \f$ \epsilon \f$, it is enough to find a \f$ v \f$ such that
 *   \f$ F(v)^T F(v) < \epsilon \f$, which is much easier.
 * - The user gives an initial vector \f$ v_0 \f$
 * .
 * Then we iteratively find \f$ v_1, v_2, v_3, v_4 ... \f$ until some
 * vector \f$ v_k \f$ satisfies \f$ F(v_k)^T F(v_k) < \epsilon \f$ .
 * In each iteration we compute:
* \f[
 *   v_{i+1} = v_i - (J(v_i)^TJ(v_i)+\lambda I_{N\times N})^{-1} J(v_i)^T F(v_i)
* \f]
 * Where \f$ J(v) \f$ is the Jacobian matrix, defined below:
* \f[
* J(v) = \frac{d}{dv}F(v) =
* \left[ \begin{array}{ccccc}
* \frac{\partial F_1(v)}{\partial v_1} &
* \frac{\partial F_1(v)}{\partial v_2} &
* \frac{\partial F_1(v)}{\partial v_3} &
* ... &
* \frac{\partial F_1(v)}{\partial v_N} \\
* \frac{\partial F_2(v)}{\partial v_1} &
* \frac{\partial F_2(v)}{\partial v_2} &
* \frac{\partial F_2(v)}{\partial v_3} &
* ... &
* \frac{\partial F_2(v)}{\partial v_N} \\
* \frac{\partial F_3(v)}{\partial v_1} &
* \frac{\partial F_3(v)}{\partial v_2} &
* \frac{\partial F_3(v)}{\partial v_3} &
* ... &
* \frac{\partial F_3(v)}{\partial v_N} \\
* . & . & . & & . \\
* . & . & . & & . \\
* . & . & . & & . \\
* \frac{\partial F_M(v)}{\partial v_1} &
* \frac{\partial F_M(v)}{\partial v_2} &
* \frac{\partial F_M(v)}{\partial v_3} &
* ... &
* \frac{\partial F_M(v)}{\partial v_N} \\
* \end{array} \right]
* \f]
 * And \f$ \lambda \f$ is a damping parameter: a small \f$ \lambda \f$ makes
 * the step close to the Gauss-Newton method, while a large \f$ \lambda \f$
 * makes it close to (scaled) gradient descent.
 * @param [in] f    \f$ F \f$, a function object (see the sketch below)
 *                  that provides:
 *                  - \c residure(v) : returns \f$ F(v) \f$
 *                  - \c jacobian(v) : returns \f$ J(v) \f$
 *                  - \c diagonal(v, i) : returns the damping term
 *                    \f$ \lambda I_{N \times N} \f$ for the \c i -th attempt
 *                  - \c accept(r) : tells whether the residual \f$ r \f$ is
 *                    acceptable, so \f[
 *                    accept(r) = \begin{cases}
 *                    true  & if~r^T r < \epsilon \\
 *                    false & else
 *                    \end{cases}
 *                    \f]
 * @param [in] init \f$ v_0 \f$, the initial vector
 * @param [in] counter Maximum number of iterations, to prevent an
 *                  infinite loop (a negative value means no limit)
 * @return the best solution this function found.
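 *
 * A sketch of a conforming \c Function (the method names are taken from
 * the calls in the implementation below; \c Vector and \c Matrix are the
 * classes from \c Vector.h and \c Matrix.h ):
 * \code
 * struct MyProblem {
 *   meow::Vector<double> residure(meow::Vector<double> const& v) const;  // F(v)
 *   meow::Matrix<double> jacobian(meow::Vector<double> const& v) const;  // J(v)
 *   // Damping term (lambda I); i grows on each retry until the matrix
 *   // (J^T J + diagonal) becomes invertible.
 *   meow::Matrix<double> diagonal(meow::Vector<double> const& v, int i) const;
 *   bool accept(meow::Vector<double> const& r) const;  // r^T r < epsilon ?
 * };
 * \endcode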
*
* @author cat_leopard
*/
template<class Scalar, class Function>
inline Vector<Scalar> levenbergMarquardt(Function const& f,
Vector<Scalar> const& init,
int counter = -1) {
Vector<Scalar> ans(init), residure_v;
for ( ; counter != 0 && !f.accept(residure_v = f.residure(ans)); --counter) {
Matrix<Scalar> m_j (f.jacobian(ans));
Matrix<Scalar> m_jt(m_j.transpose());
    Matrix<Scalar> m(m_jt * m_j), M;  // J^T J, an N x N matrix
    // Enlarge the damping term until (J^T J + lambda I) is invertible.
    for (int i = 1; M.valid() == false; i++) {
      M = (m + f.diagonal(ans, i)).inverse();
}
ans = ans - M * m_jt * residure_v;
}
return ans;
}
/*!
 * @brief A training-style variant of the Levenberg-Marquardt method that
 *        adapts the damping factor \f$ \mu \f$ between iterations.
 *
 * Here \b f must provide \c residure(v) , \c jacobian() and \c identity() .
 * Each iteration retries its step with \f$ \mu \f$ multiplied by
 * \c mu_pow , at most \c retry_number times, until the squared error stops
 * increasing; the best answer found within \c counter iterations, or once
 * the squared error drops below \c er_max , is returned.
 */
template<class Scalar, class Function>
inline Vector<Scalar> levenbergMarquardtTraining(Function & f,
Vector<Scalar> const& init,
Scalar const& init_mu,
Scalar const& mu_pow,
Scalar const& er_max,
int retry_number,
int counter) {
  if (retry_number == 0) retry_number = 1;  // always attempt at least once
Vector<Scalar> ans_now(init), rv_now(f.residure(ans_now));
Vector<Scalar> ans_nxt , rv_nxt;
Scalar er_now(rv_now.length2());
Scalar er_nxt;
Vector<Scalar> ans_best(ans_now);
Scalar er_best ( er_now);
Matrix<Scalar> m_ja, m_jt, m, iden(f.identity());
Scalar mu(init_mu);
for ( ; counter != 0 && er_now > er_max; --counter) {
m_ja = f.jacobian();
m_jt = m_ja.transpose();
m = m_jt * m_ja;
    bool good = false;
    // Retry the step with an increasingly large damping factor until the
    // squared error stops growing, or give up after retry_number tries.
    for (int i = 0; i != retry_number; ++i, mu = mu * mu_pow) {
ans_nxt = ans_now + (m + iden * mu).inverse() * m_jt * rv_now.matrix();
rv_nxt = f.residure(ans_nxt);
er_nxt = rv_nxt.length2();
if (er_nxt <= er_now) {
good = true;
break;
}
}
    if (good) {
      mu = mu / mu_pow;  // reward a successful step with smaller damping
    }
    mu = inRange(0.0000001, 100.0, mu);  // keep mu within a sane range
    // Take the step even if no retry improved the error; ans_best below
    // still keeps the best answer seen so far.
    ans_now = ans_nxt;
    rv_now  = rv_nxt;
    er_now  = er_nxt;
if (er_now < er_best) {
ans_best = ans_now;
er_best = er_now;
}
}
return ans_best;
}
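// A hypothetical call, assuming a (stateful) problem object providing the
// residure(v), jacobian() and identity() interface described above, and a
// problem-specific initial guess v0; all numbers are illustrative only:
//
//   meow::Vector<double> best =
//       meow::levenbergMarquardtTraining(problem, v0,
//                                        1e-3,   // init_mu
//                                        10.0,   // mu_pow
//                                        1e-8,   // er_max
//                                        5,      // retry_number
//                                        100);   // counter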
} // meow
#endif // math_methods_H__