23 #if (AE_COMPILER==AE_MSVC) 24 #pragma warning(disable:4100) 25 #pragma warning(disable:4127) 26 #pragma warning(disable:4702) 27 #pragma warning(disable:4996) 53 _mincgstate_owner::_mincgstate_owner()
57 throw ap_error(
"ALGLIB: malloc error");
59 throw ap_error(
"ALGLIB: malloc error");
66 throw ap_error(
"ALGLIB: malloc error");
68 throw ap_error(
"ALGLIB: malloc error");
77 throw ap_error(
"ALGLIB: malloc error");
81 _mincgstate_owner::~_mincgstate_owner()
96 mincgstate::mincgstate() :
_mincgstate_owner() ,needf(p_struct->needf),needfg(p_struct->needfg),xupdated(p_struct->xupdated),
f(p_struct->
f),
g(&p_struct->
g),
x(&p_struct->
x)
124 throw ap_error(
"ALGLIB: malloc error");
126 throw ap_error(
"ALGLIB: malloc error");
133 throw ap_error(
"ALGLIB: malloc error");
135 throw ap_error(
"ALGLIB: malloc error");
144 throw ap_error(
"ALGLIB: malloc error");
771 return *(
reinterpret_cast<bool*
>(&result));
787 throw ap_error(
"ALGLIB: error in 'mincgoptimize()' (func is NULL)");
795 func(state.
x, state.
f, ptr);
801 rep(state.
x, state.
f, ptr);
804 throw ap_error(
"ALGLIB: error in 'mincgoptimize' (some derivatives were not provided?)");
822 throw ap_error(
"ALGLIB: error in 'mincgoptimize()' (grad is NULL)");
830 grad(state.
x, state.
f, state.
g, ptr);
836 rep(state.
x, state.
f, ptr);
839 throw ap_error(
"ALGLIB: error in 'mincgoptimize' (some derivatives were not provided?)");
884 alglib_impl::mincgresults(const_cast<alglib_impl::mincgstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::mincgreport*>(rep.
c_ptr()), &_alglib_env_state);
1024 throw ap_error(
"ALGLIB: malloc error");
1026 throw ap_error(
"ALGLIB: malloc error");
1033 throw ap_error(
"ALGLIB: malloc error");
1035 throw ap_error(
"ALGLIB: malloc error");
1044 throw ap_error(
"ALGLIB: malloc error");
1119 throw ap_error(
"ALGLIB: malloc error");
1121 throw ap_error(
"ALGLIB: malloc error");
1128 throw ap_error(
"ALGLIB: malloc error");
1130 throw ap_error(
"ALGLIB: malloc error");
1139 throw ap_error(
"ALGLIB: malloc error");
1158 minbleicreport::minbleicreport() :
_minbleicreport_owner() ,iterationscount(
p_struct->iterationscount),nfev(
p_struct->nfev),varidx(
p_struct->varidx),terminationtype(
p_struct->terminationtype),debugeqerr(
p_struct->debugeqerr),debugfs(
p_struct->debugfs),debugff(
p_struct->debugff),debugdx(
p_struct->debugdx),debugfeasqpits(
p_struct->debugfeasqpits),debugfeasgpaits(
p_struct->debugfeasgpaits),inneriterationscount(
p_struct->inneriterationscount),outeriterationscount(
p_struct->outeriterationscount)
1162 minbleicreport::minbleicreport(
const minbleicreport &rhs):
_minbleicreport_owner(rhs) ,
iterationscount(
p_struct->
iterationscount),
nfev(
p_struct->
nfev),
varidx(
p_struct->
varidx),
terminationtype(
p_struct->
terminationtype),
debugeqerr(
p_struct->
debugeqerr),
debugfs(
p_struct->
debugfs),
debugff(
p_struct->
debugff),
debugdx(
p_struct->
debugdx),
debugfeasqpits(
p_struct->
debugfeasqpits),
debugfeasgpaits(
p_struct->
debugfeasgpaits),
inneriterationscount(
p_struct->
inneriterationscount),
outeriterationscount(
p_struct->
outeriterationscount)
1485 alglib_impl::minbleicsetbc(const_cast<alglib_impl::minbleicstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.
c_ptr()), &_alglib_env_state);
1537 alglib_impl::minbleicsetlc(const_cast<alglib_impl::minbleicstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.
c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.
c_ptr()), k, &_alglib_env_state);
1588 throw ap_error(
"Error while calling 'minbleicsetlc': looks like one of arguments has wrong size");
1593 alglib_impl::minbleicsetlc(const_cast<alglib_impl::minbleicstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.
c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.
c_ptr()), k, &_alglib_env_state);
1881 return *(
reinterpret_cast<bool*
>(&result));
1891 void (*func)(
const real_1d_array &x,
double &func,
void *ptr),
1892 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
1897 throw ap_error(
"ALGLIB: error in 'minbleicoptimize()' (func is NULL)");
1905 func(state.
x, state.
f, ptr);
1911 rep(state.
x, state.
f, ptr);
1914 throw ap_error(
"ALGLIB: error in 'minbleicoptimize' (some derivatives were not provided?)");
1927 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
1932 throw ap_error(
"ALGLIB: error in 'minbleicoptimize()' (grad is NULL)");
1940 grad(state.
x, state.
f, state.
g, ptr);
1946 rep(state.
x, state.
f, ptr);
1949 throw ap_error(
"ALGLIB: error in 'minbleicoptimize' (some derivatives were not provided?)");
2131 throw ap_error(
"ALGLIB: malloc error");
2133 throw ap_error(
"ALGLIB: malloc error");
2140 throw ap_error(
"ALGLIB: malloc error");
2142 throw ap_error(
"ALGLIB: malloc error");
2151 throw ap_error(
"ALGLIB: malloc error");
2198 throw ap_error(
"ALGLIB: malloc error");
2200 throw ap_error(
"ALGLIB: malloc error");
2207 throw ap_error(
"ALGLIB: malloc error");
2209 throw ap_error(
"ALGLIB: malloc error");
2218 throw ap_error(
"ALGLIB: malloc error");
2845 return *(
reinterpret_cast<bool*
>(&result));
2855 void (*func)(
const real_1d_array &x,
double &func,
void *ptr),
2856 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
2861 throw ap_error(
"ALGLIB: error in 'minlbfgsoptimize()' (func is NULL)");
2869 func(state.
x, state.
f, ptr);
2875 rep(state.
x, state.
f, ptr);
2878 throw ap_error(
"ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)");
2891 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
2896 throw ap_error(
"ALGLIB: error in 'minlbfgsoptimize()' (grad is NULL)");
2904 grad(state.
x, state.
f, state.
g, ptr);
2910 rep(state.
x, state.
f, ptr);
2913 throw ap_error(
"ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)");
3099 throw ap_error(
"ALGLIB: malloc error");
3101 throw ap_error(
"ALGLIB: malloc error");
3108 throw ap_error(
"ALGLIB: malloc error");
3110 throw ap_error(
"ALGLIB: malloc error");
3119 throw ap_error(
"ALGLIB: malloc error");
3191 throw ap_error(
"ALGLIB: malloc error");
3193 throw ap_error(
"ALGLIB: malloc error");
3200 throw ap_error(
"ALGLIB: malloc error");
3202 throw ap_error(
"ALGLIB: malloc error");
3211 throw ap_error(
"ALGLIB: malloc error");
3399 throw ap_error(
"'a' parameter is not symmetric matrix");
3742 alglib_impl::minqpsetbc(const_cast<alglib_impl::minqpstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.
c_ptr()), &_alglib_env_state);
3786 alglib_impl::minqpsetlc(const_cast<alglib_impl::minqpstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.
c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.
c_ptr()), k, &_alglib_env_state);
3829 throw ap_error(
"Error while calling 'minqpsetlc': looks like one of arguments has wrong size");
3834 alglib_impl::minqpsetlc(const_cast<alglib_impl::minqpstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_matrix*>(c.
c_ptr()), const_cast<alglib_impl::ae_vector*>(ct.
c_ptr()), k, &_alglib_env_state);
3922 alglib_impl::minqpresults(const_cast<alglib_impl::minqpstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::minqpreport*>(rep.
c_ptr()), &_alglib_env_state);
3970 throw ap_error(
"ALGLIB: malloc error");
3972 throw ap_error(
"ALGLIB: malloc error");
3979 throw ap_error(
"ALGLIB: malloc error");
3981 throw ap_error(
"ALGLIB: malloc error");
3990 throw ap_error(
"ALGLIB: malloc error");
4009 minlmstate::minlmstate() :
_minlmstate_owner() ,needf(
p_struct->needf),needfg(
p_struct->needfg),needfgh(
p_struct->needfgh),needfi(
p_struct->needfi),needfij(
p_struct->needfij),xupdated(
p_struct->xupdated),
f(
p_struct->
f),fi(&
p_struct->fi),
g(&
p_struct->
g),h(&
p_struct->h),
j(&
p_struct->
j),x(&
p_struct->x)
4013 minlmstate::minlmstate(
const minlmstate &rhs):
_minlmstate_owner(rhs) ,
needf(
p_struct->
needf),
needfg(
p_struct->
needfg),
needfgh(
p_struct->
needfgh),
needfi(
p_struct->
needfi),
needfij(
p_struct->
needfij),
xupdated(
p_struct->
xupdated),
f(
p_struct->
f),
fi(&
p_struct->
fi),
g(&
p_struct->
g),
h(&
p_struct->
h),
j(&
p_struct->
j),x(&
p_struct->x)
4056 throw ap_error(
"ALGLIB: malloc error");
4058 throw ap_error(
"ALGLIB: malloc error");
4065 throw ap_error(
"ALGLIB: malloc error");
4067 throw ap_error(
"ALGLIB: malloc error");
4076 throw ap_error(
"ALGLIB: malloc error");
4787 alglib_impl::minlmsetbc(const_cast<alglib_impl::minlmstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.
c_ptr()), &_alglib_env_state);
4870 return *(
reinterpret_cast<bool*
>(&result));
4881 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
4886 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (fvec is NULL)");
4894 fvec(state.
x, state.
fi, ptr);
4900 rep(state.
x, state.
f, ptr);
4903 throw ap_error(
"ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
4917 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
4922 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (fvec is NULL)");
4924 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (jac is NULL)");
4932 fvec(state.
x, state.
fi, ptr);
4937 jac(state.
x, state.
fi, state.
j, ptr);
4943 rep(state.
x, state.
f, ptr);
4946 throw ap_error(
"ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
4958 void (*func)(
const real_1d_array &x,
double &func,
void *ptr),
4961 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
4966 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (func is NULL)");
4968 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (grad is NULL)");
4970 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (hess is NULL)");
4978 func(state.
x, state.
f, ptr);
4983 grad(state.
x, state.
f, state.
g, ptr);
4988 hess(state.
x, state.
f, state.
g, state.
h, ptr);
4994 rep(state.
x, state.
f, ptr);
4997 throw ap_error(
"ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
5009 void (*func)(
const real_1d_array &x,
double &func,
void *ptr),
5011 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
5016 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (func is NULL)");
5018 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (jac is NULL)");
5026 func(state.
x, state.
f, ptr);
5031 jac(state.
x, state.
fi, state.
j, ptr);
5037 rep(state.
x, state.
f, ptr);
5040 throw ap_error(
"ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
5052 void (*func)(
const real_1d_array &x,
double &func,
void *ptr),
5055 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
5060 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (func is NULL)");
5062 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (grad is NULL)");
5064 throw ap_error(
"ALGLIB: error in 'minlmoptimize()' (jac is NULL)");
5072 func(state.
x, state.
f, ptr);
5077 grad(state.
x, state.
f, state.
g, ptr);
5082 jac(state.
x, state.
fi, state.
j, ptr);
5088 rep(state.
x, state.
f, ptr);
5091 throw ap_error(
"ALGLIB: error in 'minlmoptimize' (some derivatives were not provided?)");
5123 alglib_impl::minlmresults(const_cast<alglib_impl::minlmstate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::minlmreport*>(rep.
c_ptr()), &_alglib_env_state);
5418 throw ap_error(
"ALGLIB: malloc error");
5420 throw ap_error(
"ALGLIB: malloc error");
5427 throw ap_error(
"ALGLIB: malloc error");
5429 throw ap_error(
"ALGLIB: malloc error");
5438 throw ap_error(
"ALGLIB: malloc error");
5485 throw ap_error(
"ALGLIB: malloc error");
5487 throw ap_error(
"ALGLIB: malloc error");
5494 throw ap_error(
"ALGLIB: malloc error");
5496 throw ap_error(
"ALGLIB: malloc error");
5505 throw ap_error(
"ALGLIB: malloc error");
5647 alglib_impl::minasacreate(n, const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.
c_ptr()), const_cast<alglib_impl::minasastate*>(state.
c_ptr()), &_alglib_env_state);
5669 throw ap_error(
"Error while calling 'minasacreate': looks like one of arguments has wrong size");
5674 alglib_impl::minasacreate(n, const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.
c_ptr()), const_cast<alglib_impl::minasastate*>(state.
c_ptr()), &_alglib_env_state);
5790 return *(
reinterpret_cast<bool*
>(&result));
5801 void (*rep)(
const real_1d_array &x,
double func,
void *ptr),
5806 throw ap_error(
"ALGLIB: error in 'minasaoptimize()' (grad is NULL)");
5814 grad(state.
x, state.
f, state.
g, ptr);
5820 rep(state.
x, state.
f, ptr);
5823 throw ap_error(
"ALGLIB: error in 'minasaoptimize' (some derivatives were not provided?)");
5848 alglib_impl::minasaresults(const_cast<alglib_impl::minasastate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::minasareport*>(rep.
c_ptr()), &_alglib_env_state);
5894 alglib_impl::minasarestartfrom(const_cast<alglib_impl::minasastate*>(state.
c_ptr()), const_cast<alglib_impl::ae_vector*>(x.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndl.
c_ptr()), const_cast<alglib_impl::ae_vector*>(bndu.
c_ptr()), &_alglib_env_state);
5914 static ae_int_t cqmodels_newtonrefinementits = 3;
5915 static ae_bool cqmodels_cqmrebuild(convexquadraticmodel* s,
5917 static void cqmodels_cqmsolveea(convexquadraticmodel* s,
5923 static ae_int_t snnls_iterativerefinementits = 3;
5931 static void sactivesets_constraineddescent(sactiveset* state,
5938 static void sactivesets_reactivateconstraints(sactiveset* state,
5944 static ae_int_t mincg_rscountdownlen = 10;
5945 static double mincg_gtol = 0.3;
5947 static void mincg_preconditionedmultiply(
mincgstate* state,
5952 static double mincg_preconditionedmultiply2(
mincgstate* state,
5958 static void mincg_mincginitinternal(
ae_int_t n,
5964 static double minbleic_gtol = 0.4;
5965 static double minbleic_maxnonmonotoniclen = 1.0E-5;
5966 static double minbleic_initialdecay = 0.5;
5967 static double minbleic_mindecay = 0.1;
5968 static double minbleic_decaycorrection = 0.8;
5969 static double minbleic_penaltyfactor = 100;
5970 static void minbleic_clearrequestfields(
minbleicstate* state,
5972 static void minbleic_minbleicinitinternal(
ae_int_t n,
5977 static void minbleic_updateestimateofgoodstep(
double* estimate,
5982 static double minlbfgs_gtol = 0.4;
5983 static void minlbfgs_clearrequestfields(
minlbfgsstate* state,
5987 static ae_int_t minqp_maxlagrangeits = 10;
5988 static ae_int_t minqp_maxbadnewtonits = 7;
5989 static double minqp_penaltyfactor = 100.0;
5994 static double minqp_minqpmodelvalue(convexquadraticmodel*
a,
6001 convexquadraticmodel* a,
6011 static double minlm_lambdaup = 2.0;
6012 static double minlm_lambdadown = 0.33;
6013 static double minlm_suspiciousnu = 16;
6014 static ae_int_t minlm_smallmodelage = 3;
6015 static ae_int_t minlm_additers = 5;
6016 static void minlm_lmprepare(
ae_int_t n,
6022 static ae_bool minlm_increaselambda(
double* lambdav,
6025 static void minlm_decreaselambda(
double* lambdav,
6028 static double minlm_boundedscaledantigradnorm(
minlmstate* state,
6036 static double mincomp_stpmin = 1.0E-300;
6037 static double mincomp_gtol = 0.3;
6038 static double mincomp_gpaftol = 0.0001;
6039 static double mincomp_gpadecay = 0.5;
6040 static double mincomp_asarho = 0.5;
6041 static double mincomp_asaboundedantigradnorm(
minasastate* state,
6046 static void mincomp_clearrequestfields(
minasastate* state,
6069 *threshold = 10*(
ae_fabs(f, _state)+1);
6099 for(i=0; i<=n-1; i++)
6150 for(i=0; i<=nmain-1; i++)
6165 for(i=0; i<=nslack-1; i++)
6223 for(i=0; i<=nmain-1; i++)
6235 for(i=0; i<=nslack-1; i++)
6318 double* valuetofreeze,
6326 *variabletofreeze = 0;
6331 *variabletofreeze = -1;
6333 *maxsteplen = initval;
6334 for(i=0; i<=nmain-1; i++)
6339 prevmax = *maxsteplen;
6343 *variabletofreeze =
i;
6350 prevmax = *maxsteplen;
6354 *variabletofreeze =
i;
6359 for(i=0; i<=nslack-1; i++)
6364 prevmax = *maxsteplen;
6368 *variabletofreeze = nmain+
i;
6373 if(
ae_fp_eq(*maxsteplen,initval) )
6436 double valuetofreeze,
6451 if( variabletofreeze>=0&&
ae_fp_eq(steptaken,maxsteplen) )
6453 x->
ptr.
p_double[variabletofreeze] = valuetofreeze;
6455 for(i=0; i<=nmain-1; i++)
6466 for(i=0; i<=nslack-1; i++)
6478 for(i=0; i<=nmain-1; i++)
6481 wasactivated = wasactivated||variabletofreeze==
i;
6487 for(i=0; i<=nslack-1; i++)
6490 wasactivated = wasactivated||variabletofreeze==nmain+
i;
6558 for(i=0; i<=nmain+nslack-1; i++)
6562 scalednorm =
ae_sqrt(scalednorm, _state);
6563 for(i=0; i<=nmain-1; i++)
6573 for(i=0; i<=nslack-1; i++)
6633 for(i=0; i<=nmain-1; i++)
6652 for(i=0; i<=nslack-1; i++)
6729 double armijobeststep;
6730 double armijobestfeas;
6743 ae_bool werechangesinconstraints;
6789 maxitswithintolerance = 3;
6818 for(i=0; i<=k-1; i++)
6821 for(j=0; j<=nmain+nslack-1; j++)
6846 for(i=0; i<=nmain+nslack-1; i++)
6850 for(j=0; j<=k-1; j++)
6867 itswithintolerance = 0;
6877 for(i=0; i<=k-1; i++)
6892 for(j=0; j<=nmain+nslack-1; j++)
6897 feaserr = feaserr+
ae_sqr(v, _state);
6900 feaserr =
ae_sqrt(feaserr, _state);
6955 armijobeststep = 0.0;
6956 armijobestfeas = 0.0;
6957 for(i=0; i<=nmain+nslack-1; i++)
6961 for(i=0; i<=k-1; i++)
6965 armijobestfeas = armijobestfeas+
ae_sqr(v, _state);
6968 armijobestfeas =
ae_sqrt(armijobestfeas, _state);
6969 for(i=0; i<=nmain-1; i++)
6980 for(i=0; i<=nslack-1; i++)
6988 for(i=0; i<=nmain+nslack-1; i++)
7008 calculatestepbound(x, &newtonstep, 1.0, bndl, havebndl, bndu, havebndu, nmain, nslack, &vartofreeze, &valtofreeze, &maxsteplen, _state);
7009 if( vartofreeze>=0&&
ae_fp_eq(maxsteplen,0) )
7017 if( vartofreeze>=0 )
7019 armijostep =
ae_minreal(1.0, maxsteplen, _state);
7031 for(i=0; i<=k-1; i++)
7035 feaserr = feaserr+
ae_sqr(v, _state);
7037 feaserr =
ae_sqrt(feaserr, _state);
7042 armijobestfeas = feaserr;
7043 armijobeststep = armijostep;
7044 armijostep = 2.0*armijostep;
7053 for(i=0; i<=nmain-1; i++)
7066 nactive = nactive+1;
7069 for(i=0; i<=nslack-1; i++)
7078 nactive = nactive+1;
7081 nfree = nmain+nslack-nactive;
7091 tagsortbuf(&activeconstraints, nmain+nslack, &p1, &p2, &buf, _state);
7092 for(i=0; i<=k-1; i++)
7094 for(j=0; j<=nmain+nslack-1; j++)
7099 for(j=0; j<=nmain+nslack-1; j++)
7103 for(j=0; j<=nmain+nslack-1; j++)
7109 for(i=0; i<=k-1; i++)
7125 for(i=0; i<=nfree-1; i++)
7129 for(i=0; i<=k-1; i++)
7134 for(i=0; i<=k-1; i++)
7153 if( !
rmatrixsvd(&a, k, nfree, 0, 1, 2, &w, &u, &vt, _state) )
7159 for(i=0; i<=nsvd-1; i++)
7164 for(i=0; i<=nsvd-1; i++)
7180 for(i=0; i<=nmain+nslack-1; i++)
7184 for(i=0; i<=nsvd-1; i++)
7189 for(j=nmain+nslack-1; j>=0; j--)
7214 calculatestepbound(x, &newtonstep, 1.0, bndl, havebndl, bndu, havebndu, nmain, nslack, &vartofreeze, &valtofreeze, &maxsteplen, _state);
7215 if( vartofreeze>=0&&
ae_fp_eq(maxsteplen,0) )
7224 if( vartofreeze>=0 )
7234 postprocessboundedstep(&xn, x, bndl, havebndl, bndu, havebndu, nmain, nslack, vartofreeze, valtofreeze, v, maxsteplen, _state);
7246 armijobeststep = 0.0;
7247 armijobestfeas = 0.0;
7248 for(i=0; i<=k-1; i++)
7252 armijobestfeas = armijobestfeas+
ae_sqr(v, _state);
7254 armijobestfeas =
ae_sqrt(armijobestfeas, _state);
7255 armijostep = 1-maxsteplen;
7256 for(j=0; j<=maxarmijoruns-1; j++)
7262 for(i=0; i<=k-1; i++)
7266 feaserr = feaserr+
ae_sqr(v, _state);
7268 feaserr =
ae_sqrt(feaserr, _state);
7271 armijobestfeas = feaserr;
7272 armijobeststep = armijostep;
7274 armijostep = 0.5*armijostep;
7299 for(i=0; i<=k-1; i++)
7306 feasold =
ae_sqrt(feasold, _state);
7307 feasnew =
ae_sqrt(feasnew, _state);
7340 werechangesinconstraints =
ae_false;
7341 for(gparuns=1; gparuns<=
k; gparuns++)
7348 for(i=0; i<=nmain+nslack-1; i++)
7352 for(i=0; i<=k-1; i++)
7360 feaserr = feaserr+
ae_sqr(v, _state);
7369 filterdirection(&pg, x, bndl, havebndl, bndu, havebndu, &s, nmain, nslack, 1.0E-9, _state);
7370 for(i=0; i<=nmain+nslack-1; i++)
7387 feaserr =
ae_sqrt(feaserr, _state);
7388 pgnorm =
ae_sqrt(pgnorm, _state);
7401 for(i=0; i<=k-1; i++)
7404 vd = vd+
ae_sqr(v, _state);
7412 calculatestepbound(x, &pg, -1.0, bndl, havebndl, bndu, havebndu, nmain, nslack, &vartofreeze, &valtofreeze, &maxsteplen, _state);
7413 if( vartofreeze>=0&&
ae_fp_eq(maxsteplen,0) )
7419 if( vartofreeze>=0 )
7429 postprocessboundedstep(&xn, x, bndl, havebndl, bndu, havebndu, nmain, nslack, vartofreeze, valtofreeze, v, maxsteplen, _state);
7435 werechangesinconstraints = werechangesinconstraints||
numberofchangedconstraints(&xn, x, bndl, havebndl, bndu, havebndu, nmain, nslack, _state)>0;
7437 *gpaits = *gpaits+1;
7438 if( !werechangesinconstraints )
7454 for(i=0; i<=k-1; i++)
7458 feaserr = feaserr+
ae_sqr(v, _state);
7460 feaserr =
ae_sqrt(feaserr, _state);
7463 itswithintolerance = itswithintolerance+1;
7467 itswithintolerance = 0;
7469 if( !werechangesinconstraints||itswithintolerance>=maxitswithintolerance )
7475 itscount = itscount+1;
7524 h = 0.5*f0+0.125*df0+0.5*f1-0.125*df1;
7525 dh = -1.5*f0-0.25*df0+1.5*f1-0.25*df1;
7582 for(i=0; i<=n-1; i++)
7622 for(i=0; i<=s->
n-1; i++)
7624 for(j=i; j<=s->
n-1; j++)
7674 for(i=0; i<=n-1; i++)
7676 for(j=0; j<=n-1; j++)
7683 for(i=0; i<=s->
n-1; i++)
7718 for(i=0; i<=s->
n-1; i++)
7764 for(i=0; i<=s->
n-1; i++)
7789 ae_assert(k>=0,
"CQMSetQ: K<0", _state);
7815 for(i=0; i<=s->
k-1; i++)
7817 for(j=0; j<=s->
n-1; j++)
7847 ae_assert(x->
cnt>=s->
n,
"CQMSetActiveSet: Length(X)<N", _state);
7848 ae_assert(activeset->
cnt>=s->
n,
"CQMSetActiveSet: Length(ActiveSet)<N", _state);
7849 for(i=0; i<=s->
n-1; i++)
7890 for(i=0; i<=n-1; i++)
7892 for(j=0; j<=n-1; j++)
7900 for(i=0; i<=n-1; i++)
7911 for(i=0; i<=s->
k-1; i++)
7921 for(i=0; i<=s->
n-1; i++)
7970 for(i=0; i<=n-1; i++)
7972 for(j=0; j<=n-1; j++)
7982 for(i=0; i<=n-1; i++)
8002 for(i=0; i<=s->
k-1; i++)
8006 for(j=0; j<=n-1; j++)
8020 for(i=0; i<=s->
n-1; i++)
8029 *noise = n*(*noise);
8059 for(i=0; i<=n-1; i++)
8069 for(i=0; i<=n-1; i++)
8072 for(j=0; j<=n-1; j++)
8081 for(i=0; i<=n-1; i++)
8092 for(i=0; i<=s->
k-1; i++)
8103 for(i=0; i<=n-1; i++)
8135 for(i=0; i<=n-1; i++)
8137 for(j=0; j<=n-1; j++)
8145 for(i=0; i<=n-1; i++)
8179 for(i=0; i<=n-1; i++)
8185 for(i=0; i<=n-1; i++)
8193 for(i=0; i<=n-1; i++)
8232 if( !cqmodels_cqmrebuild(s, _state) )
8248 for(i=0; i<=n-1; i++)
8272 for(itidx=0; itidx<=cqmodels_newtonrefinementits-1; itidx++)
8281 for(i=0; i<=n-1; i++)
8296 cqmodels_cqmsolveea(s, &s->
txc, &s->
tmp0, _state);
8307 cqmodels_cqmsolveea(s, &s->
tmp1, &s->
tmp0, _state);
8308 for(i=0; i<=k-1; i++)
8314 for(i=0; i<=nfree-1; i++)
8318 for(i=0; i<=k-1; i++)
8323 cqmodels_cqmsolveea(s, &s->
tmp1, &s->
tmp0, _state);
8332 for(i=0; i<=n-1; i++)
8385 for(i=0; i<=n-1; i++)
8440 if( !cqmodels_cqmrebuild(s, _state) )
8442 result = _state->
v_nan;
8452 for(i=0; i<=n-1; i++)
8456 ae_assert(j<nfree,
"CQMDebugConstrainedEvalT: internal error", _state);
8472 for(i=0; i<=nfree-1; i++)
8474 for(j=0; j<=nfree-1; j++)
8486 for(i=0; i<=nfree-1; i++)
8491 for(i=0; i<=nfree-1; i++)
8495 result = result+s->
tq0;
8502 for(i=0; i<=s->
k-1; i++)
8505 for(j=0; j<=nfree-1; j++)
8509 result = result+0.5*
ae_sqr(v, _state);
8511 for(i=0; i<=nfree-1; i++)
8515 result = result+s->
tk0;
8521 for(i=0; i<=n-1; i++)
8564 if( !cqmodels_cqmrebuild(s, _state) )
8566 result = _state->
v_nan;
8576 for(i=0; i<=n-1; i++)
8580 ae_assert(j<nfree,
"CQMDebugConstrainedEvalE: internal error", _state);
8596 for(i=0; i<=nfree-1; i++)
8599 for(j=i; j<=nfree-1; j++)
8603 result = result+0.5*
ae_sqr(v, _state);
8612 for(i=0; i<=nfree-1; i++)
8621 for(i=0; i<=s->
k-1; i++)
8624 for(j=0; j<=nfree-1; j++)
8628 result = result+0.5*
ae_sqr(v, _state);
8634 for(i=0; i<=nfree-1; i++)
8642 result = result+s->
ec;
8690 for(i=0; i<=n-1; i++)
8698 for(i=0; i<=n-1; i++)
8758 for(i=0; i<=n-1; i++)
8762 for(j=0; j<=n-1; j++)
8830 for(i=0; i<=n-1; i++)
8836 rmatrixmv(s->
nfree, n-s->
nfree, &s->
tq2dense, 0, s->
nfree, 0, &s->
txc, s->
nfree, &s->
tq1, 0, _state);
8843 for(i=s->
nfree; i<=n-1; i++)
8845 for(j=s->
nfree; j<=n-1; j++)
8876 for(i=0; i<=n-1; i++)
8888 for(i=0; i<=n-1; i++)
8937 for(j=0; j<=n-1; j++)
8939 for(i=0; i<=k-1; i++)
8957 for(i=0; i<=k-1; i++)
8961 for(j=0; j<=n-1; j++)
8965 for(i=0; i<=k-1; i++)
8974 for(i=0; i<=k-1; i++)
8981 for(i=0; i<=k-1; i++)
8990 for(i=0; i<=k-1; i++)
9001 for(i=0; i<=k-1; i++)
9014 for(i=0; i<=k-1; i++)
9029 for(i=0; i<=n-1; i++)
9056 for(i=0; i<=nfree-1; i++)
9058 for(j=i; j<=nfree-1; j++)
9076 for(i=0; i<=nfree-1; i++)
9093 for(i=0; i<=k-1; i++)
9095 for(j=0; j<=nfree-1; j++)
9124 rmatrixrighttrsm(k, nfree, &s->
ecadense, 0, 0,
ae_true,
ae_false, 0, &s->
tmp2, 0, 0, _state);
9128 for(i=0; i<=k-1; i++)
9130 for(j=0; j<=nfree-1; j++)
9136 for(i=0; i<=k-1; i++)
9138 for(j=0; j<=k-1; j++)
9144 rmatrixsyrk(k, nfree, 1.0, &s->
tmp2, 0, 0, 0, 1.0, &s->
eccm, 0, 0,
ae_true, _state);
9158 for(i=0; i<=nfree-1; i++)
9163 for(i=nfree; i<=n-1; i++)
9220 for(i=0; i<=s->
nfree-1; i++)
9497 ae_assert(nd>=0,
"SNNLSSetProblem: ND<0", _state);
9498 ae_assert(ns>=0,
"SNNLSSetProblem: NS<0", _state);
9499 ae_assert(nr>0,
"SNNLSSetProblem: NR<=0", _state);
9500 ae_assert(ns<=nr, "SNNLSSetProblem: NS>NR
", _state); 9501 ae_assert(a->rows>=nr||nd==0, "SNNLSSetProblem: rows(A)<NR
", _state); 9502 ae_assert(a->cols>=nd, "SNNLSSetProblem: cols(A)<ND
", _state); 9503 ae_assert(b->cnt>=nr, "SNNLSSetProblem:
length(B)<NR
", _state); 9504 ae_assert(apservisfinitematrix(a, nr, nd, _state), "SNNLSSetProblem: A contains
INF/NAN
", _state); 9505 ae_assert(isfinitevector(b, nr, _state), "SNNLSSetProblem: B contains
INF/NAN
", _state); 9515 rmatrixsetlengthatleast(&s->densea, nr, nd, _state); 9516 for(i=0; i<=nr-1; i++) 9518 ae_v_move(&s->densea.ptr.pp_double[i][0], 1, &a->ptr.pp_double[i][0], 1, ae_v_len(0,nd-1)); 9521 rvectorsetlengthatleast(&s->b, nr, _state); 9522 ae_v_move(&s->b.ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,nr-1)); 9523 bvectorsetlengthatleast(&s->nnc, ns+nd, _state); 9524 for(i=0; i<=ns+nd-1; i++) 9526 s->nnc.ptr.p_bool[i] = ae_true; 9531 /************************************************************************* 9532 This subroutine drops non-negativity constraint from the problem set by 9533 SNNLSSetProblem() call. This function must be called AFTER problem is set, 9534 because each SetProblem() call resets constraints to their default state 9535 (all constraints are present). 9538 S - SNNLS solver, must be initialized with SNNLSInit() call, 9539 problem must be set with SNNLSSetProblem() call. 9540 Idx - constraint index, 0<=IDX<NS+ND 9543 Copyright 10.10.2012 by Bochkanov Sergey 9544 *************************************************************************/ 9545 void snnlsdropnnc(snnlssolver* s, ae_int_t idx, ae_state *_state) 9549 ae_assert(idx>=0, "SNNLSDropNNC: Idx<0
", _state); 9550 ae_assert(idx<s->ns+s->nd, "SNNLSDropNNC: Idx>=NS+ND
", _state); 9551 s->nnc.ptr.p_bool[idx] = ae_false; 9555 /************************************************************************* 9556 This subroutine is used to solve NNLS problem. 9559 S - SNNLS solver, must be initialized with SNNLSInit() call and 9560 problem must be set up with SNNLSSetProblem() call. 9561 X - possibly preallocated buffer, automatically resized if needed 9564 X - array[NS+ND], solution 9567 1. You can have NS+ND=0, solver will correctly accept such combination 9568 and return empty array as problem solution. 9570 2. Internal field S.DebugFLOPS contains rough estimate of FLOPs used 9571 to solve problem. It can be used for debugging purposes. This field 9575 Copyright 10.10.2012 by Bochkanov Sergey 9576 *************************************************************************/ 9577 void snnlssolve(snnlssolver* s, 9578 /* Real */ ae_vector* x, 9589 ae_bool terminationneeded; 9595 double noisetolerance; 9600 ae_bool wasactivation; 9615 s->debugflops = 0.0; 9618 * Handle special cases: 9628 rvectorsetlengthatleast(x, ns, _state); 9629 for(i=0; i<=ns-1; i++) 9631 x->ptr.p_double[i] = s->b.ptr.p_double[i]; 9632 if( s->nnc.ptr.p_bool[i] ) 9634 x->ptr.p_double[i] = ae_maxreal(x->ptr.p_double[i], 0.0, _state); 9641 * Main cycle of BLEIC-SNNLS algorithm. 9642 * Below we assume that ND>0. 9644 rvectorsetlengthatleast(x, ns+nd, _state); 9645 rvectorsetlengthatleast(&s->xn, ns+nd, _state); 9646 rvectorsetlengthatleast(&s->g, ns+nd, _state); 9647 rvectorsetlengthatleast(&s->d, ns+nd, _state); 9648 rvectorsetlengthatleast(&s->r, nr, _state); 9649 rvectorsetlengthatleast(&s->diagaa, nd, _state); 9650 rvectorsetlengthatleast(&s->dx, ns+nd, _state); 9651 for(i=0; i<=ns+nd-1; i++) 9653 x->ptr.p_double[i] = 0.0; 9655 eps = 2*ae_machineepsilon; 9656 noisetolerance = 10.0; 9657 lambdav = 1.0E6*ae_machineepsilon; 9663 * Phase 1: perform steepest descent step. 
9665 * TerminationNeeded control variable is set on exit from this loop: 9666 * * TerminationNeeded=False in case we have to proceed to Phase 2 (Newton step) 9667 * * TerminationNeeded=True in case we found solution (step along projected gradient is small enough) 9672 * NOTE 1. It is assumed that initial point X is feasible. This feasibility 9673 * is retained during all iterations. 9675 terminationneeded = ae_false; 9680 * Calculate gradient G and constrained descent direction D 9682 for(i=0; i<=nr-1; i++) 9684 v = ae_v_dotproduct(&s->densea.ptr.pp_double[i][0], 1, &x->ptr.p_double[ns], 1, ae_v_len(0,nd-1)); 9687 v = v+x->ptr.p_double[i]; 9689 s->r.ptr.p_double[i] = v-s->b.ptr.p_double[i]; 9691 for(i=0; i<=ns-1; i++) 9693 s->g.ptr.p_double[i] = s->r.ptr.p_double[i]; 9695 for(i=ns; i<=ns+nd-1; i++) 9697 s->g.ptr.p_double[i] = 0.0; 9699 for(i=0; i<=nr-1; i++) 9701 v = s->r.ptr.p_double[i]; 9702 ae_v_addd(&s->g.ptr.p_double[ns], 1, &s->densea.ptr.pp_double[i][0], 1, ae_v_len(ns,ns+nd-1), v); 9704 for(i=0; i<=ns+nd-1; i++) 9706 if( (s->nnc.ptr.p_bool[i]&&ae_fp_less_eq(x->ptr.p_double[i],0))&&ae_fp_greater(s->g.ptr.p_double[i],0) ) 9708 s->d.ptr.p_double[i] = 0.0; 9712 s->d.ptr.p_double[i] = -s->g.ptr.p_double[i]; 9715 s->debugflops = s->debugflops+2*2*nr*nd; 9718 * Build quadratic model of F along descent direction: 9719 * F(x+alpha*d) = D2*alpha^2 + D1*alpha + D0 9721 * Estimate numerical noise in the X (noise level is used 9722 * to classify step as singificant or insignificant). Noise 9723 * comes from two sources: 9724 * * noise when calculating rows of (I|A)*x 9725 * * noise when calculating norm of residual 9727 * In case function curvature is negative or product of descent 9728 * direction and gradient is non-negative, iterations are terminated. 9730 * NOTE: D0 is not actually used, but we prefer to maintain it. 
9732 fprev = ae_v_dotproduct(&s->r.ptr.p_double[0], 1, &s->r.ptr.p_double[0], 1, ae_v_len(0,nr-1)); 9735 for(i=0; i<=nr-1; i++) 9739 * Estimate noise introduced by I-th row of (I|A)*x 9744 v = eps*x->ptr.p_double[i]; 9746 for(j=0; j<=nd-1; j++) 9748 v = ae_maxreal(v, eps*ae_fabs(s->densea.ptr.pp_double[i][j]*x->ptr.p_double[ns+j], _state), _state); 9750 v = 2*ae_fabs(s->r.ptr.p_double[i]*v, _state)+v*v; 9753 * Add to summary noise in the model 9755 noiselevel = noiselevel+v; 9757 noiselevel = ae_maxreal(noiselevel, eps*fprev, _state); 9759 for(i=0; i<=nr-1; i++) 9761 v = ae_v_dotproduct(&s->densea.ptr.pp_double[i][0], 1, &s->d.ptr.p_double[ns], 1, ae_v_len(0,nd-1)); 9764 v = v+s->d.ptr.p_double[i]; 9766 d2 = d2+0.5*ae_sqr(v, _state); 9768 v = ae_v_dotproduct(&s->d.ptr.p_double[0], 1, &s->g.ptr.p_double[0], 1, ae_v_len(0,ns+nd-1)); 9771 if( ae_fp_less_eq(d2,0)||ae_fp_greater_eq(d1,0) ) 9773 terminationneeded = ae_true; 9776 s->debugflops = s->debugflops+2*nr*nd; 9777 touchreal(&d0, _state); 9780 * Perform full (unconstrained) step with length StpLen in direction D. 9782 * We can terminate iterations in case one of two criteria is met: 9783 * 1. function change is dominated by noise (or function actually increased 9784 * instead of decreasing) 9785 * 2. relative change in X is small enough 9787 * First condition is not enough to guarantee algorithm termination because 9788 * sometimes our noise estimate is too optimistic (say, in situations when 9789 * function value at solition is zero). 
9791 stplen = -d1/(2*d2); 9792 ae_v_move(&s->xn.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,ns+nd-1)); 9793 ae_v_addd(&s->xn.ptr.p_double[0], 1, &s->d.ptr.p_double[0], 1, ae_v_len(0,ns+nd-1), stplen); 9795 for(i=0; i<=nr-1; i++) 9797 v = ae_v_dotproduct(&s->densea.ptr.pp_double[i][0], 1, &s->xn.ptr.p_double[ns], 1, ae_v_len(0,nd-1)); 9800 v = v+s->xn.ptr.p_double[i]; 9802 fcand = fcand+0.5*ae_sqr(v-s->b.ptr.p_double[i], _state); 9804 s->debugflops = s->debugflops+2*nr*nd; 9805 if( ae_fp_greater_eq(fcand,fprev-noiselevel*noisetolerance) ) 9807 terminationneeded = ae_true; 9811 for(i=0; i<=ns+nd-1; i++) 9813 v0 = ae_fabs(x->ptr.p_double[i], _state); 9814 v1 = ae_fabs(s->xn.ptr.p_double[i], _state); 9815 if( ae_fp_neq(v0,0)||ae_fp_neq(v1,0) ) 9817 v = ae_maxreal(v, ae_fabs(x->ptr.p_double[i]-s->xn.ptr.p_double[i], _state)/ae_maxreal(v0, v1, _state), _state); 9820 if( ae_fp_less_eq(v,eps*noisetolerance) ) 9822 terminationneeded = ae_true; 9827 * Perform step one more time, now with non-negativity constraints. 9829 * NOTE: complicated code below which deals with VarIdx temporary makes 9830 * sure that in case unconstrained step leads us outside of feasible 9831 * area, we activate at least one constraint. 9833 wasactivation = snnls_boundedstepandactivation(x, &s->xn, &s->nnc, ns+nd, _state); 9835 for(i=0; i<=nr-1; i++) 9837 v = ae_v_dotproduct(&s->densea.ptr.pp_double[i][0], 1, &x->ptr.p_double[ns], 1, ae_v_len(0,nd-1)); 9840 v = v+x->ptr.p_double[i]; 9842 fcur = fcur+0.5*ae_sqr(v-s->b.ptr.p_double[i], _state); 9844 s->debugflops = s->debugflops+2*nr*nd; 9847 * Depending on results, decide what to do: 9848 * 1. In case step was performed without activation of constraints, 9849 * we proceed to Newton method 9850 * 2. In case there was activated at least one constraint, we repeat 9851 * steepest descent step. 9853 if( !wasactivation ) 9857 * Step without activation, proceed to Newton 9862 if( terminationneeded ) 9868 * Phase 2: Newton method. 
9870 rvectorsetlengthatleast(&s->cx, ns+nd, _state); 9871 ivectorsetlengthatleast(&s->columnmap, ns+nd, _state); 9872 ivectorsetlengthatleast(&s->rowmap, nr, _state); 9873 rmatrixsetlengthatleast(&s->tmpca, nr, nd, _state); 9874 rmatrixsetlengthatleast(&s->tmpz, nd, nd, _state); 9875 rvectorsetlengthatleast(&s->cborg, nr, _state); 9876 rvectorsetlengthatleast(&s->cb, nr, _state); 9877 terminationneeded = ae_false; 9882 * Prepare equality constrained subproblem with NSC<=NS "sparse
" 9883 * variables and NDC<=ND "dense
" variables. 9885 * First, we reorder variables (columns) and move all unconstrained 9886 * variables "to the left
", ColumnMap stores this permutation. 9888 * Then, we reorder first NS rows of A and first NS elements of B in 9889 * such way that we still have identity matrix in first NSC columns 9890 * of problem. This permutation is stored in RowMap. 9894 for(i=0; i<=ns-1; i++) 9896 if( !(s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],0)) ) 9898 s->columnmap.ptr.p_int[nsc] = i; 9902 for(i=ns; i<=ns+nd-1; i++) 9904 if( !(s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],0)) ) 9906 s->columnmap.ptr.p_int[nsc+ndc] = i; 9910 for(i=0; i<=nsc-1; i++) 9912 s->rowmap.ptr.p_int[i] = s->columnmap.ptr.p_int[i]; 9915 for(i=0; i<=ns-1; i++) 9917 if( s->nnc.ptr.p_bool[i]&&ae_fp_eq(x->ptr.p_double[i],0) ) 9919 s->rowmap.ptr.p_int[j] = i; 9923 for(i=ns; i<=nr-1; i++) 9925 s->rowmap.ptr.p_int[i] = i; 9929 * Now, permutations are ready, and we can copy/reorder 9930 * A, B and X to CA, CB and CX. 9932 for(i=0; i<=nsc+ndc-1; i++) 9934 s->cx.ptr.p_double[i] = x->ptr.p_double[s->columnmap.ptr.p_int[i]]; 9936 for(i=0; i<=nr-1; i++) 9938 for(j=0; j<=ndc-1; j++) 9940 s->tmpca.ptr.pp_double[i][j] = s->densea.ptr.pp_double[s->rowmap.ptr.p_int[i]][s->columnmap.ptr.p_int[nsc+j]-ns]; 9942 s->cb.ptr.p_double[i] = s->b.ptr.p_double[s->rowmap.ptr.p_int[i]]; 9946 * Solve equality constrained subproblem. 9954 * Solve subproblem using Newton-type algorithm. We have a 9955 * NR*(NSC+NDC) linear least squares subproblem 9957 * | ( I AU ) ( XU ) ( BU ) |^2 9958 * min | ( ) * ( ) - ( ) | 9959 * | ( 0 AL ) ( XL ) ( BL ) | 9962 * * I is a NSC*NSC identity matrix 9963 * * AU is NSC*NDC dense matrix (first NSC rows of CA) 9964 * * AL is (NR-NSC)*NDC dense matrix (next NR-NSC rows of CA) 9965 * * BU and BL are correspondingly sized parts of CB 9967 * After conversion to normal equations and small regularization, 9970 * ( I AU ) ( XU ) ( BU ) 9972 * ( AU' Y ) ( XL ) ( AU'*BU+AL'*BL ) 9974 * where Y = AU'*AU + AL'*AL + lambda*diag(AU'*AU+AL'*AL). 
9976 * With Schur Complement Method this system can be solved in 9977 * O(NR*NDC^2+NDC^3) operations. In order to solve it we multiply 9978 * first row by AU' and subtract it from the second one. As result, 9981 * Z*XL = AL'*BL, where Z=AL'*AL+lambda*diag(AU'*AU+AL'*AL) 9983 * We can easily solve it for XL, and we can get XU as XU = BU-AU*XL. 9985 * We will start solution from calculating Cholesky decomposition of Z. 9987 for(i=0; i<=nr-1; i++) 9989 s->cborg.ptr.p_double[i] = s->cb.ptr.p_double[i]; 9991 for(i=0; i<=ndc-1; i++) 9993 s->diagaa.ptr.p_double[i] = 0; 9995 for(i=0; i<=nr-1; i++) 9997 for(j=0; j<=ndc-1; j++) 9999 s->diagaa.ptr.p_double[j] = s->diagaa.ptr.p_double[j]+ae_sqr(s->tmpca.ptr.pp_double[i][j], _state); 10002 for(j=0; j<=ndc-1; j++) 10004 if( ae_fp_eq(s->diagaa.ptr.p_double[j],0) ) 10006 s->diagaa.ptr.p_double[j] = 1; 10013 * NOTE: we try to factorize Z. In case of failure we increase 10014 * regularization parameter and try again. 10016 s->debugflops = s->debugflops+2*(nr-nsc)*ae_sqr(ndc, _state)+ae_pow(ndc, 3, _state)/3; 10017 for(i=0; i<=ndc-1; i++) 10019 for(j=0; j<=ndc-1; j++) 10021 s->tmpz.ptr.pp_double[i][j] = 0.0; 10024 rmatrixsyrk(ndc, nr-nsc, 1.0, &s->tmpca, nsc, 0, 2, 0.0, &s->tmpz, 0, 0, ae_true, _state); 10025 for(i=0; i<=ndc-1; i++) 10027 s->tmpz.ptr.pp_double[i][i] = s->tmpz.ptr.pp_double[i][i]+lambdav*s->diagaa.ptr.p_double[i]; 10029 if( spdmatrixcholeskyrec(&s->tmpz, 0, ndc, ae_true, &s->tmpcholesky, _state) ) 10033 lambdav = lambdav*10; 10037 * We have Cholesky decomposition of Z, now we can solve system: 10038 * * we start from initial point CX 10039 * * we perform several iterations of refinement: 10040 * * BU_new := BU_orig - XU_cur - AU*XL_cur 10041 * * BL_new := BL_orig - AL*XL_cur 10042 * * solve for BU_new/BL_new, obtain solution dx 10043 * * XU_cur := XU_cur + dx_u 10044 * * XL_cur := XL_cur + dx_l 10045 * * BU_new/BL_new are stored in CB, original right part is 10046 * stored in CBOrg, correction to X is stored in DX, 
current 10047 * X is stored in CX 10049 for(rfsits=1; rfsits<=s->refinementits; rfsits++) 10051 for(i=0; i<=nr-1; i++) 10053 v = ae_v_dotproduct(&s->tmpca.ptr.pp_double[i][0], 1, &s->cx.ptr.p_double[nsc], 1, ae_v_len(0,ndc-1)); 10054 s->cb.ptr.p_double[i] = s->cborg.ptr.p_double[i]-v; 10057 s->cb.ptr.p_double[i] = s->cb.ptr.p_double[i]-s->cx.ptr.p_double[i]; 10060 s->debugflops = s->debugflops+2*nr*ndc; 10061 for(i=0; i<=ndc-1; i++) 10063 s->dx.ptr.p_double[i] = 0.0; 10065 for(i=nsc; i<=nr-1; i++) 10067 v = s->cb.ptr.p_double[i]; 10068 ae_v_addd(&s->dx.ptr.p_double[0], 1, &s->tmpca.ptr.pp_double[i][0], 1, ae_v_len(0,ndc-1), v); 10070 fblscholeskysolve(&s->tmpz, 1.0, ndc, ae_true, &s->dx, &s->tmpcholesky, _state); 10071 s->debugflops = s->debugflops+2*ndc*ndc; 10072 ae_v_add(&s->cx.ptr.p_double[nsc], 1, &s->dx.ptr.p_double[0], 1, ae_v_len(nsc,nsc+ndc-1)); 10073 for(i=0; i<=nsc-1; i++) 10075 v = ae_v_dotproduct(&s->tmpca.ptr.pp_double[i][0], 1, &s->dx.ptr.p_double[0], 1, ae_v_len(0,ndc-1)); 10076 s->cx.ptr.p_double[i] = s->cx.ptr.p_double[i]+s->cb.ptr.p_double[i]-v; 10078 s->debugflops = s->debugflops+2*nsc*ndc; 10087 * We have a NR*NSC linear least squares subproblem 10091 * solution is easy to find - it is XU=BU! 10093 for(i=0; i<=nsc-1; i++) 10095 s->cx.ptr.p_double[i] = s->cb.ptr.p_double[i]; 10098 for(i=0; i<=ns+nd-1; i++) 10100 s->xn.ptr.p_double[i] = x->ptr.p_double[i]; 10102 for(i=0; i<=nsc+ndc-1; i++) 10104 s->xn.ptr.p_double[s->columnmap.ptr.p_int[i]] = s->cx.ptr.p_double[i]; 10106 newtoncnt = newtoncnt+1; 10109 * Step to candidate point. 10110 * If no constraints was added, accept candidate point XN and move to next phase. 10111 * Terminate, if number of Newton iterations exceeded DebugMaxNewton counter. 
10113 terminationneeded = s->debugmaxnewton>0&&newtoncnt>=s->debugmaxnewton; 10114 if( !snnls_boundedstepandactivation(x, &s->xn, &s->nnc, ns+nd, _state) ) 10118 if( terminationneeded ) 10123 if( terminationneeded ) 10131 /************************************************************************* 10132 Having feasible current point XC and possibly infeasible candidate point 10133 XN, this function performs longest step from XC to XN which retains 10134 feasibility. In case XN is found to be infeasible, at least one constraint 10137 For example, if we have: 10141 then this function will move us to X=0 and activate constraint "x>=0
". 10144 XC - current point, must be feasible with respect to 10146 XN - candidate point, can be infeasible with respect to some 10148 NNC - NNC[i] is True when I-th variable is non-negatively 10156 True in case at least one constraint was activated by step 10159 Copyright 19.10.2012 by Bochkanov Sergey 10160 *************************************************************************/ 10161 static ae_bool snnls_boundedstepandactivation(/* Real */ ae_vector* xc, 10162 /* Real */ ae_vector* xn, 10163 /* Boolean */ ae_vector* nnc, 10177 * Check constraints. 10179 * NOTE: it is important to test for XN[i]<XC[i] (strict inequality, 10180 * allows to handle correctly situations with XC[i]=0 without 10181 * activating already active constraints), but to check for 10182 * XN[i]<=0 (non-strict inequality, correct handling of some 10183 * special cases when unconstrained step ends at the boundary). 10187 vmax = ae_maxrealnumber; 10188 for(i=0; i<=n-1; i++) 10190 if( (nnc->ptr.p_bool[i]&&ae_fp_less(xn->ptr.p_double[i],xc->ptr.p_double[i]))&&ae_fp_less_eq(xn->ptr.p_double[i],0.0) ) 10193 vmax = safeminposrv(xc->ptr.p_double[i], xc->ptr.p_double[i]-xn->ptr.p_double[i], vmax, _state); 10194 if( ae_fp_less(vmax,v) ) 10200 stplen = ae_minreal(vmax, 1.0, _state); 10203 * Perform step with activation. 10205 * NOTE: it is important to use (1-StpLen)*XC + StpLen*XN because 10206 * it allows us to step exactly to XN when StpLen=1, even in 10207 * the presence of numerical errors. 
10209 for(i=0; i<=n-1; i++) 10211 xc->ptr.p_double[i] = (1-stplen)*xc->ptr.p_double[i]+stplen*xn->ptr.p_double[i]; 10215 xc->ptr.p_double[varidx] = 0.0; 10218 for(i=0; i<=n-1; i++) 10220 if( nnc->ptr.p_bool[i]&&ae_fp_less(xc->ptr.p_double[i],0.0) ) 10222 xc->ptr.p_double[i] = 0.0; 10230 ae_bool _snnlssolver_init(void* _p, ae_state *_state, ae_bool make_automatic) 10232 snnlssolver *p = (snnlssolver*)_p; 10233 ae_touch_ptr((void*)p); 10234 if( !ae_matrix_init(&p->densea, 0, 0, DT_REAL, _state, make_automatic) ) 10236 if( !ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic) ) 10238 if( !ae_vector_init(&p->nnc, 0, DT_BOOL, _state, make_automatic) ) 10240 if( !ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic) ) 10242 if( !ae_matrix_init(&p->tmpz, 0, 0, DT_REAL, _state, make_automatic) ) 10244 if( !ae_matrix_init(&p->tmpca, 0, 0, DT_REAL, _state, make_automatic) ) 10246 if( !ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic) ) 10248 if( !ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic) ) 10250 if( !ae_vector_init(&p->dx, 0, DT_REAL, _state, make_automatic) ) 10252 if( !ae_vector_init(&p->diagaa, 0, DT_REAL, _state, make_automatic) ) 10254 if( !ae_vector_init(&p->cb, 0, DT_REAL, _state, make_automatic) ) 10256 if( !ae_vector_init(&p->cx, 0, DT_REAL, _state, make_automatic) ) 10258 if( !ae_vector_init(&p->cborg, 0, DT_REAL, _state, make_automatic) ) 10260 if( !ae_vector_init(&p->columnmap, 0, DT_INT, _state, make_automatic) ) 10262 if( !ae_vector_init(&p->rowmap, 0, DT_INT, _state, make_automatic) ) 10264 if( !ae_vector_init(&p->tmpcholesky, 0, DT_REAL, _state, make_automatic) ) 10266 if( !ae_vector_init(&p->r, 0, DT_REAL, _state, make_automatic) ) 10272 ae_bool _snnlssolver_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 10274 snnlssolver *dst = (snnlssolver*)_dst; 10275 snnlssolver *src = (snnlssolver*)_src; 10279 if( !ae_matrix_init_copy(&dst->densea, &src->densea, _state, make_automatic) ) 10281 if( 
!ae_vector_init_copy(&dst->b, &src->b, _state, make_automatic) ) 10283 if( !ae_vector_init_copy(&dst->nnc, &src->nnc, _state, make_automatic) ) 10285 dst->refinementits = src->refinementits; 10286 dst->debugflops = src->debugflops; 10287 dst->debugmaxnewton = src->debugmaxnewton; 10288 if( !ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic) ) 10290 if( !ae_matrix_init_copy(&dst->tmpz, &src->tmpz, _state, make_automatic) ) 10292 if( !ae_matrix_init_copy(&dst->tmpca, &src->tmpca, _state, make_automatic) ) 10294 if( !ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic) ) 10296 if( !ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic) ) 10298 if( !ae_vector_init_copy(&dst->dx, &src->dx, _state, make_automatic) ) 10300 if( !ae_vector_init_copy(&dst->diagaa, &src->diagaa, _state, make_automatic) ) 10302 if( !ae_vector_init_copy(&dst->cb, &src->cb, _state, make_automatic) ) 10304 if( !ae_vector_init_copy(&dst->cx, &src->cx, _state, make_automatic) ) 10306 if( !ae_vector_init_copy(&dst->cborg, &src->cborg, _state, make_automatic) ) 10308 if( !ae_vector_init_copy(&dst->columnmap, &src->columnmap, _state, make_automatic) ) 10310 if( !ae_vector_init_copy(&dst->rowmap, &src->rowmap, _state, make_automatic) ) 10312 if( !ae_vector_init_copy(&dst->tmpcholesky, &src->tmpcholesky, _state, make_automatic) ) 10314 if( !ae_vector_init_copy(&dst->r, &src->r, _state, make_automatic) ) 10320 void _snnlssolver_clear(void* _p) 10322 snnlssolver *p = (snnlssolver*)_p; 10323 ae_touch_ptr((void*)p); 10324 ae_matrix_clear(&p->densea); 10325 ae_vector_clear(&p->b); 10326 ae_vector_clear(&p->nnc); 10327 ae_vector_clear(&p->xn); 10328 ae_matrix_clear(&p->tmpz); 10329 ae_matrix_clear(&p->tmpca); 10330 ae_vector_clear(&p->g); 10331 ae_vector_clear(&p->d); 10332 ae_vector_clear(&p->dx); 10333 ae_vector_clear(&p->diagaa); 10334 ae_vector_clear(&p->cb); 10335 ae_vector_clear(&p->cx); 10336 ae_vector_clear(&p->cborg); 10337 ae_vector_clear(&p->columnmap); 10338 
ae_vector_clear(&p->rowmap); 10339 ae_vector_clear(&p->tmpcholesky); 10340 ae_vector_clear(&p->r); 10344 void _snnlssolver_destroy(void* _p) 10346 snnlssolver *p = (snnlssolver*)_p; 10347 ae_touch_ptr((void*)p); 10348 ae_matrix_destroy(&p->densea); 10349 ae_vector_destroy(&p->b); 10350 ae_vector_destroy(&p->nnc); 10351 ae_vector_destroy(&p->xn); 10352 ae_matrix_destroy(&p->tmpz); 10353 ae_matrix_destroy(&p->tmpca); 10354 ae_vector_destroy(&p->g); 10355 ae_vector_destroy(&p->d); 10356 ae_vector_destroy(&p->dx); 10357 ae_vector_destroy(&p->diagaa); 10358 ae_vector_destroy(&p->cb); 10359 ae_vector_destroy(&p->cx); 10360 ae_vector_destroy(&p->cborg); 10361 ae_vector_destroy(&p->columnmap); 10362 ae_vector_destroy(&p->rowmap); 10363 ae_vector_destroy(&p->tmpcholesky); 10364 ae_vector_destroy(&p->r); 10370 /************************************************************************* 10371 This subroutine is used to initialize active set. By default, empty 10372 N-variable model with no constraints is generated. Previously allocated 10373 buffer variables are reused as much as possible. 10375 Two use cases for this object are described below. 
10377 CASE 1 - STEEPEST DESCENT: 10381 SASReactivateConstraints() 10382 SASDescentDirection() 10383 SASExploreDirection() 10387 CASE 1 - PRECONDITIONED STEEPEST DESCENT: 10391 SASReactivateConstraintsPrec() 10392 SASDescentDirectionPrec() 10393 SASExploreDirection() 10398 Copyright 21.12.2012 by Bochkanov Sergey 10399 *************************************************************************/ 10400 void sasinit(ae_int_t n, sactiveset* s, ae_state *_state) 10411 s->constraintschanged = ae_true; 10414 rvectorsetlengthatleast(&s->bndl, n, _state); 10415 bvectorsetlengthatleast(&s->hasbndl, n, _state); 10416 rvectorsetlengthatleast(&s->bndu, n, _state); 10417 bvectorsetlengthatleast(&s->hasbndu, n, _state); 10418 for(i=0; i<=n-1; i++) 10420 s->bndl.ptr.p_double[i] = _state->v_neginf; 10421 s->bndu.ptr.p_double[i] = _state->v_posinf; 10422 s->hasbndl.ptr.p_bool[i] = ae_false; 10423 s->hasbndu.ptr.p_bool[i] = ae_false; 10427 * current point, scale 10429 s->hasxc = ae_false; 10430 rvectorsetlengthatleast(&s->xc, n, _state); 10431 rvectorsetlengthatleast(&s->s, n, _state); 10432 rvectorsetlengthatleast(&s->h, n, _state); 10433 for(i=0; i<=n-1; i++) 10435 s->xc.ptr.p_double[i] = 0.0; 10436 s->s.ptr.p_double[i] = 1.0; 10437 s->h.ptr.p_double[i] = 1.0; 10443 rvectorsetlengthatleast(&s->unitdiagonal, n, _state); 10444 for(i=0; i<=n-1; i++) 10446 s->unitdiagonal.ptr.p_double[i] = 1.0; 10451 /************************************************************************* 10452 This function sets scaling coefficients for SAS object. 10454 ALGLIB optimizers use scaling matrices to test stopping conditions (step 10455 size and gradient are scaled before comparison with tolerances). Scale of 10456 the I-th variable is a translation invariant measure of: 10457 a) "how large
" the variable is 10458 b) how large the step should be to make significant changes in the function 10460 During orthogonalization phase, scale is used to calculate drop tolerances 10461 (whether vector is significantly non-zero or not). 10464 State - structure stores algorithm state 10465 S - array[N], non-zero scaling coefficients 10466 S[i] may be negative, sign doesn't matter. 10469 Copyright 21.12.2012 by Bochkanov Sergey 10470 *************************************************************************/ 10471 void sassetscale(sactiveset* state, 10472 /* Real */ ae_vector* s, 10478 ae_assert(state->algostate==0, "SASSetScale: you may change scale only
in modification
mode", _state); 10479 ae_assert(s->cnt>=state->n, "SASSetScale: Length(S)<N
", _state); 10480 for(i=0; i<=state->n-1; i++) 10482 ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "SASSetScale: S contains infinite
or NAN elements
", _state); 10483 ae_assert(ae_fp_neq(s->ptr.p_double[i],0), "SASSetScale: S contains
zero elements
", _state); 10485 for(i=0; i<=state->n-1; i++) 10487 state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); 10492 /************************************************************************* 10493 Modification of the preconditioner: diagonal of approximate Hessian is 10497 State - structure which stores algorithm state 10498 D - diagonal of the approximate Hessian, array[0..N-1], 10499 (if larger, only leading N elements are used). 10501 NOTE 1: D[i] should be positive. Exception will be thrown otherwise. 10503 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. 10506 Copyright 21.12.2012 by Bochkanov Sergey 10507 *************************************************************************/ 10508 void sassetprecdiag(sactiveset* state, 10509 /* Real */ ae_vector* d, 10515 ae_assert(state->algostate==0, "SASSetPrecDiag: you may change preconditioner only
in modification
mode", _state); 10516 ae_assert(d->cnt>=state->n, "SASSetPrecDiag: D is too
short", _state); 10517 for(i=0; i<=state->n-1; i++) 10519 ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "SASSetPrecDiag: D contains infinite
or NAN elements
", _state); 10520 ae_assert(ae_fp_greater(d->ptr.p_double[i],0), "SASSetPrecDiag: D contains non-positive elements
", _state); 10522 for(i=0; i<=state->n-1; i++) 10524 state->h.ptr.p_double[i] = d->ptr.p_double[i]; 10529 /************************************************************************* 10530 This function sets/changes boundary constraints. 10533 State - structure stores algorithm state 10534 BndL - lower bounds, array[N]. 10535 If some (all) variables are unbounded, you may specify 10536 very small number or -INF. 10537 BndU - upper bounds, array[N]. 10538 If some (all) variables are unbounded, you may specify 10539 very large number or +INF. 10541 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th 10542 variable will be "frozen
" at X[i]=BndL[i]=BndU[i]. 10545 Copyright 21.12.2012 by Bochkanov Sergey 10546 *************************************************************************/ 10547 void sassetbc(sactiveset* state, 10548 /* Real */ ae_vector* bndl, 10549 /* Real */ ae_vector* bndu, 10556 ae_assert(state->algostate==0, "SASSetBC: you may change constraints only
in modification
mode", _state); 10558 ae_assert(bndl->cnt>=n, "SASSetBC: Length(BndL)<N
", _state); 10559 ae_assert(bndu->cnt>=n, "SASSetBC: Length(BndU)<N
", _state); 10560 for(i=0; i<=n-1; i++) 10562 ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "SASSetBC: BndL contains NAN
or +
INF", _state); 10563 ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "SASSetBC: BndL contains NAN
or -
INF", _state); 10564 state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; 10565 state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); 10566 state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; 10567 state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); 10569 state->constraintschanged = ae_true; 10573 /************************************************************************* 10574 This function sets linear constraints for SAS object. 10576 Linear constraints are inactive by default (after initial creation). 10579 State - SAS structure 10580 C - linear constraints, array[K,N+1]. 10581 Each row of C represents one constraint, either equality 10582 or inequality (see below): 10583 * first N elements correspond to coefficients, 10584 * last element corresponds to the right part. 10585 All elements of C (including right part) must be finite. 10586 CT - type of constraints, array[K]: 10587 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] 10588 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] 10589 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] 10590 K - number of equality/inequality constraints, K>=0 10592 NOTE 1: linear (non-bound) constraints are satisfied only approximately: 10593 * there always exists some minor violation (about Epsilon in magnitude) 10594 due to rounding errors 10595 * numerical differentiation, if used, may lead to function evaluations 10596 outside of the feasible area, because algorithm does NOT change 10597 numerical differentiation formula according to linear constraints. 10598 If you want constraints to be satisfied exactly, try to reformulate your 10599 problem in such manner that all constraints will become boundary ones 10600 (this kind of constraints is always satisfied exactly, both in the final 10601 solution and in all intermediate points). 
10604 Copyright 28.11.2010 by Bochkanov Sergey 10605 *************************************************************************/ 10606 void sassetlc(sactiveset* state, 10607 /* Real */ ae_matrix* c, 10608 /* Integer */ ae_vector* ct, 10616 ae_assert(state->algostate==0, "SASSetLC: you may change constraints only
in modification
mode", _state); 10620 * First, check for errors in the inputs 10622 ae_assert(k>=0, "SASSetLC:
K<0
", _state); 10623 ae_assert(c->cols>=n+1||k==0, "SASSetLC: Cols(C)<N+1
", _state); 10624 ae_assert(c->rows>=k, "SASSetLC: Rows(C)<
K", _state); 10625 ae_assert(ct->cnt>=k, "SASSetLC: Length(CT)<
K", _state); 10626 ae_assert(apservisfinitematrix(c, k, n+1, _state), "SASSetLC: C contains infinite
or NaN values!
", _state); 10635 state->constraintschanged = ae_true; 10640 * Equality constraints are stored first, in the upper 10641 * NEC rows of State.CLEIC matrix. Inequality constraints 10642 * are stored in the next NIC rows. 10644 * NOTE: we convert inequality constraints to the form 10645 * A*x<=b before copying them. 10647 rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); 10650 for(i=0; i<=k-1; i++) 10652 if( ct->ptr.p_int[i]==0 ) 10654 ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 10655 state->nec = state->nec+1; 10658 for(i=0; i<=k-1; i++) 10660 if( ct->ptr.p_int[i]!=0 ) 10662 if( ct->ptr.p_int[i]>0 ) 10664 ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 10668 ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 10670 state->nic = state->nic+1; 10675 * Mark state as changed 10677 state->constraintschanged = ae_true; 10681 /************************************************************************* 10682 Another variation of SASSetLC(), which accepts linear constraints using 10683 another representation. 10685 Linear constraints are inactive by default (after initial creation). 10688 State - SAS structure 10689 CLEIC - linear constraints, array[NEC+NIC,N+1]. 10690 Each row of C represents one constraint: 10691 * first N elements correspond to coefficients, 10692 * last element corresponds to the right part. 10693 First NEC rows store equality constraints, next NIC - are 10695 All elements of C (including right part) must be finite. 
10696 NEC - number of equality constraints, NEC>=0 10697 NIC - number of inequality constraints, NIC>=0 10699 NOTE 1: linear (non-bound) constraints are satisfied only approximately: 10700 * there always exists some minor violation (about Epsilon in magnitude) 10701 due to rounding errors 10702 * numerical differentiation, if used, may lead to function evaluations 10703 outside of the feasible area, because algorithm does NOT change 10704 numerical differentiation formula according to linear constraints. 10705 If you want constraints to be satisfied exactly, try to reformulate your 10706 problem in such manner that all constraints will become boundary ones 10707 (this kind of constraints is always satisfied exactly, both in the final 10708 solution and in all intermediate points). 10711 Copyright 28.11.2010 by Bochkanov Sergey 10712 *************************************************************************/ 10713 void sassetlcx(sactiveset* state, 10714 /* Real */ ae_matrix* cleic, 10724 ae_assert(state->algostate==0, "SASSetLCX: you may change constraints only
in modification
mode", _state); 10728 * First, check for errors in the inputs 10730 ae_assert(nec>=0, "SASSetLCX: NEC<0
", _state); 10731 ae_assert(nic>=0, "SASSetLCX: NIC<0
", _state); 10732 ae_assert(cleic->cols>=n+1||nec+nic==0, "SASSetLCX: Cols(CLEIC)<N+1
", _state); 10733 ae_assert(cleic->rows>=nec+nic, "SASSetLCX: Rows(CLEIC)<NEC+NIC
", _state); 10734 ae_assert(apservisfinitematrix(cleic, nec+nic, n+1, _state), "SASSetLCX: CLEIC contains infinite
or NaN values!
", _state); 10737 * Store constraints 10739 rmatrixsetlengthatleast(&state->cleic, nec+nic, n+1, _state); 10742 for(i=0; i<=nec+nic-1; i++) 10744 for(j=0; j<=n; j++) 10746 state->cleic.ptr.pp_double[i][j] = cleic->ptr.pp_double[i][j]; 10751 * Mark state as changed 10753 state->constraintschanged = ae_true; 10757 /************************************************************************* 10758 This subroutine turns on optimization mode: 10759 1. feasibility in X is enforced (in case X=S.XC and constraints have not 10760 changed, algorithm just uses X without any modifications at all) 10761 2. constraints are marked as "candidate
" or "inactive
" 10764 S - active set object 10765 X - initial point (candidate), array[N]. It is expected that X 10766 contains only finite values (we do not check it). 10769 S - state is changed 10770 X - initial point can be changed to enforce feasibility 10773 True in case feasible point was found (mode was changed to "optimization
") 10774 False in case no feasible point was found (mode was not changed) 10777 Copyright 21.12.2012 by Bochkanov Sergey 10778 *************************************************************************/ 10779 ae_bool sasstartoptimization(sactiveset* state, 10780 /* Real */ ae_vector* x, 10792 ae_assert(state->algostate==0, "SASStartOptimization: already
in optimization
mode", _state); 10799 * Enforce feasibility and calculate set of "candidate
"/"active
" constraints. 10800 * Always active equality constraints are marked as "active
", all other constraints 10801 * are marked as "candidate
". 10803 ivectorsetlengthatleast(&state->activeset, n+nec+nic, _state); 10804 for(i=0; i<=n-1; i++) 10806 if( state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i] ) 10808 if( ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 10814 ae_v_move(&state->xc.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); 10815 if( state->nec+state->nic>0 ) 10819 * General linear constraints are present; general code is used. 10821 rvectorsetlengthatleast(&state->tmp0, n, _state); 10822 rvectorsetlengthatleast(&state->tmpfeas, n+state->nic, _state); 10823 rmatrixsetlengthatleast(&state->tmpm0, state->nec+state->nic, n+state->nic+1, _state); 10824 for(i=0; i<=state->nec+state->nic-1; i++) 10826 ae_v_move(&state->tmpm0.ptr.pp_double[i][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); 10827 for(j=n; j<=n+state->nic-1; j++) 10829 state->tmpm0.ptr.pp_double[i][j] = 0; 10831 if( i>=state->nec ) 10833 state->tmpm0.ptr.pp_double[i][n+i-state->nec] = 1.0; 10835 state->tmpm0.ptr.pp_double[i][n+state->nic] = state->cleic.ptr.pp_double[i][n]; 10837 ae_v_move(&state->tmpfeas.ptr.p_double[0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 10838 for(i=0; i<=state->nic-1; i++) 10840 v = ae_v_dotproduct(&state->cleic.ptr.pp_double[i+state->nec][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 10841 state->tmpfeas.ptr.p_double[i+n] = ae_maxreal(state->cleic.ptr.pp_double[i+state->nec][n]-v, 0.0, _state); 10843 if( !findfeasiblepoint(&state->tmpfeas, &state->bndl, &state->hasbndl, &state->bndu, &state->hasbndu, n, state->nic, &state->tmpm0, state->nec+state->nic, 1.0E-6, &i, &j, _state) ) 10847 ae_v_move(&state->xc.ptr.p_double[0], 1, &state->tmpfeas.ptr.p_double[0], 1, ae_v_len(0,n-1)); 10848 for(i=0; i<=n-1; i++) 10850 if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 10852 state->activeset.ptr.p_int[i] = 1; 10855 if( 
(state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))||(state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i])) ) 10857 state->activeset.ptr.p_int[i] = 0; 10860 state->activeset.ptr.p_int[i] = -1; 10862 for(i=0; i<=state->nec-1; i++) 10864 state->activeset.ptr.p_int[n+i] = 1; 10866 for(i=0; i<=state->nic-1; i++) 10868 if( ae_fp_eq(state->tmpfeas.ptr.p_double[n+i],0) ) 10870 state->activeset.ptr.p_int[n+state->nec+i] = 0; 10874 state->activeset.ptr.p_int[n+state->nec+i] = -1; 10882 * Only bound constraints are present, quick code can be used 10884 for(i=0; i<=n-1; i++) 10886 state->activeset.ptr.p_int[i] = -1; 10887 if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 10889 state->activeset.ptr.p_int[i] = 1; 10890 state->xc.ptr.p_double[i] = state->bndl.ptr.p_double[i]; 10893 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) 10895 state->xc.ptr.p_double[i] = state->bndl.ptr.p_double[i]; 10896 state->activeset.ptr.p_int[i] = 0; 10899 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 10901 state->xc.ptr.p_double[i] = state->bndu.ptr.p_double[i]; 10902 state->activeset.ptr.p_int[i] = 0; 10909 * Change state, allocate temporaries 10912 state->algostate = 1; 10913 state->basisisready = ae_false; 10914 state->hasxc = ae_true; 10915 rmatrixsetlengthatleast(&state->pbasis, ae_minint(nec+nic, n, _state), n+1, _state); 10916 rmatrixsetlengthatleast(&state->ibasis, ae_minint(nec+nic, n, _state), n+1, _state); 10917 rmatrixsetlengthatleast(&state->sbasis, ae_minint(nec+nic, n, _state), n+1, _state); 10922 /************************************************************************* 10923 This function explores search direction and calculates bound for step as 10924 well as information for activation of 
constraints. 10927 State - SAS structure which stores current point and all other 10928 active set related information 10929 D - descent direction to explore 10932 StpMax - upper limit on step length imposed by yet inactive 10933 constraints. Can be zero in case some constraints 10934 can be activated by zero step. Equal to some large 10935 value in case step is unlimited. 10936 CIdx - -1 for unlimited step, in [0,N+NEC+NIC) in case of 10938 VVal - value which is assigned to X[CIdx] during activation. 10939 For CIdx<0 or CIdx>=N some dummy value is assigned to 10941 *************************************************************************/ 10942 void sasexploredirection(sactiveset* state, 10943 /* Real */ ae_vector* d, 10961 ae_assert(state->algostate==1, "SASExploreDirection: is not
in optimization
mode", _state); 10968 for(i=0; i<=n-1; i++) 10970 if( state->activeset.ptr.p_int[i]<=0 ) 10972 ae_assert(!state->hasbndl.ptr.p_bool[i]||ae_fp_greater_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]), "SASExploreDirection:
internal error - infeasible X
", _state); 10973 ae_assert(!state->hasbndu.ptr.p_bool[i]||ae_fp_less_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]), "SASExploreDirection:
internal error - infeasible X
", _state); 10974 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(d->ptr.p_double[i],0) ) 10977 *stpmax = safeminposrv(state->xc.ptr.p_double[i]-state->bndl.ptr.p_double[i], -d->ptr.p_double[i], *stpmax, _state); 10978 if( ae_fp_less(*stpmax,prevmax) ) 10981 *vval = state->bndl.ptr.p_double[i]; 10984 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(d->ptr.p_double[i],0) ) 10987 *stpmax = safeminposrv(state->bndu.ptr.p_double[i]-state->xc.ptr.p_double[i], d->ptr.p_double[i], *stpmax, _state); 10988 if( ae_fp_less(*stpmax,prevmax) ) 10991 *vval = state->bndu.ptr.p_double[i]; 10996 for(i=nec; i<=nec+nic-1; i++) 10998 if( state->activeset.ptr.p_int[n+i]<=0 ) 11000 vc = ae_v_dotproduct(&state->cleic.ptr.pp_double[i][0], 1, &state->xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 11001 vc = vc-state->cleic.ptr.pp_double[i][n]; 11002 vd = ae_v_dotproduct(&state->cleic.ptr.pp_double[i][0], 1, &d->ptr.p_double[0], 1, ae_v_len(0,n-1)); 11003 if( ae_fp_less_eq(vd,0) ) 11007 if( ae_fp_less(vc,0) ) 11011 * XC is strictly feasible with respect to I-th constraint, 11012 * we can perform non-zero step because there is non-zero distance 11013 * between XC and bound. 11016 *stpmax = safeminposrv(-vc, vd, *stpmax, _state); 11017 if( ae_fp_less(*stpmax,prevmax) ) 11026 * XC is at the boundary (or slightly beyond it), and step vector 11027 * points beyond the boundary. 11029 * The only thing we can do is to perform zero step and activate 11040 /************************************************************************* 11041 This subroutine moves current point to XN, in the direction previously 11042 explored with SASExploreDirection() function. 11044 Step may activate one constraint. It is assumed than XN is approximately 11045 feasible (small error as large as several ulps is possible). 
Strict 11046 feasibility with respect to bound constraints is enforced during 11047 activation, feasibility with respect to general linear constraints is not 11051 S - active set object 11053 NeedAct - True in case one constraint needs activation 11054 CIdx - index of constraint, in [0,N+NEC+NIC). 11055 Ignored if NeedAct is false. 11056 This value is calculated by SASExploreDirection(). 11057 CVal - for CIdx in [0,N) this field stores value which is 11058 assigned to XC[CIdx] during activation. CVal is ignored in 11060 This value is calculated by SASExploreDirection(). 11063 S - current point and list of active constraints are changed. 11066 >0, in case at least one inactive non-candidate constraint was activated 11067 =0, in case only "candidate
" constraints were activated 11068 <0, in case no constraints were activated by the step 11070 NOTE: in general case State.XC<>XN because activation of constraints may 11071 slightly change current point (to enforce feasibility). 11074 Copyright 21.12.2012 by Bochkanov Sergey 11075 *************************************************************************/ 11076 ae_int_t sasmoveto(sactiveset* state, 11077 /* Real */ ae_vector* xn, 11087 ae_bool wasactivation; 11091 ae_assert(state->algostate==1, "SASMoveTo: is not
in optimization
mode", _state); 11097 * Save previous state, update current point 11099 rvectorsetlengthatleast(&state->mtx, n, _state); 11100 ivectorsetlengthatleast(&state->mtas, n+nec+nic, _state); 11101 for(i=0; i<=n-1; i++) 11103 state->mtx.ptr.p_double[i] = state->xc.ptr.p_double[i]; 11104 state->xc.ptr.p_double[i] = xn->ptr.p_double[i]; 11106 for(i=0; i<=n+nec+nic-1; i++) 11108 state->mtas.ptr.p_int[i] = state->activeset.ptr.p_int[i]; 11112 * Activate constraints 11114 wasactivation = ae_false; 11121 ae_assert(cidx>=0&&cidx<n+nec+nic, "SASMoveTo: incorrect CIdx
", _state); 11126 * CIdx in [0,N-1] means that bound constraint was activated. 11127 * We activate it explicitly to avoid situation when roundoff-error 11128 * prevents us from moving EXACTLY to x=CVal. 11130 state->xc.ptr.p_double[cidx] = cval; 11132 state->activeset.ptr.p_int[cidx] = 1; 11133 wasactivation = ae_true; 11135 for(i=0; i<=n-1; i++) 11139 * Post-check (some constraints may be activated because of numerical errors) 11141 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) 11143 state->xc.ptr.p_double[i] = state->bndl.ptr.p_double[i]; 11144 state->activeset.ptr.p_int[i] = 1; 11145 wasactivation = ae_true; 11147 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 11149 state->xc.ptr.p_double[i] = state->bndu.ptr.p_double[i]; 11150 state->activeset.ptr.p_int[i] = 1; 11151 wasactivation = ae_true; 11156 * Determine return status: 11157 * * -1 in case no constraints were activated 11158 * * 0 in case only "candidate
" constraints were activated 11159 * * +1 in case at least one "non-candidate
" constraint was activated 11161 if( wasactivation ) 11165 * Step activated one/several constraints, but sometimes it is spurious 11166 * activation - RecalculateConstraints() tells us that constraint is 11167 * inactive (negative Largrange multiplier), but step activates it 11168 * because of numerical noise. 11170 * This block of code checks whether step activated truly new constraints 11171 * (ones which were not in the active set at the solution): 11173 * * for non-boundary constraint it is enough to check that previous value 11174 * of ActiveSet[i] is negative (=far from boundary), and new one is 11175 * positive (=we are at the boundary, constraint is activated). 11177 * * for boundary constraints previous criterion won't work. Each variable 11178 * has two constraints, and simply checking their status is not enough - 11179 * we have to correctly identify cases when we leave one boundary 11180 * (PrevActiveSet[i]=0) and move to another boundary (ActiveSet[i]>0). 11181 * Such cases can be identified if we compare previous X with new X. 11183 * In case only "candidate
" constraints were activated, result variable 11184 * is set to 0. In case at least one new constraint was activated, result 11188 for(i=0; i<=n-1; i++) 11190 if( state->activeset.ptr.p_int[i]>0&&ae_fp_neq(state->xc.ptr.p_double[i],state->mtx.ptr.p_double[i]) ) 11195 for(i=n; i<=n+state->nec+state->nic-1; i++) 11197 if( state->mtas.ptr.p_int[i]<0&&state->activeset.ptr.p_int[i]>0 ) 11207 * No activation, return -1 11215 state->basisisready = ae_false; 11220 /************************************************************************* 11221 This subroutine performs immediate activation of one constraint: 11222 * "immediate
" means that we do not have to move to activate it 11223 * in case boundary constraint is activated, we enforce current point to be 11224 exactly at the boundary 11227 S - active set object 11228 CIdx - index of constraint, in [0,N+NEC+NIC). 11229 This value is calculated by SASExploreDirection(). 11230 CVal - for CIdx in [0,N) this field stores value which is 11231 assigned to XC[CIdx] during activation. CVal is ignored in 11233 This value is calculated by SASExploreDirection(). 11236 Copyright 21.12.2012 by Bochkanov Sergey 11237 *************************************************************************/ 11238 void sasimmediateactivation(sactiveset* state, 11245 ae_assert(state->algostate==1, "SASMoveTo: is not
in optimization
mode", _state); 11246 if( cidx<state->n ) 11248 state->xc.ptr.p_double[cidx] = cval; 11250 state->activeset.ptr.p_int[cidx] = 1; 11251 state->basisisready = ae_false; 11255 /************************************************************************* 11256 This subroutine calculates descent direction subject to current active set. 11259 S - active set object 11260 G - array[N], gradient 11261 D - possibly prealocated buffer; 11262 automatically resized if needed. 11265 D - descent direction projected onto current active set. 11266 Components of D which correspond to active boundary 11267 constraints are forced to be exactly zero. 11268 In case D is non-zero, it is normalized to have unit norm. 11270 NOTE: in case active set has N active constraints (or more), descent 11271 direction is forced to be exactly zero. 11274 Copyright 21.12.2012 by Bochkanov Sergey 11275 *************************************************************************/ 11276 void sasconstraineddescent(sactiveset* state, 11277 /* Real */ ae_vector* g, 11278 /* Real */ ae_vector* d, 11283 ae_assert(state->algostate==1, "SASConstrainedDescent: is not
in optimization
mode", _state); 11284 sasrebuildbasis(state, _state); 11285 sactivesets_constraineddescent(state, g, &state->unitdiagonal, &state->ibasis, ae_true, d, _state); 11289 /************************************************************************* 11290 This subroutine calculates preconditioned descent direction subject to 11291 current active set. 11294 S - active set object 11295 G - array[N], gradient 11296 D - possibly prealocated buffer; 11297 automatically resized if needed. 11300 D - descent direction projected onto current active set. 11301 Components of D which correspond to active boundary 11302 constraints are forced to be exactly zero. 11303 In case D is non-zero, it is normalized to have unit norm. 11305 NOTE: in case active set has N active constraints (or more), descent 11306 direction is forced to be exactly zero. 11309 Copyright 21.12.2012 by Bochkanov Sergey 11310 *************************************************************************/ 11311 void sasconstraineddescentprec(sactiveset* state, 11312 /* Real */ ae_vector* g, 11313 /* Real */ ae_vector* d, 11318 ae_assert(state->algostate==1, "SASConstrainedDescentPrec: is not
in optimization
mode", _state); 11319 sasrebuildbasis(state, _state); 11320 sactivesets_constraineddescent(state, g, &state->h, &state->pbasis, ae_true, d, _state); 11324 /************************************************************************* 11325 This subroutine calculates product of direction vector and preconditioner 11326 multiplied subject to current active set. 11329 S - active set object 11330 D - array[N], direction 11333 D - preconditioned direction projected onto current active set. 11334 Components of D which correspond to active boundary 11335 constraints are forced to be exactly zero. 11337 NOTE: in case active set has N active constraints (or more), descent 11338 direction is forced to be exactly zero. 11341 Copyright 21.12.2012 by Bochkanov Sergey 11342 *************************************************************************/ 11343 void sasconstraineddirection(sactiveset* state, 11344 /* Real */ ae_vector* d, 11350 ae_assert(state->algostate==1, "SASConstrainedAntigradientPrec: is not
in optimization
mode", _state); 11351 sasrebuildbasis(state, _state); 11352 sactivesets_constraineddescent(state, d, &state->unitdiagonal, &state->ibasis, ae_false, &state->cdtmp, _state); 11353 for(i=0; i<=state->n-1; i++) 11355 d->ptr.p_double[i] = -state->cdtmp.ptr.p_double[i]; 11360 /************************************************************************* 11361 This subroutine calculates product of direction vector and preconditioner 11362 multiplied subject to current active set. 11365 S - active set object 11366 D - array[N], direction 11369 D - preconditioned direction projected onto current active set. 11370 Components of D which correspond to active boundary 11371 constraints are forced to be exactly zero. 11373 NOTE: in case active set has N active constraints (or more), descent 11374 direction is forced to be exactly zero. 11377 Copyright 21.12.2012 by Bochkanov Sergey 11378 *************************************************************************/ 11379 void sasconstraineddirectionprec(sactiveset* state, 11380 /* Real */ ae_vector* d, 11386 ae_assert(state->algostate==1, "SASConstrainedAntigradientPrec: is not
in optimization
mode", _state); 11387 sasrebuildbasis(state, _state); 11388 sactivesets_constraineddescent(state, d, &state->h, &state->pbasis, ae_false, &state->cdtmp, _state); 11389 for(i=0; i<=state->n-1; i++) 11391 d->ptr.p_double[i] = -state->cdtmp.ptr.p_double[i]; 11396 /************************************************************************* 11397 This subroutine performs correction of some (possibly infeasible) point 11398 with respect to a) current active set, b) all boundary constraints, both 11399 active and inactive: 11401 0) we calculate L1 penalty term for violation of active linear constraints 11402 (one which is returned by SASActiveLCPenalty1() function). 11403 1) first, it performs projection (orthogonal with respect to scale matrix 11404 S) of X into current active set: X -> X1. 11405 2) next, we perform projection with respect to ALL boundary constraints 11406 which are violated at X1: X1 -> X2. 11407 3) X is replaced by X2. 11409 The idea is that this function can preserve and enforce feasibility during 11410 optimization, and additional penalty parameter can be used to prevent algo 11411 from leaving feasible set because of rounding errors. 11414 S - active set object 11415 X - array[N], candidate point 11418 X - "improved
" candidate point: 11419 a) feasible with respect to all boundary constraints 11420 b) feasibility with respect to active set is retained at 11422 Penalty - penalty term, which can be added to function value if user 11423 wants to penalize violation of constraints (recommended). 11425 NOTE: this function is not intended to find exact projection (i.e. best 11426 approximation) of X into feasible set. It just improves situation a 11428 Regular use of this function will help you to retain feasibility 11429 - if you already have something to start with and constrain your 11430 steps is such way that the only source of infeasibility are roundoff 11434 Copyright 21.12.2012 by Bochkanov Sergey 11435 *************************************************************************/ 11436 void sascorrection(sactiveset* state, 11437 /* Real */ ae_vector* x, 11448 ae_assert(state->algostate==1, "SASCorrection: is not
in optimization
mode", _state); 11449 sasrebuildbasis(state, _state); 11451 rvectorsetlengthatleast(&state->corrtmp, n, _state); 11454 * Calculate penalty term. 11456 *penalty = sasactivelcpenalty1(state, x, _state); 11459 * Perform projection 1. 11461 * This projecton is given by: 11463 * x_proj = x - S*S*As'*(As*x-b) 11465 * where x is original x before projection, S is a scale matrix, 11466 * As is a matrix of equality constraints (active set) which were 11467 * orthogonalized with respect to inner product given by S (i.e. we 11468 * have As*S*S'*As'=I), b is a right part of the orthogonalized 11471 * NOTE: you can verify that x_proj is strictly feasible w.r.t. 11472 * active set by multiplying it by As - you will get 11473 * As*x_proj = As*x - As*x + b = b. 11475 * This formula for projection can be obtained by solving 11476 * following minimization problem. 11478 * min ||inv(S)*(x_proj-x)||^2 s.t. As*x_proj=b 11481 ae_v_move(&state->corrtmp.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); 11482 for(i=0; i<=state->basissize-1; i++) 11484 v = -state->sbasis.ptr.pp_double[i][n]; 11485 for(j=0; j<=n-1; j++) 11487 v = v+state->sbasis.ptr.pp_double[i][j]*state->corrtmp.ptr.p_double[j]; 11489 for(j=0; j<=n-1; j++) 11491 state->corrtmp.ptr.p_double[j] = state->corrtmp.ptr.p_double[j]-v*state->sbasis.ptr.pp_double[i][j]*ae_sqr(state->s.ptr.p_double[j], _state); 11494 for(i=0; i<=n-1; i++) 11496 if( state->activeset.ptr.p_int[i]>0 ) 11498 state->corrtmp.ptr.p_double[i] = state->xc.ptr.p_double[i]; 11503 * Perform projection 2 11505 for(i=0; i<=n-1; i++) 11507 x->ptr.p_double[i] = state->corrtmp.ptr.p_double[i]; 11508 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(x->ptr.p_double[i],state->bndl.ptr.p_double[i]) ) 11510 x->ptr.p_double[i] = state->bndl.ptr.p_double[i]; 11512 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(x->ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 11514 x->ptr.p_double[i] = state->bndu.ptr.p_double[i]; 11520 
/************************************************************************* 11521 This subroutine returns L1 penalty for violation of active general linear 11522 constraints (violation of boundary or inactive linear constraints is not 11525 Penalty term is equal to: 11527 Penalty = SUM( Abs((C_i*x-R_i)/Alpha_i) ) 11530 * summation is performed for I=0...NEC+NIC-1, ActiveSet[N+I]>0 11531 (only for rows of CLEIC which are in active set) 11532 * C_i is I-th row of CLEIC 11533 * R_i is corresponding right part 11534 * S is a scale matrix 11535 * Alpha_i = ||S*C_i|| - is a scaling coefficient which "normalizes
" 11536 I-th summation term according to its scale. 11539 S - active set object 11540 X - array[N], candidate point 11543 Copyright 21.12.2012 by Bochkanov Sergey 11544 *************************************************************************/ 11545 double sasactivelcpenalty1(sactiveset* state, 11546 /* Real */ ae_vector* x, 11560 ae_assert(state->algostate==1, "SASActiveLCPenalty1: is not
in optimization
mode", _state); 11561 sasrebuildbasis(state, _state); 11567 * Calculate penalty term. 11570 for(i=0; i<=nec+nic-1; i++) 11572 if( state->activeset.ptr.p_int[n+i]>0 ) 11575 p = -state->cleic.ptr.pp_double[i][n]; 11576 for(j=0; j<=n-1; j++) 11578 v = state->cleic.ptr.pp_double[i][j]; 11579 p = p+v*x->ptr.p_double[j]; 11580 alpha = alpha+ae_sqr(v*state->s.ptr.p_double[j], _state); 11582 alpha = ae_sqrt(alpha, _state); 11583 if( ae_fp_neq(alpha,0) ) 11585 result = result+ae_fabs(p/alpha, _state); 11593 /************************************************************************* 11594 This subroutine calculates scaled norm of vector after projection onto 11595 subspace of active constraints. Most often this function is used to test 11596 stopping conditions. 11599 S - active set object 11600 D - vector whose norm is calculated 11603 Vector norm (after projection and scaling) 11605 NOTE: projection is performed first, scaling is performed after projection 11608 Copyright 21.12.2012 by Bochkanov Sergey 11609 *************************************************************************/ 11610 double sasscaledconstrainednorm(sactiveset* state, 11611 /* Real */ ae_vector* d, 11620 ae_assert(state->algostate==1, "SASMoveTo: is not
in optimization
mode", _state); 11622 rvectorsetlengthatleast(&state->scntmp, n, _state); 11625 * Prepare basis (if needed) 11627 sasrebuildbasis(state, _state); 11630 * Calculate descent direction 11632 for(i=0; i<=n-1; i++) 11634 if( state->activeset.ptr.p_int[i]>0 ) 11636 state->scntmp.ptr.p_double[i] = 0; 11640 state->scntmp.ptr.p_double[i] = d->ptr.p_double[i]; 11643 for(i=0; i<=state->basissize-1; i++) 11645 v = ae_v_dotproduct(&state->ibasis.ptr.pp_double[i][0], 1, &state->scntmp.ptr.p_double[0], 1, ae_v_len(0,n-1)); 11646 ae_v_subd(&state->scntmp.ptr.p_double[0], 1, &state->ibasis.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); 11649 for(i=0; i<=n-1; i++) 11651 v = v+ae_sqr(state->s.ptr.p_double[i]*state->scntmp.ptr.p_double[i], _state); 11653 result = ae_sqrt(v, _state); 11658 /************************************************************************* 11659 This subroutine turns off optimization mode. 11662 S - active set object 11665 S - state is changed 11667 NOTE: this function can be called many times for optimizer which was 11671 Copyright 21.12.2012 by Bochkanov Sergey 11672 *************************************************************************/ 11673 void sasstopoptimization(sactiveset* state, ae_state *_state) 11677 state->algostate = 0; 11681 /************************************************************************* 11682 This function recalculates constraints - activates and deactivates them 11683 according to gradient value at current point. Algorithm assumes that we 11684 want to make steepest descent step from current point; constraints are 11685 activated and deactivated in such way that we won't violate any constraint 11686 by steepest descent step. 11688 After call to this function active set is ready to try steepest descent 11689 step (SASDescentDirection-SASExploreDirection-SASMoveTo). 11691 Only already "active
" and "candidate
" elements of ActiveSet are examined; 11692 constraints which are not active are not examined. 11695 State - active set object 11696 GC - array[N], gradient at XC 11699 State - active set object, with new set of constraint 11702 Copyright 26.09.2012 by Bochkanov Sergey 11703 *************************************************************************/ 11704 void sasreactivateconstraints(sactiveset* state, 11705 /* Real */ ae_vector* gc, 11710 ae_assert(state->algostate==1, "SASReactivateConstraints: must be
in optimization
mode", _state); 11711 sactivesets_reactivateconstraints(state, gc, &state->unitdiagonal, _state); 11715 /************************************************************************* 11716 This function recalculates constraints - activates and deactivates them 11717 according to gradient value at current point. 11719 Algorithm assumes that we want to make Quasi-Newton step from current 11720 point with diagonal Quasi-Newton matrix H. Constraints are activated and 11721 deactivated in such way that we won't violate any constraint by step. 11723 After call to this function active set is ready to try preconditioned 11724 steepest descent step (SASDescentDirection-SASExploreDirection-SASMoveTo). 11726 Only already "active
" and "candidate
" elements of ActiveSet are examined; 11727 constraints which are not active are not examined. 11730 State - active set object 11731 GC - array[N], gradient at XC 11734 State - active set object, with new set of constraint 11737 Copyright 26.09.2012 by Bochkanov Sergey 11738 *************************************************************************/ 11739 void sasreactivateconstraintsprec(sactiveset* state, 11740 /* Real */ ae_vector* gc, 11745 ae_assert(state->algostate==1, "SASReactivateConstraintsPrec: must be
in optimization
mode", _state); 11746 sactivesets_reactivateconstraints(state, gc, &state->h, _state); 11750 /************************************************************************* 11751 This function builds three orthonormal basises for current active set: 11752 * P-orthogonal one, which is orthogonalized with inner product 11753 (x,y) = x'*P*y, where P=inv(H) is current preconditioner 11754 * S-orthogonal one, which is orthogonalized with inner product 11755 (x,y) = x'*S'*S*y, where S is diagonal scaling matrix 11756 * I-orthogonal one, which is orthogonalized with standard dot product 11758 NOTE: all sets of orthogonal vectors are guaranteed to have same size. 11759 P-orthogonal basis is built first, I/S-orthogonal basises are forced 11760 to have same number of vectors as P-orthogonal one (padded by zero 11761 vectors if needed). 11763 NOTE: this function tracks changes in active set; first call will result 11764 in reorthogonalization 11767 State - active set object 11768 H - diagonal preconditioner, H[i]>0 11771 State - active set object with new basis 11774 Copyright 20.06.2012 by Bochkanov Sergey 11775 *************************************************************************/ 11776 void sasrebuildbasis(sactiveset* state, ae_state *_state) 11784 ae_int_t nactivelin; 11785 ae_int_t nactivebnd; 11791 if( state->basisisready ) 11798 rmatrixsetlengthatleast(&state->tmpbasis, nec+nic, n+1, _state); 11799 state->basissize = 0; 11800 state->basisisready = ae_true; 11803 * Determine number of active boundary and non-boundary 11804 * constraints, move them to TmpBasis. Quick exit if no 11805 * non-boundary constraints were detected. 
11809 for(i=0; i<=nec+nic-1; i++) 11811 if( state->activeset.ptr.p_int[n+i]>0 ) 11813 nactivelin = nactivelin+1; 11816 for(j=0; j<=n-1; j++) 11818 if( state->activeset.ptr.p_int[j]>0 ) 11820 nactivebnd = nactivebnd+1; 11823 if( nactivelin==0 ) 11829 * Orthogonalize linear constraints (inner product is given by preconditioner) 11830 * with respect to each other and boundary ones: 11831 * * normalize all constraints 11832 * * orthogonalize with respect to boundary ones 11834 * * if basisSize+nactivebnd=n - TERMINATE 11835 * * choose largest row from TmpBasis 11836 * * if row norm is too small - TERMINATE 11837 * * add row to basis, normalize 11838 * * remove from TmpBasis, orthogonalize other constraints with respect to this one 11841 for(i=0; i<=nec+nic-1; i++) 11843 if( state->activeset.ptr.p_int[n+i]>0 ) 11845 ae_v_move(&state->tmpbasis.ptr.pp_double[nactivelin][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n)); 11846 nactivelin = nactivelin+1; 11849 for(i=0; i<=nactivelin-1; i++) 11852 for(j=0; j<=n-1; j++) 11854 v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j], _state)/state->h.ptr.p_double[j]; 11856 if( ae_fp_greater(v,0) ) 11858 v = 1/ae_sqrt(v, _state); 11859 for(j=0; j<=n; j++) 11861 state->tmpbasis.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]*v; 11865 for(j=0; j<=n-1; j++) 11867 if( state->activeset.ptr.p_int[j]>0 ) 11869 for(i=0; i<=nactivelin-1; i++) 11871 state->tmpbasis.ptr.pp_double[i][n] = state->tmpbasis.ptr.pp_double[i][n]-state->tmpbasis.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; 11872 state->tmpbasis.ptr.pp_double[i][j] = 0.0; 11876 while(state->basissize+nactivebnd<n) 11880 * Find largest vector, add to basis 11884 for(i=0; i<=nactivelin-1; i++) 11887 for(j=0; j<=n-1; j++) 11889 v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j], _state)/state->h.ptr.p_double[j]; 11891 v = ae_sqrt(v, _state); 11892 if( ae_fp_greater(v,vmax) ) 11898 if( ae_fp_less(vmax,1.0E4*ae_machineepsilon) ) 11903 
ae_v_moved(&state->pbasis.ptr.pp_double[state->basissize][0], 1, &state->tmpbasis.ptr.pp_double[kmax][0], 1, ae_v_len(0,n), v); 11904 state->basissize = state->basissize+1; 11907 * Reorthogonalize other vectors with respect to chosen one. 11908 * Remove it from the array. 11910 for(i=0; i<=nactivelin-1; i++) 11915 for(j=0; j<=n-1; j++) 11917 v = v+state->pbasis.ptr.pp_double[state->basissize-1][j]*state->tmpbasis.ptr.pp_double[i][j]/state->h.ptr.p_double[j]; 11919 ae_v_subd(&state->tmpbasis.ptr.pp_double[i][0], 1, &state->pbasis.ptr.pp_double[state->basissize-1][0], 1, ae_v_len(0,n), v); 11922 for(j=0; j<=n; j++) 11924 state->tmpbasis.ptr.pp_double[kmax][j] = 0; 11929 * Orthogonalize linear constraints using traditional dot product 11930 * with respect to each other and boundary ones. 11932 * NOTE: we force basis size to be equal to one which was computed 11933 * at the previous step, with preconditioner-based inner product. 11936 for(i=0; i<=nec+nic-1; i++) 11938 if( state->activeset.ptr.p_int[n+i]>0 ) 11940 ae_v_move(&state->tmpbasis.ptr.pp_double[nactivelin][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n)); 11941 nactivelin = nactivelin+1; 11944 for(i=0; i<=nactivelin-1; i++) 11947 for(j=0; j<=n-1; j++) 11949 v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j], _state); 11951 if( ae_fp_greater(v,0) ) 11953 v = 1/ae_sqrt(v, _state); 11954 for(j=0; j<=n; j++) 11956 state->tmpbasis.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]*v; 11960 for(j=0; j<=n-1; j++) 11962 if( state->activeset.ptr.p_int[j]>0 ) 11964 for(i=0; i<=nactivelin-1; i++) 11966 state->tmpbasis.ptr.pp_double[i][n] = state->tmpbasis.ptr.pp_double[i][n]-state->tmpbasis.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; 11967 state->tmpbasis.ptr.pp_double[i][j] = 0.0; 11971 for(t=0; t<=state->basissize-1; t++) 11975 * Find largest vector, add to basis. 
11979 for(i=0; i<=nactivelin-1; i++) 11982 for(j=0; j<=n-1; j++) 11984 v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j], _state); 11986 v = ae_sqrt(v, _state); 11987 if( ae_fp_greater(v,vmax) ) 11993 if( ae_fp_eq(vmax,0) ) 11995 for(j=0; j<=n; j++) 11997 state->ibasis.ptr.pp_double[t][j] = 0.0; 12002 ae_v_moved(&state->ibasis.ptr.pp_double[t][0], 1, &state->tmpbasis.ptr.pp_double[kmax][0], 1, ae_v_len(0,n), v); 12005 * Reorthogonalize other vectors with respect to chosen one. 12006 * Remove it from the array. 12008 for(i=0; i<=nactivelin-1; i++) 12013 for(j=0; j<=n-1; j++) 12015 v = v+state->ibasis.ptr.pp_double[t][j]*state->tmpbasis.ptr.pp_double[i][j]; 12017 ae_v_subd(&state->tmpbasis.ptr.pp_double[i][0], 1, &state->ibasis.ptr.pp_double[t][0], 1, ae_v_len(0,n), v); 12020 for(j=0; j<=n; j++) 12022 state->tmpbasis.ptr.pp_double[kmax][j] = 0; 12027 * Orthogonalize linear constraints using inner product given by 12030 * NOTE: we force basis size to be equal to one which was computed 12031 * with preconditioner-based inner product. 12034 for(i=0; i<=nec+nic-1; i++) 12036 if( state->activeset.ptr.p_int[n+i]>0 ) 12038 ae_v_move(&state->tmpbasis.ptr.pp_double[nactivelin][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n)); 12039 nactivelin = nactivelin+1; 12042 for(i=0; i<=nactivelin-1; i++) 12045 for(j=0; j<=n-1; j++) 12047 v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j]*state->s.ptr.p_double[j], _state); 12049 if( ae_fp_greater(v,0) ) 12051 v = 1/ae_sqrt(v, _state); 12052 for(j=0; j<=n; j++) 12054 state->tmpbasis.ptr.pp_double[i][j] = state->tmpbasis.ptr.pp_double[i][j]*v; 12058 for(j=0; j<=n-1; j++) 12060 if( state->activeset.ptr.p_int[j]>0 ) 12062 for(i=0; i<=nactivelin-1; i++) 12064 state->tmpbasis.ptr.pp_double[i][n] = state->tmpbasis.ptr.pp_double[i][n]-state->tmpbasis.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; 12065 state->tmpbasis.ptr.pp_double[i][j] = 0.0; 12069 for(t=0; t<=state->basissize-1; t++) 12073 * Find largest vector, add to basis. 
12077 for(i=0; i<=nactivelin-1; i++) 12080 for(j=0; j<=n-1; j++) 12082 v = v+ae_sqr(state->tmpbasis.ptr.pp_double[i][j]*state->s.ptr.p_double[j], _state); 12084 v = ae_sqrt(v, _state); 12085 if( ae_fp_greater(v,vmax) ) 12091 if( ae_fp_eq(vmax,0) ) 12093 for(j=0; j<=n; j++) 12095 state->sbasis.ptr.pp_double[t][j] = 0.0; 12100 ae_v_moved(&state->sbasis.ptr.pp_double[t][0], 1, &state->tmpbasis.ptr.pp_double[kmax][0], 1, ae_v_len(0,n), v); 12103 * Reorthogonalize other vectors with respect to chosen one. 12104 * Remove it from the array. 12106 for(i=0; i<=nactivelin-1; i++) 12111 for(j=0; j<=n-1; j++) 12113 v = v+state->sbasis.ptr.pp_double[t][j]*state->tmpbasis.ptr.pp_double[i][j]*ae_sqr(state->s.ptr.p_double[j], _state); 12115 ae_v_subd(&state->tmpbasis.ptr.pp_double[i][0], 1, &state->sbasis.ptr.pp_double[t][0], 1, ae_v_len(0,n), v); 12118 for(j=0; j<=n; j++) 12120 state->tmpbasis.ptr.pp_double[kmax][j] = 0; 12126 /************************************************************************* 12127 This subroutine calculates preconditioned descent direction subject to 12128 current active set. 12131 State - active set object 12132 G - array[N], gradient 12133 H - array[N], Hessian matrix 12134 HA - active constraints orthogonalized in such way 12135 that HA*inv(H)*HA'= I. 12136 Normalize- whether we need normalized descent or not 12137 D - possibly preallocated buffer; automatically resized. 12140 D - descent direction projected onto current active set. 12141 Components of D which correspond to active boundary 12142 constraints are forced to be exactly zero. 12143 In case D is non-zero and Normalize is True, it is 12144 normalized to have unit norm. 
12147 Copyright 21.12.2012 by Bochkanov Sergey 12148 *************************************************************************/ 12149 static void sactivesets_constraineddescent(sactiveset* state, 12150 /* Real */ ae_vector* g, 12151 /* Real */ ae_vector* h, 12152 /* Real */ ae_matrix* ha, 12154 /* Real */ ae_vector* d, 12164 ae_assert(state->algostate==1, "SAS:
internal error in ConstrainedDescent() - not
in optimization
mode", _state); 12165 ae_assert(state->basisisready, "SAS:
internal error in ConstrainedDescent() - no basis
", _state); 12167 rvectorsetlengthatleast(d, n, _state); 12170 * Calculate preconditioned constrained descent direction: 12172 * d := -inv(H)*( g - HA'*(HA*inv(H)*g) ) 12174 * Formula above always gives direction which is orthogonal to rows of HA. 12175 * You can verify it by multiplication of both sides by HA[i] (I-th row), 12176 * taking into account that HA*inv(H)*HA'= I (by definition of HA - it is 12177 * orthogonal basis with inner product given by inv(H)). 12180 for(i=0; i<=n-1; i++) 12182 if( state->activeset.ptr.p_int[i]>0 ) 12184 d->ptr.p_double[i] = 0; 12185 nactive = nactive+1; 12189 d->ptr.p_double[i] = g->ptr.p_double[i]; 12192 for(i=0; i<=state->basissize-1; i++) 12195 for(j=0; j<=n-1; j++) 12197 v = v+ha->ptr.pp_double[i][j]*d->ptr.p_double[j]/h->ptr.p_double[j]; 12199 ae_v_subd(&d->ptr.p_double[0], 1, &ha->ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); 12200 nactive = nactive+1; 12203 for(i=0; i<=n-1; i++) 12205 if( state->activeset.ptr.p_int[i]>0 ) 12207 d->ptr.p_double[i] = 0; 12211 d->ptr.p_double[i] = -d->ptr.p_double[i]/h->ptr.p_double[i]; 12212 v = v+ae_sqr(d->ptr.p_double[i], _state); 12215 v = ae_sqrt(v, _state); 12219 for(i=0; i<=n-1; i++) 12221 d->ptr.p_double[i] = 0; 12224 if( normalize&&ae_fp_greater(v,0) ) 12226 for(i=0; i<=n-1; i++) 12228 d->ptr.p_double[i] = d->ptr.p_double[i]/v; 12234 /************************************************************************* 12235 This function recalculates constraints - activates and deactivates them 12236 according to gradient value at current point. 12238 Algorithm assumes that we want to make Quasi-Newton step from current 12239 point with diagonal Quasi-Newton matrix H. Constraints are activated and 12240 deactivated in such way that we won't violate any constraint by step. 12242 Only already "active
" and "candidate
" elements of ActiveSet are examined; 12243 constraints which are not active are not examined. 12246 State - active set object 12247 GC - array[N], gradient at XC 12248 H - array[N], Hessian matrix 12251 State - active set object, with new set of constraint 12254 Copyright 26.09.2012 by Bochkanov Sergey 12255 *************************************************************************/ 12256 static void sactivesets_reactivateconstraints(sactiveset* state, 12257 /* Real */ ae_vector* gc, 12258 /* Real */ ae_vector* h, 12269 ae_int_t nactivebnd; 12270 ae_int_t nactivelin; 12271 ae_int_t nactiveconstraints; 12275 ae_assert(state->algostate==1, "SASReactivateConstraintsPrec: must be
in optimization
mode", _state); 12283 state->basisisready = ae_false; 12286 * Handle important special case - no linear constraints, 12287 * only boundary constraints are present 12291 for(i=0; i<=n-1; i++) 12293 if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 12295 state->activeset.ptr.p_int[i] = 1; 12298 if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_greater_eq(gc->ptr.p_double[i],0) ) 12300 state->activeset.ptr.p_int[i] = 1; 12303 if( (state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_less_eq(gc->ptr.p_double[i],0) ) 12305 state->activeset.ptr.p_int[i] = 1; 12308 state->activeset.ptr.p_int[i] = -1; 12315 * Allocate temporaries. 12317 rvectorsetlengthatleast(&state->rctmpg, n, _state); 12318 rvectorsetlengthatleast(&state->rctmprightpart, n, _state); 12319 rvectorsetlengthatleast(&state->rctmps, n, _state); 12320 rmatrixsetlengthatleast(&state->rctmpdense0, n, nec+nic, _state); 12321 rmatrixsetlengthatleast(&state->rctmpdense1, n, nec+nic, _state); 12322 bvectorsetlengthatleast(&state->rctmpisequality, n+nec+nic, _state); 12323 ivectorsetlengthatleast(&state->rctmpconstraintidx, n+nec+nic, _state); 12326 * Calculate descent direction 12328 ae_v_moveneg(&state->rctmpg.ptr.p_double[0], 1, &gc->ptr.p_double[0], 1, ae_v_len(0,n-1)); 12331 * Determine candidates to the active set. 12333 * After this block constraints become either "inactive
" (ActiveSet[i]<0) 12334 * or "candidates
" (ActiveSet[i]=0). Previously active constraints always 12335 * become "candidates
". 12337 for(i=0; i<=n+nec+nic-1; i++) 12339 if( state->activeset.ptr.p_int[i]>0 ) 12341 state->activeset.ptr.p_int[i] = 0; 12345 state->activeset.ptr.p_int[i] = -1; 12348 nactiveconstraints = 0; 12351 for(i=0; i<=n-1; i++) 12355 * Activate boundary constraints: 12356 * * copy constraint index to RCTmpConstraintIdx 12357 * * set corresponding element of ActiveSet[] to "candidate
" 12358 * * fill RCTmpS by either +1 (lower bound) or -1 (upper bound) 12359 * * set RCTmpIsEquality to False (BndL<BndU) or True (BndL=BndU) 12360 * * increase counters 12362 if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 12366 * Equality constraint is activated 12368 state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = i; 12369 state->activeset.ptr.p_int[i] = 0; 12370 state->rctmps.ptr.p_double[i] = 1.0; 12371 state->rctmpisequality.ptr.p_bool[nactiveconstraints] = ae_true; 12372 nactiveconstraints = nactiveconstraints+1; 12373 nactivebnd = nactivebnd+1; 12376 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) 12380 * Lower bound is activated 12382 state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = i; 12383 state->activeset.ptr.p_int[i] = 0; 12384 state->rctmps.ptr.p_double[i] = -1.0; 12385 state->rctmpisequality.ptr.p_bool[nactiveconstraints] = ae_false; 12386 nactiveconstraints = nactiveconstraints+1; 12387 nactivebnd = nactivebnd+1; 12390 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 12394 * Upper bound is activated 12396 state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = i; 12397 state->activeset.ptr.p_int[i] = 0; 12398 state->rctmps.ptr.p_double[i] = 1.0; 12399 state->rctmpisequality.ptr.p_bool[nactiveconstraints] = ae_false; 12400 nactiveconstraints = nactiveconstraints+1; 12401 nactivebnd = nactivebnd+1; 12405 for(i=0; i<=nec+nic-1; i++) 12411 * Inequality constraints are skipped if we too far away from 12415 v = -state->cleic.ptr.pp_double[i][n]; 12416 for(j=0; j<=n-1; j++) 12418 v = v+state->cleic.ptr.pp_double[i][j]*state->xc.ptr.p_double[j]; 12419 rowscale = ae_maxreal(rowscale, ae_fabs(state->cleic.ptr.pp_double[i][j]*state->s.ptr.p_double[j], _state), _state); 12421 if( ae_fp_less_eq(v,-1.0E5*ae_machineepsilon*rowscale) ) 12425 * NOTE: it is 
important to check for non-strict inequality 12426 * because we have to correctly handle zero constraint 12432 ae_v_move(&state->rctmpdense0.ptr.pp_double[0][nactivelin], state->rctmpdense0.stride, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); 12433 state->rctmpconstraintidx.ptr.p_int[nactiveconstraints] = n+i; 12434 state->activeset.ptr.p_int[n+i] = 0; 12435 state->rctmpisequality.ptr.p_bool[nactiveconstraints] = i<nec; 12436 nactiveconstraints = nactiveconstraints+1; 12437 nactivelin = nactivelin+1; 12441 * Skip if no "candidate
" constraints was found 12443 if( nactiveconstraints==0 ) 12445 for(i=0; i<=n-1; i++) 12447 if( (state->hasbndl.ptr.p_bool[i]&&state->hasbndu.ptr.p_bool[i])&&ae_fp_eq(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 12449 state->activeset.ptr.p_int[i] = 1; 12452 if( (state->hasbndl.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndl.ptr.p_double[i]))&&ae_fp_greater_eq(gc->ptr.p_double[i],0) ) 12454 state->activeset.ptr.p_int[i] = 1; 12457 if( (state->hasbndu.ptr.p_bool[i]&&ae_fp_eq(state->xc.ptr.p_double[i],state->bndu.ptr.p_double[i]))&&ae_fp_less_eq(gc->ptr.p_double[i],0) ) 12459 state->activeset.ptr.p_int[i] = 1; 12469 * APPROACH TO CONSTRAINTS ACTIVATION/DEACTIVATION 12471 * We have NActiveConstraints "candidates
": NActiveBnd boundary candidates, 12472 * NActiveLin linear candidates. Indexes of boundary constraints are stored 12473 * in RCTmpConstraintIdx[0:NActiveBnd-1], indexes of linear ones are stored 12474 * in RCTmpConstraintIdx[NActiveBnd:NActiveBnd+NActiveLin-1]. Some of the 12475 * constraints are equality ones, some are inequality - as specified by 12476 * RCTmpIsEquality[i]. 12478 * Now we have to determine active subset of "candidates
" set. In order to 12479 * do so we solve following constrained minimization problem: 12481 * min ( SUM(lambda[i]*A[i]) + G ) 12484 * * G is a gradient (column vector) 12485 * * A[i] is a column vector, linear (left) part of I-th constraint. 12486 * I=0..NActiveConstraints-1, first NActiveBnd elements of A are just 12487 * subset of identity matrix (boundary constraints), next NActiveLin 12488 * elements are subset of rows of the matrix of general linear constraints. 12489 * * lambda[i] is a Lagrange multiplier corresponding to I-th constraint 12491 * NOTE: for preconditioned setting A is replaced by A*H^(-0.5), G is 12492 * replaced by G*H^(-0.5). We apply this scaling at the last stage, 12493 * before passing data to NNLS solver. 12495 * Minimization is performed subject to non-negativity constraints on 12496 * lambda[i] corresponding to inequality constraints. Inequality constraints 12497 * which correspond to non-zero lambda are activated, equality constraints 12498 * are always considered active. 12500 * Informally speaking, we "decompose
" descent direction -G and represent 12501 * it as sum of constraint vectors and "residual
" part (which is equal to 12502 * the actual descent direction subject to constraints). 12504 * SOLUTION OF THE NNLS PROBLEM 12506 * We solve this optimization problem with Non-Negative Least Squares solver, 12507 * which can efficiently solve least squares problems of the form 12510 * min ( [ | ]*x-b ) s.t. non-negativity constraints on some x[i] 12513 * In order to use this solver we have to rearrange rows of A[] and G in 12514 * such way that first NActiveBnd columns of A store identity matrix (before 12515 * sorting non-zero elements are randomly distributed in the first NActiveBnd 12516 * columns of A, during sorting we move them to first NActiveBnd rows). 12518 * Then we create instance of NNLS solver (we reuse instance left from the 12519 * previous run of the optimization problem) and solve NNLS problem. 12523 for(i=0; i<=n-1; i++) 12525 if( state->activeset.ptr.p_int[i]>=0 ) 12527 v = 1/ae_sqrt(h->ptr.p_double[i], _state); 12528 for(j=0; j<=nactivelin-1; j++) 12530 state->rctmpdense1.ptr.pp_double[idx0][j] = state->rctmpdense0.ptr.pp_double[i][j]/state->rctmps.ptr.p_double[i]*v; 12532 state->rctmprightpart.ptr.p_double[idx0] = state->rctmpg.ptr.p_double[i]/state->rctmps.ptr.p_double[i]*v; 12537 v = 1/ae_sqrt(h->ptr.p_double[i], _state); 12538 for(j=0; j<=nactivelin-1; j++) 12540 state->rctmpdense1.ptr.pp_double[idx1][j] = state->rctmpdense0.ptr.pp_double[i][j]*v; 12542 state->rctmprightpart.ptr.p_double[idx1] = state->rctmpg.ptr.p_double[i]*v; 12546 snnlsinit(n, nec+nic, n, &state->solver, _state); 12547 snnlssetproblem(&state->solver, &state->rctmpdense1, &state->rctmprightpart, nactivebnd, nactiveconstraints-nactivebnd, n, _state); 12548 for(i=0; i<=nactiveconstraints-1; i++) 12550 if( state->rctmpisequality.ptr.p_bool[i] ) 12552 snnlsdropnnc(&state->solver, i, _state); 12555 snnlssolve(&state->solver, &state->rctmplambdas, _state); 12558 * After solution of the problem we activate equality constraints (always active) 12559 * and inequality constraints 
with non-zero Lagrange multipliers. Then we reorthogonalize 12560 * active constraints. 12562 for(i=0; i<=nactiveconstraints-1; i++) 12564 if( state->rctmpisequality.ptr.p_bool[i]||ae_fp_greater(state->rctmplambdas.ptr.p_double[i],0) ) 12566 state->activeset.ptr.p_int[state->rctmpconstraintidx.ptr.p_int[i]] = 1; 12570 state->activeset.ptr.p_int[state->rctmpconstraintidx.ptr.p_int[i]] = 0; 12573 sasrebuildbasis(state, _state); 12577 ae_bool _sactiveset_init(void* _p, ae_state *_state, ae_bool make_automatic) 12579 sactiveset *p = (sactiveset*)_p; 12580 ae_touch_ptr((void*)p); 12581 if( !ae_vector_init(&p->xc, 0, DT_REAL, _state, make_automatic) ) 12583 if( !ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic) ) 12585 if( !ae_vector_init(&p->h, 0, DT_REAL, _state, make_automatic) ) 12587 if( !ae_vector_init(&p->activeset, 0, DT_INT, _state, make_automatic) ) 12589 if( !ae_matrix_init(&p->sbasis, 0, 0, DT_REAL, _state, make_automatic) ) 12591 if( !ae_matrix_init(&p->pbasis, 0, 0, DT_REAL, _state, make_automatic) ) 12593 if( !ae_matrix_init(&p->ibasis, 0, 0, DT_REAL, _state, make_automatic) ) 12595 if( !ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic) ) 12597 if( !ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic) ) 12599 if( !ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic) ) 12601 if( !ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic) ) 12603 if( !ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic) ) 12605 if( !ae_vector_init(&p->mtx, 0, DT_REAL, _state, make_automatic) ) 12607 if( !ae_vector_init(&p->mtas, 0, DT_INT, _state, make_automatic) ) 12609 if( !ae_vector_init(&p->cdtmp, 0, DT_REAL, _state, make_automatic) ) 12611 if( !ae_vector_init(&p->corrtmp, 0, DT_REAL, _state, make_automatic) ) 12613 if( !ae_vector_init(&p->unitdiagonal, 0, DT_REAL, _state, make_automatic) ) 12615 if( !_snnlssolver_init(&p->solver, _state, make_automatic) ) 12617 if( !ae_vector_init(&p->scntmp, 0, DT_REAL, _state, 
make_automatic) ) 12619 if( !ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic) ) 12621 if( !ae_vector_init(&p->tmpfeas, 0, DT_REAL, _state, make_automatic) ) 12623 if( !ae_matrix_init(&p->tmpm0, 0, 0, DT_REAL, _state, make_automatic) ) 12625 if( !ae_vector_init(&p->rctmps, 0, DT_REAL, _state, make_automatic) ) 12627 if( !ae_vector_init(&p->rctmpg, 0, DT_REAL, _state, make_automatic) ) 12629 if( !ae_vector_init(&p->rctmprightpart, 0, DT_REAL, _state, make_automatic) ) 12631 if( !ae_matrix_init(&p->rctmpdense0, 0, 0, DT_REAL, _state, make_automatic) ) 12633 if( !ae_matrix_init(&p->rctmpdense1, 0, 0, DT_REAL, _state, make_automatic) ) 12635 if( !ae_vector_init(&p->rctmpisequality, 0, DT_BOOL, _state, make_automatic) ) 12637 if( !ae_vector_init(&p->rctmpconstraintidx, 0, DT_INT, _state, make_automatic) ) 12639 if( !ae_vector_init(&p->rctmplambdas, 0, DT_REAL, _state, make_automatic) ) 12641 if( !ae_matrix_init(&p->tmpbasis, 0, 0, DT_REAL, _state, make_automatic) ) 12647 ae_bool _sactiveset_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 12649 sactiveset *dst = (sactiveset*)_dst; 12650 sactiveset *src = (sactiveset*)_src; 12652 dst->algostate = src->algostate; 12653 if( !ae_vector_init_copy(&dst->xc, &src->xc, _state, make_automatic) ) 12655 dst->hasxc = src->hasxc; 12656 if( !ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic) ) 12658 if( !ae_vector_init_copy(&dst->h, &src->h, _state, make_automatic) ) 12660 if( !ae_vector_init_copy(&dst->activeset, &src->activeset, _state, make_automatic) ) 12662 dst->basisisready = src->basisisready; 12663 if( !ae_matrix_init_copy(&dst->sbasis, &src->sbasis, _state, make_automatic) ) 12665 if( !ae_matrix_init_copy(&dst->pbasis, &src->pbasis, _state, make_automatic) ) 12667 if( !ae_matrix_init_copy(&dst->ibasis, &src->ibasis, _state, make_automatic) ) 12669 dst->basissize = src->basissize; 12670 dst->constraintschanged = src->constraintschanged; 12671 if( 
!ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic) ) 12673 if( !ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic) ) 12675 if( !ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic) ) 12677 if( !ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic) ) 12679 if( !ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic) ) 12681 dst->nec = src->nec; 12682 dst->nic = src->nic; 12683 if( !ae_vector_init_copy(&dst->mtx, &src->mtx, _state, make_automatic) ) 12685 if( !ae_vector_init_copy(&dst->mtas, &src->mtas, _state, make_automatic) ) 12687 if( !ae_vector_init_copy(&dst->cdtmp, &src->cdtmp, _state, make_automatic) ) 12689 if( !ae_vector_init_copy(&dst->corrtmp, &src->corrtmp, _state, make_automatic) ) 12691 if( !ae_vector_init_copy(&dst->unitdiagonal, &src->unitdiagonal, _state, make_automatic) ) 12693 if( !_snnlssolver_init_copy(&dst->solver, &src->solver, _state, make_automatic) ) 12695 if( !ae_vector_init_copy(&dst->scntmp, &src->scntmp, _state, make_automatic) ) 12697 if( !ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic) ) 12699 if( !ae_vector_init_copy(&dst->tmpfeas, &src->tmpfeas, _state, make_automatic) ) 12701 if( !ae_matrix_init_copy(&dst->tmpm0, &src->tmpm0, _state, make_automatic) ) 12703 if( !ae_vector_init_copy(&dst->rctmps, &src->rctmps, _state, make_automatic) ) 12705 if( !ae_vector_init_copy(&dst->rctmpg, &src->rctmpg, _state, make_automatic) ) 12707 if( !ae_vector_init_copy(&dst->rctmprightpart, &src->rctmprightpart, _state, make_automatic) ) 12709 if( !ae_matrix_init_copy(&dst->rctmpdense0, &src->rctmpdense0, _state, make_automatic) ) 12711 if( !ae_matrix_init_copy(&dst->rctmpdense1, &src->rctmpdense1, _state, make_automatic) ) 12713 if( !ae_vector_init_copy(&dst->rctmpisequality, &src->rctmpisequality, _state, make_automatic) ) 12715 if( !ae_vector_init_copy(&dst->rctmpconstraintidx, &src->rctmpconstraintidx, _state, make_automatic) ) 12717 if( 
!ae_vector_init_copy(&dst->rctmplambdas, &src->rctmplambdas, _state, make_automatic) ) 12719 if( !ae_matrix_init_copy(&dst->tmpbasis, &src->tmpbasis, _state, make_automatic) ) 12725 void _sactiveset_clear(void* _p) 12727 sactiveset *p = (sactiveset*)_p; 12728 ae_touch_ptr((void*)p); 12729 ae_vector_clear(&p->xc); 12730 ae_vector_clear(&p->s); 12731 ae_vector_clear(&p->h); 12732 ae_vector_clear(&p->activeset); 12733 ae_matrix_clear(&p->sbasis); 12734 ae_matrix_clear(&p->pbasis); 12735 ae_matrix_clear(&p->ibasis); 12736 ae_vector_clear(&p->hasbndl); 12737 ae_vector_clear(&p->hasbndu); 12738 ae_vector_clear(&p->bndl); 12739 ae_vector_clear(&p->bndu); 12740 ae_matrix_clear(&p->cleic); 12741 ae_vector_clear(&p->mtx); 12742 ae_vector_clear(&p->mtas); 12743 ae_vector_clear(&p->cdtmp); 12744 ae_vector_clear(&p->corrtmp); 12745 ae_vector_clear(&p->unitdiagonal); 12746 _snnlssolver_clear(&p->solver); 12747 ae_vector_clear(&p->scntmp); 12748 ae_vector_clear(&p->tmp0); 12749 ae_vector_clear(&p->tmpfeas); 12750 ae_matrix_clear(&p->tmpm0); 12751 ae_vector_clear(&p->rctmps); 12752 ae_vector_clear(&p->rctmpg); 12753 ae_vector_clear(&p->rctmprightpart); 12754 ae_matrix_clear(&p->rctmpdense0); 12755 ae_matrix_clear(&p->rctmpdense1); 12756 ae_vector_clear(&p->rctmpisequality); 12757 ae_vector_clear(&p->rctmpconstraintidx); 12758 ae_vector_clear(&p->rctmplambdas); 12759 ae_matrix_clear(&p->tmpbasis); 12763 void _sactiveset_destroy(void* _p) 12765 sactiveset *p = (sactiveset*)_p; 12766 ae_touch_ptr((void*)p); 12767 ae_vector_destroy(&p->xc); 12768 ae_vector_destroy(&p->s); 12769 ae_vector_destroy(&p->h); 12770 ae_vector_destroy(&p->activeset); 12771 ae_matrix_destroy(&p->sbasis); 12772 ae_matrix_destroy(&p->pbasis); 12773 ae_matrix_destroy(&p->ibasis); 12774 ae_vector_destroy(&p->hasbndl); 12775 ae_vector_destroy(&p->hasbndu); 12776 ae_vector_destroy(&p->bndl); 12777 ae_vector_destroy(&p->bndu); 12778 ae_matrix_destroy(&p->cleic); 12779 ae_vector_destroy(&p->mtx); 12780 
ae_vector_destroy(&p->mtas); 12781 ae_vector_destroy(&p->cdtmp); 12782 ae_vector_destroy(&p->corrtmp); 12783 ae_vector_destroy(&p->unitdiagonal); 12784 _snnlssolver_destroy(&p->solver); 12785 ae_vector_destroy(&p->scntmp); 12786 ae_vector_destroy(&p->tmp0); 12787 ae_vector_destroy(&p->tmpfeas); 12788 ae_matrix_destroy(&p->tmpm0); 12789 ae_vector_destroy(&p->rctmps); 12790 ae_vector_destroy(&p->rctmpg); 12791 ae_vector_destroy(&p->rctmprightpart); 12792 ae_matrix_destroy(&p->rctmpdense0); 12793 ae_matrix_destroy(&p->rctmpdense1); 12794 ae_vector_destroy(&p->rctmpisequality); 12795 ae_vector_destroy(&p->rctmpconstraintidx); 12796 ae_vector_destroy(&p->rctmplambdas); 12797 ae_matrix_destroy(&p->tmpbasis); 12803 /************************************************************************* 12804 NONLINEAR CONJUGATE GRADIENT METHOD 12807 The subroutine minimizes function F(x) of N arguments by using one of the 12808 nonlinear conjugate gradient methods. 12810 These CG methods are globally convergent (even on non-convex functions) as 12811 long as grad(f) is Lipschitz continuous in a some neighborhood of the 12812 L = { x : f(x)<=f(x0) }. 12816 Algorithm will request following information during its operation: 12817 * function value F and its gradient G (simultaneously) at given point X 12821 1. User initializes algorithm state with MinCGCreate() call 12822 2. User tunes solver parameters with MinCGSetCond(), MinCGSetStpMax() and 12824 3. User calls MinCGOptimize() function which takes algorithm state and 12825 pointer (delegate, etc.) to callback function which calculates F/G. 12826 4. User calls MinCGResults() to get solution 12827 5. Optionally, user may call MinCGRestartFrom() to solve another problem 12828 with same N but another starting point and/or another function. 12829 MinCGRestartFrom() allows to reuse already initialized structure. 
12833 N - problem dimension, N>0: 12834 * if given, only leading N elements of X are used 12835 * if not given, automatically determined from size of X 12836 X - starting point, array[0..N-1]. 12839 State - structure which stores algorithm state 12842 Copyright 25.03.2010 by Bochkanov Sergey 12843 *************************************************************************/ 12844 void mincgcreate(ae_int_t n, 12845 /* Real */ ae_vector* x, 12850 _mincgstate_clear(state); 12852 ae_assert(n>=1, "MinCGCreate: N too small!
", _state); 12853 ae_assert(x->cnt>=n, "MinCGCreate: Length(X)<N!
", _state); 12854 ae_assert(isfinitevector(x, n, _state), "MinCGCreate: X contains infinite
or NaN values!
", _state); 12855 mincg_mincginitinternal(n, 0.0, state, _state); 12856 mincgrestartfrom(state, x, _state); 12860 /************************************************************************* 12861 The subroutine is finite difference variant of MinCGCreate(). It uses 12862 finite differences in order to differentiate target function. 12864 Description below contains information which is specific to this function 12865 only. We recommend to read comments on MinCGCreate() in order to get more 12866 information about creation of CG optimizer. 12869 N - problem dimension, N>0: 12870 * if given, only leading N elements of X are used 12871 * if not given, automatically determined from size of X 12872 X - starting point, array[0..N-1]. 12873 DiffStep- differentiation step, >0 12876 State - structure which stores algorithm state 12879 1. algorithm uses 4-point central formula for differentiation. 12880 2. differentiation step along I-th axis is equal to DiffStep*S[I] where 12881 S[] is scaling vector which can be set by MinCGSetScale() call. 12882 3. we recommend you to use moderate values of differentiation step. Too 12883 large step will result in too large truncation errors, while too small 12884 step will result in too large numerical errors. 1.0E-6 can be good 12885 value to start with. 12886 4. Numerical differentiation is very inefficient - one gradient 12887 calculation needs 4*N function evaluations. This function will work for 12888 any N - either small (1...10), moderate (10...100) or large (100...). 12889 However, performance penalty will be too severe for any N's except for 12891 We should also say that code which relies on numerical differentiation 12892 is less robust and precise. L-BFGS needs exact gradient values. 12893 Imprecise gradient may slow down convergence, especially on highly 12894 nonlinear problems. 
12895 Thus we recommend to use this function for fast prototyping on small- 12896 dimensional problems only, and to implement analytical gradient as soon 12900 Copyright 16.05.2011 by Bochkanov Sergey 12901 *************************************************************************/ 12902 void mincgcreatef(ae_int_t n, 12903 /* Real */ ae_vector* x, 12909 _mincgstate_clear(state); 12911 ae_assert(n>=1, "MinCGCreateF: N too small!
", _state); 12912 ae_assert(x->cnt>=n, "MinCGCreateF: Length(X)<N!
", _state); 12913 ae_assert(isfinitevector(x, n, _state), "MinCGCreateF: X contains infinite
or NaN values!
", _state); 12914 ae_assert(ae_isfinite(diffstep, _state), "MinCGCreateF: DiffStep is infinite
or NaN!
", _state); 12915 ae_assert(ae_fp_greater(diffstep,0), "MinCGCreateF: DiffStep is non-positive!
", _state); 12916 mincg_mincginitinternal(n, diffstep, state, _state); 12917 mincgrestartfrom(state, x, _state); 12921 /************************************************************************* 12922 This function sets stopping conditions for CG optimization algorithm. 12925 State - structure which stores algorithm state 12927 The subroutine finishes its work if the condition 12928 |v|<EpsG is satisfied, where: 12929 * |.| means Euclidian norm 12930 * v - scaled gradient vector, v[i]=g[i]*s[i] 12932 * s - scaling coefficients set by MinCGSetScale() 12934 The subroutine finishes its work if on k+1-th iteration 12935 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} 12938 The subroutine finishes its work if on k+1-th iteration 12939 the condition |v|<=EpsX is fulfilled, where: 12940 * |.| means Euclidian norm 12941 * v - scaled step vector, v[i]=dx[i]/s[i] 12942 * dx - ste pvector, dx=X(k+1)-X(k) 12943 * s - scaling coefficients set by MinCGSetScale() 12944 MaxIts - maximum number of iterations. If MaxIts=0, the number of 12945 iterations is unlimited. 12947 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to 12948 automatic stopping criterion selection (small EpsX). 12951 Copyright 02.04.2010 by Bochkanov Sergey 12952 *************************************************************************/ 12953 void mincgsetcond(mincgstate* state, 12962 ae_assert(ae_isfinite(epsg, _state), "MinCGSetCond: EpsG is not finite number!
", _state); 12963 ae_assert(ae_fp_greater_eq(epsg,0), "MinCGSetCond: negative EpsG!
", _state); 12964 ae_assert(ae_isfinite(epsf, _state), "MinCGSetCond: EpsF is not finite number!
", _state); 12965 ae_assert(ae_fp_greater_eq(epsf,0), "MinCGSetCond: negative EpsF!
", _state); 12966 ae_assert(ae_isfinite(epsx, _state), "MinCGSetCond: EpsX is not finite number!
", _state); 12967 ae_assert(ae_fp_greater_eq(epsx,0), "MinCGSetCond: negative EpsX!
", _state); 12968 ae_assert(maxits>=0, "MinCGSetCond: negative MaxIts!
", _state); 12969 if( ((ae_fp_eq(epsg,0)&&ae_fp_eq(epsf,0))&&ae_fp_eq(epsx,0))&&maxits==0 ) 12973 state->epsg = epsg; 12974 state->epsf = epsf; 12975 state->epsx = epsx; 12976 state->maxits = maxits; 12980 /************************************************************************* 12981 This function sets scaling coefficients for CG optimizer. 12983 ALGLIB optimizers use scaling matrices to test stopping conditions (step 12984 size and gradient are scaled before comparison with tolerances). Scale of 12985 the I-th variable is a translation invariant measure of: 12986 a) "how large
" the variable is 12987 b) how large the step should be to make significant changes in the function 12989 Scaling is also used by finite difference variant of CG optimizer - step 12990 along I-th axis is equal to DiffStep*S[I]. 12992 In most optimizers (and in the CG too) scaling is NOT a form of 12993 preconditioning. It just affects stopping conditions. You should set 12994 preconditioner by separate call to one of the MinCGSetPrec...() functions. 12996 There is special preconditioning mode, however, which uses scaling 12997 coefficients to form diagonal preconditioning matrix. You can turn this 12998 mode on, if you want. But you should understand that scaling is not the 12999 same thing as preconditioning - these are two different, although related 13000 forms of tuning solver. 13003 State - structure stores algorithm state 13004 S - array[N], non-zero scaling coefficients 13005 S[i] may be negative, sign doesn't matter. 13008 Copyright 14.01.2011 by Bochkanov Sergey 13009 *************************************************************************/ 13010 void mincgsetscale(mincgstate* state, 13011 /* Real */ ae_vector* s, 13017 ae_assert(s->cnt>=state->n, "MinCGSetScale: Length(S)<N
", _state); 13018 for(i=0; i<=state->n-1; i++) 13020 ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinCGSetScale: S contains infinite
or NAN elements
", _state); 13021 ae_assert(ae_fp_neq(s->ptr.p_double[i],0), "MinCGSetScale: S contains
zero elements
", _state); 13022 state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); 13027 /************************************************************************* 13028 This function turns on/off reporting. 13031 State - structure which stores algorithm state 13032 NeedXRep- whether iteration reports are needed or not 13034 If NeedXRep is True, algorithm will call rep() callback function if it is 13035 provided to MinCGOptimize(). 13038 Copyright 02.04.2010 by Bochkanov Sergey 13039 *************************************************************************/ 13040 void mincgsetxrep(mincgstate* state, ae_bool needxrep, ae_state *_state) 13044 state->xrep = needxrep; 13048 /************************************************************************* 13049 This function turns on/off line search reports. 13050 These reports are described in more details in developer-only comments on 13054 State - structure which stores algorithm state 13055 NeedDRep- whether line search reports are needed or not 13057 This function is intended for private use only. Turning it on artificially 13058 may cause program failure. 13061 Copyright 02.04.2010 by Bochkanov Sergey 13062 *************************************************************************/ 13063 void mincgsetdrep(mincgstate* state, ae_bool needdrep, ae_state *_state) 13067 state->drep = needdrep; 13071 /************************************************************************* 13072 This function sets CG algorithm. 13075 State - structure which stores algorithm state 13076 CGType - algorithm type: 13077 * -1 automatic selection of the best algorithm 13078 * 0 DY (Dai and Yuan) algorithm 13079 * 1 Hybrid DY-HS algorithm 13082 Copyright 02.04.2010 by Bochkanov Sergey 13083 *************************************************************************/ 13084 void mincgsetcgtype(mincgstate* state, ae_int_t cgtype, ae_state *_state) 13088 ae_assert(cgtype>=-1&&cgtype<=1, "MinCGSetCGType: incorrect CGType!
", _state); 13093 state->cgtype = cgtype; 13097 /************************************************************************* 13098 This function sets maximum step length 13101 State - structure which stores algorithm state 13102 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't 13103 want to limit step length. 13105 Use this subroutine when you optimize target function which contains exp() 13106 or other fast growing functions, and optimization algorithm makes too 13107 large steps which leads to overflow. This function allows us to reject 13108 steps that are too large (and therefore expose us to the possible 13109 overflow) without actually calculating function value at the x+stp*d. 13112 Copyright 02.04.2010 by Bochkanov Sergey 13113 *************************************************************************/ 13114 void mincgsetstpmax(mincgstate* state, double stpmax, ae_state *_state) 13118 ae_assert(ae_isfinite(stpmax, _state), "MinCGSetStpMax: StpMax is not finite!
", _state); 13119 ae_assert(ae_fp_greater_eq(stpmax,0), "MinCGSetStpMax: StpMax<0!
", _state); 13120 state->stpmax = stpmax; 13124 /************************************************************************* 13125 This function allows to suggest initial step length to the CG algorithm. 13127 Suggested step length is used as starting point for the line search. It 13128 can be useful when you have badly scaled problem, i.e. when ||grad|| 13129 (which is used as initial estimate for the first step) is many orders of 13130 magnitude different from the desired step. 13132 Line search may fail on such problems without good estimate of initial 13133 step length. Imagine, for example, problem with ||grad||=10^50 and desired 13134 step equal to 0.1 Line search function will use 10^50 as initial step, 13135 then it will decrease step length by 2 (up to 20 attempts) and will get 13136 10^44, which is still too large. 13138 This function allows us to tell than line search should be started from 13139 some moderate step length, like 1.0, so algorithm will be able to detect 13140 desired step length in a several searches. 13142 Default behavior (when no step is suggested) is to use preconditioner, if 13143 it is available, to generate initial estimate of step length. 13145 This function influences only first iteration of algorithm. It should be 13146 called between MinCGCreate/MinCGRestartFrom() call and MinCGOptimize call. 13147 Suggested step is ignored if you have preconditioner. 13150 State - structure used to store algorithm state. 13151 Stp - initial estimate of the step length. 13152 Can be zero (no estimate). 13155 Copyright 30.07.2010 by Bochkanov Sergey 13156 *************************************************************************/ 13157 void mincgsuggeststep(mincgstate* state, double stp, ae_state *_state) 13161 ae_assert(ae_isfinite(stp, _state), "MinCGSuggestStep: Stp is infinite
or NAN
", _state); 13162 ae_assert(ae_fp_greater_eq(stp,0), "MinCGSuggestStep: Stp<0
", _state); 13163 state->suggestedstep = stp; 13167 /************************************************************************* 13168 Modification of the preconditioner: preconditioning is turned off. 13171 State - structure which stores algorithm state 13173 NOTE: you can change preconditioner "on the fly
", during algorithm 13177 Copyright 13.10.2010 by Bochkanov Sergey 13178 *************************************************************************/ 13179 void mincgsetprecdefault(mincgstate* state, ae_state *_state) 13183 state->prectype = 0; 13184 state->innerresetneeded = ae_true; 13188 /************************************************************************* 13189 Modification of the preconditioner: diagonal of approximate Hessian is 13193 State - structure which stores algorithm state 13194 D - diagonal of the approximate Hessian, array[0..N-1], 13195 (if larger, only leading N elements are used). 13197 NOTE: you can change preconditioner "on the fly
", during algorithm 13200 NOTE 2: D[i] should be positive. Exception will be thrown otherwise. 13202 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. 13205 Copyright 13.10.2010 by Bochkanov Sergey 13206 *************************************************************************/ 13207 void mincgsetprecdiag(mincgstate* state, 13208 /* Real */ ae_vector* d, 13214 ae_assert(d->cnt>=state->n, "MinCGSetPrecDiag: D is too
short", _state); 13215 for(i=0; i<=state->n-1; i++) 13217 ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinCGSetPrecDiag: D contains infinite
or NAN elements
", _state); 13218 ae_assert(ae_fp_greater(d->ptr.p_double[i],0), "MinCGSetPrecDiag: D contains non-positive elements
", _state); 13220 mincgsetprecdiagfast(state, d, _state); 13224 /************************************************************************* 13225 Modification of the preconditioner: scale-based diagonal preconditioning. 13227 This preconditioning mode can be useful when you don't have approximate 13228 diagonal of Hessian, but you know that your variables are badly scaled 13229 (for example, one variable is in [1,10], and another in [1000,100000]), 13230 and most part of the ill-conditioning comes from different scales of vars. 13232 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), 13233 can greatly improve convergence. 13235 IMPRTANT: you should set scale of your variables with MinCGSetScale() call 13236 (before or after MinCGSetPrecScale() call). Without knowledge of the scale 13237 of your variables scale-based preconditioner will be just unit matrix. 13240 State - structure which stores algorithm state 13242 NOTE: you can change preconditioner "on the fly
", during algorithm 13246 Copyright 13.10.2010 by Bochkanov Sergey 13247 *************************************************************************/ 13248 void mincgsetprecscale(mincgstate* state, ae_state *_state) 13252 state->prectype = 3; 13253 state->innerresetneeded = ae_true; 13257 /************************************************************************* 13260 1. This function has two different implementations: one which uses exact 13261 (analytical) user-supplied gradient, and one which uses function value 13262 only and numerically differentiates function in order to obtain 13265 Depending on the specific function used to create optimizer object 13266 (either MinCGCreate() for analytical gradient or MinCGCreateF() for 13267 numerical differentiation) you should choose appropriate variant of 13268 MinCGOptimize() - one which accepts function AND gradient or one which 13269 accepts function ONLY. 13271 Be careful to choose variant of MinCGOptimize() which corresponds to 13272 your optimization scheme! Table below lists different combinations of 13273 callback (function/gradient) passed to MinCGOptimize() and specific 13274 function used to create optimizer. 13277 | USER PASSED TO MinCGOptimize() 13278 CREATED WITH | function only | function and gradient 13279 ------------------------------------------------------------ 13280 MinCGCreateF() | work FAIL 13281 MinCGCreate() | FAIL work 13283 Here "FAIL" denotes inappropriate combinations of optimizer creation 13284 function and MinCGOptimize() version. Attemps to use such combination 13285 (for example, to create optimizer with MinCGCreateF() and to pass 13286 gradient information to MinCGOptimize()) will lead to exception being 13287 thrown. Either you did not pass gradient when it WAS needed or you 13288 passed gradient when it was NOT needed. 
13291 Copyright 20.04.2009 by Bochkanov Sergey 13292 *************************************************************************/ 13293 ae_bool mincgiteration(mincgstate* state, ae_state *_state) 13305 * Reverse communication preparations 13306 * I know it looks ugly, but it works the same way 13307 * anywhere from C++ to Python. 13309 * This code initializes locals by: 13310 * * random values determined during code 13311 * generation - on first subroutine call 13312 * * values from previous call - on subsequent calls 13314 if( state->rstate.stage>=0 ) 13316 n = state->rstate.ia.ptr.p_int[0]; 13317 i = state->rstate.ia.ptr.p_int[1]; 13318 betak = state->rstate.ra.ptr.p_double[0]; 13319 v = state->rstate.ra.ptr.p_double[1]; 13320 vv = state->rstate.ra.ptr.p_double[2]; 13330 if( state->rstate.stage==0 ) 13334 if( state->rstate.stage==1 ) 13338 if( state->rstate.stage==2 ) 13342 if( state->rstate.stage==3 ) 13346 if( state->rstate.stage==4 ) 13350 if( state->rstate.stage==5 ) 13354 if( state->rstate.stage==6 ) 13358 if( state->rstate.stage==7 ) 13362 if( state->rstate.stage==8 ) 13366 if( state->rstate.stage==9 ) 13370 if( state->rstate.stage==10 ) 13374 if( state->rstate.stage==11 ) 13378 if( state->rstate.stage==12 ) 13382 if( state->rstate.stage==13 ) 13386 if( state->rstate.stage==14 ) 13390 if( state->rstate.stage==15 ) 13394 if( state->rstate.stage==16 ) 13398 if( state->rstate.stage==17 ) 13402 if( state->rstate.stage==18 ) 13406 if( state->rstate.stage==19 ) 13419 state->repterminationtype = 0; 13420 state->repiterationscount = 0; 13421 state->repvaridx = -1; 13422 state->repnfev = 0; 13423 state->debugrestartscount = 0; 13426 * Check, that transferred derivative value is right 13428 mincg_clearrequestfields(state, _state); 13429 if( !(ae_fp_eq(state->diffstep,0)&&ae_fp_greater(state->teststep,0)) ) 13433 state->needfg = ae_true; 13440 v = state->x.ptr.p_double[i]; 13441 state->x.ptr.p_double[i] = v-state->teststep*state->s.ptr.p_double[i]; 13442 
state->rstate.stage = 0; 13445 state->fm1 = state->f; 13446 state->fp1 = state->g.ptr.p_double[i]; 13447 state->x.ptr.p_double[i] = v+state->teststep*state->s.ptr.p_double[i]; 13448 state->rstate.stage = 1; 13451 state->fm2 = state->f; 13452 state->fp2 = state->g.ptr.p_double[i]; 13453 state->x.ptr.p_double[i] = v; 13454 state->rstate.stage = 2; 13459 * 2*State.TestStep - scale parameter 13460 * width of segment [Xi-TestStep;Xi+TestStep] 13462 if( !derivativecheck(state->fm1, state->fp1, state->fm2, state->fp2, state->f, state->g.ptr.p_double[i], 2*state->teststep, _state) ) 13464 state->repvaridx = i; 13465 state->repterminationtype = -7; 13472 state->needfg = ae_false; 13476 * Preparations continue: 13480 * * powerup algo (it may change preconditioner) 13481 * * apply preconditioner to DK 13482 * * report update of X 13483 * * check stopping conditions for G 13485 ae_v_move(&state->xk.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13486 state->terminationneeded = ae_false; 13487 mincg_clearrequestfields(state, _state); 13488 if( ae_fp_neq(state->diffstep,0) ) 13492 state->needfg = ae_true; 13493 state->rstate.stage = 3; 13496 state->needfg = ae_false; 13499 state->needf = ae_true; 13500 state->rstate.stage = 4; 13503 state->fbase = state->f; 13510 v = state->x.ptr.p_double[i]; 13511 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 13512 state->rstate.stage = 5; 13515 state->fm2 = state->f; 13516 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 13517 state->rstate.stage = 6; 13520 state->fm1 = state->f; 13521 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 13522 state->rstate.stage = 7; 13525 state->fp1 = state->f; 13526 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 13527 state->rstate.stage = 8; 13530 state->fp2 = state->f; 13531 state->x.ptr.p_double[i] = v; 13532 state->g.ptr.p_double[i] = 
(8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 13536 state->f = state->fbase; 13537 state->needf = ae_false; 13545 * Report algorithm powerup (if needed) 13547 mincg_clearrequestfields(state, _state); 13548 state->algpowerup = ae_true; 13549 state->rstate.stage = 9; 13552 state->algpowerup = ae_false; 13554 trimprepare(state->f, &state->trimthreshold, _state); 13555 ae_v_moveneg(&state->dk.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13556 mincg_preconditionedmultiply(state, &state->dk, &state->work0, &state->work1, _state); 13561 mincg_clearrequestfields(state, _state); 13562 state->xupdated = ae_true; 13563 state->rstate.stage = 10; 13566 state->xupdated = ae_false; 13568 if( state->terminationneeded ) 13570 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13571 state->repterminationtype = 8; 13576 for(i=0; i<=n-1; i++) 13578 v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 13580 if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) 13582 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13583 state->repterminationtype = 4; 13587 state->repnfev = 1; 13589 state->fold = state->f; 13592 * Choose initial step. 13593 * Apply preconditioner, if we have something other than default. 
13595 if( state->prectype==2||state->prectype==3 ) 13599 * because we use preconditioner, step length must be equal 13600 * to the norm of DK 13602 v = ae_v_dotproduct(&state->dk.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13603 state->lastgoodstep = ae_sqrt(v, _state); 13609 * No preconditioner is used, we try to use suggested step 13611 if( ae_fp_greater(state->suggestedstep,0) ) 13613 state->lastgoodstep = state->suggestedstep; 13617 state->lastgoodstep = 1.0; 13624 state->rstimer = mincg_rscountdownlen; 13632 * * clear reset flag 13633 * * clear termination flag 13634 * * store G[k] for later calculation of Y[k] 13635 * * prepare starting point and direction and step length for line search 13637 state->innerresetneeded = ae_false; 13638 state->terminationneeded = ae_false; 13639 ae_v_moveneg(&state->yk.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13640 ae_v_move(&state->d.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13641 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13642 state->mcstage = 0; 13644 linminnormalized(&state->d, &state->stp, n, _state); 13645 if( ae_fp_neq(state->lastgoodstep,0) ) 13647 state->stp = state->lastgoodstep; 13649 state->curstpmax = state->stpmax; 13652 * Report beginning of line search (if needed) 13653 * Terminate algorithm, if user request was detected 13659 mincg_clearrequestfields(state, _state); 13660 state->lsstart = ae_true; 13661 state->rstate.stage = 11; 13664 state->lsstart = ae_false; 13666 if( state->terminationneeded ) 13668 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13669 state->repterminationtype = 8; 13675 * Minimization along D 13677 mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->curstpmax, mincg_gtol, &state->mcinfo, &state->nfev, &state->work0, &state->lstate, &state->mcstage, _state); 13679 if( state->mcstage==0 ) 13685 * 
Calculate function/gradient using either 13686 * analytical gradient supplied by user 13687 * or finite difference approximation. 13689 * "Trim
" function in order to handle near-singularity points. 13691 mincg_clearrequestfields(state, _state); 13692 if( ae_fp_neq(state->diffstep,0) ) 13696 state->needfg = ae_true; 13697 state->rstate.stage = 12; 13700 state->needfg = ae_false; 13703 state->needf = ae_true; 13704 state->rstate.stage = 13; 13707 state->fbase = state->f; 13714 v = state->x.ptr.p_double[i]; 13715 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 13716 state->rstate.stage = 14; 13719 state->fm2 = state->f; 13720 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 13721 state->rstate.stage = 15; 13724 state->fm1 = state->f; 13725 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 13726 state->rstate.stage = 16; 13729 state->fp1 = state->f; 13730 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 13731 state->rstate.stage = 17; 13734 state->fp2 = state->f; 13735 state->x.ptr.p_double[i] = v; 13736 state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 13740 state->f = state->fbase; 13741 state->needf = ae_false; 13743 trimfunction(&state->f, &state->g, n, state->trimthreshold, _state); 13746 * Call MCSRCH again 13748 mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->curstpmax, mincg_gtol, &state->mcinfo, &state->nfev, &state->work0, &state->lstate, &state->mcstage, _state); 13753 * * report end of line search 13754 * * store current point to XN 13755 * * report iteration 13756 * * terminate algorithm if user request was detected 13764 * Report end of line search (if needed) 13766 mincg_clearrequestfields(state, _state); 13767 state->lsend = ae_true; 13768 state->rstate.stage = 18; 13771 state->lsend = ae_false; 13773 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13778 mincg_clearrequestfields(state, _state); 13779 state->xupdated = ae_true; 13780 state->rstate.stage = 19; 
13783 state->xupdated = ae_false; 13785 if( state->terminationneeded ) 13787 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13788 state->repterminationtype = 8; 13794 * Line search is finished. 13795 * * calculate BetaK 13798 * * calculate step length: 13799 * * LastScaledStep is ALWAYS calculated because it is used in the stopping criteria 13800 * * LastGoodStep is updated only when MCINFO is equal to 1 (Wolfe conditions hold). 13801 * See below for more explanation. 13803 if( state->mcinfo==1&&!state->innerresetneeded ) 13807 * Standard Wolfe conditions hold 13808 * Calculate Y[K] and D[K]'*Y[K] 13810 ae_v_add(&state->yk.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13811 vv = ae_v_dotproduct(&state->yk.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13814 * Calculate BetaK according to DY formula 13816 v = mincg_preconditionedmultiply2(state, &state->g, &state->g, &state->work0, &state->work1, _state); 13817 state->betady = v/vv; 13820 * Calculate BetaK according to HS formula 13822 v = mincg_preconditionedmultiply2(state, &state->g, &state->yk, &state->work0, &state->work1, _state); 13823 state->betahs = v/vv; 13828 if( state->cgtype==0 ) 13830 betak = state->betady; 13832 if( state->cgtype==1 ) 13834 betak = ae_maxreal(0, ae_minreal(state->betady, state->betahs, _state), _state); 13841 * Something is wrong (may be function is too wild or too flat) 13842 * or we just have to restart algo. 13844 * We'll set BetaK=0, which will restart CG algorithm. 13845 * We can stop later (during normal checks) if stopping conditions are met. 
13848 state->debugrestartscount = state->debugrestartscount+1; 13850 if( state->repiterationscount>0&&state->repiterationscount%(3+n)==0 ) 13854 * clear Beta every N iterations 13858 if( state->mcinfo==1||state->mcinfo==5 ) 13860 state->rstimer = mincg_rscountdownlen; 13864 state->rstimer = state->rstimer-1; 13866 ae_v_moveneg(&state->dn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13867 mincg_preconditionedmultiply(state, &state->dn, &state->work0, &state->work1, _state); 13868 ae_v_addd(&state->dn.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1), betak); 13869 state->lastscaledstep = 0.0; 13870 for(i=0; i<=n-1; i++) 13872 state->lastscaledstep = state->lastscaledstep+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); 13874 state->lastscaledstep = state->stp*ae_sqrt(state->lastscaledstep, _state); 13875 if( state->mcinfo==1 ) 13879 * Step is good (Wolfe conditions hold), update LastGoodStep. 13881 * This check for MCINFO=1 is essential because sometimes in the 13882 * constrained optimization setting we may take very short steps 13883 * (like 1E-15) because we were very close to boundary of the 13884 * feasible area. Such short step does not mean that we've converged 13885 * to the solution - it was so short because we were close to the 13886 * boundary and there was a limit on step length. 13888 * So having such short step is quite normal situation. However, we 13889 * should NOT start next iteration from step whose initial length is 13890 * estimated as 1E-15 because it may lead to the failure of the 13891 * linear minimizer (step is too short, function does not changes, 13892 * line search stagnates). 13894 state->lastgoodstep = 0; 13895 for(i=0; i<=n-1; i++) 13897 state->lastgoodstep = state->lastgoodstep+ae_sqr(state->d.ptr.p_double[i], _state); 13899 state->lastgoodstep = state->stp*ae_sqrt(state->lastgoodstep, _state); 13903 * Update information. 13904 * Check stopping conditions. 
13906 state->repnfev = state->repnfev+state->nfev; 13907 state->repiterationscount = state->repiterationscount+1; 13908 if( state->repiterationscount>=state->maxits&&state->maxits>0 ) 13912 * Too many iterations 13914 state->repterminationtype = 5; 13919 for(i=0; i<=n-1; i++) 13921 v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 13923 if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) 13927 * Gradient is small enough 13929 state->repterminationtype = 4; 13933 if( !state->innerresetneeded ) 13937 * These conditions are checked only when no inner reset was requested by user 13939 if( ae_fp_less_eq(state->fold-state->f,state->epsf*ae_maxreal(ae_fabs(state->fold, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) 13943 * F(k+1)-F(k) is small enough 13945 state->repterminationtype = 1; 13949 if( ae_fp_less_eq(state->lastscaledstep,state->epsx) ) 13953 * X(k+1)-X(k) is small enough 13955 state->repterminationtype = 2; 13960 if( state->rstimer<=0 ) 13964 * Too many subsequent restarts 13966 state->repterminationtype = 7; 13972 * Shift Xk/Dk, update other information 13974 ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13975 ae_v_move(&state->dk.ptr.p_double[0], 1, &state->dn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 13976 state->fold = state->f; 13977 state->k = state->k+1; 13988 state->rstate.ia.ptr.p_int[0] = n; 13989 state->rstate.ia.ptr.p_int[1] = i; 13990 state->rstate.ra.ptr.p_double[0] = betak; 13991 state->rstate.ra.ptr.p_double[1] = v; 13992 state->rstate.ra.ptr.p_double[2] = vv; 13997 /************************************************************************* 13998 Conjugate gradient results 14001 State - algorithm state 14004 X - array[0..N-1], solution 14005 Rep - optimization report: 14006 * Rep.TerminationType completion code: 14007 * -7 gradient verification failed. 14008 See MinCGSetGradientCheck() for more information. 
14009 * 1 relative function improvement is no more than 14011 * 2 relative step is no more than EpsX. 14012 * 4 gradient norm is no more than EpsG 14013 * 5 MaxIts steps was taken 14014 * 7 stopping conditions are too stringent, 14015 further improvement is impossible, 14016 we return best X found so far 14017 * 8 terminated by user 14018 * Rep.IterationsCount contains iterations count 14019 * NFEV contains number of function calculations 14022 Copyright 20.04.2009 by Bochkanov Sergey 14023 *************************************************************************/ 14024 void mincgresults(mincgstate* state, 14025 /* Real */ ae_vector* x, 14030 ae_vector_clear(x); 14031 _mincgreport_clear(rep); 14033 mincgresultsbuf(state, x, rep, _state); 14037 /************************************************************************* 14038 Conjugate gradient results 14040 Buffered implementation of MinCGResults(), which uses pre-allocated buffer 14041 to store X[]. If buffer size is too small, it resizes buffer. It is 14042 intended to be used in the inner cycles of performance critical algorithms 14043 where array reallocation penalty is too large to be ignored. 14046 Copyright 20.04.2009 by Bochkanov Sergey 14047 *************************************************************************/ 14048 void mincgresultsbuf(mincgstate* state, 14049 /* Real */ ae_vector* x, 14055 if( x->cnt<state->n ) 14057 ae_vector_set_length(x, state->n, _state); 14059 ae_v_move(&x->ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 14060 rep->iterationscount = state->repiterationscount; 14061 rep->nfev = state->repnfev; 14062 rep->varidx = state->repvaridx; 14063 rep->terminationtype = state->repterminationtype; 14067 /************************************************************************* 14068 This subroutine restarts CG algorithm from new point. All optimization 14069 parameters are left unchanged. 
14071 This function allows to solve multiple optimization problems (which 14072 must have same number of dimensions) without object reallocation penalty. 14075 State - structure used to store algorithm state. 14076 X - new starting point. 14079 Copyright 30.07.2010 by Bochkanov Sergey 14080 *************************************************************************/ 14081 void mincgrestartfrom(mincgstate* state, 14082 /* Real */ ae_vector* x, 14087 ae_assert(x->cnt>=state->n, "MinCGRestartFrom: Length(X)<N!
", _state); 14088 ae_assert(isfinitevector(x, state->n, _state), "MinCGCreate: X contains infinite
or NaN values!
", _state); 14089 ae_v_move(&state->x.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 14090 mincgsuggeststep(state, 0.0, _state); 14091 ae_vector_set_length(&state->rstate.ia, 1+1, _state); 14092 ae_vector_set_length(&state->rstate.ra, 2+1, _state); 14093 state->rstate.stage = -1; 14094 mincg_clearrequestfields(state, _state); 14098 /************************************************************************* 14099 Faster version of MinCGSetPrecDiag(), for time-critical parts of code, 14100 without safety checks. 14103 Copyright 13.10.2010 by Bochkanov Sergey 14104 *************************************************************************/ 14105 void mincgsetprecdiagfast(mincgstate* state, 14106 /* Real */ ae_vector* d, 14112 rvectorsetlengthatleast(&state->diagh, state->n, _state); 14113 rvectorsetlengthatleast(&state->diaghl2, state->n, _state); 14114 state->prectype = 2; 14116 state->innerresetneeded = ae_true; 14117 for(i=0; i<=state->n-1; i++) 14119 state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; 14120 state->diaghl2.ptr.p_double[i] = 0.0; 14125 /************************************************************************* 14126 This function sets low-rank preconditioner for Hessian matrix H=D+V'*C*V, 14128 * H is a Hessian matrix, which is approximated by D/V/C 14129 * D=D1+D2 is a diagonal matrix, which includes two positive definite terms: 14130 * constant term D1 (is not updated or infrequently updated) 14131 * variable term D2 (can be cheaply updated from iteration to iteration) 14132 * V is a low-rank correction 14133 * C is a diagonal factor of low-rank correction 14135 Preconditioner P is calculated using approximate Woodburry formula: 14136 P = D^(-1) - D^(-1)*V'*(C^(-1)+V*D1^(-1)*V')^(-1)*V*D^(-1) 14137 = D^(-1) - D^(-1)*VC'*VC*D^(-1), 14140 B = (C^(-1)+V*D1^(-1)*V')^(-1) 14142 Note that B is calculated using constant term (D1) only, which allows us 14143 to update D2 without recalculation of B or VC. 
Such preconditioner is 14144 exact when D2 is zero. When D2 is non-zero, it is only approximation, but 14145 very good and cheap one. 14147 This function accepts D1, V, C. 14148 D2 is set to zero by default. 14150 Cost of this update is O(N*VCnt*VCnt), but D2 can be updated in just O(N) 14151 by MinCGSetPrecVarPart. 14154 Copyright 13.10.2010 by Bochkanov Sergey 14155 *************************************************************************/ 14156 void mincgsetpreclowrankfast(mincgstate* state, 14157 /* Real */ ae_vector* d1, 14158 /* Real */ ae_vector* c, 14159 /* Real */ ae_matrix* v, 14163 ae_frame _frame_block; 14171 ae_frame_make(_state, &_frame_block); 14172 ae_matrix_init(&b, 0, 0, DT_REAL, _state, ae_true); 14176 mincgsetprecdiagfast(state, d1, _state); 14177 ae_frame_leave(_state); 14181 ae_matrix_set_length(&b, vcnt, vcnt, _state); 14182 rvectorsetlengthatleast(&state->diagh, n, _state); 14183 rvectorsetlengthatleast(&state->diaghl2, n, _state); 14184 rmatrixsetlengthatleast(&state->vcorr, vcnt, n, _state); 14185 state->prectype = 2; 14186 state->vcnt = vcnt; 14187 state->innerresetneeded = ae_true; 14188 for(i=0; i<=n-1; i++) 14190 state->diagh.ptr.p_double[i] = d1->ptr.p_double[i]; 14191 state->diaghl2.ptr.p_double[i] = 0.0; 14193 for(i=0; i<=vcnt-1; i++) 14195 for(j=i; j<=vcnt-1; j++) 14198 for(k=0; k<=n-1; k++) 14200 t = t+v->ptr.pp_double[i][k]*v->ptr.pp_double[j][k]/d1->ptr.p_double[k]; 14202 b.ptr.pp_double[i][j] = t; 14204 b.ptr.pp_double[i][i] = b.ptr.pp_double[i][i]+1.0/c->ptr.p_double[i]; 14206 if( !spdmatrixcholeskyrec(&b, 0, vcnt, ae_true, &state->work0, _state) ) 14209 ae_frame_leave(_state); 14212 for(i=0; i<=vcnt-1; i++) 14214 ae_v_move(&state->vcorr.ptr.pp_double[i][0], 1, &v->ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); 14215 for(j=0; j<=i-1; j++) 14217 t = b.ptr.pp_double[j][i]; 14218 ae_v_subd(&state->vcorr.ptr.pp_double[i][0], 1, &state->vcorr.ptr.pp_double[j][0], 1, ae_v_len(0,n-1), t); 14220 t = 1/b.ptr.pp_double[i][i]; 14221 
ae_v_muld(&state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), t); 14223 ae_frame_leave(_state); 14227 /************************************************************************* 14228 This function updates variable part (diagonal matrix D2) 14229 of low-rank preconditioner. 14231 This update is very cheap and takes just O(N) time. 14233 It has no effect with default preconditioner. 14236 Copyright 13.10.2010 by Bochkanov Sergey 14237 *************************************************************************/ 14238 void mincgsetprecvarpart(mincgstate* state, 14239 /* Real */ ae_vector* d2, 14247 for(i=0; i<=n-1; i++) 14249 state->diaghl2.ptr.p_double[i] = d2->ptr.p_double[i]; 14254 /************************************************************************* 14256 This subroutine turns on verification of the user-supplied analytic 14258 * user calls this subroutine before optimization begins 14259 * MinCGOptimize() is called 14260 * prior to actual optimization, for each component of parameters being 14261 optimized X[i] algorithm performs following steps: 14262 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], 14263 where X[i] is i-th component of the initial point and S[i] is a scale 14265 * F(X) is evaluated at these trial points 14266 * we perform one more evaluation in the middle point of the interval 14267 * we build cubic model using function values and derivatives at trial 14268 points and we compare its prediction with actual value in the middle 14270 * in case difference between prediction and actual value is higher than 14271 some predetermined threshold, algorithm stops with completion code -7; 14272 Rep.VarIdx is set to index of the parameter with incorrect derivative. 14273 * after verification is over, algorithm proceeds to the actual optimization. 14275 NOTE 1: verification needs N (parameters count) gradient evaluations. 
It 14276 is very costly and you should use it only for low dimensional 14277 problems, when you want to be sure that you've correctly 14278 calculated analytic derivatives. You should not use it in the 14279 production code (unless you want to check derivatives provided by 14282 NOTE 2: you should carefully choose TestStep. Value which is too large 14283 (so large that function behaviour is significantly non-cubic) will 14284 lead to false alarms. You may use different step for different 14285 parameters by means of setting scale with MinCGSetScale(). 14287 NOTE 3: this function may lead to false positives. In case it reports that 14288 I-th derivative was calculated incorrectly, you may decrease test 14289 step and try one more time - maybe your function changes too 14290 sharply and your step is too large for such rapidly chanding 14294 State - structure used to store algorithm state 14295 TestStep - verification step: 14296 * TestStep=0 turns verification off 14297 * TestStep>0 activates verification 14300 Copyright 31.05.2012 by Bochkanov Sergey 14301 *************************************************************************/ 14302 void mincgsetgradientcheck(mincgstate* state, 14308 ae_assert(ae_isfinite(teststep, _state), "MinCGSetGradientCheck: TestStep contains NaN
or Infinite
", _state); 14309 ae_assert(ae_fp_greater_eq(teststep,0), "MinCGSetGradientCheck: invalid argument TestStep(TestStep<0)
", _state); 14310 state->teststep = teststep; 14314 /************************************************************************* 14315 Clears request fileds (to be sure that we don't forgot to clear something) 14316 *************************************************************************/ 14317 static void mincg_clearrequestfields(mincgstate* state, ae_state *_state) 14321 state->needf = ae_false; 14322 state->needfg = ae_false; 14323 state->xupdated = ae_false; 14324 state->lsstart = ae_false; 14325 state->lsend = ae_false; 14326 state->algpowerup = ae_false; 14330 /************************************************************************* 14331 This function calculates preconditioned product H^(-1)*x and stores result 14332 back into X. Work0[] and Work1[] are used as temporaries (size must be at 14333 least N; this function doesn't allocate arrays). 14336 Copyright 13.10.2010 by Bochkanov Sergey 14337 *************************************************************************/ 14338 static void mincg_preconditionedmultiply(mincgstate* state, 14339 /* Real */ ae_vector* x, 14340 /* Real */ ae_vector* work0, 14341 /* Real */ ae_vector* work1, 14351 vcnt = state->vcnt; 14352 if( state->prectype==0 ) 14356 if( state->prectype==3 ) 14358 for(i=0; i<=n-1; i++) 14360 x->ptr.p_double[i] = x->ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]; 14364 ae_assert(state->prectype==2, "MinCG:
internal error (unexpected PrecType)
", _state); 14367 * handle part common for VCnt=0 and VCnt<>0 14369 for(i=0; i<=n-1; i++) 14371 x->ptr.p_double[i] = x->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); 14379 for(i=0; i<=vcnt-1; i++) 14381 v = ae_v_dotproduct(&state->vcorr.ptr.pp_double[i][0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); 14382 work0->ptr.p_double[i] = v; 14384 for(i=0; i<=n-1; i++) 14386 work1->ptr.p_double[i] = 0; 14388 for(i=0; i<=vcnt-1; i++) 14390 v = work0->ptr.p_double[i]; 14391 ae_v_addd(&state->work1.ptr.p_double[0], 1, &state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); 14393 for(i=0; i<=n-1; i++) 14395 x->ptr.p_double[i] = x->ptr.p_double[i]-state->work1.ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); 14401 /************************************************************************* 14402 This function calculates preconditioned product x'*H^(-1)*y. Work0[] and 14403 Work1[] are used as temporaries (size must be at least N; this function 14404 doesn't allocate arrays). 14407 Copyright 13.10.2010 by Bochkanov Sergey 14408 *************************************************************************/ 14409 static double mincg_preconditionedmultiply2(mincgstate* state, 14410 /* Real */ ae_vector* x, 14411 /* Real */ ae_vector* y, 14412 /* Real */ ae_vector* work0, 14413 /* Real */ ae_vector* work1, 14425 vcnt = state->vcnt; 14428 * no preconditioning 14430 if( state->prectype==0 ) 14432 v0 = ae_v_dotproduct(&x->ptr.p_double[0], 1, &y->ptr.p_double[0], 1, ae_v_len(0,n-1)); 14436 if( state->prectype==3 ) 14439 for(i=0; i<=n-1; i++) 14441 result = result+x->ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]*y->ptr.p_double[i]; 14445 ae_assert(state->prectype==2, "MinCG:
internal error (unexpected PrecType)
", _state); 14448 * low rank preconditioning 14451 for(i=0; i<=n-1; i++) 14453 result = result+x->ptr.p_double[i]*y->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); 14457 for(i=0; i<=n-1; i++) 14459 work0->ptr.p_double[i] = x->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); 14460 work1->ptr.p_double[i] = y->ptr.p_double[i]/(state->diagh.ptr.p_double[i]+state->diaghl2.ptr.p_double[i]); 14462 for(i=0; i<=vcnt-1; i++) 14464 v0 = ae_v_dotproduct(&work0->ptr.p_double[0], 1, &state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); 14465 v1 = ae_v_dotproduct(&work1->ptr.p_double[0], 1, &state->vcorr.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); 14466 result = result-v0*v1; 14473 /************************************************************************* 14474 Internal initialization subroutine 14477 Copyright 16.05.2011 by Bochkanov Sergey 14478 *************************************************************************/ 14479 static void mincg_mincginitinternal(ae_int_t n, 14491 state->teststep = 0; 14493 state->diffstep = diffstep; 14494 mincgsetcond(state, 0, 0, 0, 0, _state); 14495 mincgsetxrep(state, ae_false, _state); 14496 mincgsetdrep(state, ae_false, _state); 14497 mincgsetstpmax(state, 0, _state); 14498 mincgsetcgtype(state, -1, _state); 14499 mincgsetprecdefault(state, _state); 14500 ae_vector_set_length(&state->xk, n, _state); 14501 ae_vector_set_length(&state->dk, n, _state); 14502 ae_vector_set_length(&state->xn, n, _state); 14503 ae_vector_set_length(&state->dn, n, _state); 14504 ae_vector_set_length(&state->x, n, _state); 14505 ae_vector_set_length(&state->d, n, _state); 14506 ae_vector_set_length(&state->g, n, _state); 14507 ae_vector_set_length(&state->work0, n, _state); 14508 ae_vector_set_length(&state->work1, n, _state); 14509 ae_vector_set_length(&state->yk, n, _state); 14510 ae_vector_set_length(&state->s, n, _state); 14511 for(i=0; i<=n-1; i++) 14513 state->s.ptr.p_double[i] = 1.0; 14518 ae_bool 
_mincgstate_init(void* _p, ae_state *_state, ae_bool make_automatic) 14520 mincgstate *p = (mincgstate*)_p; 14521 ae_touch_ptr((void*)p); 14522 if( !ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic) ) 14524 if( !ae_vector_init(&p->diaghl2, 0, DT_REAL, _state, make_automatic) ) 14526 if( !ae_matrix_init(&p->vcorr, 0, 0, DT_REAL, _state, make_automatic) ) 14528 if( !ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic) ) 14530 if( !ae_vector_init(&p->xk, 0, DT_REAL, _state, make_automatic) ) 14532 if( !ae_vector_init(&p->dk, 0, DT_REAL, _state, make_automatic) ) 14534 if( !ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic) ) 14536 if( !ae_vector_init(&p->dn, 0, DT_REAL, _state, make_automatic) ) 14538 if( !ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic) ) 14540 if( !ae_vector_init(&p->yk, 0, DT_REAL, _state, make_automatic) ) 14542 if( !ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic) ) 14544 if( !ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic) ) 14546 if( !_rcommstate_init(&p->rstate, _state, make_automatic) ) 14548 if( !_linminstate_init(&p->lstate, _state, make_automatic) ) 14550 if( !ae_vector_init(&p->work0, 0, DT_REAL, _state, make_automatic) ) 14552 if( !ae_vector_init(&p->work1, 0, DT_REAL, _state, make_automatic) ) 14558 ae_bool _mincgstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 14560 mincgstate *dst = (mincgstate*)_dst; 14561 mincgstate *src = (mincgstate*)_src; 14563 dst->epsg = src->epsg; 14564 dst->epsf = src->epsf; 14565 dst->epsx = src->epsx; 14566 dst->maxits = src->maxits; 14567 dst->stpmax = src->stpmax; 14568 dst->suggestedstep = src->suggestedstep; 14569 dst->xrep = src->xrep; 14570 dst->drep = src->drep; 14571 dst->cgtype = src->cgtype; 14572 dst->prectype = src->prectype; 14573 if( !ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic) ) 14575 if( !ae_vector_init_copy(&dst->diaghl2, &src->diaghl2, _state, make_automatic) ) 14577 if( 
!ae_matrix_init_copy(&dst->vcorr, &src->vcorr, _state, make_automatic) ) 14579 dst->vcnt = src->vcnt; 14580 if( !ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic) ) 14582 dst->diffstep = src->diffstep; 14583 dst->nfev = src->nfev; 14584 dst->mcstage = src->mcstage; 14586 if( !ae_vector_init_copy(&dst->xk, &src->xk, _state, make_automatic) ) 14588 if( !ae_vector_init_copy(&dst->dk, &src->dk, _state, make_automatic) ) 14590 if( !ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic) ) 14592 if( !ae_vector_init_copy(&dst->dn, &src->dn, _state, make_automatic) ) 14594 if( !ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic) ) 14596 dst->fold = src->fold; 14597 dst->stp = src->stp; 14598 dst->curstpmax = src->curstpmax; 14599 if( !ae_vector_init_copy(&dst->yk, &src->yk, _state, make_automatic) ) 14601 dst->lastgoodstep = src->lastgoodstep; 14602 dst->lastscaledstep = src->lastscaledstep; 14603 dst->mcinfo = src->mcinfo; 14604 dst->innerresetneeded = src->innerresetneeded; 14605 dst->terminationneeded = src->terminationneeded; 14606 dst->trimthreshold = src->trimthreshold; 14607 dst->rstimer = src->rstimer; 14608 if( !ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic) ) 14611 if( !ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic) ) 14613 dst->needf = src->needf; 14614 dst->needfg = src->needfg; 14615 dst->xupdated = src->xupdated; 14616 dst->algpowerup = src->algpowerup; 14617 dst->lsstart = src->lsstart; 14618 dst->lsend = src->lsend; 14619 dst->teststep = src->teststep; 14620 if( !_rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic) ) 14622 dst->repiterationscount = src->repiterationscount; 14623 dst->repnfev = src->repnfev; 14624 dst->repvaridx = src->repvaridx; 14625 dst->repterminationtype = src->repterminationtype; 14626 dst->debugrestartscount = src->debugrestartscount; 14627 if( !_linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic) ) 14629 dst->fbase = src->fbase; 
14630 dst->fm2 = src->fm2; 14631 dst->fm1 = src->fm1; 14632 dst->fp1 = src->fp1; 14633 dst->fp2 = src->fp2; 14634 dst->betahs = src->betahs; 14635 dst->betady = src->betady; 14636 if( !ae_vector_init_copy(&dst->work0, &src->work0, _state, make_automatic) ) 14638 if( !ae_vector_init_copy(&dst->work1, &src->work1, _state, make_automatic) ) 14644 void _mincgstate_clear(void* _p) 14646 mincgstate *p = (mincgstate*)_p; 14647 ae_touch_ptr((void*)p); 14648 ae_vector_clear(&p->diagh); 14649 ae_vector_clear(&p->diaghl2); 14650 ae_matrix_clear(&p->vcorr); 14651 ae_vector_clear(&p->s); 14652 ae_vector_clear(&p->xk); 14653 ae_vector_clear(&p->dk); 14654 ae_vector_clear(&p->xn); 14655 ae_vector_clear(&p->dn); 14656 ae_vector_clear(&p->d); 14657 ae_vector_clear(&p->yk); 14658 ae_vector_clear(&p->x); 14659 ae_vector_clear(&p->g); 14660 _rcommstate_clear(&p->rstate); 14661 _linminstate_clear(&p->lstate); 14662 ae_vector_clear(&p->work0); 14663 ae_vector_clear(&p->work1); 14667 void _mincgstate_destroy(void* _p) 14669 mincgstate *p = (mincgstate*)_p; 14670 ae_touch_ptr((void*)p); 14671 ae_vector_destroy(&p->diagh); 14672 ae_vector_destroy(&p->diaghl2); 14673 ae_matrix_destroy(&p->vcorr); 14674 ae_vector_destroy(&p->s); 14675 ae_vector_destroy(&p->xk); 14676 ae_vector_destroy(&p->dk); 14677 ae_vector_destroy(&p->xn); 14678 ae_vector_destroy(&p->dn); 14679 ae_vector_destroy(&p->d); 14680 ae_vector_destroy(&p->yk); 14681 ae_vector_destroy(&p->x); 14682 ae_vector_destroy(&p->g); 14683 _rcommstate_destroy(&p->rstate); 14684 _linminstate_destroy(&p->lstate); 14685 ae_vector_destroy(&p->work0); 14686 ae_vector_destroy(&p->work1); 14690 ae_bool _mincgreport_init(void* _p, ae_state *_state, ae_bool make_automatic) 14692 mincgreport *p = (mincgreport*)_p; 14693 ae_touch_ptr((void*)p); 14698 ae_bool _mincgreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 14700 mincgreport *dst = (mincgreport*)_dst; 14701 mincgreport *src = (mincgreport*)_src; 14702 
dst->iterationscount = src->iterationscount; 14703 dst->nfev = src->nfev; 14704 dst->varidx = src->varidx; 14705 dst->terminationtype = src->terminationtype; 14710 void _mincgreport_clear(void* _p) 14712 mincgreport *p = (mincgreport*)_p; 14713 ae_touch_ptr((void*)p); 14717 void _mincgreport_destroy(void* _p) 14719 mincgreport *p = (mincgreport*)_p; 14720 ae_touch_ptr((void*)p); 14726 /************************************************************************* 14727 BOUND CONSTRAINED OPTIMIZATION 14728 WITH ADDITIONAL LINEAR EQUALITY AND INEQUALITY CONSTRAINTS 14731 The subroutine minimizes function F(x) of N arguments subject to any 14733 * bound constraints 14734 * linear inequality constraints 14735 * linear equality constraints 14738 * user must provide function value and gradient 14739 * starting point X0 must be feasible or 14740 not too far away from the feasible set 14741 * grad(f) must be Lipschitz continuous on a level set: 14742 L = { x : f(x)<=f(x0) } 14743 * function must be defined everywhere on the feasible set F 14747 Constrained optimization if far more complex than the unconstrained one. 14748 Here we give very brief outline of the BLEIC optimizer. We strongly recommend 14749 you to read examples in the ALGLIB Reference Manual and to read ALGLIB User Guide 14750 on optimization, which is available at http://www.alglib.net/optimization/ 14752 1. User initializes algorithm state with MinBLEICCreate() call 14754 2. USer adds boundary and/or linear constraints by calling 14755 MinBLEICSetBC() and MinBLEICSetLC() functions. 14757 3. User sets stopping conditions with MinBLEICSetCond(). 14759 4. User calls MinBLEICOptimize() function which takes algorithm state and 14760 pointer (delegate, etc.) to callback function which calculates F/G. 14762 5. User calls MinBLEICResults() to get solution 14764 6. Optionally user may call MinBLEICRestartFrom() to solve another problem 14765 with same N but another starting point. 
14766 MinBLEICRestartFrom() allows to reuse already initialized structure. 14770 N - problem dimension, N>0: 14771 * if given, only leading N elements of X are used 14772 * if not given, automatically determined from size ofX 14773 X - starting point, array[N]: 14774 * it is better to set X to a feasible point 14775 * but X can be infeasible, in which case algorithm will try 14776 to find feasible point first, using X as initial 14780 State - structure stores algorithm state 14783 Copyright 28.11.2010 by Bochkanov Sergey 14784 *************************************************************************/ 14785 void minbleiccreate(ae_int_t n, 14786 /* Real */ ae_vector* x, 14787 minbleicstate* state, 14790 ae_frame _frame_block; 14794 ae_frame_make(_state, &_frame_block); 14795 _minbleicstate_clear(state); 14796 ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); 14797 ae_vector_init(&ct, 0, DT_INT, _state, ae_true); 14799 ae_assert(n>=1, "MinBLEICCreate: N<1
", _state); 14800 ae_assert(x->cnt>=n, "MinBLEICCreate: Length(X)<N
", _state); 14801 ae_assert(isfinitevector(x, n, _state), "MinBLEICCreate: X contains infinite
or NaN values!
", _state); 14802 minbleic_minbleicinitinternal(n, x, 0.0, state, _state); 14803 ae_frame_leave(_state); 14807 /************************************************************************* 14808 The subroutine is finite difference variant of MinBLEICCreate(). It uses 14809 finite differences in order to differentiate target function. 14811 Description below contains information which is specific to this function 14812 only. We recommend to read comments on MinBLEICCreate() in order to get 14813 more information about creation of BLEIC optimizer. 14816 N - problem dimension, N>0: 14817 * if given, only leading N elements of X are used 14818 * if not given, automatically determined from size of X 14819 X - starting point, array[0..N-1]. 14820 DiffStep- differentiation step, >0 14823 State - structure which stores algorithm state 14826 1. algorithm uses 4-point central formula for differentiation. 14827 2. differentiation step along I-th axis is equal to DiffStep*S[I] where 14828 S[] is scaling vector which can be set by MinBLEICSetScale() call. 14829 3. we recommend you to use moderate values of differentiation step. Too 14830 large step will result in too large truncation errors, while too small 14831 step will result in too large numerical errors. 1.0E-6 can be good 14832 value to start with. 14833 4. Numerical differentiation is very inefficient - one gradient 14834 calculation needs 4*N function evaluations. This function will work for 14835 any N - either small (1...10), moderate (10...100) or large (100...). 14836 However, performance penalty will be too severe for any N's except for 14838 We should also say that code which relies on numerical differentiation 14839 is less robust and precise. CG needs exact gradient values. 
Imprecise 14840 gradient may slow down convergence, especially on highly nonlinear 14842 Thus we recommend to use this function for fast prototyping on small- 14843 dimensional problems only, and to implement analytical gradient as soon 14847 Copyright 16.05.2011 by Bochkanov Sergey 14848 *************************************************************************/ 14849 void minbleiccreatef(ae_int_t n, 14850 /* Real */ ae_vector* x, 14852 minbleicstate* state, 14855 ae_frame _frame_block; 14859 ae_frame_make(_state, &_frame_block); 14860 _minbleicstate_clear(state); 14861 ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); 14862 ae_vector_init(&ct, 0, DT_INT, _state, ae_true); 14864 ae_assert(n>=1, "MinBLEICCreateF: N<1
", _state); 14865 ae_assert(x->cnt>=n, "MinBLEICCreateF: Length(X)<N
", _state); 14866 ae_assert(isfinitevector(x, n, _state), "MinBLEICCreateF: X contains infinite
or NaN values!
", _state); 14867 ae_assert(ae_isfinite(diffstep, _state), "MinBLEICCreateF: DiffStep is infinite
or NaN!
", _state); 14868 ae_assert(ae_fp_greater(diffstep,0), "MinBLEICCreateF: DiffStep is non-positive!
", _state); 14869 minbleic_minbleicinitinternal(n, x, diffstep, state, _state); 14870 ae_frame_leave(_state); 14874 /************************************************************************* 14875 This function sets boundary constraints for BLEIC optimizer. 14877 Boundary constraints are inactive by default (after initial creation). 14878 They are preserved after algorithm restart with MinBLEICRestartFrom(). 14881 State - structure stores algorithm state 14882 BndL - lower bounds, array[N]. 14883 If some (all) variables are unbounded, you may specify 14884 very small number or -INF. 14885 BndU - upper bounds, array[N]. 14886 If some (all) variables are unbounded, you may specify 14887 very large number or +INF. 14889 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th 14890 variable will be "frozen
" at X[i]=BndL[i]=BndU[i]. 14892 NOTE 2: this solver has following useful properties: 14893 * bound constraints are always satisfied exactly 14894 * function is evaluated only INSIDE area specified by bound constraints, 14895 even when numerical differentiation is used (algorithm adjusts nodes 14896 according to boundary constraints) 14899 Copyright 28.11.2010 by Bochkanov Sergey 14900 *************************************************************************/ 14901 void minbleicsetbc(minbleicstate* state, 14902 /* Real */ ae_vector* bndl, 14903 /* Real */ ae_vector* bndu, 14911 ae_assert(bndl->cnt>=n, "MinBLEICSetBC: Length(BndL)<N
", _state); 14912 ae_assert(bndu->cnt>=n, "MinBLEICSetBC: Length(BndU)<N
", _state); 14913 for(i=0; i<=n-1; i++) 14915 ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinBLEICSetBC: BndL contains NAN
or +
INF", _state); 14916 ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinBLEICSetBC: BndL contains NAN
or -
INF", _state); 14917 state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; 14918 state->hasbndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); 14919 state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; 14920 state->hasbndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); 14922 sassetbc(&state->sas, bndl, bndu, _state); 14926 /************************************************************************* 14927 This function sets linear constraints for BLEIC optimizer. 14929 Linear constraints are inactive by default (after initial creation). 14930 They are preserved after algorithm restart with MinBLEICRestartFrom(). 14933 State - structure previously allocated with MinBLEICCreate call. 14934 C - linear constraints, array[K,N+1]. 14935 Each row of C represents one constraint, either equality 14936 or inequality (see below): 14937 * first N elements correspond to coefficients, 14938 * last element corresponds to the right part. 14939 All elements of C (including right part) must be finite. 14940 CT - type of constraints, array[K]: 14941 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] 14942 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] 14943 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] 14944 K - number of equality/inequality constraints, K>=0: 14945 * if given, only leading K elements of C/CT are used 14946 * if not given, automatically determined from sizes of C/CT 14948 NOTE 1: linear (non-bound) constraints are satisfied only approximately: 14949 * there always exists some minor violation (about Epsilon in magnitude) 14950 due to rounding errors 14951 * numerical differentiation, if used, may lead to function evaluations 14952 outside of the feasible area, because algorithm does NOT change 14953 numerical differentiation formula according to linear constraints. 
14954 If you want constraints to be satisfied exactly, try to reformulate your 14955 problem in such manner that all constraints will become boundary ones 14956 (this kind of constraints is always satisfied exactly, both in the final 14957 solution and in all intermediate points). 14960 Copyright 28.11.2010 by Bochkanov Sergey 14961 *************************************************************************/ 14962 void minbleicsetlc(minbleicstate* state, 14963 /* Real */ ae_matrix* c, 14964 /* Integer */ ae_vector* ct, 14977 * First, check for errors in the inputs 14979 ae_assert(k>=0, "MinBLEICSetLC:
K<0
", _state); 14980 ae_assert(c->cols>=n+1||k==0, "MinBLEICSetLC: Cols(C)<N+1
", _state); 14981 ae_assert(c->rows>=k, "MinBLEICSetLC: Rows(C)<
K", _state); 14982 ae_assert(ct->cnt>=k, "MinBLEICSetLC: Length(CT)<
K", _state); 14983 ae_assert(apservisfinitematrix(c, k, n+1, _state), "MinBLEICSetLC: C contains infinite
or NaN values!
", _state); 14996 * Equality constraints are stored first, in the upper 14997 * NEC rows of State.CLEIC matrix. Inequality constraints 14998 * are stored in the next NIC rows. 15000 * NOTE: we convert inequality constraints to the form 15001 * A*x<=b before copying them. 15003 rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); 15006 for(i=0; i<=k-1; i++) 15008 if( ct->ptr.p_int[i]==0 ) 15010 ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 15011 state->nec = state->nec+1; 15014 for(i=0; i<=k-1; i++) 15016 if( ct->ptr.p_int[i]!=0 ) 15018 if( ct->ptr.p_int[i]>0 ) 15020 ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 15024 ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 15026 state->nic = state->nic+1; 15031 * Normalize rows of State.CLEIC: each row must have unit norm. 15032 * Norm is calculated using first N elements (i.e. right part is 15033 * not counted when we calculate norm). 15035 for(i=0; i<=k-1; i++) 15038 for(j=0; j<=n-1; j++) 15040 v = v+ae_sqr(state->cleic.ptr.pp_double[i][j], _state); 15042 if( ae_fp_eq(v,0) ) 15046 v = 1/ae_sqrt(v, _state); 15047 ae_v_muld(&state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n), v); 15049 sassetlc(&state->sas, c, ct, k, _state); 15053 /************************************************************************* 15054 This function sets stopping conditions for the optimizer. 
15057 State - structure which stores algorithm state 15059 The subroutine finishes its work if the condition 15060 |v|<EpsG is satisfied, where: 15061 * |.| means Euclidian norm 15062 * v - scaled gradient vector, v[i]=g[i]*s[i] 15064 * s - scaling coefficients set by MinBLEICSetScale() 15066 The subroutine finishes its work if on k+1-th iteration 15067 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} 15070 The subroutine finishes its work if on k+1-th iteration 15071 the condition |v|<=EpsX is fulfilled, where: 15072 * |.| means Euclidian norm 15073 * v - scaled step vector, v[i]=dx[i]/s[i] 15074 * dx - step vector, dx=X(k+1)-X(k) 15075 * s - scaling coefficients set by MinBLEICSetScale() 15076 MaxIts - maximum number of iterations. If MaxIts=0, the number of 15077 iterations is unlimited. 15079 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead 15080 to automatic stopping criterion selection. 15082 NOTE: when SetCond() called with non-zero MaxIts, BLEIC solver may perform 15083 slightly more than MaxIts iterations. I.e., MaxIts sets non-strict 15084 limit on iterations count. 15087 Copyright 28.11.2010 by Bochkanov Sergey 15088 *************************************************************************/ 15089 void minbleicsetcond(minbleicstate* state, 15098 ae_assert(ae_isfinite(epsg, _state), "MinBLEICSetCond: EpsG is not finite number
", _state); 15099 ae_assert(ae_fp_greater_eq(epsg,0), "MinBLEICSetCond: negative EpsG
", _state); 15100 ae_assert(ae_isfinite(epsf, _state), "MinBLEICSetCond: EpsF is not finite number
", _state); 15101 ae_assert(ae_fp_greater_eq(epsf,0), "MinBLEICSetCond: negative EpsF
", _state); 15102 ae_assert(ae_isfinite(epsx, _state), "MinBLEICSetCond: EpsX is not finite number
", _state); 15103 ae_assert(ae_fp_greater_eq(epsx,0), "MinBLEICSetCond: negative EpsX
", _state); 15104 ae_assert(maxits>=0, "MinBLEICSetCond: negative MaxIts!
", _state); 15105 if( ((ae_fp_eq(epsg,0)&&ae_fp_eq(epsf,0))&&ae_fp_eq(epsx,0))&&maxits==0 ) 15109 state->epsg = epsg; 15110 state->epsf = epsf; 15111 state->epsx = epsx; 15112 state->maxits = maxits; 15116 /************************************************************************* 15117 This function sets scaling coefficients for BLEIC optimizer. 15119 ALGLIB optimizers use scaling matrices to test stopping conditions (step 15120 size and gradient are scaled before comparison with tolerances). Scale of 15121 the I-th variable is a translation invariant measure of: 15122 a) "how large
" the variable is 15123 b) how large the step should be to make significant changes in the function 15125 Scaling is also used by finite difference variant of the optimizer - step 15126 along I-th axis is equal to DiffStep*S[I]. 15128 In most optimizers (and in the BLEIC too) scaling is NOT a form of 15129 preconditioning. It just affects stopping conditions. You should set 15130 preconditioner by separate call to one of the MinBLEICSetPrec...() 15133 There is a special preconditioning mode, however, which uses scaling 15134 coefficients to form diagonal preconditioning matrix. You can turn this 15135 mode on, if you want. But you should understand that scaling is not the 15136 same thing as preconditioning - these are two different, although related 15137 forms of tuning solver. 15140 State - structure stores algorithm state 15141 S - array[N], non-zero scaling coefficients 15142 S[i] may be negative, sign doesn't matter. 15145 Copyright 14.01.2011 by Bochkanov Sergey 15146 *************************************************************************/ 15147 void minbleicsetscale(minbleicstate* state, 15148 /* Real */ ae_vector* s, 15154 ae_assert(s->cnt>=state->nmain, "MinBLEICSetScale: Length(S)<N
", _state); 15155 for(i=0; i<=state->nmain-1; i++) 15157 ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinBLEICSetScale: S contains infinite
or NAN elements
", _state); 15158 ae_assert(ae_fp_neq(s->ptr.p_double[i],0), "MinBLEICSetScale: S contains
zero elements
", _state); 15159 state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); 15161 sassetscale(&state->sas, s, _state); 15165 /************************************************************************* 15166 Modification of the preconditioner: preconditioning is turned off. 15169 State - structure which stores algorithm state 15172 Copyright 13.10.2010 by Bochkanov Sergey 15173 *************************************************************************/ 15174 void minbleicsetprecdefault(minbleicstate* state, ae_state *_state) 15178 state->prectype = 0; 15182 /************************************************************************* 15183 Modification of the preconditioner: diagonal of approximate Hessian is 15187 State - structure which stores algorithm state 15188 D - diagonal of the approximate Hessian, array[0..N-1], 15189 (if larger, only leading N elements are used). 15191 NOTE 1: D[i] should be positive. Exception will be thrown otherwise. 15193 NOTE 2: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. 15196 Copyright 13.10.2010 by Bochkanov Sergey 15197 *************************************************************************/ 15198 void minbleicsetprecdiag(minbleicstate* state, 15199 /* Real */ ae_vector* d, 15205 ae_assert(d->cnt>=state->nmain, "MinBLEICSetPrecDiag: D is too
short", _state); 15206 for(i=0; i<=state->nmain-1; i++) 15208 ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinBLEICSetPrecDiag: D contains infinite
or NAN elements
", _state); 15209 ae_assert(ae_fp_greater(d->ptr.p_double[i],0), "MinBLEICSetPrecDiag: D contains non-positive elements
", _state); 15211 rvectorsetlengthatleast(&state->diagh, state->nmain, _state); 15212 state->prectype = 2; 15213 for(i=0; i<=state->nmain-1; i++) 15215 state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; 15220 /************************************************************************* 15221 Modification of the preconditioner: scale-based diagonal preconditioning. 15223 This preconditioning mode can be useful when you don't have approximate 15224 diagonal of Hessian, but you know that your variables are badly scaled 15225 (for example, one variable is in [1,10], and another in [1000,100000]), 15226 and most part of the ill-conditioning comes from different scales of vars. 15228 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), 15229 can greatly improve convergence. 15231 IMPRTANT: you should set scale of your variables with MinBLEICSetScale() 15232 call (before or after MinBLEICSetPrecScale() call). Without knowledge of 15233 the scale of your variables scale-based preconditioner will be just unit 15237 State - structure which stores algorithm state 15240 Copyright 13.10.2010 by Bochkanov Sergey 15241 *************************************************************************/ 15242 void minbleicsetprecscale(minbleicstate* state, ae_state *_state) 15246 state->prectype = 3; 15250 /************************************************************************* 15251 This function turns on/off reporting. 15254 State - structure which stores algorithm state 15255 NeedXRep- whether iteration reports are needed or not 15257 If NeedXRep is True, algorithm will call rep() callback function if it is 15258 provided to MinBLEICOptimize(). 
15261 Copyright 28.11.2010 by Bochkanov Sergey 15262 *************************************************************************/ 15263 void minbleicsetxrep(minbleicstate* state, 15269 state->xrep = needxrep; 15273 /************************************************************************* 15274 This function turns on/off line search reports. 15275 These reports are described in more details in developer-only comments on 15276 MinBLEICState object. 15279 State - structure which stores algorithm state 15280 NeedDRep- whether line search reports are needed or not 15282 This function is intended for private use only. Turning it on artificially 15283 may cause program failure. 15286 Copyright 02.04.2010 by Bochkanov Sergey 15287 *************************************************************************/ 15288 void minbleicsetdrep(minbleicstate* state, 15294 state->drep = needdrep; 15298 /************************************************************************* 15299 This function sets maximum step length 15301 IMPORTANT: this feature is hard to combine with preconditioning. You can't 15302 set upper limit on step length, when you solve optimization problem with 15303 linear (non-boundary) constraints AND preconditioner turned on. 15305 When non-boundary constraints are present, you have to either a) use 15306 preconditioner, or b) use upper limit on step length. YOU CAN'T USE BOTH! 15307 In this case algorithm will terminate with appropriate error code. 15310 State - structure which stores algorithm state 15311 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't 15312 want to limit step length. 15314 Use this subroutine when you optimize target function which contains exp() 15315 or other fast growing functions, and optimization algorithm makes too 15316 large steps which lead to overflow. 
This function allows us to reject 15317 steps that are too large (and therefore expose us to the possible 15318 overflow) without actually calculating function value at the x+stp*d. 15321 Copyright 02.04.2010 by Bochkanov Sergey 15322 *************************************************************************/ 15323 void minbleicsetstpmax(minbleicstate* state, 15329 ae_assert(ae_isfinite(stpmax, _state), "MinBLEICSetStpMax: StpMax is not finite!
", _state); 15330 ae_assert(ae_fp_greater_eq(stpmax,0), "MinBLEICSetStpMax: StpMax<0!
", _state); 15331 state->stpmax = stpmax; 15335 /************************************************************************* 15338 1. This function has two different implementations: one which uses exact 15339 (analytical) user-supplied gradient, and one which uses function value 15340 only and numerically differentiates function in order to obtain 15343 Depending on the specific function used to create optimizer object 15344 (either MinBLEICCreate() for analytical gradient or MinBLEICCreateF() 15345 for numerical differentiation) you should choose appropriate variant of 15346 MinBLEICOptimize() - one which accepts function AND gradient or one 15347 which accepts function ONLY. 15349 Be careful to choose variant of MinBLEICOptimize() which corresponds to 15350 your optimization scheme! Table below lists different combinations of 15351 callback (function/gradient) passed to MinBLEICOptimize() and specific 15352 function used to create optimizer. 15355 | USER PASSED TO MinBLEICOptimize() 15356 CREATED WITH | function only | function and gradient 15357 ------------------------------------------------------------ 15358 MinBLEICCreateF() | work FAIL 15359 MinBLEICCreate() | FAIL work 15361 Here "FAIL" denotes inappropriate combinations of optimizer creation 15362 function and MinBLEICOptimize() version. Attemps to use such 15363 combination (for example, to create optimizer with MinBLEICCreateF() 15364 and to pass gradient information to MinCGOptimize()) will lead to 15365 exception being thrown. Either you did not pass gradient when it WAS 15366 needed or you passed gradient when it was NOT needed. 
NOTE(review): the span below is a line-mangled extraction of the reverse-
communication iteration routine minbleiciteration(); the embedded integers
are the original file's line numbers, and many interior lines (stage-dispatch
gotos and labels, local declarations, braces) were dropped by the extraction.
It is therefore preserved byte-for-byte rather than reformatted.
Structure, as far as the visible text shows:
 (1) restore locals (n, m, i, j, badbfgsits, nextaction, mcinfo, actstatus,
     ic, b, v, vv, penalty, ginit, gdecay) from state->rstate on re-entry,
     then dispatch on state->rstate.stage (stages 0..41);
 (2) optional user-gradient verification by central differences with bound
     clipping (stages 0-2), reporting varidx / terminationtype=-7 on failure;
 (3) main BLEIC-PG cycle: active-set/gradient phase (analytic gradient or
     4-point numerical differentiation, with one-sided fallback near bounds),
     steepest-descent phase using MCSRCH line search with a bounded
     non-monotonic fallback, and an L-BFGS phase (stages 24-41) with the
     two-loop recursion over (sk, yk, rho, theta) and decaying GDecay target;
 (4) save all locals back into state->rstate before each reverse-
     communication return — presumably paired with `goto lbl_rcomm`-style
     code in the dropped lines; confirm against the original source.
15369 Copyright 28.11.2010 by Bochkanov Sergey 15370 *************************************************************************/ 15371 ae_bool minbleiciteration(minbleicstate* state, ae_state *_state) 15379 ae_int_t badbfgsits; 15381 ae_int_t nextaction; 15383 ae_int_t actstatus; 15393 * Reverse communication preparations 15394 * I know it looks ugly, but it works the same way 15395 * anywhere from C++ to Python. 15397 * This code initializes locals by: 15398 * * random values determined during code 15399 * generation - on first subroutine call 15400 * * values from previous call - on subsequent calls 15402 if( state->rstate.stage>=0 ) 15404 n = state->rstate.ia.ptr.p_int[0]; 15405 m = state->rstate.ia.ptr.p_int[1]; 15406 i = state->rstate.ia.ptr.p_int[2]; 15407 j = state->rstate.ia.ptr.p_int[3]; 15408 badbfgsits = state->rstate.ia.ptr.p_int[4]; 15409 nextaction = state->rstate.ia.ptr.p_int[5]; 15410 mcinfo = state->rstate.ia.ptr.p_int[6]; 15411 actstatus = state->rstate.ia.ptr.p_int[7]; 15412 ic = state->rstate.ia.ptr.p_int[8]; 15413 b = state->rstate.ba.ptr.p_bool[0]; 15414 v = state->rstate.ra.ptr.p_double[0]; 15415 vv = state->rstate.ra.ptr.p_double[1]; 15416 penalty = state->rstate.ra.ptr.p_double[2]; 15417 ginit = state->rstate.ra.ptr.p_double[3]; 15418 gdecay = state->rstate.ra.ptr.p_double[4]; 15438 if( state->rstate.stage==0 ) 15442 if( state->rstate.stage==1 ) 15446 if( state->rstate.stage==2 ) 15450 if( state->rstate.stage==3 ) 15454 if( state->rstate.stage==4 ) 15458 if( state->rstate.stage==5 ) 15462 if( state->rstate.stage==6 ) 15466 if( state->rstate.stage==7 ) 15470 if( state->rstate.stage==8 ) 15474 if( state->rstate.stage==9 ) 15478 if( state->rstate.stage==10 ) 15482 if( state->rstate.stage==11 ) 15486 if( state->rstate.stage==12 ) 15490 if( state->rstate.stage==13 ) 15494 if( state->rstate.stage==14 ) 15498 if( state->rstate.stage==15 ) 15502 if( state->rstate.stage==16 ) 15506 if( state->rstate.stage==17 ) 15510 if( state->rstate.stage==18 ) 
 15514 if( state->rstate.stage==19 ) 15518 if( state->rstate.stage==20 ) 15522 if( state->rstate.stage==21 ) 15526 if( state->rstate.stage==22 ) 15530 if( state->rstate.stage==23 ) 15534 if( state->rstate.stage==24 ) 15538 if( state->rstate.stage==25 ) 15542 if( state->rstate.stage==26 ) 15546 if( state->rstate.stage==27 ) 15550 if( state->rstate.stage==28 ) 15554 if( state->rstate.stage==29 ) 15558 if( state->rstate.stage==30 ) 15562 if( state->rstate.stage==31 ) 15566 if( state->rstate.stage==32 ) 15570 if( state->rstate.stage==33 ) 15574 if( state->rstate.stage==34 ) 15578 if( state->rstate.stage==35 ) 15582 if( state->rstate.stage==36 ) 15586 if( state->rstate.stage==37 ) 15590 if( state->rstate.stage==38 ) 15594 if( state->rstate.stage==39 ) 15598 if( state->rstate.stage==40 ) 15602 if( state->rstate.stage==41 ) 15612 * Algorithm parameters: 15613 * * M number of L-BFGS corrections. 15614 * This coefficient remains fixed during iterations. 15615 * * GDecay desired decrease of constrained gradient during L-BFGS iterations. 15616 * This coefficient is decreased after each L-BFGS round until 15617 * it reaches minimum decay. 
 15619 m = ae_minint(5, state->nmain, _state); 15620 gdecay = minbleic_initialdecay; 15626 state->repterminationtype = 0; 15627 state->repinneriterationscount = 0; 15628 state->repouteriterationscount = 0; 15629 state->repnfev = 0; 15630 state->repvaridx = -1; 15631 state->repdebugeqerr = 0.0; 15632 state->repdebugfs = _state->v_nan; 15633 state->repdebugff = _state->v_nan; 15634 state->repdebugdx = _state->v_nan; 15635 if( ae_fp_neq(state->stpmax,0)&&state->prectype!=0 ) 15637 state->repterminationtype = -10; 15641 rvectorsetlengthatleast(&state->rho, m, _state); 15642 rvectorsetlengthatleast(&state->theta, m, _state); 15643 rmatrixsetlengthatleast(&state->yk, m, n, _state); 15644 rmatrixsetlengthatleast(&state->sk, m, n, _state); 15647 * Fill TmpPrec with current preconditioner 15649 rvectorsetlengthatleast(&state->tmpprec, n, _state); 15650 for(i=0; i<=n-1; i++) 15652 if( state->prectype==2 ) 15654 state->tmpprec.ptr.p_double[i] = state->diagh.ptr.p_double[i]; 15657 if( state->prectype==3 ) 15659 state->tmpprec.ptr.p_double[i] = 1/ae_sqr(state->s.ptr.p_double[i], _state); 15662 state->tmpprec.ptr.p_double[i] = 1; 15664 sassetprecdiag(&state->sas, &state->tmpprec, _state); 15667 * Start optimization 15669 if( !sasstartoptimization(&state->sas, &state->xstart, _state) ) 15671 state->repterminationtype = -3; 15677 * Check correctness of user-supplied gradient 15679 if( !(ae_fp_eq(state->diffstep,0)&&ae_fp_greater(state->teststep,0)) ) 15683 minbleic_clearrequestfields(state, _state); 15684 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 15685 state->needfg = ae_true; 15692 ae_assert(!state->hasbndl.ptr.p_bool[i]||ae_fp_greater_eq(state->sas.xc.ptr.p_double[i],state->bndl.ptr.p_double[i]), "MinBLEICIteration:
internal error(State.X is out of bounds)
", _state); 15693 ae_assert(!state->hasbndu.ptr.p_bool[i]||ae_fp_less_eq(state->sas.xc.ptr.p_double[i],state->bndu.ptr.p_double[i]), "MinBLEICIteration:
internal error(State.X is out of bounds)
", _state); 15694 v = state->x.ptr.p_double[i]; 15695 state->x.ptr.p_double[i] = v-state->teststep*state->s.ptr.p_double[i]; 15696 if( state->hasbndl.ptr.p_bool[i] ) 15698 state->x.ptr.p_double[i] = ae_maxreal(state->x.ptr.p_double[i], state->bndl.ptr.p_double[i], _state); 15700 state->xm1 = state->x.ptr.p_double[i]; 15701 state->rstate.stage = 0; 15704 state->fm1 = state->f; 15705 state->gm1 = state->g.ptr.p_double[i]; 15706 state->x.ptr.p_double[i] = v+state->teststep*state->s.ptr.p_double[i]; 15707 if( state->hasbndu.ptr.p_bool[i] ) 15709 state->x.ptr.p_double[i] = ae_minreal(state->x.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 15711 state->xp1 = state->x.ptr.p_double[i]; 15712 state->rstate.stage = 1; 15715 state->fp1 = state->f; 15716 state->gp1 = state->g.ptr.p_double[i]; 15717 state->x.ptr.p_double[i] = (state->xm1+state->xp1)/2; 15718 if( state->hasbndl.ptr.p_bool[i] ) 15720 state->x.ptr.p_double[i] = ae_maxreal(state->x.ptr.p_double[i], state->bndl.ptr.p_double[i], _state); 15722 if( state->hasbndu.ptr.p_bool[i] ) 15724 state->x.ptr.p_double[i] = ae_minreal(state->x.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 15726 state->rstate.stage = 2; 15729 state->x.ptr.p_double[i] = v; 15730 if( !derivativecheck(state->fm1, state->gm1, state->fp1, state->gp1, state->f, state->g.ptr.p_double[i], state->xp1-state->xm1, _state) ) 15732 state->repvaridx = i; 15733 state->repterminationtype = -7; 15734 sasstopoptimization(&state->sas, _state); 15741 state->needfg = ae_false; 15745 * Main cycle of BLEIC-PG algorithm 15747 state->repterminationtype = 4; 15749 state->lastgoodstep = 0; 15750 state->lastscaledgoodstep = 0; 15751 state->maxscaledgrad = 0; 15752 state->nonmonotoniccnt = n+state->nic; 15753 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 15754 minbleic_clearrequestfields(state, _state); 15755 if( ae_fp_neq(state->diffstep,0) ) 15759 state->needfg = ae_true; 15760 state->rstate.stage = 3; 15763 
state->needfg = ae_false; 15766 state->needf = ae_true; 15767 state->rstate.stage = 4; 15770 state->needf = ae_false; 15772 state->fc = state->f; 15773 trimprepare(state->f, &state->trimthreshold, _state); 15774 state->repnfev = state->repnfev+1; 15781 * Report current point 15783 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 15784 state->f = state->fc; 15785 state->xupdated = ae_true; 15786 state->rstate.stage = 5; 15789 state->xupdated = ae_false; 15800 * (a) calculate unconstrained gradient 15801 * (b) determine active set 15802 * (c) update MaxScaledGrad 15804 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 15805 minbleic_clearrequestfields(state, _state); 15806 if( ae_fp_neq(state->diffstep,0) ) 15812 * Analytic gradient 15814 state->needfg = ae_true; 15815 state->rstate.stage = 6; 15818 state->needfg = ae_false; 15823 * Numerical differentiation 15825 state->needf = ae_true; 15826 state->rstate.stage = 7; 15829 state->fbase = state->f; 15836 v = state->x.ptr.p_double[i]; 15838 if( state->hasbndl.ptr.p_bool[i] ) 15840 b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); 15842 if( state->hasbndu.ptr.p_bool[i] ) 15844 b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); 15850 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 15851 state->rstate.stage = 8; 15854 state->fm2 = state->f; 15855 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 15856 state->rstate.stage = 9; 15859 state->fm1 = state->f; 15860 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 15861 state->rstate.stage = 10; 15864 state->fp1 = state->f; 15865 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 15866 state->rstate.stage = 11; 15869 state->fp2 = state->f; 15870 state->g.ptr.p_double[i] = 
(8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 15873 state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; 15874 state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; 15875 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) 15877 state->xm1 = state->bndl.ptr.p_double[i]; 15879 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) 15881 state->xp1 = state->bndu.ptr.p_double[i]; 15883 state->x.ptr.p_double[i] = state->xm1; 15884 state->rstate.stage = 12; 15887 state->fm1 = state->f; 15888 state->x.ptr.p_double[i] = state->xp1; 15889 state->rstate.stage = 13; 15892 state->fp1 = state->f; 15893 if( ae_fp_neq(state->xm1,state->xp1) ) 15895 state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); 15899 state->g.ptr.p_double[i] = 0; 15902 state->x.ptr.p_double[i] = v; 15906 state->f = state->fbase; 15907 state->needf = ae_false; 15909 state->fc = state->f; 15910 ae_v_move(&state->gc.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 15911 sasreactivateconstraintsprec(&state->sas, &state->gc, _state); 15913 for(i=0; i<=n-1; i++) 15915 v = v+ae_sqr(state->gc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 15917 state->maxscaledgrad = ae_maxreal(state->maxscaledgrad, ae_sqrt(v, _state), _state); 15920 * Phase 2: perform steepest descent step. 
 15922 * NextAction control variable is set on exit from this loop: 15923 * * NextAction>0 in case we have to proceed to Phase 3 (L-BFGS step) 15924 * * NextAction<0 in case we have to proceed to Phase 1 (recalculate active set) 15925 * * NextAction=0 in case we found solution (step size or function change are small enough) 15935 * Check gradient-based stopping criteria 15937 if( ae_fp_less_eq(sasscaledconstrainednorm(&state->sas, &state->gc, _state),state->epsg) ) 15941 * Gradient is small enough, stop iterations 15943 state->repterminationtype = 4; 15949 * Calculate normalized constrained descent direction, store to D. 15950 * Try to use previous scaled step length as initial estimate for new step. 15952 * NOTE: D can be exactly zero, in this case Stp is set to 1.0 15954 sasconstraineddescentprec(&state->sas, &state->gc, &state->d, _state); 15956 for(i=0; i<=n-1; i++) 15958 v = v+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); 15960 v = ae_sqrt(v, _state); 15961 if( ae_fp_greater(state->lastscaledgoodstep,0)&&ae_fp_greater(v,0) ) 15963 state->stp = state->lastscaledgoodstep/v; 15971 * Calculate bound on step length. 15972 * Enforce user-supplied limit on step length. 15974 sasexploredirection(&state->sas, &state->d, &state->curstpmax, &state->cidx, &state->cval, _state); 15975 state->activationstep = state->curstpmax; 15976 if( state->cidx>=0&&ae_fp_eq(state->activationstep,0) ) 15978 sasimmediateactivation(&state->sas, state->cidx, state->cval, _state); 15981 if( ae_fp_greater(state->stpmax,0) ) 15983 state->curstpmax = ae_minreal(state->curstpmax, state->stpmax, _state); 15987 * Report beginning of line search (if requested by caller). 15988 * See description of the MinBLEICState for more information 15989 * about fields accessible to caller. 
 15991 * Caller may do following: 15992 * * change State.Stp and load better initial estimate of 15999 minbleic_clearrequestfields(state, _state); 16000 state->lsstart = ae_true; 16001 state->lbfgssearch = ae_false; 16002 state->boundedstep = state->cidx>=0; 16003 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16004 ae_v_move(&state->g.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16005 state->f = state->fc; 16006 state->rstate.stage = 14; 16009 state->lsstart = ae_false; 16013 * Perform optimization of F along XC+alpha*D. 16015 state->mcstage = 0; 16016 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16017 ae_v_move(&state->gn.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16018 state->fn = state->fc; 16019 mcsrch(n, &state->xn, &state->fn, &state->gn, &state->d, &state->stp, state->curstpmax, minbleic_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 16021 if( state->mcstage==0 ) 16027 * Enforce constraints (correction) in XN. 16028 * Copy current point from XN to X. 
 16030 sascorrection(&state->sas, &state->xn, &penalty, _state); 16031 for(i=0; i<=n-1; i++) 16033 state->x.ptr.p_double[i] = state->xn.ptr.p_double[i]; 16037 * Gradient, either user-provided or numerical differentiation 16039 minbleic_clearrequestfields(state, _state); 16040 if( ae_fp_neq(state->diffstep,0) ) 16046 * Analytic gradient 16048 state->needfg = ae_true; 16049 state->rstate.stage = 15; 16052 state->needfg = ae_false; 16053 state->repnfev = state->repnfev+1; 16058 * Numerical differentiation 16060 state->needf = ae_true; 16061 state->rstate.stage = 16; 16064 state->fbase = state->f; 16071 v = state->x.ptr.p_double[i]; 16073 if( state->hasbndl.ptr.p_bool[i] ) 16075 b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); 16077 if( state->hasbndu.ptr.p_bool[i] ) 16079 b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); 16085 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 16086 state->rstate.stage = 17; 16089 state->fm2 = state->f; 16090 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 16091 state->rstate.stage = 18; 16094 state->fm1 = state->f; 16095 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 16096 state->rstate.stage = 19; 16099 state->fp1 = state->f; 16100 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 16101 state->rstate.stage = 20; 16104 state->fp2 = state->f; 16105 state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 16106 state->repnfev = state->repnfev+4; 16109 state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; 16110 state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; 16111 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) 16113 state->xm1 = state->bndl.ptr.p_double[i]; 16115 if( 
state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) 16117 state->xp1 = state->bndu.ptr.p_double[i]; 16119 state->x.ptr.p_double[i] = state->xm1; 16120 state->rstate.stage = 21; 16123 state->fm1 = state->f; 16124 state->x.ptr.p_double[i] = state->xp1; 16125 state->rstate.stage = 22; 16128 state->fp1 = state->f; 16129 if( ae_fp_neq(state->xm1,state->xp1) ) 16131 state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); 16135 state->g.ptr.p_double[i] = 0; 16137 state->repnfev = state->repnfev+2; 16139 state->x.ptr.p_double[i] = v; 16143 state->f = state->fbase; 16144 state->needf = ae_false; 16150 * NOTE: penalty term from correction is added to FN in order 16151 * to penalize increase in infeasibility. 16153 state->fn = state->f+minbleic_penaltyfactor*state->maxscaledgrad*penalty; 16154 ae_v_move(&state->gn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16155 trimfunction(&state->fn, &state->gn, n, state->trimthreshold, _state); 16156 mcsrch(n, &state->xn, &state->fn, &state->gn, &state->d, &state->stp, state->curstpmax, minbleic_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 16161 * Handle possible failure of the line search 16163 if( mcinfo!=1&&mcinfo!=5 ) 16167 * We can not find step which decreases function value. We have 16168 * two possibilities: 16169 * (a) numerical properties of the function do not allow us to 16170 * find good solution. 16171 * (b) we are close to activation of some constraint, and it is 16172 * so close that step which activates it leads to change in 16173 * target function which is smaller than numerical noise. 16175 * Optimization algorithm must be able to handle case (b), because 16176 * inability to handle it will cause failure when algorithm 16177 * started very close to boundary of the feasible area. 16179 * In order to correctly handle such cases we allow limited amount 16180 * of small steps which increase function value. 
 16183 for(i=0; i<=n-1; i++) 16185 v = v+ae_sqr(state->d.ptr.p_double[i]*state->curstpmax/state->s.ptr.p_double[i], _state); 16187 v = ae_sqrt(v, _state); 16188 if( (state->cidx>=0&&ae_fp_less_eq(v,minbleic_maxnonmonotoniclen))&&state->nonmonotoniccnt>0 ) 16192 * We enforce non-monotonic step: 16193 * * Stp := CurStpMax 16195 * * XN := XC+CurStpMax*D 16196 * * non-monotonic counter is decreased 16198 state->stp = state->curstpmax; 16200 v = state->curstpmax; 16201 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16202 ae_v_addd(&state->xn.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1), v); 16203 state->nonmonotoniccnt = state->nonmonotoniccnt-1; 16209 * Numerical properties of the function does not allow us to solve problem 16211 state->repterminationtype = 7; 16218 * Current point is updated. 16220 ae_v_move(&state->xp.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16221 ae_v_move(&state->gp.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16222 state->fp = state->fc; 16223 actstatus = sasmoveto(&state->sas, &state->xn, state->cidx>=0&&ae_fp_greater_eq(state->stp,state->activationstep), state->cidx, state->cval, _state); 16224 ae_v_move(&state->gc.ptr.p_double[0], 1, &state->gn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16225 state->fc = state->fn; 16226 state->repinneriterationscount = state->repinneriterationscount+1; 16231 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16232 minbleic_clearrequestfields(state, _state); 16233 state->xupdated = ae_true; 16234 state->rstate.stage = 23; 16237 state->xupdated = ae_false; 16241 * Check for stopping. 16243 * Step, gradient and function-based stopping criteria are tested only 16244 * for steps which satisfy Wolfe conditions. 
 16246 * MaxIts-based stopping condition is checked for all steps 16252 * Step is small enough 16256 for(i=0; i<=n-1; i++) 16258 v = v+ae_sqr((state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i])/state->s.ptr.p_double[i], _state); 16259 vv = vv+ae_sqr(state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i], _state); 16261 v = ae_sqrt(v, _state); 16262 vv = ae_sqrt(vv, _state); 16263 if( ae_fp_less_eq(v,state->epsx) ) 16265 state->repterminationtype = 2; 16269 state->lastgoodstep = vv; 16270 minbleic_updateestimateofgoodstep(&state->lastscaledgoodstep, v, _state); 16273 * Function change is small enough 16275 if( ae_fp_less_eq(ae_fabs(state->fp-state->fc, _state),state->epsf*ae_maxreal(ae_fabs(state->fc, _state), ae_maxreal(ae_fabs(state->fp, _state), 1.0, _state), _state)) ) 16279 * Function change is small enough 16281 state->repterminationtype = 1; 16286 if( state->maxits>0&&state->repinneriterationscount>=state->maxits ) 16290 * Required number of iterations was performed 16292 state->repterminationtype = 5; 16298 * Decide where to move: 16299 * * in case only "candidate
" constraints were activated, repeat stage 2 16300 * * in case no constraints was activated, move to stage 3 16301 * * otherwise, move to stage 1 (re-evaluation of the active set) 16322 if( nextaction==0 ) 16328 * Phase 3: L-BFGS step 16330 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16331 minbleic_clearrequestfields(state, _state); 16332 if( ae_fp_neq(state->diffstep,0) ) 16338 * Analytic gradient 16340 state->needfg = ae_true; 16341 state->rstate.stage = 24; 16344 state->needfg = ae_false; 16345 state->repnfev = state->repnfev+1; 16350 * Numerical differentiation 16352 state->needf = ae_true; 16353 state->rstate.stage = 25; 16356 state->fbase = state->f; 16363 v = state->x.ptr.p_double[i]; 16365 if( state->hasbndl.ptr.p_bool[i] ) 16367 b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); 16369 if( state->hasbndu.ptr.p_bool[i] ) 16371 b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); 16377 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 16378 state->rstate.stage = 26; 16381 state->fm2 = state->f; 16382 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 16383 state->rstate.stage = 27; 16386 state->fm1 = state->f; 16387 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 16388 state->rstate.stage = 28; 16391 state->fp1 = state->f; 16392 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 16393 state->rstate.stage = 29; 16396 state->fp2 = state->f; 16397 state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 16398 state->repnfev = state->repnfev+4; 16401 state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; 16402 state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; 16403 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) 16405 state->xm1 = 
state->bndl.ptr.p_double[i]; 16407 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) 16409 state->xp1 = state->bndu.ptr.p_double[i]; 16411 state->x.ptr.p_double[i] = state->xm1; 16412 state->rstate.stage = 30; 16415 state->fm1 = state->f; 16416 state->x.ptr.p_double[i] = state->xp1; 16417 state->rstate.stage = 31; 16420 state->fp1 = state->f; 16421 if( ae_fp_neq(state->xm1,state->xp1) ) 16423 state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); 16427 state->g.ptr.p_double[i] = 0; 16429 state->repnfev = state->repnfev+2; 16431 state->x.ptr.p_double[i] = v; 16435 state->f = state->fbase; 16436 state->needf = ae_false; 16438 state->fc = state->f; 16439 trimprepare(state->fc, &state->trimthreshold, _state); 16440 ae_v_move(&state->gc.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16441 ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16442 sasconstraineddirection(&state->sas, &state->gc, _state); 16443 sasconstraineddirectionprec(&state->sas, &state->d, _state); 16445 for(i=0; i<=n-1; i++) 16447 ginit = ginit+ae_sqr(state->gc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 16449 ginit = ae_sqrt(ginit, _state); 16458 * Main cycle: prepare to 1-D line search 16460 state->p = state->k%m; 16461 state->q = ae_minint(state->k, m-1, _state); 16466 ae_v_moveneg(&state->sk.ptr.pp_double[state->p][0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16467 ae_v_moveneg(&state->yk.ptr.pp_double[state->p][0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16470 * Try to use previous scaled step length as initial estimate for new step. 
 16473 for(i=0; i<=n-1; i++) 16475 v = v+ae_sqr(state->d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); 16477 v = ae_sqrt(v, _state); 16478 if( ae_fp_greater(state->lastscaledgoodstep,0)&&ae_fp_greater(v,0) ) 16480 state->stp = state->lastscaledgoodstep/v; 16488 * Calculate bound on step length 16490 sasexploredirection(&state->sas, &state->d, &state->curstpmax, &state->cidx, &state->cval, _state); 16491 state->activationstep = state->curstpmax; 16492 if( state->cidx>=0&&ae_fp_eq(state->activationstep,0) ) 16496 if( ae_fp_greater(state->stpmax,0) ) 16498 v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16499 v = ae_sqrt(v, _state); 16500 if( ae_fp_greater(v,0) ) 16502 state->curstpmax = ae_minreal(state->curstpmax, state->stpmax/v, _state); 16507 * Report beginning of line search (if requested by caller). 16508 * See description of the MinBLEICState for more information 16509 * about fields accessible to caller. 16511 * Caller may do following: 16512 * * change State.Stp and load better initial estimate of 16514 * Caller may not terminate algorithm. 
 16520 minbleic_clearrequestfields(state, _state); 16521 state->lsstart = ae_true; 16522 state->lbfgssearch = ae_true; 16523 state->boundedstep = state->cidx>=0; 16524 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16525 state->rstate.stage = 32; 16528 state->lsstart = ae_false; 16532 * Minimize F(x+alpha*d) 16534 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16535 ae_v_move(&state->gn.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16536 state->fn = state->fc; 16537 state->mcstage = 0; 16538 mcsrch(n, &state->xn, &state->fn, &state->gn, &state->d, &state->stp, state->curstpmax, minbleic_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 16540 if( state->mcstage==0 ) 16546 * Perform correction (constraints are enforced) 16549 sascorrection(&state->sas, &state->xn, &penalty, _state); 16550 for(i=0; i<=n-1; i++) 16552 state->x.ptr.p_double[i] = state->xn.ptr.p_double[i]; 16556 * Gradient, either user-provided or numerical differentiation 16558 minbleic_clearrequestfields(state, _state); 16559 if( ae_fp_neq(state->diffstep,0) ) 16565 * Analytic gradient 16567 state->needfg = ae_true; 16568 state->rstate.stage = 33; 16571 state->needfg = ae_false; 16572 state->repnfev = state->repnfev+1; 16577 * Numerical differentiation 16579 state->needf = ae_true; 16580 state->rstate.stage = 34; 16583 state->fbase = state->f; 16590 v = state->x.ptr.p_double[i]; 16592 if( state->hasbndl.ptr.p_bool[i] ) 16594 b = b||ae_fp_less(v-state->diffstep*state->s.ptr.p_double[i],state->bndl.ptr.p_double[i]); 16596 if( state->hasbndu.ptr.p_bool[i] ) 16598 b = b||ae_fp_greater(v+state->diffstep*state->s.ptr.p_double[i],state->bndu.ptr.p_double[i]); 16604 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 16605 state->rstate.stage = 35; 16608 state->fm2 = state->f; 16609 state->x.ptr.p_double[i] = 
v-0.5*state->diffstep*state->s.ptr.p_double[i]; 16610 state->rstate.stage = 36; 16613 state->fm1 = state->f; 16614 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 16615 state->rstate.stage = 37; 16618 state->fp1 = state->f; 16619 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 16620 state->rstate.stage = 38; 16623 state->fp2 = state->f; 16624 state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 16625 state->repnfev = state->repnfev+4; 16628 state->xm1 = v-state->diffstep*state->s.ptr.p_double[i]; 16629 state->xp1 = v+state->diffstep*state->s.ptr.p_double[i]; 16630 if( state->hasbndl.ptr.p_bool[i]&&ae_fp_less(state->xm1,state->bndl.ptr.p_double[i]) ) 16632 state->xm1 = state->bndl.ptr.p_double[i]; 16634 if( state->hasbndu.ptr.p_bool[i]&&ae_fp_greater(state->xp1,state->bndu.ptr.p_double[i]) ) 16636 state->xp1 = state->bndu.ptr.p_double[i]; 16638 state->x.ptr.p_double[i] = state->xm1; 16639 state->rstate.stage = 39; 16642 state->fm1 = state->f; 16643 state->x.ptr.p_double[i] = state->xp1; 16644 state->rstate.stage = 40; 16647 state->fp1 = state->f; 16648 if( ae_fp_neq(state->xm1,state->xp1) ) 16650 state->g.ptr.p_double[i] = (state->fp1-state->fm1)/(state->xp1-state->xm1); 16654 state->g.ptr.p_double[i] = 0; 16656 state->repnfev = state->repnfev+2; 16658 state->x.ptr.p_double[i] = v; 16662 state->f = state->fbase; 16663 state->needf = ae_false; 16669 * NOTE: penalty term from correction is added to FN in order 16670 * to penalize increase in infeasibility. 
 16672 state->fn = state->f+minbleic_penaltyfactor*state->maxscaledgrad*penalty; 16673 ae_v_move(&state->gn.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16674 sasconstraineddirection(&state->sas, &state->gn, _state); 16675 trimfunction(&state->fn, &state->gn, n, state->trimthreshold, _state); 16676 mcsrch(n, &state->xn, &state->fn, &state->gn, &state->d, &state->stp, state->curstpmax, minbleic_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 16679 ae_v_add(&state->sk.ptr.pp_double[state->p][0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16680 ae_v_add(&state->yk.ptr.pp_double[state->p][0], 1, &state->gn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16683 * Handle possible failure of the line search 16685 if( mcinfo!=1&&mcinfo!=5 ) 16691 * Current point is updated. 16693 ae_v_move(&state->xp.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16694 ae_v_move(&state->gp.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16695 state->fp = state->fc; 16696 actstatus = sasmoveto(&state->sas, &state->xn, state->cidx>=0&&ae_fp_greater_eq(state->stp,state->activationstep), state->cidx, state->cval, _state); 16697 ae_v_move(&state->gc.ptr.p_double[0], 1, &state->gn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16698 state->fc = state->fn; 16703 ae_v_move(&state->x.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16704 minbleic_clearrequestfields(state, _state); 16705 state->xupdated = ae_true; 16706 state->rstate.stage = 41; 16709 state->xupdated = ae_false; 16711 state->repinneriterationscount = state->repinneriterationscount+1; 16714 * Update length of the good step 16720 for(i=0; i<=n-1; i++) 16722 v = v+ae_sqr((state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i])/state->s.ptr.p_double[i], _state); 16723 vv = vv+ae_sqr(state->sas.xc.ptr.p_double[i]-state->xp.ptr.p_double[i], _state); 16725 state->lastgoodstep = ae_sqrt(vv, _state); 16726 
minbleic_updateestimateofgoodstep(&state->lastscaledgoodstep, ae_sqrt(v, _state), _state); 16730 * Termination of the L-BFGS algorithm: 16731 * a) line search was performed with activation of constraint 16732 * b) scaled gradient decreased below GDecay 16733 * c) iterations counter >= MaxIts 16740 for(i=0; i<=n-1; i++) 16742 v = v+ae_sqr(state->gc.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 16744 if( ae_fp_less(ae_sqrt(v, _state),gdecay*ginit) ) 16748 if( state->maxits>0&&state->repinneriterationscount>=state->maxits ) 16754 * Update L-BFGS model: 16755 * * calculate Rho[k] 16756 * * calculate d(k+1) = -H(k+1)*g(k+1) 16757 * (use constrained preconditioner to perform multiplication) 16759 v = ae_v_dotproduct(&state->yk.ptr.pp_double[state->p][0], 1, &state->sk.ptr.pp_double[state->p][0], 1, ae_v_len(0,n-1)); 16760 vv = ae_v_dotproduct(&state->yk.ptr.pp_double[state->p][0], 1, &state->yk.ptr.pp_double[state->p][0], 1, ae_v_len(0,n-1)); 16761 if( ae_fp_eq(v,0)||ae_fp_eq(vv,0) ) 16765 state->rho.ptr.p_double[state->p] = 1/v; 16766 ae_v_move(&state->work.ptr.p_double[0], 1, &state->gn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16767 for(i=state->k; i>=state->k-state->q; i--) 16770 v = ae_v_dotproduct(&state->sk.ptr.pp_double[ic][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16771 state->theta.ptr.p_double[ic] = v; 16772 vv = v*state->rho.ptr.p_double[ic]; 16773 ae_v_subd(&state->work.ptr.p_double[0], 1, &state->yk.ptr.pp_double[ic][0], 1, ae_v_len(0,n-1), vv); 16775 sasconstraineddirectionprec(&state->sas, &state->work, _state); 16776 for(i=state->k-state->q; i<=state->k; i++) 16779 v = ae_v_dotproduct(&state->yk.ptr.pp_double[ic][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); 16780 vv = state->rho.ptr.p_double[ic]*(-v+state->theta.ptr.p_double[ic]); 16781 ae_v_addd(&state->work.ptr.p_double[0], 1, &state->sk.ptr.pp_double[ic][0], 1, ae_v_len(0,n-1), vv); 16783 ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->work.ptr.p_double[0], 1, 
ae_v_len(0,n-1)); 16784 state->k = state->k+1; 16789 * Decrease decay coefficient. Subsequent L-BFGS stages will 16790 * have more stringent stopping criteria. 16792 gdecay = ae_maxreal(gdecay*minbleic_decaycorrection, minbleic_mindecay, _state); 16795 sasstopoptimization(&state->sas, _state); 16796 state->repouteriterationscount = 1; 16805 state->rstate.ia.ptr.p_int[0] = n; 16806 state->rstate.ia.ptr.p_int[1] = m; 16807 state->rstate.ia.ptr.p_int[2] = i; 16808 state->rstate.ia.ptr.p_int[3] = j; 16809 state->rstate.ia.ptr.p_int[4] = badbfgsits; 16810 state->rstate.ia.ptr.p_int[5] = nextaction; 16811 state->rstate.ia.ptr.p_int[6] = mcinfo; 16812 state->rstate.ia.ptr.p_int[7] = actstatus; 16813 state->rstate.ia.ptr.p_int[8] = ic; 16814 state->rstate.ba.ptr.p_bool[0] = b; 16815 state->rstate.ra.ptr.p_double[0] = v; 16816 state->rstate.ra.ptr.p_double[1] = vv; 16817 state->rstate.ra.ptr.p_double[2] = penalty; 16818 state->rstate.ra.ptr.p_double[3] = ginit; 16819 state->rstate.ra.ptr.p_double[4] = gdecay; 16824 /************************************************************************* 16828 State - algorithm state 16831 X - array[0..N-1], solution 16832 Rep - optimization report. You should check Rep.TerminationType 16833 in order to distinguish successful termination from 16835 * -7 gradient verification failed. 16836 See MinBLEICSetGradientCheck() for more information. 16837 * -3 inconsistent constraints. Feasible point is 16838 either nonexistent or too hard to find. Try to 16839 restart optimizer with better initial approximation 16840 * 1 relative function improvement is no more than EpsF. 16841 * 2 scaled step is no more than EpsX. 16842 * 4 scaled gradient norm is no more than EpsG. 16843 * 5 MaxIts steps was taken 16844 More information about fields of this structure can be 16845 found in the comments on MinBLEICReport datatype. 
16848 Copyright 28.11.2010 by Bochkanov Sergey 16849 *************************************************************************/ 16850 void minbleicresults(minbleicstate* state, 16851 /* Real */ ae_vector* x, 16852 minbleicreport* rep, 16856 ae_vector_clear(x); 16857 _minbleicreport_clear(rep); 16859 minbleicresultsbuf(state, x, rep, _state); 16863 /************************************************************************* 16866 Buffered implementation of MinBLEICResults() which uses pre-allocated buffer 16867 to store X[]. If buffer size is too small, it resizes buffer. It is 16868 intended to be used in the inner cycles of performance critical algorithms 16869 where array reallocation penalty is too large to be ignored. 16872 Copyright 28.11.2010 by Bochkanov Sergey 16873 *************************************************************************/ 16874 void minbleicresultsbuf(minbleicstate* state, 16875 /* Real */ ae_vector* x, 16876 minbleicreport* rep, 16882 if( x->cnt<state->nmain ) 16884 ae_vector_set_length(x, state->nmain, _state); 16886 rep->iterationscount = state->repinneriterationscount; 16887 rep->inneriterationscount = state->repinneriterationscount; 16888 rep->outeriterationscount = state->repouteriterationscount; 16889 rep->nfev = state->repnfev; 16890 rep->varidx = state->repvaridx; 16891 rep->terminationtype = state->repterminationtype; 16892 if( state->repterminationtype>0 ) 16894 ae_v_move(&x->ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,state->nmain-1)); 16898 for(i=0; i<=state->nmain-1; i++) 16900 x->ptr.p_double[i] = _state->v_nan; 16903 rep->debugeqerr = state->repdebugeqerr; 16904 rep->debugfs = state->repdebugfs; 16905 rep->debugff = state->repdebugff; 16906 rep->debugdx = state->repdebugdx; 16907 rep->debugfeasqpits = state->repdebugfeasqpits; 16908 rep->debugfeasgpaits = state->repdebugfeasgpaits; 16912 /************************************************************************* 16913 This subroutine restarts algorithm 
from new point. 16914 All optimization parameters (including constraints) are left unchanged. 16916 This function allows to solve multiple optimization problems (which 16917 must have same number of dimensions) without object reallocation penalty. 16920 State - structure previously allocated with MinBLEICCreate call. 16921 X - new starting point. 16924 Copyright 28.11.2010 by Bochkanov Sergey 16925 *************************************************************************/ 16926 void minbleicrestartfrom(minbleicstate* state, 16927 /* Real */ ae_vector* x, 16936 * First, check for errors in the inputs 16938 ae_assert(x->cnt>=n, "MinBLEICRestartFrom: Length(X)<N
", _state); 16939 ae_assert(isfinitevector(x, n, _state), "MinBLEICRestartFrom: X contains infinite
or NaN values!
", _state); 16944 ae_v_move(&state->xstart.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); 16947 * prepare RComm facilities 16949 ae_vector_set_length(&state->rstate.ia, 8+1, _state); 16950 ae_vector_set_length(&state->rstate.ba, 0+1, _state); 16951 ae_vector_set_length(&state->rstate.ra, 4+1, _state); 16952 state->rstate.stage = -1; 16953 minbleic_clearrequestfields(state, _state); 16954 sasstopoptimization(&state->sas, _state); 16958 /************************************************************************* 16959 This subroutine finalizes internal structures after emergency termination 16960 from State.LSStart report (see comments on MinBLEICState for more information). 16963 State - structure after exit from LSStart report 16966 Copyright 28.11.2010 by Bochkanov Sergey 16967 *************************************************************************/ 16968 void minbleicemergencytermination(minbleicstate* state, ae_state *_state) 16972 sasstopoptimization(&state->sas, _state); 16976 /************************************************************************* 16977 This subroutine turns on verification of the user-supplied analytic 16979 * user calls this subroutine before optimization begins 16980 * MinBLEICOptimize() is called 16981 * prior to actual optimization, for each component of parameters being 16982 optimized X[i] algorithm performs following steps: 16983 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], 16984 where X[i] is i-th component of the initial point and S[i] is a scale 16986 * if needed, steps are bounded with respect to constraints on X[] 16987 * F(X) is evaluated at these trial points 16988 * we perform one more evaluation in the middle point of the interval 16989 * we build cubic model using function values and derivatives at trial 16990 points and we compare its prediction with actual value in the middle 16992 * in case difference between prediction and actual value is higher than 16993 some predetermined 
threshold, algorithm stops with completion code -7; 16994 Rep.VarIdx is set to index of the parameter with incorrect derivative. 16995 * after verification is over, algorithm proceeds to the actual optimization. 16997 NOTE 1: verification needs N (parameters count) gradient evaluations. It 16998 is very costly and you should use it only for low dimensional 16999 problems, when you want to be sure that you've correctly 17000 calculated analytic derivatives. You should not use it in the 17001 production code (unless you want to check derivatives provided by 17004 NOTE 2: you should carefully choose TestStep. Value which is too large 17005 (so large that function behaviour is significantly non-cubic) will 17006 lead to false alarms. You may use different step for different 17007 parameters by means of setting scale with MinBLEICSetScale(). 17009 NOTE 3: this function may lead to false positives. In case it reports that 17010 I-th derivative was calculated incorrectly, you may decrease test 17011 step and try one more time - maybe your function changes too 17012 sharply and your step is too large for such rapidly chanding 17016 State - structure used to store algorithm state 17017 TestStep - verification step: 17018 * TestStep=0 turns verification off 17019 * TestStep>0 activates verification 17022 Copyright 15.06.2012 by Bochkanov Sergey 17023 *************************************************************************/ 17024 void minbleicsetgradientcheck(minbleicstate* state, 17030 ae_assert(ae_isfinite(teststep, _state), "MinBLEICSetGradientCheck: TestStep contains NaN
or Infinite
", _state); 17031 ae_assert(ae_fp_greater_eq(teststep,0), "MinBLEICSetGradientCheck: invalid argument TestStep(TestStep<0)
", _state); 17032 state->teststep = teststep; 17036 /************************************************************************* 17037 Clears request fileds (to be sure that we don't forget to clear something) 17038 *************************************************************************/ 17039 static void minbleic_clearrequestfields(minbleicstate* state, 17044 state->needf = ae_false; 17045 state->needfg = ae_false; 17046 state->xupdated = ae_false; 17047 state->lsstart = ae_false; 17051 /************************************************************************* 17052 Internal initialization subroutine 17053 *************************************************************************/ 17054 static void minbleic_minbleicinitinternal(ae_int_t n, 17055 /* Real */ ae_vector* x, 17057 minbleicstate* state, 17060 ae_frame _frame_block; 17065 ae_frame_make(_state, &_frame_block); 17066 ae_matrix_init(&c, 0, 0, DT_REAL, _state, ae_true); 17067 ae_vector_init(&ct, 0, DT_INT, _state, ae_true); 17073 state->teststep = 0; 17075 state->diffstep = diffstep; 17076 sasinit(n, &state->sas, _state); 17077 ae_vector_set_length(&state->bndl, n, _state); 17078 ae_vector_set_length(&state->hasbndl, n, _state); 17079 ae_vector_set_length(&state->bndu, n, _state); 17080 ae_vector_set_length(&state->hasbndu, n, _state); 17081 ae_vector_set_length(&state->xstart, n, _state); 17082 ae_vector_set_length(&state->gc, n, _state); 17083 ae_vector_set_length(&state->xn, n, _state); 17084 ae_vector_set_length(&state->gn, n, _state); 17085 ae_vector_set_length(&state->xp, n, _state); 17086 ae_vector_set_length(&state->gp, n, _state); 17087 ae_vector_set_length(&state->d, n, _state); 17088 ae_vector_set_length(&state->s, n, _state); 17089 ae_vector_set_length(&state->x, n, _state); 17090 ae_vector_set_length(&state->g, n, _state); 17091 ae_vector_set_length(&state->work, n, _state); 17092 for(i=0; i<=n-1; i++) 17094 state->bndl.ptr.p_double[i] = _state->v_neginf; 17095 state->hasbndl.ptr.p_bool[i] = 
ae_false; 17096 state->bndu.ptr.p_double[i] = _state->v_posinf; 17097 state->hasbndu.ptr.p_bool[i] = ae_false; 17098 state->s.ptr.p_double[i] = 1.0; 17100 minbleicsetlc(state, &c, &ct, 0, _state); 17101 minbleicsetcond(state, 0.0, 0.0, 0.0, 0, _state); 17102 minbleicsetxrep(state, ae_false, _state); 17103 minbleicsetdrep(state, ae_false, _state); 17104 minbleicsetstpmax(state, 0.0, _state); 17105 minbleicsetprecdefault(state, _state); 17106 minbleicrestartfrom(state, x, _state); 17107 ae_frame_leave(_state); 17111 /************************************************************************* 17112 This subroutine updates estimate of the good step length given: 17113 1) previous estimate 17114 2) new length of the good step 17116 It makes sure that estimate does not change too rapidly - ratio of new and 17117 old estimates will be at least 0.01, at most 100.0 17119 In case previous estimate of good step is zero (no estimate), new estimate 17120 is used unconditionally. 17123 Copyright 16.01.2013 by Bochkanov Sergey 17124 *************************************************************************/ 17125 static void minbleic_updateestimateofgoodstep(double* estimate, 17131 if( ae_fp_eq(*estimate,0) ) 17133 *estimate = newstep; 17136 if( ae_fp_less(newstep,*estimate*0.01) ) 17138 *estimate = *estimate*0.01; 17141 if( ae_fp_greater(newstep,*estimate*100) ) 17143 *estimate = *estimate*100; 17146 *estimate = newstep; 17150 ae_bool _minbleicstate_init(void* _p, ae_state *_state, ae_bool make_automatic) 17152 minbleicstate *p = (minbleicstate*)_p; 17153 ae_touch_ptr((void*)p); 17154 if( !_sactiveset_init(&p->sas, _state, make_automatic) ) 17156 if( !ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic) ) 17158 if( !ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic) ) 17160 if( !ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic) ) 17162 if( !ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic) ) 17164 if( !_rcommstate_init(&p->rstate, _state, 
make_automatic) ) 17166 if( !ae_vector_init(&p->gc, 0, DT_REAL, _state, make_automatic) ) 17168 if( !ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic) ) 17170 if( !ae_vector_init(&p->gn, 0, DT_REAL, _state, make_automatic) ) 17172 if( !ae_vector_init(&p->xp, 0, DT_REAL, _state, make_automatic) ) 17174 if( !ae_vector_init(&p->gp, 0, DT_REAL, _state, make_automatic) ) 17176 if( !ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic) ) 17178 if( !ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic) ) 17180 if( !ae_vector_init(&p->hasbndl, 0, DT_BOOL, _state, make_automatic) ) 17182 if( !ae_vector_init(&p->hasbndu, 0, DT_BOOL, _state, make_automatic) ) 17184 if( !ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic) ) 17186 if( !ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic) ) 17188 if( !ae_vector_init(&p->xstart, 0, DT_REAL, _state, make_automatic) ) 17190 if( !_snnlssolver_init(&p->solver, _state, make_automatic) ) 17192 if( !ae_vector_init(&p->tmpprec, 0, DT_REAL, _state, make_automatic) ) 17194 if( !ae_vector_init(&p->work, 0, DT_REAL, _state, make_automatic) ) 17196 if( !_linminstate_init(&p->lstate, _state, make_automatic) ) 17198 if( !ae_vector_init(&p->rho, 0, DT_REAL, _state, make_automatic) ) 17200 if( !ae_matrix_init(&p->yk, 0, 0, DT_REAL, _state, make_automatic) ) 17202 if( !ae_matrix_init(&p->sk, 0, 0, DT_REAL, _state, make_automatic) ) 17204 if( !ae_vector_init(&p->theta, 0, DT_REAL, _state, make_automatic) ) 17210 ae_bool _minbleicstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 17212 minbleicstate *dst = (minbleicstate*)_dst; 17213 minbleicstate *src = (minbleicstate*)_src; 17214 dst->nmain = src->nmain; 17215 dst->nslack = src->nslack; 17216 dst->epsg = src->epsg; 17217 dst->epsf = src->epsf; 17218 dst->epsx = src->epsx; 17219 dst->maxits = src->maxits; 17220 dst->xrep = src->xrep; 17221 dst->drep = src->drep; 17222 dst->stpmax = src->stpmax; 17223 dst->diffstep = 
src->diffstep; 17224 if( !_sactiveset_init_copy(&dst->sas, &src->sas, _state, make_automatic) ) 17226 if( !ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic) ) 17228 dst->prectype = src->prectype; 17229 if( !ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic) ) 17231 if( !ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic) ) 17234 if( !ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic) ) 17236 dst->needf = src->needf; 17237 dst->needfg = src->needfg; 17238 dst->xupdated = src->xupdated; 17239 dst->lsstart = src->lsstart; 17240 dst->lbfgssearch = src->lbfgssearch; 17241 dst->boundedstep = src->boundedstep; 17242 dst->teststep = src->teststep; 17243 if( !_rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic) ) 17245 if( !ae_vector_init_copy(&dst->gc, &src->gc, _state, make_automatic) ) 17247 if( !ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic) ) 17249 if( !ae_vector_init_copy(&dst->gn, &src->gn, _state, make_automatic) ) 17251 if( !ae_vector_init_copy(&dst->xp, &src->xp, _state, make_automatic) ) 17253 if( !ae_vector_init_copy(&dst->gp, &src->gp, _state, make_automatic) ) 17258 if( !ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic) ) 17260 if( !ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic) ) 17262 dst->nec = src->nec; 17263 dst->nic = src->nic; 17264 dst->lastgoodstep = src->lastgoodstep; 17265 dst->lastscaledgoodstep = src->lastscaledgoodstep; 17266 dst->maxscaledgrad = src->maxscaledgrad; 17267 if( !ae_vector_init_copy(&dst->hasbndl, &src->hasbndl, _state, make_automatic) ) 17269 if( !ae_vector_init_copy(&dst->hasbndu, &src->hasbndu, _state, make_automatic) ) 17271 if( !ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic) ) 17273 if( !ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic) ) 17275 dst->repinneriterationscount = src->repinneriterationscount; 17276 dst->repouteriterationscount = 
src->repouteriterationscount; 17277 dst->repnfev = src->repnfev; 17278 dst->repvaridx = src->repvaridx; 17279 dst->repterminationtype = src->repterminationtype; 17280 dst->repdebugeqerr = src->repdebugeqerr; 17281 dst->repdebugfs = src->repdebugfs; 17282 dst->repdebugff = src->repdebugff; 17283 dst->repdebugdx = src->repdebugdx; 17284 dst->repdebugfeasqpits = src->repdebugfeasqpits; 17285 dst->repdebugfeasgpaits = src->repdebugfeasgpaits; 17286 if( !ae_vector_init_copy(&dst->xstart, &src->xstart, _state, make_automatic) ) 17288 if( !_snnlssolver_init_copy(&dst->solver, &src->solver, _state, make_automatic) ) 17290 dst->fbase = src->fbase; 17291 dst->fm2 = src->fm2; 17292 dst->fm1 = src->fm1; 17293 dst->fp1 = src->fp1; 17294 dst->fp2 = src->fp2; 17295 dst->xm1 = src->xm1; 17296 dst->xp1 = src->xp1; 17297 dst->gm1 = src->gm1; 17298 dst->gp1 = src->gp1; 17299 dst->cidx = src->cidx; 17300 dst->cval = src->cval; 17301 if( !ae_vector_init_copy(&dst->tmpprec, &src->tmpprec, _state, make_automatic) ) 17303 dst->nfev = src->nfev; 17304 dst->mcstage = src->mcstage; 17305 dst->stp = src->stp; 17306 dst->curstpmax = src->curstpmax; 17307 dst->activationstep = src->activationstep; 17308 if( !ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic) ) 17310 if( !_linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic) ) 17312 dst->trimthreshold = src->trimthreshold; 17313 dst->nonmonotoniccnt = src->nonmonotoniccnt; 17317 if( !ae_vector_init_copy(&dst->rho, &src->rho, _state, make_automatic) ) 17319 if( !ae_matrix_init_copy(&dst->yk, &src->yk, _state, make_automatic) ) 17321 if( !ae_matrix_init_copy(&dst->sk, &src->sk, _state, make_automatic) ) 17323 if( !ae_vector_init_copy(&dst->theta, &src->theta, _state, make_automatic) ) 17329 void _minbleicstate_clear(void* _p) 17331 minbleicstate *p = (minbleicstate*)_p; 17332 ae_touch_ptr((void*)p); 17333 _sactiveset_clear(&p->sas); 17334 ae_vector_clear(&p->s); 17335 ae_vector_clear(&p->diagh); 17336 
ae_vector_clear(&p->x); 17337 ae_vector_clear(&p->g); 17338 _rcommstate_clear(&p->rstate); 17339 ae_vector_clear(&p->gc); 17340 ae_vector_clear(&p->xn); 17341 ae_vector_clear(&p->gn); 17342 ae_vector_clear(&p->xp); 17343 ae_vector_clear(&p->gp); 17344 ae_vector_clear(&p->d); 17345 ae_matrix_clear(&p->cleic); 17346 ae_vector_clear(&p->hasbndl); 17347 ae_vector_clear(&p->hasbndu); 17348 ae_vector_clear(&p->bndl); 17349 ae_vector_clear(&p->bndu); 17350 ae_vector_clear(&p->xstart); 17351 _snnlssolver_clear(&p->solver); 17352 ae_vector_clear(&p->tmpprec); 17353 ae_vector_clear(&p->work); 17354 _linminstate_clear(&p->lstate); 17355 ae_vector_clear(&p->rho); 17356 ae_matrix_clear(&p->yk); 17357 ae_matrix_clear(&p->sk); 17358 ae_vector_clear(&p->theta); 17362 void _minbleicstate_destroy(void* _p) 17364 minbleicstate *p = (minbleicstate*)_p; 17365 ae_touch_ptr((void*)p); 17366 _sactiveset_destroy(&p->sas); 17367 ae_vector_destroy(&p->s); 17368 ae_vector_destroy(&p->diagh); 17369 ae_vector_destroy(&p->x); 17370 ae_vector_destroy(&p->g); 17371 _rcommstate_destroy(&p->rstate); 17372 ae_vector_destroy(&p->gc); 17373 ae_vector_destroy(&p->xn); 17374 ae_vector_destroy(&p->gn); 17375 ae_vector_destroy(&p->xp); 17376 ae_vector_destroy(&p->gp); 17377 ae_vector_destroy(&p->d); 17378 ae_matrix_destroy(&p->cleic); 17379 ae_vector_destroy(&p->hasbndl); 17380 ae_vector_destroy(&p->hasbndu); 17381 ae_vector_destroy(&p->bndl); 17382 ae_vector_destroy(&p->bndu); 17383 ae_vector_destroy(&p->xstart); 17384 _snnlssolver_destroy(&p->solver); 17385 ae_vector_destroy(&p->tmpprec); 17386 ae_vector_destroy(&p->work); 17387 _linminstate_destroy(&p->lstate); 17388 ae_vector_destroy(&p->rho); 17389 ae_matrix_destroy(&p->yk); 17390 ae_matrix_destroy(&p->sk); 17391 ae_vector_destroy(&p->theta); 17395 ae_bool _minbleicreport_init(void* _p, ae_state *_state, ae_bool make_automatic) 17397 minbleicreport *p = (minbleicreport*)_p; 17398 ae_touch_ptr((void*)p); 17403 ae_bool _minbleicreport_init_copy(void* 
_dst, void* _src, ae_state *_state, ae_bool make_automatic) 17405 minbleicreport *dst = (minbleicreport*)_dst; 17406 minbleicreport *src = (minbleicreport*)_src; 17407 dst->iterationscount = src->iterationscount; 17408 dst->nfev = src->nfev; 17409 dst->varidx = src->varidx; 17410 dst->terminationtype = src->terminationtype; 17411 dst->debugeqerr = src->debugeqerr; 17412 dst->debugfs = src->debugfs; 17413 dst->debugff = src->debugff; 17414 dst->debugdx = src->debugdx; 17415 dst->debugfeasqpits = src->debugfeasqpits; 17416 dst->debugfeasgpaits = src->debugfeasgpaits; 17417 dst->inneriterationscount = src->inneriterationscount; 17418 dst->outeriterationscount = src->outeriterationscount; 17423 void _minbleicreport_clear(void* _p) 17425 minbleicreport *p = (minbleicreport*)_p; 17426 ae_touch_ptr((void*)p); 17430 void _minbleicreport_destroy(void* _p) 17432 minbleicreport *p = (minbleicreport*)_p; 17433 ae_touch_ptr((void*)p); 17439 /************************************************************************* 17440 LIMITED MEMORY BFGS METHOD FOR LARGE SCALE OPTIMIZATION 17443 The subroutine minimizes function F(x) of N arguments by using a quasi- 17444 Newton method (LBFGS scheme) which is optimized to use a minimum amount 17446 The subroutine generates the approximation of an inverse Hessian matrix by 17447 using information about the last M steps of the algorithm (instead of N). 17448 It lessens a required amount of memory from a value of order N^2 to a 17449 value of order 2*N*M. 17453 Algorithm will request following information during its operation: 17454 * function value F and its gradient G (simultaneously) at given point X 17458 1. User initializes algorithm state with MinLBFGSCreate() call 17459 2. User tunes solver parameters with MinLBFGSSetCond() MinLBFGSSetStpMax() 17460 and other functions 17461 3. User calls MinLBFGSOptimize() function which takes algorithm state and 17462 pointer (delegate, etc.) to callback function which calculates F/G. 17463 4. 
User calls MinLBFGSResults() to get solution 17464 5. Optionally user may call MinLBFGSRestartFrom() to solve another problem 17465 with same N/M but another starting point and/or another function. 17466 MinLBFGSRestartFrom() allows to reuse already initialized structure. 17470 N - problem dimension. N>0 17471 M - number of corrections in the BFGS scheme of Hessian 17472 approximation update. Recommended value: 3<=M<=7. The smaller 17473 value causes worse convergence, the bigger will not cause a 17474 considerably better convergence, but will cause a fall in the 17476 X - initial solution approximation, array[0..N-1]. 17480 State - structure which stores algorithm state 17484 1. you may tune stopping conditions with MinLBFGSSetCond() function 17485 2. if target function contains exp() or other fast growing functions, and 17486 optimization algorithm makes too large steps which leads to overflow, 17487 use MinLBFGSSetStpMax() function to bound algorithm's steps. However, 17488 L-BFGS rarely needs such a tuning. 17492 Copyright 02.04.2010 by Bochkanov Sergey 17493 *************************************************************************/ 17494 void minlbfgscreate(ae_int_t n, 17496 /* Real */ ae_vector* x, 17497 minlbfgsstate* state, 17501 _minlbfgsstate_clear(state); 17503 ae_assert(n>=1, "MinLBFGSCreate: N<1!
", _state); 17504 ae_assert(m>=1, "MinLBFGSCreate: M<1
", _state); 17505 ae_assert(m<=n, "MinLBFGSCreate: M>N
", _state); 17506 ae_assert(x->cnt>=n, "MinLBFGSCreate: Length(X)<N!
", _state); 17507 ae_assert(isfinitevector(x, n, _state), "MinLBFGSCreate: X contains infinite
or NaN values!
", _state); 17508 minlbfgscreatex(n, m, x, 0, 0.0, state, _state); 17512 /************************************************************************* 17513 The subroutine is finite difference variant of MinLBFGSCreate(). It uses 17514 finite differences in order to differentiate target function. 17516 Description below contains information which is specific to this function 17517 only. We recommend to read comments on MinLBFGSCreate() in order to get 17518 more information about creation of LBFGS optimizer. 17521 N - problem dimension, N>0: 17522 * if given, only leading N elements of X are used 17523 * if not given, automatically determined from size of X 17524 M - number of corrections in the BFGS scheme of Hessian 17525 approximation update. Recommended value: 3<=M<=7. The smaller 17526 value causes worse convergence, the bigger will not cause a 17527 considerably better convergence, but will cause a fall in the 17529 X - starting point, array[0..N-1]. 17530 DiffStep- differentiation step, >0 17533 State - structure which stores algorithm state 17536 1. algorithm uses 4-point central formula for differentiation. 17537 2. differentiation step along I-th axis is equal to DiffStep*S[I] where 17538 S[] is scaling vector which can be set by MinLBFGSSetScale() call. 17539 3. we recommend you to use moderate values of differentiation step. Too 17540 large step will result in too large truncation errors, while too small 17541 step will result in too large numerical errors. 1.0E-6 can be good 17542 value to start with. 17543 4. Numerical differentiation is very inefficient - one gradient 17544 calculation needs 4*N function evaluations. This function will work for 17545 any N - either small (1...10), moderate (10...100) or large (100...). 17546 However, performance penalty will be too severe for any N's except for 17548 We should also say that code which relies on numerical differentiation 17549 is less robust and precise. LBFGS needs exact gradient values. 
17550 Imprecise gradient may slow down convergence, especially on highly 17551 nonlinear problems. 17552 Thus we recommend to use this function for fast prototyping on small- 17553 dimensional problems only, and to implement analytical gradient as soon 17557 Copyright 16.05.2011 by Bochkanov Sergey 17558 *************************************************************************/ 17559 void minlbfgscreatef(ae_int_t n, 17561 /* Real */ ae_vector* x, 17563 minlbfgsstate* state, 17567 _minlbfgsstate_clear(state); 17569 ae_assert(n>=1, "MinLBFGSCreateF: N too small!
", _state); 17570 ae_assert(m>=1, "MinLBFGSCreateF: M<1
", _state); 17571 ae_assert(m<=n, "MinLBFGSCreateF: M>N
", _state); 17572 ae_assert(x->cnt>=n, "MinLBFGSCreateF: Length(X)<N!
", _state); 17573 ae_assert(isfinitevector(x, n, _state), "MinLBFGSCreateF: X contains infinite
or NaN values!
", _state); 17574 ae_assert(ae_isfinite(diffstep, _state), "MinLBFGSCreateF: DiffStep is infinite
or NaN!
", _state); 17575 ae_assert(ae_fp_greater(diffstep,0), "MinLBFGSCreateF: DiffStep is non-positive!
", _state); 17576 minlbfgscreatex(n, m, x, 0, diffstep, state, _state); 17580 /************************************************************************* 17581 This function sets stopping conditions for L-BFGS optimization algorithm. 17584 State - structure which stores algorithm state 17586 The subroutine finishes its work if the condition 17587 |v|<EpsG is satisfied, where: 17588 * |.| means Euclidian norm 17589 * v - scaled gradient vector, v[i]=g[i]*s[i] 17591 * s - scaling coefficients set by MinLBFGSSetScale() 17593 The subroutine finishes its work if on k+1-th iteration 17594 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} 17597 The subroutine finishes its work if on k+1-th iteration 17598 the condition |v|<=EpsX is fulfilled, where: 17599 * |.| means Euclidian norm 17600 * v - scaled step vector, v[i]=dx[i]/s[i] 17601 * dx - ste pvector, dx=X(k+1)-X(k) 17602 * s - scaling coefficients set by MinLBFGSSetScale() 17603 MaxIts - maximum number of iterations. If MaxIts=0, the number of 17604 iterations is unlimited. 17606 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to 17607 automatic stopping criterion selection (small EpsX). 17610 Copyright 02.04.2010 by Bochkanov Sergey 17611 *************************************************************************/ 17612 void minlbfgssetcond(minlbfgsstate* state, 17621 ae_assert(ae_isfinite(epsg, _state), "MinLBFGSSetCond: EpsG is not finite number!
", _state); 17622 ae_assert(ae_fp_greater_eq(epsg,0), "MinLBFGSSetCond: negative EpsG!
", _state); 17623 ae_assert(ae_isfinite(epsf, _state), "MinLBFGSSetCond: EpsF is not finite number!
", _state); 17624 ae_assert(ae_fp_greater_eq(epsf,0), "MinLBFGSSetCond: negative EpsF!
", _state); 17625 ae_assert(ae_isfinite(epsx, _state), "MinLBFGSSetCond: EpsX is not finite number!
", _state); 17626 ae_assert(ae_fp_greater_eq(epsx,0), "MinLBFGSSetCond: negative EpsX!
", _state); 17627 ae_assert(maxits>=0, "MinLBFGSSetCond: negative MaxIts!
", _state); 17628 if( ((ae_fp_eq(epsg,0)&&ae_fp_eq(epsf,0))&&ae_fp_eq(epsx,0))&&maxits==0 ) 17632 state->epsg = epsg; 17633 state->epsf = epsf; 17634 state->epsx = epsx; 17635 state->maxits = maxits; 17639 /************************************************************************* 17640 This function turns on/off reporting. 17643 State - structure which stores algorithm state 17644 NeedXRep- whether iteration reports are needed or not 17646 If NeedXRep is True, algorithm will call rep() callback function if it is 17647 provided to MinLBFGSOptimize(). 17651 Copyright 02.04.2010 by Bochkanov Sergey 17652 *************************************************************************/ 17653 void minlbfgssetxrep(minlbfgsstate* state, 17659 state->xrep = needxrep; 17663 /************************************************************************* 17664 This function sets maximum step length 17667 State - structure which stores algorithm state 17668 StpMax - maximum step length, >=0. Set StpMax to 0.0 (default), if 17669 you don't want to limit step length. 17671 Use this subroutine when you optimize target function which contains exp() 17672 or other fast growing functions, and optimization algorithm makes too 17673 large steps which leads to overflow. This function allows us to reject 17674 steps that are too large (and therefore expose us to the possible 17675 overflow) without actually calculating function value at the x+stp*d. 17678 Copyright 02.04.2010 by Bochkanov Sergey 17679 *************************************************************************/ 17680 void minlbfgssetstpmax(minlbfgsstate* state, 17686 ae_assert(ae_isfinite(stpmax, _state), "MinLBFGSSetStpMax: StpMax is not finite!
", _state); 17687 ae_assert(ae_fp_greater_eq(stpmax,0), "MinLBFGSSetStpMax: StpMax<0!
", _state); 17688 state->stpmax = stpmax; 17692 /************************************************************************* 17693 This function sets scaling coefficients for LBFGS optimizer. 17695 ALGLIB optimizers use scaling matrices to test stopping conditions (step 17696 size and gradient are scaled before comparison with tolerances). Scale of 17697 the I-th variable is a translation invariant measure of: 17698 a) "how large
" the variable is 17699 b) how large the step should be to make significant changes in the function 17701 Scaling is also used by finite difference variant of the optimizer - step 17702 along I-th axis is equal to DiffStep*S[I]. 17704 In most optimizers (and in the LBFGS too) scaling is NOT a form of 17705 preconditioning. It just affects stopping conditions. You should set 17706 preconditioner by separate call to one of the MinLBFGSSetPrec...() 17709 There is special preconditioning mode, however, which uses scaling 17710 coefficients to form diagonal preconditioning matrix. You can turn this 17711 mode on, if you want. But you should understand that scaling is not the 17712 same thing as preconditioning - these are two different, although related 17713 forms of tuning solver. 17716 State - structure stores algorithm state 17717 S - array[N], non-zero scaling coefficients 17718 S[i] may be negative, sign doesn't matter. 17721 Copyright 14.01.2011 by Bochkanov Sergey 17722 *************************************************************************/ 17723 void minlbfgssetscale(minlbfgsstate* state, 17724 /* Real */ ae_vector* s, 17730 ae_assert(s->cnt>=state->n, "MinLBFGSSetScale: Length(S)<N
", _state); 17731 for(i=0; i<=state->n-1; i++) 17733 ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinLBFGSSetScale: S contains infinite
or NAN elements
", _state); 17734 ae_assert(ae_fp_neq(s->ptr.p_double[i],0), "MinLBFGSSetScale: S contains
zero elements
", _state); 17735 state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); 17740 /************************************************************************* 17741 Extended subroutine for internal use only. 17743 Accepts additional parameters: 17745 Flags - additional settings: 17746 * Flags = 0 means no additional settings 17747 * Flags = 1 "do not allocate memory
". used when solving 17748 a many subsequent tasks with same N/M values. 17749 First call MUST be without this flag bit set, 17750 subsequent calls of MinLBFGS with same 17751 MinLBFGSState structure can set Flags to 1. 17752 DiffStep - numerical differentiation step 17755 Copyright 02.04.2010 by Bochkanov Sergey 17756 *************************************************************************/ 17757 void minlbfgscreatex(ae_int_t n, 17759 /* Real */ ae_vector* x, 17762 minlbfgsstate* state, 17765 ae_bool allocatemem; 17769 ae_assert(n>=1, "MinLBFGS: N too small!
", _state); 17770 ae_assert(m>=1, "MinLBFGS: M too small!
", _state); 17771 ae_assert(m<=n, "MinLBFGS: M too large!
", _state); 17776 state->teststep = 0; 17777 state->diffstep = diffstep; 17780 allocatemem = flags%2==0; 17784 ae_vector_set_length(&state->rho, m, _state); 17785 ae_vector_set_length(&state->theta, m, _state); 17786 ae_matrix_set_length(&state->yk, m, n, _state); 17787 ae_matrix_set_length(&state->sk, m, n, _state); 17788 ae_vector_set_length(&state->d, n, _state); 17789 ae_vector_set_length(&state->x, n, _state); 17790 ae_vector_set_length(&state->s, n, _state); 17791 ae_vector_set_length(&state->g, n, _state); 17792 ae_vector_set_length(&state->work, n, _state); 17794 minlbfgssetcond(state, 0, 0, 0, 0, _state); 17795 minlbfgssetxrep(state, ae_false, _state); 17796 minlbfgssetstpmax(state, 0, _state); 17797 minlbfgsrestartfrom(state, x, _state); 17798 for(i=0; i<=n-1; i++) 17800 state->s.ptr.p_double[i] = 1.0; 17802 state->prectype = 0; 17806 /************************************************************************* 17807 Modification of the preconditioner: default preconditioner (simple 17808 scaling, same for all elements of X) is used. 17811 State - structure which stores algorithm state 17813 NOTE: you can change preconditioner "on the fly
", during algorithm 17817 Copyright 13.10.2010 by Bochkanov Sergey 17818 *************************************************************************/ 17819 void minlbfgssetprecdefault(minlbfgsstate* state, ae_state *_state) 17823 state->prectype = 0; 17827 /************************************************************************* 17828 Modification of the preconditioner: Cholesky factorization of approximate 17832 State - structure which stores algorithm state 17833 P - triangular preconditioner, Cholesky factorization of 17834 the approximate Hessian. array[0..N-1,0..N-1], 17835 (if larger, only leading N elements are used). 17836 IsUpper - whether upper or lower triangle of P is given 17837 (other triangle is not referenced) 17839 After call to this function preconditioner is changed to P (P is copied 17840 into the internal buffer). 17842 NOTE: you can change preconditioner "on the fly
", during algorithm 17845 NOTE 2: P should be nonsingular. Exception will be thrown otherwise. 17848 Copyright 13.10.2010 by Bochkanov Sergey 17849 *************************************************************************/ 17850 void minlbfgssetpreccholesky(minlbfgsstate* state, 17851 /* Real */ ae_matrix* p, 17859 ae_assert(isfinitertrmatrix(p, state->n, isupper, _state), "MinLBFGSSetPrecCholesky: P contains infinite
or NAN values!
", _state); 17861 for(i=0; i<=state->n-1; i++) 17863 mx = ae_maxreal(mx, ae_fabs(p->ptr.pp_double[i][i], _state), _state); 17865 ae_assert(ae_fp_greater(mx,0), "MinLBFGSSetPrecCholesky: P is strictly singular!
", _state); 17866 if( state->denseh.rows<state->n||state->denseh.cols<state->n ) 17868 ae_matrix_set_length(&state->denseh, state->n, state->n, _state); 17870 state->prectype = 1; 17873 rmatrixcopy(state->n, state->n, p, 0, 0, &state->denseh, 0, 0, _state); 17877 rmatrixtranspose(state->n, state->n, p, 0, 0, &state->denseh, 0, 0, _state); 17882 /************************************************************************* 17883 Modification of the preconditioner: diagonal of approximate Hessian is 17887 State - structure which stores algorithm state 17888 D - diagonal of the approximate Hessian, array[0..N-1], 17889 (if larger, only leading N elements are used). 17891 NOTE: you can change preconditioner "on the fly
", during algorithm 17894 NOTE 2: D[i] should be positive. Exception will be thrown otherwise. 17896 NOTE 3: you should pass diagonal of approximate Hessian - NOT ITS INVERSE. 17899 Copyright 13.10.2010 by Bochkanov Sergey 17900 *************************************************************************/ 17901 void minlbfgssetprecdiag(minlbfgsstate* state, 17902 /* Real */ ae_vector* d, 17908 ae_assert(d->cnt>=state->n, "MinLBFGSSetPrecDiag: D is too
short", _state); 17909 for(i=0; i<=state->n-1; i++) 17911 ae_assert(ae_isfinite(d->ptr.p_double[i], _state), "MinLBFGSSetPrecDiag: D contains infinite
or NAN elements
", _state); 17912 ae_assert(ae_fp_greater(d->ptr.p_double[i],0), "MinLBFGSSetPrecDiag: D contains non-positive elements
", _state); 17914 rvectorsetlengthatleast(&state->diagh, state->n, _state); 17915 state->prectype = 2; 17916 for(i=0; i<=state->n-1; i++) 17918 state->diagh.ptr.p_double[i] = d->ptr.p_double[i]; 17923 /************************************************************************* 17924 Modification of the preconditioner: scale-based diagonal preconditioning. 17926 This preconditioning mode can be useful when you don't have approximate 17927 diagonal of Hessian, but you know that your variables are badly scaled 17928 (for example, one variable is in [1,10], and another in [1000,100000]), 17929 and most part of the ill-conditioning comes from different scales of vars. 17931 In this case simple scale-based preconditioner, with H[i] = 1/(s[i]^2), 17932 can greatly improve convergence. 17934 IMPRTANT: you should set scale of your variables with MinLBFGSSetScale() 17935 call (before or after MinLBFGSSetPrecScale() call). Without knowledge of 17936 the scale of your variables scale-based preconditioner will be just unit 17940 State - structure which stores algorithm state 17943 Copyright 13.10.2010 by Bochkanov Sergey 17944 *************************************************************************/ 17945 void minlbfgssetprecscale(minlbfgsstate* state, ae_state *_state) 17949 state->prectype = 3; 17953 /************************************************************************* 17956 1. This function has two different implementations: one which uses exact 17957 (analytical) user-supplied gradient, and one which uses function value 17958 only and numerically differentiates function in order to obtain 17961 Depending on the specific function used to create optimizer object 17962 (either MinLBFGSCreate() for analytical gradient or MinLBFGSCreateF() 17963 for numerical differentiation) you should choose appropriate variant of 17964 MinLBFGSOptimize() - one which accepts function AND gradient or one 17965 which accepts function ONLY. 
17967 Be careful to choose variant of MinLBFGSOptimize() which corresponds to 17968 your optimization scheme! Table below lists different combinations of 17969 callback (function/gradient) passed to MinLBFGSOptimize() and specific 17970 function used to create optimizer. 17973 | USER PASSED TO MinLBFGSOptimize() 17974 CREATED WITH | function only | function and gradient 17975 ------------------------------------------------------------ 17976 MinLBFGSCreateF() | work FAIL 17977 MinLBFGSCreate() | FAIL work 17979 Here "FAIL" denotes inappropriate combinations of optimizer creation 17980 function and MinLBFGSOptimize() version. Attemps to use such 17981 combination (for example, to create optimizer with MinLBFGSCreateF() and 17982 to pass gradient information to MinCGOptimize()) will lead to exception 17983 being thrown. Either you did not pass gradient when it WAS needed or 17984 you passed gradient when it was NOT needed. 17987 Copyright 20.03.2009 by Bochkanov Sergey 17988 *************************************************************************/ 17989 ae_bool minlbfgsiteration(minlbfgsstate* state, ae_state *_state) 18004 * Reverse communication preparations 18005 * I know it looks ugly, but it works the same way 18006 * anywhere from C++ to Python. 
18008 * This code initializes locals by: 18009 * * random values determined during code 18010 * generation - on first subroutine call 18011 * * values from previous call - on subsequent calls 18013 if( state->rstate.stage>=0 ) 18015 n = state->rstate.ia.ptr.p_int[0]; 18016 m = state->rstate.ia.ptr.p_int[1]; 18017 i = state->rstate.ia.ptr.p_int[2]; 18018 j = state->rstate.ia.ptr.p_int[3]; 18019 ic = state->rstate.ia.ptr.p_int[4]; 18020 mcinfo = state->rstate.ia.ptr.p_int[5]; 18021 v = state->rstate.ra.ptr.p_double[0]; 18022 vv = state->rstate.ra.ptr.p_double[1]; 18035 if( state->rstate.stage==0 ) 18039 if( state->rstate.stage==1 ) 18043 if( state->rstate.stage==2 ) 18047 if( state->rstate.stage==3 ) 18051 if( state->rstate.stage==4 ) 18055 if( state->rstate.stage==5 ) 18059 if( state->rstate.stage==6 ) 18063 if( state->rstate.stage==7 ) 18067 if( state->rstate.stage==8 ) 18071 if( state->rstate.stage==9 ) 18075 if( state->rstate.stage==10 ) 18079 if( state->rstate.stage==11 ) 18083 if( state->rstate.stage==12 ) 18087 if( state->rstate.stage==13 ) 18091 if( state->rstate.stage==14 ) 18095 if( state->rstate.stage==15 ) 18099 if( state->rstate.stage==16 ) 18109 * Unload frequently used variables from State structure 18110 * (just for typing convenience) 18114 state->repterminationtype = 0; 18115 state->repiterationscount = 0; 18116 state->repvaridx = -1; 18117 state->repnfev = 0; 18120 * Check, that transferred derivative value is right 18122 minlbfgs_clearrequestfields(state, _state); 18123 if( !(ae_fp_eq(state->diffstep,0)&&ae_fp_greater(state->teststep,0)) ) 18127 state->needfg = ae_true; 18134 v = state->x.ptr.p_double[i]; 18135 state->x.ptr.p_double[i] = v-state->teststep*state->s.ptr.p_double[i]; 18136 state->rstate.stage = 0; 18139 state->fm1 = state->f; 18140 state->fp1 = state->g.ptr.p_double[i]; 18141 state->x.ptr.p_double[i] = v+state->teststep*state->s.ptr.p_double[i]; 18142 state->rstate.stage = 1; 18145 state->fm2 = state->f; 18146 state->fp2 = 
state->g.ptr.p_double[i]; 18147 state->x.ptr.p_double[i] = v; 18148 state->rstate.stage = 2; 18153 * 2*State.TestStep - scale parameter 18154 * width of segment [Xi-TestStep;Xi+TestStep] 18156 if( !derivativecheck(state->fm1, state->fp1, state->fm2, state->fp2, state->f, state->g.ptr.p_double[i], 2*state->teststep, _state) ) 18158 state->repvaridx = i; 18159 state->repterminationtype = -7; 18166 state->needfg = ae_false; 18170 * Calculate F/G at the initial point 18172 minlbfgs_clearrequestfields(state, _state); 18173 if( ae_fp_neq(state->diffstep,0) ) 18177 state->needfg = ae_true; 18178 state->rstate.stage = 3; 18181 state->needfg = ae_false; 18184 state->needf = ae_true; 18185 state->rstate.stage = 4; 18188 state->fbase = state->f; 18195 v = state->x.ptr.p_double[i]; 18196 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 18197 state->rstate.stage = 5; 18200 state->fm2 = state->f; 18201 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 18202 state->rstate.stage = 6; 18205 state->fm1 = state->f; 18206 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 18207 state->rstate.stage = 7; 18210 state->fp1 = state->f; 18211 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 18212 state->rstate.stage = 8; 18215 state->fp2 = state->f; 18216 state->x.ptr.p_double[i] = v; 18217 state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 18221 state->f = state->fbase; 18222 state->needf = ae_false; 18224 trimprepare(state->f, &state->trimthreshold, _state); 18229 minlbfgs_clearrequestfields(state, _state); 18230 state->xupdated = ae_true; 18231 state->rstate.stage = 9; 18234 state->xupdated = ae_false; 18236 state->repnfev = 1; 18237 state->fold = state->f; 18239 for(i=0; i<=n-1; i++) 18241 v = v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 18243 if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) 
18245 state->repterminationtype = 4; 18251 * Choose initial step and direction. 18252 * Apply preconditioner, if we have something other than default. 18254 ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18255 if( state->prectype==0 ) 18259 * Default preconditioner is used, but we can't use it before iterations will start 18261 v = ae_v_dotproduct(&state->g.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18262 v = ae_sqrt(v, _state); 18263 if( ae_fp_eq(state->stpmax,0) ) 18265 state->stp = ae_minreal(1.0/v, 1, _state); 18269 state->stp = ae_minreal(1.0/v, state->stpmax, _state); 18272 if( state->prectype==1 ) 18276 * Cholesky preconditioner is used 18278 fblscholeskysolve(&state->denseh, 1.0, n, ae_true, &state->d, &state->autobuf, _state); 18281 if( state->prectype==2 ) 18285 * diagonal approximation is used 18287 for(i=0; i<=n-1; i++) 18289 state->d.ptr.p_double[i] = state->d.ptr.p_double[i]/state->diagh.ptr.p_double[i]; 18293 if( state->prectype==3 ) 18297 * scale-based preconditioner is used 18299 for(i=0; i<=n-1; i++) 18301 state->d.ptr.p_double[i] = state->d.ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]; 18317 * Main cycle: prepare to 1-D line search 18319 state->p = state->k%m; 18320 state->q = ae_minint(state->k, m-1, _state); 18325 ae_v_moveneg(&state->sk.ptr.pp_double[state->p][0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18326 ae_v_moveneg(&state->yk.ptr.pp_double[state->p][0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18329 * Minimize F(x+alpha*d) 18330 * Calculate S[k], Y[k] 18332 state->mcstage = 0; 18337 linminnormalized(&state->d, &state->stp, n, _state); 18338 mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->stpmax, minlbfgs_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 18340 if( state->mcstage==0 ) 18344 minlbfgs_clearrequestfields(state, _state); 18345 if( 
ae_fp_neq(state->diffstep,0) ) 18349 state->needfg = ae_true; 18350 state->rstate.stage = 10; 18353 state->needfg = ae_false; 18356 state->needf = ae_true; 18357 state->rstate.stage = 11; 18360 state->fbase = state->f; 18367 v = state->x.ptr.p_double[i]; 18368 state->x.ptr.p_double[i] = v-state->diffstep*state->s.ptr.p_double[i]; 18369 state->rstate.stage = 12; 18372 state->fm2 = state->f; 18373 state->x.ptr.p_double[i] = v-0.5*state->diffstep*state->s.ptr.p_double[i]; 18374 state->rstate.stage = 13; 18377 state->fm1 = state->f; 18378 state->x.ptr.p_double[i] = v+0.5*state->diffstep*state->s.ptr.p_double[i]; 18379 state->rstate.stage = 14; 18382 state->fp1 = state->f; 18383 state->x.ptr.p_double[i] = v+state->diffstep*state->s.ptr.p_double[i]; 18384 state->rstate.stage = 15; 18387 state->fp2 = state->f; 18388 state->x.ptr.p_double[i] = v; 18389 state->g.ptr.p_double[i] = (8*(state->fp1-state->fm1)-(state->fp2-state->fm2))/(6*state->diffstep*state->s.ptr.p_double[i]); 18393 state->f = state->fbase; 18394 state->needf = ae_false; 18396 trimfunction(&state->f, &state->g, n, state->trimthreshold, _state); 18397 mcsrch(n, &state->x, &state->f, &state->g, &state->d, &state->stp, state->stpmax, minlbfgs_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 18408 minlbfgs_clearrequestfields(state, _state); 18409 state->xupdated = ae_true; 18410 state->rstate.stage = 16; 18413 state->xupdated = ae_false; 18415 state->repnfev = state->repnfev+state->nfev; 18416 state->repiterationscount = state->repiterationscount+1; 18417 ae_v_add(&state->sk.ptr.pp_double[state->p][0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18418 ae_v_add(&state->yk.ptr.pp_double[state->p][0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18421 * Stopping conditions 18423 if( state->repiterationscount>=state->maxits&&state->maxits>0 ) 18427 * Too many iterations 18429 state->repterminationtype = 5; 18434 for(i=0; i<=n-1; i++) 18436 v = 
v+ae_sqr(state->g.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 18438 if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsg) ) 18442 * Gradient is small enough 18444 state->repterminationtype = 4; 18448 if( ae_fp_less_eq(state->fold-state->f,state->epsf*ae_maxreal(ae_fabs(state->fold, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) 18452 * F(k+1)-F(k) is small enough 18454 state->repterminationtype = 1; 18459 for(i=0; i<=n-1; i++) 18461 v = v+ae_sqr(state->sk.ptr.pp_double[state->p][i]/state->s.ptr.p_double[i], _state); 18463 if( ae_fp_less_eq(ae_sqrt(v, _state),state->epsx) ) 18467 * X(k+1)-X(k) is small enough 18469 state->repterminationtype = 2; 18475 * If Wolfe conditions are satisfied, we can update 18476 * limited memory model. 18478 * However, if conditions are not satisfied (NFEV limit is met, 18479 * function is too wild, ...), we'll skip L-BFGS update 18487 * In such cases we'll initialize search direction by 18488 * antigradient vector, because it leads to more 18489 * transparent code with less number of special cases 18491 state->fold = state->f; 18492 ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18498 * Calculate Rho[k], GammaK 18500 v = ae_v_dotproduct(&state->yk.ptr.pp_double[state->p][0], 1, &state->sk.ptr.pp_double[state->p][0], 1, ae_v_len(0,n-1)); 18501 vv = ae_v_dotproduct(&state->yk.ptr.pp_double[state->p][0], 1, &state->yk.ptr.pp_double[state->p][0], 1, ae_v_len(0,n-1)); 18502 if( ae_fp_eq(v,0)||ae_fp_eq(vv,0) ) 18506 * Rounding errors make further iterations impossible. 
18508 state->repterminationtype = -2; 18512 state->rho.ptr.p_double[state->p] = 1/v; 18513 state->gammak = v/vv; 18516 * Calculate d(k+1) = -H(k+1)*g(k+1) 18518 * for I:=K downto K-Q do 18519 * V = s(i)^T * work(iteration:I) 18521 * work(iteration:I+1) = work(iteration:I) - V*Rho(i)*y(i) 18522 * work(last iteration) = H0*work(last iteration) - preconditioner 18523 * for I:=K-Q to K do 18524 * V = y(i)^T*work(iteration:I) 18525 * work(iteration:I+1) = work(iteration:I) +(-V+theta(i))*Rho(i)*s(i) 18527 * NOW WORK CONTAINS d(k+1) 18529 ae_v_move(&state->work.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18530 for(i=state->k; i>=state->k-state->q; i--) 18533 v = ae_v_dotproduct(&state->sk.ptr.pp_double[ic][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18534 state->theta.ptr.p_double[ic] = v; 18535 vv = v*state->rho.ptr.p_double[ic]; 18536 ae_v_subd(&state->work.ptr.p_double[0], 1, &state->yk.ptr.pp_double[ic][0], 1, ae_v_len(0,n-1), vv); 18538 if( state->prectype==0 ) 18542 * Simple preconditioner is used 18545 ae_v_muld(&state->work.ptr.p_double[0], 1, ae_v_len(0,n-1), v); 18547 if( state->prectype==1 ) 18551 * Cholesky preconditioner is used 18553 fblscholeskysolve(&state->denseh, 1, n, ae_true, &state->work, &state->autobuf, _state); 18555 if( state->prectype==2 ) 18559 * diagonal approximation is used 18561 for(i=0; i<=n-1; i++) 18563 state->work.ptr.p_double[i] = state->work.ptr.p_double[i]/state->diagh.ptr.p_double[i]; 18566 if( state->prectype==3 ) 18570 * scale-based preconditioner is used 18572 for(i=0; i<=n-1; i++) 18574 state->work.ptr.p_double[i] = state->work.ptr.p_double[i]*state->s.ptr.p_double[i]*state->s.ptr.p_double[i]; 18577 for(i=state->k-state->q; i<=state->k; i++) 18580 v = ae_v_dotproduct(&state->yk.ptr.pp_double[ic][0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18581 vv = state->rho.ptr.p_double[ic]*(-v+state->theta.ptr.p_double[ic]); 18582 ae_v_addd(&state->work.ptr.p_double[0], 1, 
&state->sk.ptr.pp_double[ic][0], 1, ae_v_len(0,n-1), vv); 18584 ae_v_moveneg(&state->d.ptr.p_double[0], 1, &state->work.ptr.p_double[0], 1, ae_v_len(0,n-1)); 18589 state->fold = state->f; 18590 state->k = state->k+1; 18602 state->rstate.ia.ptr.p_int[0] = n; 18603 state->rstate.ia.ptr.p_int[1] = m; 18604 state->rstate.ia.ptr.p_int[2] = i; 18605 state->rstate.ia.ptr.p_int[3] = j; 18606 state->rstate.ia.ptr.p_int[4] = ic; 18607 state->rstate.ia.ptr.p_int[5] = mcinfo; 18608 state->rstate.ra.ptr.p_double[0] = v; 18609 state->rstate.ra.ptr.p_double[1] = vv; 18614 /************************************************************************* 18615 L-BFGS algorithm results 18618 State - algorithm state 18621 X - array[0..N-1], solution 18622 Rep - optimization report: 18623 * Rep.TerminationType completion code: 18624 * -7 gradient verification failed. 18625 See MinLBFGSSetGradientCheck() for more information. 18626 * -2 rounding errors prevent further improvement. 18627 X contains best point found. 18628 * -1 incorrect parameters were specified 18629 * 1 relative function improvement is no more than 18631 * 2 relative step is no more than EpsX. 18632 * 4 gradient norm is no more than EpsG 18633 * 5 MaxIts steps was taken 18634 * 7 stopping conditions are too stringent, 18635 further improvement is impossible 18636 * Rep.IterationsCount contains iterations count 18637 * NFEV contains number of function calculations 18640 Copyright 02.04.2010 by Bochkanov Sergey 18641 *************************************************************************/ 18642 void minlbfgsresults(minlbfgsstate* state, 18643 /* Real */ ae_vector* x, 18644 minlbfgsreport* rep, 18648 ae_vector_clear(x); 18649 _minlbfgsreport_clear(rep); 18651 minlbfgsresultsbuf(state, x, rep, _state); 18655 /************************************************************************* 18656 L-BFGS algorithm results 18658 Buffered implementation of MinLBFGSResults which uses pre-allocated buffer 18659 to store X[]. 
If buffer size is too small, it resizes buffer. It is 18660 intended to be used in the inner cycles of performance critical algorithms 18661 where array reallocation penalty is too large to be ignored. 18664 Copyright 20.08.2010 by Bochkanov Sergey 18665 *************************************************************************/ 18666 void minlbfgsresultsbuf(minlbfgsstate* state, 18667 /* Real */ ae_vector* x, 18668 minlbfgsreport* rep, 18673 if( x->cnt<state->n ) 18675 ae_vector_set_length(x, state->n, _state); 18677 ae_v_move(&x->ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 18678 rep->iterationscount = state->repiterationscount; 18679 rep->nfev = state->repnfev; 18680 rep->varidx = state->repvaridx; 18681 rep->terminationtype = state->repterminationtype; 18685 /************************************************************************* 18686 This subroutine restarts LBFGS algorithm from new point. All optimization 18687 parameters are left unchanged. 18689 This function allows to solve multiple optimization problems (which 18690 must have same number of dimensions) without object reallocation penalty. 18693 State - structure used to store algorithm state 18694 X - new starting point. 18697 Copyright 30.07.2010 by Bochkanov Sergey 18698 *************************************************************************/ 18699 void minlbfgsrestartfrom(minlbfgsstate* state, 18700 /* Real */ ae_vector* x, 18705 ae_assert(x->cnt>=state->n, "MinLBFGSRestartFrom: Length(X)<N!
", _state); 18706 ae_assert(isfinitevector(x, state->n, _state), "MinLBFGSRestartFrom: X contains infinite
or NaN values!
", _state); 18707 ae_v_move(&state->x.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 18708 ae_vector_set_length(&state->rstate.ia, 5+1, _state); 18709 ae_vector_set_length(&state->rstate.ra, 1+1, _state); 18710 state->rstate.stage = -1; 18711 minlbfgs_clearrequestfields(state, _state); 18715 /************************************************************************* 18716 This subroutine turns on verification of the user-supplied analytic 18718 * user calls this subroutine before optimization begins 18719 * MinLBFGSOptimize() is called 18720 * prior to actual optimization, for each component of parameters being 18721 optimized X[i] algorithm performs following steps: 18722 * two trial steps are made to X[i]-TestStep*S[i] and X[i]+TestStep*S[i], 18723 where X[i] is i-th component of the initial point and S[i] is a scale 18725 * if needed, steps are bounded with respect to constraints on X[] 18726 * F(X) is evaluated at these trial points 18727 * we perform one more evaluation in the middle point of the interval 18728 * we build cubic model using function values and derivatives at trial 18729 points and we compare its prediction with actual value in the middle 18731 * in case difference between prediction and actual value is higher than 18732 some predetermined threshold, algorithm stops with completion code -7; 18733 Rep.VarIdx is set to index of the parameter with incorrect derivative. 18734 * after verification is over, algorithm proceeds to the actual optimization. 18736 NOTE 1: verification needs N (parameters count) gradient evaluations. It 18737 is very costly and you should use it only for low dimensional 18738 problems, when you want to be sure that you've correctly 18739 calculated analytic derivatives. You should not use it in the 18740 production code (unless you want to check derivatives provided by 18743 NOTE 2: you should carefully choose TestStep. 
Value which is too large 18744 (so large that function behaviour is significantly non-cubic) will 18745 lead to false alarms. You may use different step for different 18746 parameters by means of setting scale with MinLBFGSSetScale(). 18748 NOTE 3: this function may lead to false positives. In case it reports that 18749 I-th derivative was calculated incorrectly, you may decrease test 18750 step and try one more time - maybe your function changes too 18751 sharply and your step is too large for such rapidly chanding 18755 State - structure used to store algorithm state 18756 TestStep - verification step: 18757 * TestStep=0 turns verification off 18758 * TestStep>0 activates verification 18761 Copyright 24.05.2012 by Bochkanov Sergey 18762 *************************************************************************/ 18763 void minlbfgssetgradientcheck(minlbfgsstate* state, 18769 ae_assert(ae_isfinite(teststep, _state), "MinLBFGSSetGradientCheck: TestStep contains NaN
or Infinite
", _state); 18770 ae_assert(ae_fp_greater_eq(teststep,0), "MinLBFGSSetGradientCheck: invalid argument TestStep(TestStep<0)
", _state); 18771 state->teststep = teststep; 18775 /************************************************************************* 18776 Clears request fileds (to be sure that we don't forgot to clear something) 18777 *************************************************************************/ 18778 static void minlbfgs_clearrequestfields(minlbfgsstate* state, 18783 state->needf = ae_false; 18784 state->needfg = ae_false; 18785 state->xupdated = ae_false; 18789 ae_bool _minlbfgsstate_init(void* _p, ae_state *_state, ae_bool make_automatic) 18791 minlbfgsstate *p = (minlbfgsstate*)_p; 18792 ae_touch_ptr((void*)p); 18793 if( !ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic) ) 18795 if( !ae_vector_init(&p->rho, 0, DT_REAL, _state, make_automatic) ) 18797 if( !ae_matrix_init(&p->yk, 0, 0, DT_REAL, _state, make_automatic) ) 18799 if( !ae_matrix_init(&p->sk, 0, 0, DT_REAL, _state, make_automatic) ) 18801 if( !ae_vector_init(&p->theta, 0, DT_REAL, _state, make_automatic) ) 18803 if( !ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic) ) 18805 if( !ae_vector_init(&p->work, 0, DT_REAL, _state, make_automatic) ) 18807 if( !ae_matrix_init(&p->denseh, 0, 0, DT_REAL, _state, make_automatic) ) 18809 if( !ae_vector_init(&p->diagh, 0, DT_REAL, _state, make_automatic) ) 18811 if( !ae_vector_init(&p->autobuf, 0, DT_REAL, _state, make_automatic) ) 18813 if( !ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic) ) 18815 if( !ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic) ) 18817 if( !_rcommstate_init(&p->rstate, _state, make_automatic) ) 18819 if( !_linminstate_init(&p->lstate, _state, make_automatic) ) 18825 ae_bool _minlbfgsstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 18827 minlbfgsstate *dst = (minlbfgsstate*)_dst; 18828 minlbfgsstate *src = (minlbfgsstate*)_src; 18831 dst->epsg = src->epsg; 18832 dst->epsf = src->epsf; 18833 dst->epsx = src->epsx; 18834 dst->maxits = src->maxits; 18835 dst->xrep = src->xrep; 18836 
dst->stpmax = src->stpmax; 18837 if( !ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic) ) 18839 dst->diffstep = src->diffstep; 18840 dst->nfev = src->nfev; 18841 dst->mcstage = src->mcstage; 18845 if( !ae_vector_init_copy(&dst->rho, &src->rho, _state, make_automatic) ) 18847 if( !ae_matrix_init_copy(&dst->yk, &src->yk, _state, make_automatic) ) 18849 if( !ae_matrix_init_copy(&dst->sk, &src->sk, _state, make_automatic) ) 18851 if( !ae_vector_init_copy(&dst->theta, &src->theta, _state, make_automatic) ) 18853 if( !ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic) ) 18855 dst->stp = src->stp; 18856 if( !ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic) ) 18858 dst->fold = src->fold; 18859 dst->trimthreshold = src->trimthreshold; 18860 dst->prectype = src->prectype; 18861 dst->gammak = src->gammak; 18862 if( !ae_matrix_init_copy(&dst->denseh, &src->denseh, _state, make_automatic) ) 18864 if( !ae_vector_init_copy(&dst->diagh, &src->diagh, _state, make_automatic) ) 18866 dst->fbase = src->fbase; 18867 dst->fm2 = src->fm2; 18868 dst->fm1 = src->fm1; 18869 dst->fp1 = src->fp1; 18870 dst->fp2 = src->fp2; 18871 if( !ae_vector_init_copy(&dst->autobuf, &src->autobuf, _state, make_automatic) ) 18873 if( !ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic) ) 18876 if( !ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic) ) 18878 dst->needf = src->needf; 18879 dst->needfg = src->needfg; 18880 dst->xupdated = src->xupdated; 18881 dst->teststep = src->teststep; 18882 if( !_rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic) ) 18884 dst->repiterationscount = src->repiterationscount; 18885 dst->repnfev = src->repnfev; 18886 dst->repvaridx = src->repvaridx; 18887 dst->repterminationtype = src->repterminationtype; 18888 if( !_linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic) ) 18894 void _minlbfgsstate_clear(void* _p) 18896 minlbfgsstate *p = (minlbfgsstate*)_p; 18897 
ae_touch_ptr((void*)p); 18898 ae_vector_clear(&p->s); 18899 ae_vector_clear(&p->rho); 18900 ae_matrix_clear(&p->yk); 18901 ae_matrix_clear(&p->sk); 18902 ae_vector_clear(&p->theta); 18903 ae_vector_clear(&p->d); 18904 ae_vector_clear(&p->work); 18905 ae_matrix_clear(&p->denseh); 18906 ae_vector_clear(&p->diagh); 18907 ae_vector_clear(&p->autobuf); 18908 ae_vector_clear(&p->x); 18909 ae_vector_clear(&p->g); 18910 _rcommstate_clear(&p->rstate); 18911 _linminstate_clear(&p->lstate); 18915 void _minlbfgsstate_destroy(void* _p) 18917 minlbfgsstate *p = (minlbfgsstate*)_p; 18918 ae_touch_ptr((void*)p); 18919 ae_vector_destroy(&p->s); 18920 ae_vector_destroy(&p->rho); 18921 ae_matrix_destroy(&p->yk); 18922 ae_matrix_destroy(&p->sk); 18923 ae_vector_destroy(&p->theta); 18924 ae_vector_destroy(&p->d); 18925 ae_vector_destroy(&p->work); 18926 ae_matrix_destroy(&p->denseh); 18927 ae_vector_destroy(&p->diagh); 18928 ae_vector_destroy(&p->autobuf); 18929 ae_vector_destroy(&p->x); 18930 ae_vector_destroy(&p->g); 18931 _rcommstate_destroy(&p->rstate); 18932 _linminstate_destroy(&p->lstate); 18936 ae_bool _minlbfgsreport_init(void* _p, ae_state *_state, ae_bool make_automatic) 18938 minlbfgsreport *p = (minlbfgsreport*)_p; 18939 ae_touch_ptr((void*)p); 18944 ae_bool _minlbfgsreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 18946 minlbfgsreport *dst = (minlbfgsreport*)_dst; 18947 minlbfgsreport *src = (minlbfgsreport*)_src; 18948 dst->iterationscount = src->iterationscount; 18949 dst->nfev = src->nfev; 18950 dst->varidx = src->varidx; 18951 dst->terminationtype = src->terminationtype; 18956 void _minlbfgsreport_clear(void* _p) 18958 minlbfgsreport *p = (minlbfgsreport*)_p; 18959 ae_touch_ptr((void*)p); 18963 void _minlbfgsreport_destroy(void* _p) 18965 minlbfgsreport *p = (minlbfgsreport*)_p; 18966 ae_touch_ptr((void*)p); 18972 /************************************************************************* 18973 CONSTRAINED QUADRATIC PROGRAMMING 18975 
The subroutine creates QP optimizer. After initial creation, it contains 18976 default optimization problem with zero quadratic and linear terms and no 18977 constraints. You should set quadratic/linear terms with calls to functions 18978 provided by MinQP subpackage. 18984 State - optimizer with zero quadratic/linear terms 18988 Copyright 11.01.2011 by Bochkanov Sergey 18989 *************************************************************************/ 18990 void minqpcreate(ae_int_t n, minqpstate* state, ae_state *_state) 18994 _minqpstate_clear(state); 18996 ae_assert(n>=1, "MinQPCreate: N<1
", _state); 18999 * initialize QP solver 19004 state->repterminationtype = 0; 19007 cqminit(n, &state->a, _state); 19008 sasinit(n, &state->sas, _state); 19009 ae_vector_set_length(&state->b, n, _state); 19010 ae_vector_set_length(&state->bndl, n, _state); 19011 ae_vector_set_length(&state->bndu, n, _state); 19012 ae_vector_set_length(&state->workbndl, n, _state); 19013 ae_vector_set_length(&state->workbndu, n, _state); 19014 ae_vector_set_length(&state->havebndl, n, _state); 19015 ae_vector_set_length(&state->havebndu, n, _state); 19016 ae_vector_set_length(&state->s, n, _state); 19017 ae_vector_set_length(&state->startx, n, _state); 19018 ae_vector_set_length(&state->xorigin, n, _state); 19019 ae_vector_set_length(&state->xs, n, _state); 19020 ae_vector_set_length(&state->xn, n, _state); 19021 ae_vector_set_length(&state->gc, n, _state); 19022 ae_vector_set_length(&state->pg, n, _state); 19023 for(i=0; i<=n-1; i++) 19025 state->bndl.ptr.p_double[i] = _state->v_neginf; 19026 state->bndu.ptr.p_double[i] = _state->v_posinf; 19027 state->havebndl.ptr.p_bool[i] = ae_false; 19028 state->havebndu.ptr.p_bool[i] = ae_false; 19029 state->b.ptr.p_double[i] = 0.0; 19030 state->startx.ptr.p_double[i] = 0.0; 19031 state->xorigin.ptr.p_double[i] = 0.0; 19032 state->s.ptr.p_double[i] = 1.0; 19034 state->havex = ae_false; 19035 minqpsetalgocholesky(state, _state); 19036 normestimatorcreate(n, n, 5, 5, &state->estimator, _state); 19037 minbleiccreate(n, &state->startx, &state->solver, _state); 19041 /************************************************************************* 19042 This function sets linear term for QP solver. 19044 By default, linear term is zero. 19047 State - structure which stores algorithm state 19048 B - linear term, array[N]. 
19051 Copyright 11.01.2011 by Bochkanov Sergey 19052 *************************************************************************/ 19053 void minqpsetlinearterm(minqpstate* state, 19054 /* Real */ ae_vector* b, 19061 ae_assert(b->cnt>=n, "MinQPSetLinearTerm: Length(B)<N
", _state); 19062 ae_assert(isfinitevector(b, n, _state), "MinQPSetLinearTerm: B contains infinite
or NaN elements
", _state); 19063 minqpsetlineartermfast(state, b, _state); 19067 /************************************************************************* 19068 This function sets dense quadratic term for QP solver. By default, 19069 quadratic term is zero. 19071 SUPPORT BY ALGLIB QP ALGORITHMS: 19073 Dense quadratic term can be handled by any of the QP algorithms supported 19074 by ALGLIB QP Solver. 19078 This solver minimizes following function: 19079 f(x) = 0.5*x'*A*x + b'*x. 19080 Note that quadratic term has 0.5 before it. So if you want to minimize 19082 you should rewrite your problem as follows: 19083 f(x) = 0.5*(2*x^2) + x 19084 and your matrix A will be equal to [[2.0]], not to [[1.0]] 19087 State - structure which stores algorithm state 19088 A - matrix, array[N,N] 19089 IsUpper - (optional) storage type: 19090 * if True, symmetric matrix A is given by its upper 19091 triangle, and the lower triangle isn’t used 19092 * if False, symmetric matrix A is given by its lower 19093 triangle, and the upper triangle isn’t used 19094 * if not given, both lower and upper triangles must be 19098 Copyright 11.01.2011 by Bochkanov Sergey 19099 *************************************************************************/ 19100 void minqpsetquadraticterm(minqpstate* state, 19101 /* Real */ ae_matrix* a, 19109 ae_assert(a->rows>=n, "MinQPSetQuadraticTerm: Rows(A)<N
", _state); 19110 ae_assert(a->cols>=n, "MinQPSetQuadraticTerm: Cols(A)<N
", _state); 19111 ae_assert(isfinitertrmatrix(a, n, isupper, _state), "MinQPSetQuadraticTerm: A contains infinite
or NaN elements
", _state); 19112 minqpsetquadratictermfast(state, a, isupper, 0.0, _state); 19116 /************************************************************************* 19117 This function sets sparse quadratic term for QP solver. By default, 19118 quadratic term is zero. 19120 SUPPORT BY ALGLIB QP ALGORITHMS: 19122 Sparse quadratic term is supported only by BLEIC-based QP algorithm (one 19123 which is activated by MinQPSetAlgoBLEIC function). Cholesky-based QP algo 19124 won't be able to deal with sparse quadratic term and will terminate 19127 IF YOU CALLED THIS FUNCTION, YOU MUST SWITCH TO BLEIC-BASED QP ALGORITHM 19128 BEFORE CALLING MINQPOPTIMIZE() FUNCTION. 19132 This solver minimizes following function: 19133 f(x) = 0.5*x'*A*x + b'*x. 19134 Note that quadratic term has 0.5 before it. So if you want to minimize 19136 you should rewrite your problem as follows: 19137 f(x) = 0.5*(2*x^2) + x 19138 and your matrix A will be equal to [[2.0]], not to [[1.0]] 19141 State - structure which stores algorithm state 19142 A - matrix, array[N,N] 19143 IsUpper - (optional) storage type: 19144 * if True, symmetric matrix A is given by its upper 19145 triangle, and the lower triangle isn’t used 19146 * if False, symmetric matrix A is given by its lower 19147 triangle, and the upper triangle isn’t used 19148 * if not given, both lower and upper triangles must be 19152 Copyright 11.01.2011 by Bochkanov Sergey 19153 *************************************************************************/ 19154 void minqpsetquadratictermsparse(minqpstate* state, 19163 ae_assert(sparsegetnrows(a, _state)>=n, "MinQPSetQuadraticTermSparse: Rows(A)<N
", _state); 19164 ae_assert(sparsegetncols(a, _state)>=n, "MinQPSetQuadraticTermSparse: Cols(A)<N
", _state); 19165 sparsecopytocrs(a, &state->sparsea, _state); 19166 state->sparseaupper = isupper; 19171 /************************************************************************* 19172 This function sets starting point for QP solver. It is useful to have 19173 good initial approximation to the solution, because it will increase 19174 speed of convergence and identification of active constraints. 19177 State - structure which stores algorithm state 19178 X - starting point, array[N]. 19181 Copyright 11.01.2011 by Bochkanov Sergey 19182 *************************************************************************/ 19183 void minqpsetstartingpoint(minqpstate* state, 19184 /* Real */ ae_vector* x, 19191 ae_assert(x->cnt>=n, "MinQPSetStartingPoint: Length(B)<N
", _state); 19192 ae_assert(isfinitevector(x, n, _state), "MinQPSetStartingPoint: X contains infinite
or NaN elements
", _state); 19193 minqpsetstartingpointfast(state, x, _state); 19197 /************************************************************************* 19198 This function sets origin for QP solver. By default, following QP program 19201 min(0.5*x'*A*x+b'*x) 19203 This function allows to solve different problem: 19205 min(0.5*(x-x_origin)'*A*(x-x_origin)+b'*(x-x_origin)) 19208 State - structure which stores algorithm state 19209 XOrigin - origin, array[N]. 19212 Copyright 11.01.2011 by Bochkanov Sergey 19213 *************************************************************************/ 19214 void minqpsetorigin(minqpstate* state, 19215 /* Real */ ae_vector* xorigin, 19222 ae_assert(xorigin->cnt>=n, "MinQPSetOrigin: Length(B)<N
", _state); 19223 ae_assert(isfinitevector(xorigin, n, _state), "MinQPSetOrigin: B contains infinite
or NaN elements
", _state); 19224 minqpsetoriginfast(state, xorigin, _state); 19228 /************************************************************************* 19229 This function sets scaling coefficients. 19231 ALGLIB optimizers use scaling matrices to test stopping conditions (step 19232 size and gradient are scaled before comparison with tolerances). Scale of 19233 the I-th variable is a translation invariant measure of: 19234 a) "how large
" the variable is 19235 b) how large the step should be to make significant changes in the function 19237 BLEIC-based QP solver uses scale for two purposes: 19238 * to evaluate stopping conditions 19239 * for preconditioning of the underlying BLEIC solver 19242 State - structure stores algorithm state 19243 S - array[N], non-zero scaling coefficients 19244 S[i] may be negative, sign doesn't matter. 19247 Copyright 14.01.2011 by Bochkanov Sergey 19248 *************************************************************************/ 19249 void minqpsetscale(minqpstate* state, 19250 /* Real */ ae_vector* s, 19256 ae_assert(s->cnt>=state->n, "MinQPSetScale: Length(S)<N
", _state); 19257 for(i=0; i<=state->n-1; i++) 19259 ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinQPSetScale: S contains infinite
or NAN elements
", _state); 19260 ae_assert(ae_fp_neq(s->ptr.p_double[i],0), "MinQPSetScale: S contains
zero elements
", _state); 19261 state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); 19266 /************************************************************************* 19267 This function tells solver to use Cholesky-based algorithm. This algorithm 19268 is active by default. 19272 Cholesky-based algorithm can be used only for problems which: 19273 * have dense quadratic term, set by MinQPSetQuadraticTerm(), sparse or 19274 structured problems are not supported. 19275 * are strictly convex, i.e. quadratic term is symmetric positive definite, 19276 indefinite or semidefinite problems are not supported by this algorithm. 19278 If anything of what listed above is violated, you may use BLEIC-based QP 19279 algorithm which can be activated by MinQPSetAlgoBLEIC(). 19281 BENEFITS AND DRAWBACKS: 19283 This algorithm gives best precision amongst all QP solvers provided by 19284 ALGLIB (Newton iterations have much higher precision than any other 19285 optimization algorithm). This solver also gracefully handles problems with 19286 very large amount of constraints. 19288 Performance of the algorithm is good because internally it uses Level 3 19289 Dense BLAS for its performance-critical parts. 19292 From the other side, algorithm has O(N^3) complexity for unconstrained 19293 problems and up to orders of magnitude slower on constrained problems 19294 (these additional iterations are needed to identify active constraints). 19295 So, its running time depends on number of constraints active at solution. 19297 Furthermore, this algorithm can not solve problems with sparse matrices or 19298 problems with semidefinite/indefinite matrices of any kind (dense/sparse). 
19301 State - structure which stores algorithm state 19304 Copyright 11.01.2011 by Bochkanov Sergey 19305 *************************************************************************/ 19306 void minqpsetalgocholesky(minqpstate* state, ae_state *_state) 19310 state->algokind = 1; 19314 /************************************************************************* 19315 This function tells solver to use BLEIC-based algorithm and sets stopping 19316 criteria for the algorithm. 19320 BLEIC-based QP algorithm can be used for any kind of QP problems: 19321 * problems with both dense and sparse quadratic terms 19322 * problems with positive definite, semidefinite, indefinite terms 19324 BLEIC-based algorithm can solve even indefinite problems - as long as they 19325 are bounded from below on the feasible set. Of course, global minimum is 19326 found only for positive definite and semidefinite problems. As for 19327 indefinite ones - only local minimum is found. 19329 BENEFITS AND DRAWBACKS: 19331 This algorithm can be used to solve both convex and indefinite QP problems 19332 and it can utilize sparsity of the quadratic term (algorithm calculates 19333 matrix-vector products, which can be performed efficiently in case of 19336 Algorithm has iteration cost, which (assuming fixed amount of non-boundary 19337 linear constraints) linearly depends on problem size. Boundary constraints 19338 does not significantly change iteration cost. 19340 Thus, it outperforms Cholesky-based QP algorithm (CQP) on high-dimensional 19341 sparse problems with moderate amount of constraints. 19344 From the other side, unlike CQP solver, this algorithm does NOT make use 19345 of Level 3 Dense BLAS. Thus, its performance on dense problems is inferior 19346 to that of CQP solver. 19348 Its precision is also inferior to that of CQP. CQP performs Newton steps 19349 which are know to achieve very good precision. In many cases Newton step 19350 leads us exactly to the solution. 
BLEIC-QP performs LBFGS steps, which are 19351 good at detecting neighborhood of the solution, buy need many iterations 19352 to find solution with 6 digits of precision. 19355 State - structure which stores algorithm state 19357 The subroutine finishes its work if the condition 19358 |v|<EpsG is satisfied, where: 19359 * |.| means Euclidian norm 19360 * v - scaled constrained gradient vector, v[i]=g[i]*s[i] 19362 * s - scaling coefficients set by MinQPSetScale() 19364 The subroutine finishes its work if exploratory steepest 19365 descent step on k+1-th iteration satisfies following 19366 condition: |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} 19368 The subroutine finishes its work if exploratory steepest 19369 descent step on k+1-th iteration satisfies following 19371 * |.| means Euclidian norm 19372 * v - scaled step vector, v[i]=dx[i]/s[i] 19373 * dx - step vector, dx=X(k+1)-X(k) 19374 * s - scaling coefficients set by MinQPSetScale() 19375 MaxIts - maximum number of iterations. If MaxIts=0, the number of 19376 iterations is unlimited. 19378 Passing EpsG=0, EpsF=0 and EpsX=0 and MaxIts=0 (simultaneously) will lead 19379 to automatic stopping criterion selection (presently it is small step 19380 length, but it may change in the future versions of ALGLIB). 19382 IT IS VERY IMPORTANT THAT YOU CALL MinQPSetScale() WHEN YOU USE THIS ALGO! 19385 Copyright 11.01.2011 by Bochkanov Sergey 19386 *************************************************************************/ 19387 void minqpsetalgobleic(minqpstate* state, 19396 ae_assert(ae_isfinite(epsg, _state), "MinQPSetAlgoBLEIC: EpsG is not finite number
", _state); 19397 ae_assert(ae_fp_greater_eq(epsg,0), "MinQPSetAlgoBLEIC: negative EpsG
", _state); 19398 ae_assert(ae_isfinite(epsf, _state), "MinQPSetAlgoBLEIC: EpsF is not finite number
", _state); 19399 ae_assert(ae_fp_greater_eq(epsf,0), "MinQPSetAlgoBLEIC: negative EpsF
", _state); 19400 ae_assert(ae_isfinite(epsx, _state), "MinQPSetAlgoBLEIC: EpsX is not finite number
", _state); 19401 ae_assert(ae_fp_greater_eq(epsx,0), "MinQPSetAlgoBLEIC: negative EpsX
", _state); 19402 ae_assert(maxits>=0, "MinQPSetAlgoBLEIC: negative MaxIts!
", _state); 19403 state->algokind = 2; 19404 if( ((ae_fp_eq(epsg,0)&&ae_fp_eq(epsf,0))&&ae_fp_eq(epsx,0))&&maxits==0 ) 19408 state->bleicepsg = epsg; 19409 state->bleicepsf = epsf; 19410 state->bleicepsx = epsx; 19411 state->bleicmaxits = maxits; 19415 /************************************************************************* 19416 This function sets boundary constraints for QP solver 19418 Boundary constraints are inactive by default (after initial creation). 19419 After being set, they are preserved until explicitly turned off with 19420 another SetBC() call. 19423 State - structure stores algorithm state 19424 BndL - lower bounds, array[N]. 19425 If some (all) variables are unbounded, you may specify 19426 very small number or -INF (latter is recommended because 19427 it will allow solver to use better algorithm). 19428 BndU - upper bounds, array[N]. 19429 If some (all) variables are unbounded, you may specify 19430 very large number or +INF (latter is recommended because 19431 it will allow solver to use better algorithm). 19433 NOTE: it is possible to specify BndL[i]=BndU[i]. In this case I-th 19434 variable will be "frozen
" at X[i]=BndL[i]=BndU[i]. 19437 Copyright 11.01.2011 by Bochkanov Sergey 19438 *************************************************************************/ 19439 void minqpsetbc(minqpstate* state, 19440 /* Real */ ae_vector* bndl, 19441 /* Real */ ae_vector* bndu, 19449 ae_assert(bndl->cnt>=n, "MinQPSetBC: Length(BndL)<N
", _state); 19450 ae_assert(bndu->cnt>=n, "MinQPSetBC: Length(BndU)<N
", _state); 19451 for(i=0; i<=n-1; i++) 19453 ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinQPSetBC: BndL contains NAN
or +
INF", _state); 19454 ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinQPSetBC: BndU contains NAN
or -
INF", _state); 19455 state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; 19456 state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); 19457 state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; 19458 state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); 19463 /************************************************************************* 19464 This function sets linear constraints for QP optimizer. 19466 Linear constraints are inactive by default (after initial creation). 19469 State - structure previously allocated with MinQPCreate call. 19470 C - linear constraints, array[K,N+1]. 19471 Each row of C represents one constraint, either equality 19472 or inequality (see below): 19473 * first N elements correspond to coefficients, 19474 * last element corresponds to the right part. 19475 All elements of C (including right part) must be finite. 19476 CT - type of constraints, array[K]: 19477 * if CT[i]>0, then I-th constraint is C[i,*]*x >= C[i,n+1] 19478 * if CT[i]=0, then I-th constraint is C[i,*]*x = C[i,n+1] 19479 * if CT[i]<0, then I-th constraint is C[i,*]*x <= C[i,n+1] 19480 K - number of equality/inequality constraints, K>=0: 19481 * if given, only leading K elements of C/CT are used 19482 * if not given, automatically determined from sizes of C/CT 19484 NOTE 1: linear (non-bound) constraints are satisfied only approximately - 19485 there always exists some minor violation (about 10^-10...10^-13) 19486 due to numerical errors. 19489 Copyright 19.06.2012 by Bochkanov Sergey 19490 *************************************************************************/ 19491 void minqpsetlc(minqpstate* state, 19492 /* Real */ ae_matrix* c, 19493 /* Integer */ ae_vector* ct, 19506 * First, check for errors in the inputs 19508 ae_assert(k>=0, "MinQPSetLC:
K<0
", _state); 19509 ae_assert(c->cols>=n+1||k==0, "MinQPSetLC: Cols(C)<N+1
", _state); 19510 ae_assert(c->rows>=k, "MinQPSetLC: Rows(C)<
K", _state); 19511 ae_assert(ct->cnt>=k, "MinQPSetLC: Length(CT)<
K", _state); 19512 ae_assert(apservisfinitematrix(c, k, n+1, _state), "MinQPSetLC: C contains infinite
or NaN values!
", _state); 19525 * Equality constraints are stored first, in the upper 19526 * NEC rows of State.CLEIC matrix. Inequality constraints 19527 * are stored in the next NIC rows. 19529 * NOTE: we convert inequality constraints to the form 19530 * A*x<=b before copying them. 19532 rmatrixsetlengthatleast(&state->cleic, k, n+1, _state); 19535 for(i=0; i<=k-1; i++) 19537 if( ct->ptr.p_int[i]==0 ) 19539 ae_v_move(&state->cleic.ptr.pp_double[state->nec][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 19540 state->nec = state->nec+1; 19543 for(i=0; i<=k-1; i++) 19545 if( ct->ptr.p_int[i]!=0 ) 19547 if( ct->ptr.p_int[i]>0 ) 19549 ae_v_moveneg(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 19553 ae_v_move(&state->cleic.ptr.pp_double[state->nec+state->nic][0], 1, &c->ptr.pp_double[i][0], 1, ae_v_len(0,n)); 19555 state->nic = state->nic+1; 19560 * Normalize rows of State.CLEIC: each row must have unit norm. 19561 * Norm is calculated using first N elements (i.e. right part is 19562 * not counted when we calculate norm). 19564 for(i=0; i<=k-1; i++) 19567 for(j=0; j<=n-1; j++) 19569 v = v+ae_sqr(state->cleic.ptr.pp_double[i][j], _state); 19571 if( ae_fp_eq(v,0) ) 19575 v = 1/ae_sqrt(v, _state); 19576 ae_v_muld(&state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n), v); 19581 /************************************************************************* 19582 This function solves quadratic programming problem. 19583 You should call it after setting solver options with MinQPSet...() calls. 19586 State - algorithm state 19588 You should use MinQPResults() function to access results after calls 19592 Copyright 11.01.2011 by Bochkanov Sergey. 19593 Special thanks to Elvira Illarionova for important suggestions on 19594 the linearly constrained QP algorithm. 
19595 *************************************************************************/ 19596 void minqpoptimize(minqpstate* state, ae_state *_state) 19607 double noisetolerance; 19611 ae_int_t nextaction; 19612 ae_int_t actstatus; 19614 ae_int_t badnewtonits; 19615 double maxscaledgrad; 19618 noisetolerance = 10; 19620 state->repterminationtype = -5; 19621 state->repinneriterationscount = 0; 19622 state->repouteriterationscount = 0; 19623 state->repncholesky = 0; 19625 state->debugphase1flops = 0; 19626 state->debugphase2flops = 0; 19627 state->debugphase3flops = 0; 19628 rvectorsetlengthatleast(&state->rctmpg, n, _state); 19631 * check correctness of constraints 19633 for(i=0; i<=n-1; i++) 19635 if( state->havebndl.ptr.p_bool[i]&&state->havebndu.ptr.p_bool[i] ) 19637 if( ae_fp_greater(state->bndl.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 19639 state->repterminationtype = -3; 19646 * count number of bound and linear constraints 19649 for(i=0; i<=n-1; i++) 19651 if( state->havebndl.ptr.p_bool[i] ) 19655 if( state->havebndu.ptr.p_bool[i] ) 19663 * * if we have starting point in StartX, we just have to bound it 19664 * * if we do not have StartX, deduce initial point from boundary constraints 19668 for(i=0; i<=n-1; i++) 19670 state->xs.ptr.p_double[i] = state->startx.ptr.p_double[i]; 19671 if( state->havebndl.ptr.p_bool[i]&&ae_fp_less(state->xs.ptr.p_double[i],state->bndl.ptr.p_double[i]) ) 19673 state->xs.ptr.p_double[i] = state->bndl.ptr.p_double[i]; 19675 if( state->havebndu.ptr.p_bool[i]&&ae_fp_greater(state->xs.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 19677 state->xs.ptr.p_double[i] = state->bndu.ptr.p_double[i]; 19683 for(i=0; i<=n-1; i++) 19685 if( state->havebndl.ptr.p_bool[i]&&state->havebndu.ptr.p_bool[i] ) 19687 state->xs.ptr.p_double[i] = 0.5*(state->bndl.ptr.p_double[i]+state->bndu.ptr.p_double[i]); 19690 if( state->havebndl.ptr.p_bool[i] ) 19692 state->xs.ptr.p_double[i] = state->bndl.ptr.p_double[i]; 19695 if( state->havebndu.ptr.p_bool[i] ) 19697 
state->xs.ptr.p_double[i] = state->bndu.ptr.p_double[i]; 19700 state->xs.ptr.p_double[i] = 0; 19707 if( state->algokind==1 ) 19711 * Check matrix type. 19712 * Cholesky solver supports only dense matrices. 19714 if( state->akind!=0 ) 19716 state->repterminationtype = -5; 19721 * Our formulation of quadratic problem includes origin point, 19722 * i.e. we have F(x-x_origin) which is minimized subject to 19723 * constraints on x, instead of having simply F(x). 19725 * Here we make transition from non-zero origin to zero one. 19726 * In order to make such transition we have to: 19727 * 1. subtract x_origin from x_start 19728 * 2. modify constraints 19730 * 4. add x_origin to solution 19732 * There is alternate solution - to modify quadratic function 19733 * by expansion of multipliers containing (x-x_origin), but 19734 * we prefer to modify constraints, because it is a) more precise 19735 * and b) easier to to. 19737 * Parts (1)-(2) are done here. After this block is over, 19739 * * XS, which stores shifted XStart (if we don't have XStart, 19740 * value of XS will be ignored later) 19741 * * WorkBndL, WorkBndU, which store modified boundary constraints. 
19743 for(i=0; i<=n-1; i++) 19745 if( state->havebndl.ptr.p_bool[i] ) 19747 state->workbndl.ptr.p_double[i] = state->bndl.ptr.p_double[i]-state->xorigin.ptr.p_double[i]; 19751 state->workbndl.ptr.p_double[i] = _state->v_neginf; 19753 if( state->havebndu.ptr.p_bool[i] ) 19755 state->workbndu.ptr.p_double[i] = state->bndu.ptr.p_double[i]-state->xorigin.ptr.p_double[i]; 19759 state->workbndu.ptr.p_double[i] = _state->v_posinf; 19762 rmatrixsetlengthatleast(&state->workcleic, state->nec+state->nic, n+1, _state); 19763 for(i=0; i<=state->nec+state->nic-1; i++) 19765 v = ae_v_dotproduct(&state->cleic.ptr.pp_double[i][0], 1, &state->xorigin.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19766 ae_v_move(&state->workcleic.ptr.pp_double[i][0], 1, &state->cleic.ptr.pp_double[i][0], 1, ae_v_len(0,n-1)); 19767 state->workcleic.ptr.pp_double[i][n] = state->cleic.ptr.pp_double[i][n]-v; 19771 * Starting point XS 19777 * We have starting point in StartX, so we just have to shift and bound it 19779 for(i=0; i<=n-1; i++) 19781 state->xs.ptr.p_double[i] = state->startx.ptr.p_double[i]-state->xorigin.ptr.p_double[i]; 19782 if( state->havebndl.ptr.p_bool[i] ) 19784 if( ae_fp_less(state->xs.ptr.p_double[i],state->workbndl.ptr.p_double[i]) ) 19786 state->xs.ptr.p_double[i] = state->workbndl.ptr.p_double[i]; 19789 if( state->havebndu.ptr.p_bool[i] ) 19791 if( ae_fp_greater(state->xs.ptr.p_double[i],state->workbndu.ptr.p_double[i]) ) 19793 state->xs.ptr.p_double[i] = state->workbndu.ptr.p_double[i]; 19802 * We don't have starting point, so we deduce it from 19803 * constraints (if they are present). 19805 * NOTE: XS contains some meaningless values from previous block 19806 * which are ignored by code below. 
19808 for(i=0; i<=n-1; i++) 19810 if( state->havebndl.ptr.p_bool[i]&&state->havebndu.ptr.p_bool[i] ) 19812 state->xs.ptr.p_double[i] = 0.5*(state->workbndl.ptr.p_double[i]+state->workbndu.ptr.p_double[i]); 19813 if( ae_fp_less(state->xs.ptr.p_double[i],state->workbndl.ptr.p_double[i]) ) 19815 state->xs.ptr.p_double[i] = state->workbndl.ptr.p_double[i]; 19817 if( ae_fp_greater(state->xs.ptr.p_double[i],state->workbndu.ptr.p_double[i]) ) 19819 state->xs.ptr.p_double[i] = state->workbndu.ptr.p_double[i]; 19823 if( state->havebndl.ptr.p_bool[i] ) 19825 state->xs.ptr.p_double[i] = state->workbndl.ptr.p_double[i]; 19828 if( state->havebndu.ptr.p_bool[i] ) 19830 state->xs.ptr.p_double[i] = state->workbndu.ptr.p_double[i]; 19833 state->xs.ptr.p_double[i] = 0; 19838 * Handle special case - no constraints 19840 if( nbc==0&&state->nec+state->nic==0 ) 19844 * "Simple
" unconstrained Cholesky 19846 bvectorsetlengthatleast(&state->tmpb, n, _state); 19847 for(i=0; i<=n-1; i++) 19849 state->tmpb.ptr.p_bool[i] = ae_false; 19851 state->repncholesky = state->repncholesky+1; 19852 cqmsetb(&state->a, &state->b, _state); 19853 cqmsetactiveset(&state->a, &state->xs, &state->tmpb, _state); 19854 if( !cqmconstrainedoptimum(&state->a, &state->xn, _state) ) 19856 state->repterminationtype = -5; 19859 ae_v_move(&state->xs.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19860 ae_v_add(&state->xs.ptr.p_double[0], 1, &state->xorigin.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19861 state->repinneriterationscount = 1; 19862 state->repouteriterationscount = 1; 19863 state->repterminationtype = 4; 19868 * Prepare "active
set" structure 19870 sassetbc(&state->sas, &state->workbndl, &state->workbndu, _state); 19871 sassetlcx(&state->sas, &state->workcleic, state->nec, state->nic, _state); 19872 sassetscale(&state->sas, &state->s, _state); 19873 if( !sasstartoptimization(&state->sas, &state->xs, _state) ) 19875 state->repterminationtype = -3; 19880 * Main cycle of CQP algorithm 19882 state->repterminationtype = 4; 19884 maxscaledgrad = 0.0; 19889 * Update iterations count 19891 inc(&state->repouteriterationscount, _state); 19892 inc(&state->repinneriterationscount, _state); 19897 * Determine active set. 19898 * Update MaxScaledGrad. 19900 cqmadx(&state->a, &state->sas.xc, &state->rctmpg, _state); 19901 ae_v_add(&state->rctmpg.ptr.p_double[0], 1, &state->b.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19902 sasreactivateconstraints(&state->sas, &state->rctmpg, _state); 19904 for(i=0; i<=n-1; i++) 19906 v = v+ae_sqr(state->rctmpg.ptr.p_double[i]*state->s.ptr.p_double[i], _state); 19908 maxscaledgrad = ae_maxreal(maxscaledgrad, ae_sqrt(v, _state), _state); 19911 * Phase 2: perform penalized steepest descent step. 19913 * NextAction control variable is set on exit from this loop: 19914 * * NextAction>0 in case we have to proceed to Phase 3 (Newton step) 19915 * * NextAction<0 in case we have to proceed to Phase 1 (recalculate active set) 19916 * * NextAction=0 in case we found solution (step along projected gradient is small enough) 19922 * Calculate constrained descent direction, store to PG. 19923 * Successful termination if PG is zero. 
19925 cqmadx(&state->a, &state->sas.xc, &state->gc, _state); 19926 ae_v_add(&state->gc.ptr.p_double[0], 1, &state->b.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19927 sasconstraineddescent(&state->sas, &state->gc, &state->pg, _state); 19928 state->debugphase2flops = state->debugphase2flops+4*(state->nec+state->nic)*n; 19929 v0 = ae_v_dotproduct(&state->pg.ptr.p_double[0], 1, &state->pg.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19930 if( ae_fp_eq(v0,0) ) 19934 * Constrained derivative is zero. 19942 * Build quadratic model of F along descent direction: 19943 * F(xc+alpha*pg) = D2*alpha^2 + D1*alpha + D0 19944 * Store noise level in the XC (noise level is used to classify 19945 * step as singificant or insignificant). 19947 * In case function curvature is negative or product of descent 19948 * direction and gradient is non-negative, iterations are terminated. 19950 * NOTE: D0 is not actually used, but we prefer to maintain it. 19952 fprev = minqp_minqpmodelvalue(&state->a, &state->b, &state->sas.xc, n, &state->tmp0, _state); 19953 fprev = fprev+minqp_penaltyfactor*maxscaledgrad*sasactivelcpenalty1(&state->sas, &state->sas.xc, _state); 19954 cqmevalx(&state->a, &state->sas.xc, &v, &noiselevel, _state); 19955 v0 = cqmxtadx2(&state->a, &state->pg, _state); 19956 state->debugphase2flops = state->debugphase2flops+3*2*n*n; 19958 v1 = ae_v_dotproduct(&state->pg.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 19961 if( ae_fp_less_eq(d2,0) ) 19965 * Second derivative is non-positive, function is non-convex. 19967 state->repterminationtype = -5; 19971 if( ae_fp_greater_eq(d1,0) ) 19975 * Second derivative is positive, first derivative is non-negative. 19983 * Modify quadratic model - add penalty for violation of the active 19986 * Boundary constraints are always satisfied exactly, so we do not 19987 * add penalty term for them. 
General equality constraint of the 19988 * form a'*(xc+alpha*d)=b adds penalty term: 19989 * P(alpha) = (a'*(xc+alpha*d)-b)^2 19990 * = (alpha*(a'*d) + (a'*xc-b))^2 19991 * = alpha^2*(a'*d)^2 + alpha*2*(a'*d)*(a'*xc-b) + (a'*xc-b)^2 19992 * Each penalty term is multiplied by 100*Anorm before adding it to 19993 * the 1-dimensional quadratic model. 19995 * Penalization of the quadratic model improves behavior of the 19996 * algorithm in the presence of the multiple degenerate constraints. 19997 * In particular, it prevents algorithm from making large steps in 19998 * directions which violate equality constraints. 20000 for(i=0; i<=state->nec+state->nic-1; i++) 20002 if( state->sas.activeset.ptr.p_int[n+i]>0 ) 20004 v0 = ae_v_dotproduct(&state->workcleic.ptr.pp_double[i][0], 1, &state->pg.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20005 v1 = ae_v_dotproduct(&state->workcleic.ptr.pp_double[i][0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20006 v1 = v1-state->workcleic.ptr.pp_double[i][n]; 20007 v = 100*state->anorm; 20008 d2 = d2+v*ae_sqr(v0, _state); 20010 d0 = d0+v*ae_sqr(v1, _state); 20013 state->debugphase2flops = state->debugphase2flops+2*2*(state->nec+state->nic)*n; 20016 * Try unbounded step. 20017 * In case function change is dominated by noise or function actually increased 20018 * instead of decreasing, we terminate iterations. 
20021 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20022 ae_v_addd(&state->xn.ptr.p_double[0], 1, &state->pg.ptr.p_double[0], 1, ae_v_len(0,n-1), v); 20023 fcand = minqp_minqpmodelvalue(&state->a, &state->b, &state->xn, n, &state->tmp0, _state); 20024 fcand = fcand+minqp_penaltyfactor*maxscaledgrad*sasactivelcpenalty1(&state->sas, &state->xn, _state); 20025 state->debugphase2flops = state->debugphase2flops+2*n*n; 20026 if( ae_fp_greater_eq(fcand,fprev-noiselevel*noisetolerance) ) 20034 * Perform bounded step with (possible) activation 20036 actstatus = minqp_minqpboundedstepandactivation(state, &state->xn, &state->tmp0, _state); 20037 fcur = minqp_minqpmodelvalue(&state->a, &state->b, &state->sas.xc, n, &state->tmp0, _state); 20038 state->debugphase2flops = state->debugphase2flops+2*n*n; 20041 * Depending on results, decide what to do: 20042 * 1. In case step was performed without activation of constraints, 20043 * we proceed to Newton method 20044 * 2. In case there was activated at least one constraint with ActiveSet[I]<0, 20045 * we proceed to Phase 1 and re-evaluate active set. 20046 * 3. Otherwise (activation of the constraints with ActiveSet[I]=0) 20047 * we try Phase 2 one more time. 20053 * Step without activation, proceed to Newton 20062 * No new constraints added during last activation - only 20063 * ones which were at the boundary (ActiveSet[I]=0), but 20064 * inactive due to numerical noise. 20066 * Now, these constraints are added to the active set, and 20067 * we try to perform steepest descent (Phase 2) one more time. 20075 * Last step activated at least one significantly new 20076 * constraint (ActiveSet[I]<0), we have to re-evaluate 20077 * active set (Phase 1). 20087 if( nextaction==0 ) 20093 * Phase 3: fast equality-constrained solver 20095 * NOTE: this solver uses Augmented Lagrangian algorithm to solve 20096 * equality-constrained subproblems. 
This algorithm may 20097 * perform steps which increase function values instead of 20098 * decreasing it (in hard cases, like overconstrained problems). 20100 * Such non-monononic steps may create a loop, when Augmented 20101 * Lagrangian algorithm performs uphill step, and steepest 20102 * descent algorithm (Phase 2) performs downhill step in the 20103 * opposite direction. 20105 * In order to prevent iterations to continue forever we 20106 * count iterations when AL algorithm increased function 20107 * value instead of decreasing it. When number of such "bad
" 20108 * iterations will increase beyong MaxBadNewtonIts, we will 20109 * terminate algorithm. 20111 fprev = minqp_minqpmodelvalue(&state->a, &state->b, &state->sas.xc, n, &state->tmp0, _state); 20116 * Calculate optimum subject to presently active constraints 20118 state->repncholesky = state->repncholesky+1; 20119 state->debugphase3flops = state->debugphase3flops+ae_pow(n, 3, _state)/3; 20120 if( !minqp_minqpconstrainedoptimum(state, &state->a, state->anorm, &state->b, &state->xn, &state->tmp0, &state->tmpb, &state->tmp1, _state) ) 20122 state->repterminationtype = -5; 20123 sasstopoptimization(&state->sas, _state); 20129 * If no constraints was added, accept candidate point XN and move to next phase. 20131 if( minqp_minqpboundedstepandactivation(state, &state->xn, &state->tmp0, _state)<0 ) 20136 fcur = minqp_minqpmodelvalue(&state->a, &state->b, &state->sas.xc, n, &state->tmp0, _state); 20137 if( ae_fp_greater_eq(fcur,fprev) ) 20139 badnewtonits = badnewtonits+1; 20141 if( badnewtonits>=minqp_maxbadnewtonits ) 20145 * Algorithm found solution, but keeps iterating because Newton 20146 * algorithm performs uphill steps (noise in the Augmented Lagrangian 20147 * algorithm). We terminate algorithm; it is considered normal 20153 sasstopoptimization(&state->sas, _state); 20156 * Post-process: add XOrigin to XC 20158 for(i=0; i<=n-1; i++) 20160 if( state->havebndl.ptr.p_bool[i]&&ae_fp_eq(state->sas.xc.ptr.p_double[i],state->workbndl.ptr.p_double[i]) ) 20162 state->xs.ptr.p_double[i] = state->bndl.ptr.p_double[i]; 20165 if( state->havebndu.ptr.p_bool[i]&&ae_fp_eq(state->sas.xc.ptr.p_double[i],state->workbndu.ptr.p_double[i]) ) 20167 state->xs.ptr.p_double[i] = state->bndu.ptr.p_double[i]; 20170 state->xs.ptr.p_double[i] = boundval(state->sas.xc.ptr.p_double[i]+state->xorigin.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 20178 if( state->algokind==2 ) 20180 ae_assert(state->akind==0||state->akind==1, "MinQPOptimize: unexpected AKind
", _state); 20181 ivectorsetlengthatleast(&state->tmpi, state->nec+state->nic, _state); 20182 rvectorsetlengthatleast(&state->tmp0, n, _state); 20183 rvectorsetlengthatleast(&state->tmp1, n, _state); 20184 for(i=0; i<=state->nec-1; i++) 20186 state->tmpi.ptr.p_int[i] = 0; 20188 for(i=0; i<=state->nic-1; i++) 20190 state->tmpi.ptr.p_int[state->nec+i] = -1; 20192 minbleicsetlc(&state->solver, &state->cleic, &state->tmpi, state->nec+state->nic, _state); 20193 minbleicsetbc(&state->solver, &state->bndl, &state->bndu, _state); 20194 minbleicsetdrep(&state->solver, ae_true, _state); 20195 minbleicsetcond(&state->solver, ae_minrealnumber, 0.0, 0.0, state->bleicmaxits, _state); 20196 minbleicsetscale(&state->solver, &state->s, _state); 20197 minbleicsetprecscale(&state->solver, _state); 20198 minbleicrestartfrom(&state->solver, &state->xs, _state); 20199 state->repterminationtype = 0; 20200 while(minbleiciteration(&state->solver, _state)) 20204 * Line search started 20206 if( state->solver.lsstart ) 20210 * Iteration counters: 20211 * * inner iterations count is increased on every line search 20212 * * outer iterations count is increased only at steepest descent line search 20214 inc(&state->repinneriterationscount, _state); 20215 if( !state->solver.lbfgssearch ) 20217 inc(&state->repouteriterationscount, _state); 20221 * Build quadratic model of F along descent direction: 20222 * F(x+alpha*d) = D2*alpha^2 + D1*alpha + D0 20224 d0 = state->solver.f; 20225 d1 = ae_v_dotproduct(&state->solver.d.ptr.p_double[0], 1, &state->solver.g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20227 if( state->akind==0 ) 20229 d2 = cqmxtadx2(&state->a, &state->solver.d, _state); 20231 if( state->akind==1 ) 20233 sparsesmv(&state->sparsea, state->sparseaupper, &state->solver.d, &state->tmp0, _state); 20235 for(i=0; i<=n-1; i++) 20237 d2 = d2+state->solver.d.ptr.p_double[i]*state->tmp0.ptr.p_double[i]; 20245 if( ae_fp_less(d1,0)&&ae_fp_greater(d2,0) ) 20247 state->solver.stp = safeminposrv(-d1, 2*d2, 
state->solver.curstpmax, _state); 20251 * This line search may be started from steepest descent 20252 * stage (stage 2) or from L-BFGS stage (stage 3) of the 20253 * BLEIC algorithm. Depending on stage type, different 20254 * checks are performed. 20256 * Say, L-BFGS stage is an equality-constrained refinement 20257 * stage of BLEIC. This stage refines current iterate 20258 * under "frozen
" equality constraints. We can terminate 20259 * iterations at this stage only when we encounter 20260 * unconstrained direction of negative curvature. In all 20261 * other cases (say, when constrained gradient is zero) 20262 * we should not terminate algorithm because everything may 20263 * change after de-activating presently active constraints. 20265 * At steepest descent stage of BLEIC we can terminate algorithm 20266 * because it found minimum (steepest descent step is zero 20267 * or too short). We also perform check for direction of 20268 * negative curvature. 20270 if( (ae_fp_less(d2,0)||(ae_fp_eq(d2,0)&&ae_fp_less(d1,0)))&&!state->solver.boundedstep ) 20274 * Function is unbounded from below: 20275 * * function will decrease along D, i.e. either: 20278 * * step is unconstrained 20280 * If these conditions are true, we abnormally terminate QP 20281 * algorithm with return code -4 (we can do so at any stage 20282 * of BLEIC - whether it is L-BFGS or steepest descent one). 20284 state->repterminationtype = -4; 20285 for(i=0; i<=n-1; i++) 20287 state->xs.ptr.p_double[i] = state->solver.x.ptr.p_double[i]; 20291 if( !state->solver.lbfgssearch&&ae_fp_greater_eq(d2,0) ) 20295 * Tests for "normal
" convergence. 20297 * These tests are performed only at "steepest descent
" stage 20298 * of the BLEIC algorithm, and only when function is non-concave 20299 * (D2>=0) along direction D. 20301 * NOTE: we do not test iteration count (MaxIts) here, because 20302 * this stopping condition is tested by BLEIC itself. 20304 if( ae_fp_greater_eq(d1,0) ) 20308 * "Emergency
" stopping condition: D is non-descent direction. 20309 * Sometimes it is possible because of numerical noise in the 20312 state->repterminationtype = 4; 20313 for(i=0; i<=n-1; i++) 20315 state->xs.ptr.p_double[i] = state->solver.x.ptr.p_double[i]; 20319 if( ae_fp_greater(d2,0) ) 20323 * Stopping condition #4 - gradient norm is small: 20325 * 1. rescale State.Solver.D and State.Solver.G according to 20326 * current scaling, store results to Tmp0 and Tmp1. 20327 * 2. Normalize Tmp0 (scaled direction vector). 20328 * 3. compute directional derivative (in scaled variables), 20329 * which is equal to DOTPRODUCT(Tmp0,Tmp1). 20332 for(i=0; i<=n-1; i++) 20334 state->tmp0.ptr.p_double[i] = state->solver.d.ptr.p_double[i]/state->s.ptr.p_double[i]; 20335 state->tmp1.ptr.p_double[i] = state->solver.g.ptr.p_double[i]*state->s.ptr.p_double[i]; 20336 v = v+ae_sqr(state->tmp0.ptr.p_double[i], _state); 20338 ae_assert(ae_fp_greater(v,0), "MinQPOptimize: inernal errror (scaled
direction is
zero)
", _state); 20339 v = 1/ae_sqrt(v, _state); 20340 ae_v_muld(&state->tmp0.ptr.p_double[0], 1, ae_v_len(0,n-1), v); 20341 v = ae_v_dotproduct(&state->tmp0.ptr.p_double[0], 1, &state->tmp1.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20342 if( ae_fp_less_eq(ae_fabs(v, _state),state->bleicepsg) ) 20344 state->repterminationtype = 4; 20345 for(i=0; i<=n-1; i++) 20347 state->xs.ptr.p_double[i] = state->solver.x.ptr.p_double[i]; 20353 * Stopping condition #1 - relative function improvement is small: 20355 * 1. calculate steepest descent step: V = -D1/(2*D2) 20356 * 2. calculate function change: V1= D2*V^2 + D1*V 20357 * 3. stop if function change is small enough 20361 if( ae_fp_less_eq(ae_fabs(v1, _state),state->bleicepsf*ae_maxreal(d0, 1.0, _state)) ) 20363 state->repterminationtype = 1; 20364 for(i=0; i<=n-1; i++) 20366 state->xs.ptr.p_double[i] = state->solver.x.ptr.p_double[i]; 20372 * Stopping condition #2 - scaled step is small: 20374 * 1. calculate step multiplier V0 (step itself is D*V0) 20375 * 2. calculate scaled step length V 20376 * 3. 
stop if step is small enough 20380 for(i=0; i<=n-1; i++) 20382 v = v+ae_sqr(v0*state->solver.d.ptr.p_double[i]/state->s.ptr.p_double[i], _state); 20384 if( ae_fp_less_eq(ae_sqrt(v, _state),state->bleicepsx) ) 20386 state->repterminationtype = 2; 20387 for(i=0; i<=n-1; i++) 20389 state->xs.ptr.p_double[i] = state->solver.x.ptr.p_double[i]; 20398 * Gradient evaluation 20400 if( state->solver.needfg ) 20402 for(i=0; i<=n-1; i++) 20404 state->tmp0.ptr.p_double[i] = state->solver.x.ptr.p_double[i]-state->xorigin.ptr.p_double[i]; 20406 if( state->akind==0 ) 20408 cqmadx(&state->a, &state->tmp0, &state->tmp1, _state); 20410 if( state->akind==1 ) 20412 sparsesmv(&state->sparsea, state->sparseaupper, &state->tmp0, &state->tmp1, _state); 20414 v0 = ae_v_dotproduct(&state->tmp0.ptr.p_double[0], 1, &state->tmp1.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20415 v1 = ae_v_dotproduct(&state->tmp0.ptr.p_double[0], 1, &state->b.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20416 state->solver.f = 0.5*v0+v1; 20417 ae_v_move(&state->solver.g.ptr.p_double[0], 1, &state->tmp1.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20418 ae_v_add(&state->solver.g.ptr.p_double[0], 1, &state->b.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20421 if( state->repterminationtype==0 ) 20425 * BLEIC optimizer was terminated by one of its inner stopping 20426 * conditions. Usually it is iteration counter (if such 20427 * stopping condition was specified by user). 20429 minbleicresults(&state->solver, &state->xs, &state->solverrep, _state); 20430 state->repterminationtype = state->solverrep.terminationtype; 20436 * BLEIC optimizer was terminated in "emergency
" mode by QP 20439 * NOTE: such termination is "emergency
" only when viewed from 20440 * BLEIC's position. QP solver sees such termination as 20441 * routine one, triggered by QP's stopping criteria. 20443 minbleicemergencytermination(&state->solver, _state); 20450 /************************************************************************* 20454 State - algorithm state 20457 X - array[0..N-1], solution. 20458 This array is allocated and initialized only when 20459 Rep.TerminationType parameter is positive (success). 20460 Rep - optimization report. You should check Rep.TerminationType, 20461 which contains completion code, and you may check another 20462 fields which contain another information about algorithm 20465 Failure codes returned by algorithm are: 20466 * -5 inappropriate solver was used: 20467 * Cholesky solver for (semi)indefinite problems 20468 * Cholesky solver for problems with sparse matrix 20469 * -4 BLEIC-QP algorithm found unconstrained direction 20470 of negative curvature (function is unbounded from 20471 below even under constraints), no meaningful 20472 minimum can be found. 20473 * -3 inconsistent constraints (or maybe feasible point 20474 is too hard to find). If you are sure that 20475 constraints are feasible, try to restart optimizer 20476 with better initial approximation. 20478 Completion codes specific for Cholesky algorithm: 20479 * 4 successful completion 20481 Completion codes specific for BLEIC-based algorithm: 20482 * 1 relative function improvement is no more than EpsF. 20483 * 2 scaled step is no more than EpsX. 20484 * 4 scaled gradient norm is no more than EpsG. 
20485 * 5 MaxIts steps was taken 20488 Copyright 11.01.2011 by Bochkanov Sergey 20489 *************************************************************************/ 20490 void minqpresults(minqpstate* state, 20491 /* Real */ ae_vector* x, 20496 ae_vector_clear(x); 20497 _minqpreport_clear(rep); 20499 minqpresultsbuf(state, x, rep, _state); 20503 /************************************************************************* 20506 Buffered implementation of MinQPResults() which uses pre-allocated buffer 20507 to store X[]. If buffer size is too small, it resizes buffer. It is 20508 intended to be used in the inner cycles of performance critical algorithms 20509 where array reallocation penalty is too large to be ignored. 20512 Copyright 11.01.2011 by Bochkanov Sergey 20513 *************************************************************************/ 20514 void minqpresultsbuf(minqpstate* state, 20515 /* Real */ ae_vector* x, 20521 if( x->cnt<state->n ) 20523 ae_vector_set_length(x, state->n, _state); 20525 ae_v_move(&x->ptr.p_double[0], 1, &state->xs.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 20526 rep->inneriterationscount = state->repinneriterationscount; 20527 rep->outeriterationscount = state->repouteriterationscount; 20528 rep->nmv = state->repnmv; 20529 rep->ncholesky = state->repncholesky; 20530 rep->terminationtype = state->repterminationtype; 20534 /************************************************************************* 20535 Fast version of MinQPSetLinearTerm(), which doesn't check its arguments. 20536 For internal use only. 
20539 Copyright 11.01.2011 by Bochkanov Sergey 20540 *************************************************************************/ 20541 void minqpsetlineartermfast(minqpstate* state, 20542 /* Real */ ae_vector* b, 20547 ae_v_move(&state->b.ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 20551 /************************************************************************* 20552 Fast version of MinQPSetQuadraticTerm(), which doesn't check its arguments. 20554 It accepts additional parameter - shift S, which allows to "shift
" matrix 20555 A by adding s*I to A. S must be positive (although it is not checked). 20557 For internal use only. 20560 Copyright 11.01.2011 by Bochkanov Sergey 20561 *************************************************************************/ 20562 void minqpsetquadratictermfast(minqpstate* state, 20563 /* Real */ ae_matrix* a, 20575 cqmseta(&state->a, a, isupper, 1.0, _state); 20576 if( ae_fp_greater(s,0) ) 20578 rvectorsetlengthatleast(&state->tmp0, n, _state); 20579 for(i=0; i<=n-1; i++) 20581 state->tmp0.ptr.p_double[i] = a->ptr.pp_double[i][i]+s; 20583 cqmrewritedensediagonal(&state->a, &state->tmp0, _state); 20587 * Estimate norm of A 20588 * (it will be used later in the quadratic penalty function) 20591 for(i=0; i<=n-1; i++) 20595 for(j=i; j<=n-1; j++) 20597 state->anorm = ae_maxreal(state->anorm, ae_fabs(a->ptr.pp_double[i][j], _state), _state); 20602 for(j=0; j<=i; j++) 20604 state->anorm = ae_maxreal(state->anorm, ae_fabs(a->ptr.pp_double[i][j], _state), _state); 20608 state->anorm = state->anorm*n; 20612 /************************************************************************* 20613 Internal function which allows to rewrite diagonal of quadratic term. 20614 For internal use only. 20616 This function can be used only when you have dense A and already made 20617 MinQPSetQuadraticTerm(Fast) call. 20620 Copyright 16.01.2011 by Bochkanov Sergey 20621 *************************************************************************/ 20622 void minqprewritediagonal(minqpstate* state, 20623 /* Real */ ae_vector* s, 20628 cqmrewritedensediagonal(&state->a, s, _state); 20632 /************************************************************************* 20633 Fast version of MinQPSetStartingPoint(), which doesn't check its arguments. 20634 For internal use only. 
20637 Copyright 11.01.2011 by Bochkanov Sergey 20638 *************************************************************************/ 20639 void minqpsetstartingpointfast(minqpstate* state, 20640 /* Real */ ae_vector* x, 20647 ae_v_move(&state->startx.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20648 state->havex = ae_true; 20652 /************************************************************************* 20653 Fast version of MinQPSetOrigin(), which doesn't check its arguments. 20654 For internal use only. 20657 Copyright 11.01.2011 by Bochkanov Sergey 20658 *************************************************************************/ 20659 void minqpsetoriginfast(minqpstate* state, 20660 /* Real */ ae_vector* xorigin, 20667 ae_v_move(&state->xorigin.ptr.p_double[0], 1, &xorigin->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20671 /************************************************************************* 20672 Having feasible current point XC and possibly infeasible candidate point 20673 XN, this function performs longest step from XC to XN which retains 20674 feasibility. In case XN is found to be infeasible, at least one constraint 20677 For example, if we have: 20681 then this function will move us to X=1.0 and activate constraint "x<=1
". 20684 State - MinQP state. 20685 XC - current point, must be feasible with respect to 20687 XN - candidate point, can be infeasible with respect to some 20688 constraints. Must be located in the subspace of current 20689 active set, i.e. it is feasible with respect to already 20690 active constraints. 20691 Buf - temporary buffer, automatically resized if needed 20694 State - this function changes following fields of State: 20696 * State.ActiveC - active linear constraints 20700 >0, in case at least one inactive non-candidate constraint was activated 20701 =0, in case only "candidate
" constraints were activated 20702 <0, in case no constraints were activated by the step 20706 Copyright 29.02.2012 by Bochkanov Sergey 20707 *************************************************************************/ 20708 static ae_int_t minqp_minqpboundedstepandactivation(minqpstate* state, 20709 /* Real */ ae_vector* xn, 20710 /* Real */ ae_vector* buf, 20723 rvectorsetlengthatleast(buf, n, _state); 20724 ae_v_move(&buf->ptr.p_double[0], 1, &xn->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20725 ae_v_sub(&buf->ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20726 sasexploredirection(&state->sas, buf, &stpmax, &cidx, &cval, _state); 20727 needact = ae_fp_less_eq(stpmax,1); 20728 v = ae_minreal(stpmax, 1.0, _state); 20729 ae_v_muld(&buf->ptr.p_double[0], 1, ae_v_len(0,n-1), v); 20730 ae_v_add(&buf->ptr.p_double[0], 1, &state->sas.xc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 20731 result = sasmoveto(&state->sas, buf, needact, cidx, cval, _state); 20736 /************************************************************************* 20737 Model value: f = 0.5*x'*A*x + b'*x 20740 A - convex quadratic model; only main quadratic term is used, 20741 other parts of the model (D/Q/linear term) are ignored. 20742 This function does not modify model state. 
20744 XC - evaluation point 20745 Tmp - temporary buffer, automatically resized if needed 20748 Copyright 20.06.2012 by Bochkanov Sergey 20749 *************************************************************************/ 20750 static double minqp_minqpmodelvalue(convexquadraticmodel* a, 20751 /* Real */ ae_vector* b, 20752 /* Real */ ae_vector* xc, 20754 /* Real */ ae_vector* tmp, 20762 rvectorsetlengthatleast(tmp, n, _state); 20763 cqmadx(a, xc, tmp, _state); 20764 v0 = ae_v_dotproduct(&xc->ptr.p_double[0], 1, &tmp->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20765 v1 = ae_v_dotproduct(&xc->ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20766 result = 0.5*v0+v1; 20771 /************************************************************************* 20772 Optimum of A subject to: 20773 a) active boundary constraints (given by ActiveSet[] and corresponding 20775 b) active linear constraints (given by C, R, LagrangeC) 20778 A - main quadratic term of the model; 20779 although structure may store linear and rank-K terms, 20780 these terms are ignored and rewritten by this function. 
20781 ANorm - estimate of ||A|| (2-norm is used) 20782 B - array[N], linear term of the model 20783 XN - possibly preallocated buffer 20784 Tmp - temporary buffer (automatically resized) 20785 Tmp1 - temporary buffer (automatically resized) 20788 A - modified quadratic model (this function changes rank-K 20789 term and linear term of the model) 20790 LagrangeC- current estimate of the Lagrange coefficients 20794 True on success, False on failure (non-SPD model) 20797 Copyright 20.06.2012 by Bochkanov Sergey 20798 *************************************************************************/ 20799 static ae_bool minqp_minqpconstrainedoptimum(minqpstate* state, 20800 convexquadraticmodel* a, 20802 /* Real */ ae_vector* b, 20803 /* Real */ ae_vector* xn, 20804 /* Real */ ae_vector* tmp, 20805 /* Boolean */ ae_vector* tmpb, 20806 /* Real */ ae_vector* lagrangec, 20822 * Rebuild basis accroding to current active set. 20823 * We call SASRebuildBasis() to make sure that fields of SAS 20824 * store up to date values. 20826 sasrebuildbasis(&state->sas, _state); 20829 * Allocate temporaries. 
20831 rvectorsetlengthatleast(tmp, ae_maxint(n, state->sas.basissize, _state), _state); 20832 bvectorsetlengthatleast(tmpb, n, _state); 20833 rvectorsetlengthatleast(lagrangec, state->sas.basissize, _state); 20838 for(i=0; i<=state->sas.basissize-1; i++) 20840 tmp->ptr.p_double[i] = state->sas.pbasis.ptr.pp_double[i][n]; 20842 theta = 100.0*anorm; 20843 for(i=0; i<=n-1; i++) 20845 if( state->sas.activeset.ptr.p_int[i]>0 ) 20847 tmpb->ptr.p_bool[i] = ae_true; 20851 tmpb->ptr.p_bool[i] = ae_false; 20854 cqmsetactiveset(a, &state->sas.xc, tmpb, _state); 20855 cqmsetq(a, &state->sas.pbasis, tmp, state->sas.basissize, theta, _state); 20858 * Iterate until optimal values of Lagrange multipliers are found 20860 for(i=0; i<=state->sas.basissize-1; i++) 20862 lagrangec->ptr.p_double[i] = 0; 20864 feaserrnew = ae_maxrealnumber; 20866 for(itidx=1; itidx<=minqp_maxlagrangeits; itidx++) 20870 * Generate right part B using linear term and current 20871 * estimate of the Lagrange multipliers. 20873 ae_v_move(&tmp->ptr.p_double[0], 1, &b->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20874 for(i=0; i<=state->sas.basissize-1; i++) 20876 v = lagrangec->ptr.p_double[i]; 20877 ae_v_subd(&tmp->ptr.p_double[0], 1, &state->sas.pbasis.ptr.pp_double[i][0], 1, ae_v_len(0,n-1), v); 20879 cqmsetb(a, tmp, _state); 20884 result = cqmconstrainedoptimum(a, xn, _state); 20891 * Compare feasibility errors. 20892 * Terminate if error decreased too slowly. 
20894 feaserrold = feaserrnew; 20896 for(i=0; i<=state->sas.basissize-1; i++) 20898 v = ae_v_dotproduct(&state->sas.pbasis.ptr.pp_double[i][0], 1, &xn->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20899 feaserrnew = feaserrnew+ae_sqr(v-state->sas.pbasis.ptr.pp_double[i][n], _state); 20901 feaserrnew = ae_sqrt(feaserrnew, _state); 20902 if( ae_fp_greater_eq(feaserrnew,0.2*feaserrold) ) 20908 * Update Lagrange multipliers 20910 for(i=0; i<=state->sas.basissize-1; i++) 20912 v = ae_v_dotproduct(&state->sas.pbasis.ptr.pp_double[i][0], 1, &xn->ptr.p_double[0], 1, ae_v_len(0,n-1)); 20913 lagrangec->ptr.p_double[i] = lagrangec->ptr.p_double[i]-theta*(v-state->sas.pbasis.ptr.pp_double[i][n]); 20920 ae_bool _minqpstate_init(void* _p, ae_state *_state, ae_bool make_automatic) 20922 minqpstate *p = (minqpstate*)_p; 20923 ae_touch_ptr((void*)p); 20924 if( !_convexquadraticmodel_init(&p->a, _state, make_automatic) ) 20926 if( !_sparsematrix_init(&p->sparsea, _state, make_automatic) ) 20928 if( !ae_vector_init(&p->b, 0, DT_REAL, _state, make_automatic) ) 20930 if( !ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic) ) 20932 if( !ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic) ) 20934 if( !ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic) ) 20936 if( !ae_vector_init(&p->havebndl, 0, DT_BOOL, _state, make_automatic) ) 20938 if( !ae_vector_init(&p->havebndu, 0, DT_BOOL, _state, make_automatic) ) 20940 if( !ae_vector_init(&p->xorigin, 0, DT_REAL, _state, make_automatic) ) 20942 if( !ae_vector_init(&p->startx, 0, DT_REAL, _state, make_automatic) ) 20944 if( !ae_matrix_init(&p->cleic, 0, 0, DT_REAL, _state, make_automatic) ) 20946 if( !_sactiveset_init(&p->sas, _state, make_automatic) ) 20948 if( !ae_vector_init(&p->gc, 0, DT_REAL, _state, make_automatic) ) 20950 if( !ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic) ) 20952 if( !ae_vector_init(&p->pg, 0, DT_REAL, _state, make_automatic) ) 20954 if( !ae_vector_init(&p->workbndl, 0, DT_REAL, _state, 
make_automatic) ) 20956 if( !ae_vector_init(&p->workbndu, 0, DT_REAL, _state, make_automatic) ) 20958 if( !ae_matrix_init(&p->workcleic, 0, 0, DT_REAL, _state, make_automatic) ) 20960 if( !ae_vector_init(&p->xs, 0, DT_REAL, _state, make_automatic) ) 20962 if( !ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic) ) 20964 if( !ae_vector_init(&p->tmp1, 0, DT_REAL, _state, make_automatic) ) 20966 if( !ae_vector_init(&p->tmpb, 0, DT_BOOL, _state, make_automatic) ) 20968 if( !ae_vector_init(&p->rctmpg, 0, DT_REAL, _state, make_automatic) ) 20970 if( !ae_vector_init(&p->tmpi, 0, DT_INT, _state, make_automatic) ) 20972 if( !_normestimatorstate_init(&p->estimator, _state, make_automatic) ) 20974 if( !_minbleicstate_init(&p->solver, _state, make_automatic) ) 20976 if( !_minbleicreport_init(&p->solverrep, _state, make_automatic) ) 20982 ae_bool _minqpstate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 20984 minqpstate *dst = (minqpstate*)_dst; 20985 minqpstate *src = (minqpstate*)_src; 20987 dst->algokind = src->algokind; 20988 dst->akind = src->akind; 20989 if( !_convexquadraticmodel_init_copy(&dst->a, &src->a, _state, make_automatic) ) 20991 if( !_sparsematrix_init_copy(&dst->sparsea, &src->sparsea, _state, make_automatic) ) 20993 dst->sparseaupper = src->sparseaupper; 20994 dst->anorm = src->anorm; 20995 if( !ae_vector_init_copy(&dst->b, &src->b, _state, make_automatic) ) 20997 if( !ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic) ) 20999 if( !ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic) ) 21001 if( !ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic) ) 21003 if( !ae_vector_init_copy(&dst->havebndl, &src->havebndl, _state, make_automatic) ) 21005 if( !ae_vector_init_copy(&dst->havebndu, &src->havebndu, _state, make_automatic) ) 21007 if( !ae_vector_init_copy(&dst->xorigin, &src->xorigin, _state, make_automatic) ) 21009 if( !ae_vector_init_copy(&dst->startx, &src->startx, _state, 
make_automatic) ) 21011 dst->havex = src->havex; 21012 if( !ae_matrix_init_copy(&dst->cleic, &src->cleic, _state, make_automatic) ) 21014 dst->nec = src->nec; 21015 dst->nic = src->nic; 21016 dst->bleicepsg = src->bleicepsg; 21017 dst->bleicepsf = src->bleicepsf; 21018 dst->bleicepsx = src->bleicepsx; 21019 dst->bleicmaxits = src->bleicmaxits; 21020 if( !_sactiveset_init_copy(&dst->sas, &src->sas, _state, make_automatic) ) 21022 if( !ae_vector_init_copy(&dst->gc, &src->gc, _state, make_automatic) ) 21024 if( !ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic) ) 21026 if( !ae_vector_init_copy(&dst->pg, &src->pg, _state, make_automatic) ) 21028 if( !ae_vector_init_copy(&dst->workbndl, &src->workbndl, _state, make_automatic) ) 21030 if( !ae_vector_init_copy(&dst->workbndu, &src->workbndu, _state, make_automatic) ) 21032 if( !ae_matrix_init_copy(&dst->workcleic, &src->workcleic, _state, make_automatic) ) 21034 if( !ae_vector_init_copy(&dst->xs, &src->xs, _state, make_automatic) ) 21036 dst->repinneriterationscount = src->repinneriterationscount; 21037 dst->repouteriterationscount = src->repouteriterationscount; 21038 dst->repncholesky = src->repncholesky; 21039 dst->repnmv = src->repnmv; 21040 dst->repterminationtype = src->repterminationtype; 21041 dst->debugphase1flops = src->debugphase1flops; 21042 dst->debugphase2flops = src->debugphase2flops; 21043 dst->debugphase3flops = src->debugphase3flops; 21044 if( !ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic) ) 21046 if( !ae_vector_init_copy(&dst->tmp1, &src->tmp1, _state, make_automatic) ) 21048 if( !ae_vector_init_copy(&dst->tmpb, &src->tmpb, _state, make_automatic) ) 21050 if( !ae_vector_init_copy(&dst->rctmpg, &src->rctmpg, _state, make_automatic) ) 21052 if( !ae_vector_init_copy(&dst->tmpi, &src->tmpi, _state, make_automatic) ) 21054 if( !_normestimatorstate_init_copy(&dst->estimator, &src->estimator, _state, make_automatic) ) 21056 if( !_minbleicstate_init_copy(&dst->solver, 
/* NOTE(review): extraction-garbled span -- the original source line numbers
 * (21058, 21064, ...) are fused into the code text, and the extractor dropped
 * the opening/closing braces, the "return ae_false;" bodies of the failed
 * *_init_copy() checks, and the "return ae_true;" lines. This text is not
 * compilable as-is; restore the pristine ALGLIB optimization unit before
 * editing the code itself.
 *
 * Functions in this span:
 *   _minqpstate_clear()/_minqpstate_destroy() -- release every container owned
 *     by a minqpstate; their field lists must stay in sync with the fields
 *     copied by _minqpstate_init_copy() (tail visible above).
 *   _minqpreport_init/_init_copy/_clear/_destroy -- trivial lifecycle helpers
 *     for the plain-old-data minqpreport (scalar counters only, so clear and
 *     destroy merely touch the pointer). */
&src->solver, _state, make_automatic) ) 21058 if( !_minbleicreport_init_copy(&dst->solverrep, &src->solverrep, _state, make_automatic) ) 21064 void _minqpstate_clear(void* _p) 21066 minqpstate *p = (minqpstate*)_p; 21067 ae_touch_ptr((void*)p); 21068 _convexquadraticmodel_clear(&p->a); 21069 _sparsematrix_clear(&p->sparsea); 21070 ae_vector_clear(&p->b); 21071 ae_vector_clear(&p->bndl); 21072 ae_vector_clear(&p->bndu); 21073 ae_vector_clear(&p->s); 21074 ae_vector_clear(&p->havebndl); 21075 ae_vector_clear(&p->havebndu); 21076 ae_vector_clear(&p->xorigin); 21077 ae_vector_clear(&p->startx); 21078 ae_matrix_clear(&p->cleic); 21079 _sactiveset_clear(&p->sas); 21080 ae_vector_clear(&p->gc); 21081 ae_vector_clear(&p->xn); 21082 ae_vector_clear(&p->pg); 21083 ae_vector_clear(&p->workbndl); 21084 ae_vector_clear(&p->workbndu); 21085 ae_matrix_clear(&p->workcleic); 21086 ae_vector_clear(&p->xs); 21087 ae_vector_clear(&p->tmp0); 21088 ae_vector_clear(&p->tmp1); 21089 ae_vector_clear(&p->tmpb); 21090 ae_vector_clear(&p->rctmpg); 21091 ae_vector_clear(&p->tmpi); 21092 _normestimatorstate_clear(&p->estimator); 21093 _minbleicstate_clear(&p->solver); 21094 _minbleicreport_clear(&p->solverrep); 21098 void _minqpstate_destroy(void* _p) 21100 minqpstate *p = (minqpstate*)_p; 21101 ae_touch_ptr((void*)p); 21102 _convexquadraticmodel_destroy(&p->a); 21103 _sparsematrix_destroy(&p->sparsea); 21104 ae_vector_destroy(&p->b); 21105 ae_vector_destroy(&p->bndl); 21106 ae_vector_destroy(&p->bndu); 21107 ae_vector_destroy(&p->s); 21108 ae_vector_destroy(&p->havebndl); 21109 ae_vector_destroy(&p->havebndu); 21110 ae_vector_destroy(&p->xorigin); 21111 ae_vector_destroy(&p->startx); 21112 ae_matrix_destroy(&p->cleic); 21113 _sactiveset_destroy(&p->sas); 21114 ae_vector_destroy(&p->gc); 21115 ae_vector_destroy(&p->xn); 21116 ae_vector_destroy(&p->pg); 21117 ae_vector_destroy(&p->workbndl); 21118 ae_vector_destroy(&p->workbndu); 21119 ae_matrix_destroy(&p->workcleic); 21120 
ae_vector_destroy(&p->xs); 21121 ae_vector_destroy(&p->tmp0); 21122 ae_vector_destroy(&p->tmp1); 21123 ae_vector_destroy(&p->tmpb); 21124 ae_vector_destroy(&p->rctmpg); 21125 ae_vector_destroy(&p->tmpi); 21126 _normestimatorstate_destroy(&p->estimator); 21127 _minbleicstate_destroy(&p->solver); 21128 _minbleicreport_destroy(&p->solverrep); 21132 ae_bool _minqpreport_init(void* _p, ae_state *_state, ae_bool make_automatic) 21134 minqpreport *p = (minqpreport*)_p; 21135 ae_touch_ptr((void*)p); 21140 ae_bool _minqpreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 21142 minqpreport *dst = (minqpreport*)_dst; 21143 minqpreport *src = (minqpreport*)_src; 21144 dst->inneriterationscount = src->inneriterationscount; 21145 dst->outeriterationscount = src->outeriterationscount; 21146 dst->nmv = src->nmv; 21147 dst->ncholesky = src->ncholesky; 21148 dst->terminationtype = src->terminationtype; 21153 void _minqpreport_clear(void* _p) 21155 minqpreport *p = (minqpreport*)_p; 21156 ae_touch_ptr((void*)p); 21160 void _minqpreport_destroy(void* _p) 21162 minqpreport *p = (minqpreport*)_p; 21163 ae_touch_ptr((void*)p); 21169 /************************************************************************* 21170 IMPROVED LEVENBERG-MARQUARDT METHOD FOR 21171 NON-LINEAR LEAST SQUARES OPTIMIZATION 21174 This function is used to find minimum of function which is represented as 21176 F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) 21177 using value of function vector f[] and Jacobian of f[]. 21181 This algorithm will request following information during its operation: 21183 * function vector f[] at given point X 21184 * function vector f[] and Jacobian of f[] (simultaneously) at given point 21186 There are several overloaded versions of MinLMOptimize() function which 21187 correspond to different LM-like optimization algorithms provided by this 21188 unit. You should choose version which accepts fvec() and jac() callbacks. 
NOTE(review): extraction-garbled span. The ae_assert() message string literals
below are split across physical lines (e.g. "MinLMCreateVJ: N<1! ... continues
on the next line), and the extractor dropped several lines of minlmcreatevj()
itself: the "ae_int_t m," / "minlmstate* state," / "ae_state *_state)"
signature lines, the braces, and the "state->n = n; state->m = m;" field
assignments (inferable from the doc above and from minlm_lmprepare(n, m, ...),
but missing here -- TODO restore from the pristine ALGLIB sources, do not
hand-patch). minlmcreatevj() initializes a minlmstate for the V+J protocol
(algomode=1: function vector plus analytic Jacobian), validates N, M and X,
then delegates defaults to the minlmset*() calls visible at the end.
21189 First one is used to calculate f[] at given point, second one calculates 21190 f[] and Jacobian df[i]/dx[j]. 21192 You can try to initialize MinLMState structure with VJ function and then 21193 use incorrect version of MinLMOptimize() (for example, version which 21194 works with general form function and does not provide Jacobian), but it 21195 will lead to exception being thrown after first attempt to calculate 21200 1. User initializes algorithm state with MinLMCreateVJ() call 21201 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and 21203 3. User calls MinLMOptimize() function which takes algorithm state and 21204 callback functions. 21205 4. User calls MinLMResults() to get solution 21206 5. Optionally, user may call MinLMRestartFrom() to solve another problem 21207 with same N/M but another starting point and/or another function. 21208 MinLMRestartFrom() allows to reuse already initialized structure. 21213 * if given, only leading N elements of X are used 21214 * if not given, automatically determined from size of X 21215 M - number of functions f[i] 21216 X - initial solution, array[0..N-1] 21219 State - structure which stores algorithm state 21222 1. you may tune stopping conditions with MinLMSetCond() function 21223 2. if target function contains exp() or other fast growing functions, and 21224 optimization algorithm makes too large steps which leads to overflow, 21225 use MinLMSetStpMax() function to bound algorithm's steps. 21228 Copyright 30.03.2009 by Bochkanov Sergey 21229 *************************************************************************/ 21230 void minlmcreatevj(ae_int_t n, 21232 /* Real */ ae_vector* x, 21237 _minlmstate_clear(state); 21239 ae_assert(n>=1, "MinLMCreateVJ: N<1!
", _state); 21240 ae_assert(m>=1, "MinLMCreateVJ: M<1!
", _state); 21241 ae_assert(x->cnt>=n, "MinLMCreateVJ: Length(X)<N!
", _state); 21242 ae_assert(isfinitevector(x, n, _state), "MinLMCreateVJ: X contains infinite
or NaN values!
", _state); 21245 * initialize, check parameters 21247 state->teststep = 0; 21250 state->algomode = 1; 21251 state->hasf = ae_false; 21252 state->hasfi = ae_true; 21253 state->hasg = ae_false; 21256 * second stage of initialization 21258 minlm_lmprepare(n, m, ae_false, state, _state); 21259 minlmsetacctype(state, 0, _state); 21260 minlmsetcond(state, 0, 0, 0, 0, _state); 21261 minlmsetxrep(state, ae_false, _state); 21262 minlmsetstpmax(state, 0, _state); 21263 minlmrestartfrom(state, x, _state); 21267 /************************************************************************* 21268 IMPROVED LEVENBERG-MARQUARDT METHOD FOR 21269 NON-LINEAR LEAST SQUARES OPTIMIZATION 21272 This function is used to find minimum of function which is represented as 21274 F(x) = f[0]^2(x[0],...,x[n-1]) + ... + f[m-1]^2(x[0],...,x[n-1]) 21275 using value of function vector f[] only. Finite differences are used to 21276 calculate Jacobian. 21280 This algorithm will request following information during its operation: 21281 * function vector f[] at given point X 21283 There are several overloaded versions of MinLMOptimize() function which 21284 correspond to different LM-like optimization algorithms provided by this 21285 unit. You should choose version which accepts fvec() callback. 21287 You can try to initialize MinLMState structure with VJ function and then 21288 use incorrect version of MinLMOptimize() (for example, version which 21289 works with general form function and does not accept function vector), but 21290 it will lead to exception being thrown after first attempt to calculate 21295 1. User initializes algorithm state with MinLMCreateV() call 21296 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and 21298 3. User calls MinLMOptimize() function which takes algorithm state and 21299 callback functions. 21300 4. User calls MinLMResults() to get solution 21301 5. 
NOTE(review): extraction-garbled span, same defects as minlmcreatevj() above --
split assert strings and dropped signature lines ("ae_int_t m," /
"double diffstep," / "minlmstate* state," / "ae_state *_state)", braces, and
the "state->n = n; state->m = m;" assignments are missing; restore from the
pristine ALGLIB sources). minlmcreatev() initializes a minlmstate for the
derivative-free protocol (algomode=0: function vector only, Jacobian estimated
by finite differences of size diffstep); note it defaults to acceleration
type 1 (secant updates), unlike minlmcreatevj() which defaults to 0.
Optionally, user may call MinLMRestartFrom() to solve another problem 21302 with same N/M but another starting point and/or another function. 21303 MinLMRestartFrom() allows to reuse already initialized structure. 21308 * if given, only leading N elements of X are used 21309 * if not given, automatically determined from size of X 21310 M - number of functions f[i] 21311 X - initial solution, array[0..N-1] 21312 DiffStep- differentiation step, >0 21315 State - structure which stores algorithm state 21317 See also MinLMIteration, MinLMResults. 21320 1. you may tune stopping conditions with MinLMSetCond() function 21321 2. if target function contains exp() or other fast growing functions, and 21322 optimization algorithm makes too large steps which leads to overflow, 21323 use MinLMSetStpMax() function to bound algorithm's steps. 21326 Copyright 30.03.2009 by Bochkanov Sergey 21327 *************************************************************************/ 21328 void minlmcreatev(ae_int_t n, 21330 /* Real */ ae_vector* x, 21336 _minlmstate_clear(state); 21338 ae_assert(ae_isfinite(diffstep, _state), "MinLMCreateV: DiffStep is not finite!
", _state); 21339 ae_assert(ae_fp_greater(diffstep,0), "MinLMCreateV: DiffStep<=0!
", _state); 21340 ae_assert(n>=1, "MinLMCreateV: N<1!
", _state); 21341 ae_assert(m>=1, "MinLMCreateV: M<1!
", _state); 21342 ae_assert(x->cnt>=n, "MinLMCreateV: Length(X)<N!
", _state); 21343 ae_assert(isfinitevector(x, n, _state), "MinLMCreateV: X contains infinite
or NaN values!
", _state); 21348 state->teststep = 0; 21351 state->algomode = 0; 21352 state->hasf = ae_false; 21353 state->hasfi = ae_true; 21354 state->hasg = ae_false; 21355 state->diffstep = diffstep; 21358 * Second stage of initialization 21360 minlm_lmprepare(n, m, ae_false, state, _state); 21361 minlmsetacctype(state, 1, _state); 21362 minlmsetcond(state, 0, 0, 0, 0, _state); 21363 minlmsetxrep(state, ae_false, _state); 21364 minlmsetstpmax(state, 0, _state); 21365 minlmrestartfrom(state, x, _state); 21369 /************************************************************************* 21370 LEVENBERG-MARQUARDT-LIKE METHOD FOR NON-LINEAR OPTIMIZATION 21373 This function is used to find minimum of general form (not "sum-of-
NOTE(review): extraction-garbled span -- split assert strings; the dropped
lines here include minlmcreatefgh()'s "minlmstate* state," / "ae_state
*_state)" signature tail, braces, and the "state->n = n;" assignment (restore
from the pristine ALGLIB sources). minlmcreatefgh() initializes a minlmstate
for the FGH protocol (algomode=2: function value, gradient and Hessian; note
hasf/hasg are true and hasfi is false, the opposite of the V/VJ creators, and
minlm_lmprepare is called with m=0 and areal Hessian flag ae_true).
21374 -squares
") function 21375 F = F(x[0], ..., x[n-1]) 21376 using its gradient and Hessian. Levenberg-Marquardt modification with 21377 L-BFGS pre-optimization and internal pre-conditioned L-BFGS optimization 21378 after each Levenberg-Marquardt step is used. 21382 This algorithm will request following information during its operation: 21384 * function value F at given point X 21385 * F and gradient G (simultaneously) at given point X 21386 * F, G and Hessian H (simultaneously) at given point X 21388 There are several overloaded versions of MinLMOptimize() function which 21389 correspond to different LM-like optimization algorithms provided by this 21390 unit. You should choose version which accepts func(), grad() and hess() 21391 function pointers. First pointer is used to calculate F at given point, 21392 second one calculates F(x) and grad F(x), third one calculates F(x), 21393 grad F(x), hess F(x). 21395 You can try to initialize MinLMState structure with FGH-function and then 21396 use incorrect version of MinLMOptimize() (for example, version which does 21397 not provide Hessian matrix), but it will lead to exception being thrown 21398 after first attempt to calculate Hessian. 21402 1. User initializes algorithm state with MinLMCreateFGH() call 21403 2. User tunes solver parameters with MinLMSetCond(), MinLMSetStpMax() and 21405 3. User calls MinLMOptimize() function which takes algorithm state and 21406 pointers (delegates, etc.) to callback functions. 21407 4. User calls MinLMResults() to get solution 21408 5. Optionally, user may call MinLMRestartFrom() to solve another problem 21409 with same N but another starting point and/or another function. 21410 MinLMRestartFrom() allows to reuse already initialized structure. 21415 * if given, only leading N elements of X are used 21416 * if not given, automatically determined from size of X 21417 X - initial solution, array[0..N-1] 21420 State - structure which stores algorithm state 21423 1. 
you may tune stopping conditions with MinLMSetCond() function 21424 2. if target function contains exp() or other fast growing functions, and 21425 optimization algorithm makes too large steps which leads to overflow, 21426 use MinLMSetStpMax() function to bound algorithm's steps. 21429 Copyright 30.03.2009 by Bochkanov Sergey 21430 *************************************************************************/ 21431 void minlmcreatefgh(ae_int_t n, 21432 /* Real */ ae_vector* x, 21437 _minlmstate_clear(state); 21439 ae_assert(n>=1, "MinLMCreateFGH: N<1!
", _state); 21440 ae_assert(x->cnt>=n, "MinLMCreateFGH: Length(X)<N!
", _state); 21441 ae_assert(isfinitevector(x, n, _state), "MinLMCreateFGH: X contains infinite
or NaN values!
", _state); 21446 state->teststep = 0; 21449 state->algomode = 2; 21450 state->hasf = ae_true; 21451 state->hasfi = ae_false; 21452 state->hasg = ae_true; 21457 minlm_lmprepare(n, 0, ae_true, state, _state); 21458 minlmsetacctype(state, 2, _state); 21459 minlmsetcond(state, 0, 0, 0, 0, _state); 21460 minlmsetxrep(state, ae_false, _state); 21461 minlmsetstpmax(state, 0, _state); 21462 minlmrestartfrom(state, x, _state); 21466 /************************************************************************* 21467 This function sets stopping conditions for Levenberg-Marquardt optimization 21471 State - structure which stores algorithm state 21473 The subroutine finishes its work if the condition 21474 |v|<EpsG is satisfied, where: 21475 * |.| means Euclidean norm 21476 * v - scaled gradient vector, v[i]=g[i]*s[i] 21478 * s - scaling coefficients set by MinLMSetScale() 21480 The subroutine finishes its work if on k+1-th iteration 21481 the condition |F(k+1)-F(k)|<=EpsF*max{|F(k)|,|F(k+1)|,1} 21484 The subroutine finishes its work if on k+1-th iteration 21485 the condition |v|<=EpsX is fulfilled, where: 21486 * |.| means Euclidean norm 21487 * v - scaled step vector, v[i]=dx[i]/s[i] 21488 * dx - step vector, dx=X(k+1)-X(k) 21489 * s - scaling coefficients set by MinLMSetScale() 21490 MaxIts - maximum number of iterations. If MaxIts=0, the number of 21491 iterations is unlimited. Only Levenberg-Marquardt 21492 iterations are counted (L-BFGS/CG iterations are NOT 21493 counted because their cost is very low compared to that of 21496 Passing EpsG=0, EpsF=0, EpsX=0 and MaxIts=0 (simultaneously) will lead to 21497 automatic stopping criterion selection (small EpsX). 21500 Copyright 02.04.2010 by Bochkanov Sergey 21501 *************************************************************************/ 21502 void minlmsetcond(minlmstate* state, 21511 ae_assert(ae_isfinite(epsg, _state), "MinLMSetCond: EpsG is not finite number!
", _state); 21512 ae_assert(ae_fp_greater_eq(epsg,0), "MinLMSetCond: negative EpsG!
", _state); 21513 ae_assert(ae_isfinite(epsf, _state), "MinLMSetCond: EpsF is not finite number!
", _state); 21514 ae_assert(ae_fp_greater_eq(epsf,0), "MinLMSetCond: negative EpsF!
", _state); 21515 ae_assert(ae_isfinite(epsx, _state), "MinLMSetCond: EpsX is not finite number!
", _state); 21516 ae_assert(ae_fp_greater_eq(epsx,0), "MinLMSetCond: negative EpsX!
", _state); 21517 ae_assert(maxits>=0, "MinLMSetCond: negative MaxIts!
", _state); 21518 if( ((ae_fp_eq(epsg,0)&&ae_fp_eq(epsf,0))&&ae_fp_eq(epsx,0))&&maxits==0 ) 21522 state->epsg = epsg; 21523 state->epsf = epsf; 21524 state->epsx = epsx; 21525 state->maxits = maxits; 21529 /************************************************************************* 21530 This function turns on/off reporting. 21533 State - structure which stores algorithm state 21534 NeedXRep- whether iteration reports are needed or not 21536 If NeedXRep is True, algorithm will call rep() callback function if it is 21537 provided to MinLMOptimize(). Both Levenberg-Marquardt and internal L-BFGS 21538 iterations are reported. 21541 Copyright 02.04.2010 by Bochkanov Sergey 21542 *************************************************************************/ 21543 void minlmsetxrep(minlmstate* state, ae_bool needxrep, ae_state *_state) 21547 state->xrep = needxrep; 21551 /************************************************************************* 21552 This function sets maximum step length 21555 State - structure which stores algorithm state 21556 StpMax - maximum step length, >=0. Set StpMax to 0.0, if you don't 21557 want to limit step length. 21559 Use this subroutine when you optimize target function which contains exp() 21560 or other fast growing functions, and optimization algorithm makes too 21561 large steps which leads to overflow. This function allows us to reject 21562 steps that are too large (and therefore expose us to the possible 21563 overflow) without actually calculating function value at the x+stp*d. 21565 NOTE: non-zero StpMax leads to moderate performance degradation because 21566 intermediate step of preconditioned L-BFGS optimization is incompatible 21567 with limits on step size. 
NOTE(review): extraction-garbled span -- split assert strings and dropped
brace/loop-body lines (e.g. the "ae_int_t i;" declaration and braces of
minlmsetscale()'s for-loop are missing; restore from pristine ALGLIB sources).
minlmsetstpmax() validates and stores the step-length bound; minlmsetscale()
validates S (finite, non-zero) and stores the absolute values of the scaling
coefficients, per the "sign doesn't matter" contract in its doc comment.
21570 Copyright 02.04.2010 by Bochkanov Sergey 21571 *************************************************************************/ 21572 void minlmsetstpmax(minlmstate* state, double stpmax, ae_state *_state) 21576 ae_assert(ae_isfinite(stpmax, _state), "MinLMSetStpMax: StpMax is not finite!
", _state); 21577 ae_assert(ae_fp_greater_eq(stpmax,0), "MinLMSetStpMax: StpMax<0!
", _state); 21578 state->stpmax = stpmax; 21582 /************************************************************************* 21583 This function sets scaling coefficients for LM optimizer. 21585 ALGLIB optimizers use scaling matrices to test stopping conditions (step 21586 size and gradient are scaled before comparison with tolerances). Scale of 21587 the I-th variable is a translation invariant measure of: 21588 a) "how large
" the variable is 21589 b) how large the step should be to make significant changes in the function 21591 Generally, scale is NOT considered to be a form of preconditioner. But LM 21592 optimizer is unique in that it uses scaling matrix both in the stopping 21593 condition tests and as Marquardt damping factor. 21595 Proper scaling is very important for the algorithm performance. It is less 21596 important for the quality of results, but still has some influence (it is 21597 easier to converge when variables are properly scaled, so premature 21598 stopping is possible when very badly scaled variables are combined with 21599 relaxed stopping conditions). 21602 State - structure stores algorithm state 21603 S - array[N], non-zero scaling coefficients 21604 S[i] may be negative, sign doesn't matter. 21607 Copyright 14.01.2011 by Bochkanov Sergey 21608 *************************************************************************/ 21609 void minlmsetscale(minlmstate* state, 21610 /* Real */ ae_vector* s, 21616 ae_assert(s->cnt>=state->n, "MinLMSetScale: Length(S)<N
", _state); 21617 for(i=0; i<=state->n-1; i++) 21619 ae_assert(ae_isfinite(s->ptr.p_double[i], _state), "MinLMSetScale: S contains infinite
or NAN elements
", _state); 21620 ae_assert(ae_fp_neq(s->ptr.p_double[i],0), "MinLMSetScale: S contains zero elements
", _state); 21621 state->s.ptr.p_double[i] = ae_fabs(s->ptr.p_double[i], _state); 21626 /************************************************************************* 21627 This function sets boundary constraints for LM optimizer 21629 Boundary constraints are inactive by default (after initial creation). 21630 They are preserved until explicitly turned off with another SetBC() call. 21633 State - structure stores algorithm state 21634 BndL - lower bounds, array[N]. 21635 If some (all) variables are unbounded, you may specify 21636 very small number or -INF (latter is recommended because 21637 it will allow solver to use better algorithm). 21638 BndU - upper bounds, array[N]. 21639 If some (all) variables are unbounded, you may specify 21640 very large number or +INF (latter is recommended because 21641 it will allow solver to use better algorithm). 21643 NOTE 1: it is possible to specify BndL[i]=BndU[i]. In this case I-th 21644 variable will be "frozen
" at X[i]=BndL[i]=BndU[i]. 21646 NOTE 2: this solver has following useful properties: 21647 * bound constraints are always satisfied exactly 21648 * function is evaluated only INSIDE area specified by bound constraints 21652 Copyright 14.01.2011 by Bochkanov Sergey 21653 *************************************************************************/ 21654 void minlmsetbc(minlmstate* state, 21655 /* Real */ ae_vector* bndl, 21656 /* Real */ ae_vector* bndu, 21664 ae_assert(bndl->cnt>=n, "MinLMSetBC: Length(BndL)<N
", _state); 21665 ae_assert(bndu->cnt>=n, "MinLMSetBC: Length(BndU)<N
", _state); 21666 for(i=0; i<=n-1; i++) 21668 ae_assert(ae_isfinite(bndl->ptr.p_double[i], _state)||ae_isneginf(bndl->ptr.p_double[i], _state), "MinLMSetBC: BndL contains NAN
or +
INF", _state); 21669 ae_assert(ae_isfinite(bndu->ptr.p_double[i], _state)||ae_isposinf(bndu->ptr.p_double[i], _state), "MinLMSetBC: BndU contains NAN
or -
INF", _state); 21670 state->bndl.ptr.p_double[i] = bndl->ptr.p_double[i]; 21671 state->havebndl.ptr.p_bool[i] = ae_isfinite(bndl->ptr.p_double[i], _state); 21672 state->bndu.ptr.p_double[i] = bndu->ptr.p_double[i]; 21673 state->havebndu.ptr.p_bool[i] = ae_isfinite(bndu->ptr.p_double[i], _state); 21678 /************************************************************************* 21679 This function is used to change acceleration settings 21681 You can choose between three acceleration strategies: 21682 * AccType=0, no acceleration. 21683 * AccType=1, secant updates are used to update quadratic model after each 21684 iteration. After fixed number of iterations (or after model breakdown) 21685 we recalculate quadratic model using analytic Jacobian or finite 21686 differences. Number of secant-based iterations depends on optimization 21687 settings: about 3 iterations - when we have analytic Jacobian, up to 2*N 21688 iterations - when we use finite differences to calculate Jacobian. 21690 AccType=1 is recommended when Jacobian calculation cost is prohibitive 21691 high (several Mx1 function vector calculations followed by several NxN 21692 Cholesky factorizations are faster than calculation of one M*N Jacobian). 21693 It should also be used when we have no Jacobian, because finite difference 21694 approximation takes too much time to compute. 21696 Table below list optimization protocols (XYZ protocol corresponds to 21697 MinLMCreateXYZ) and acceleration types they support (and use by default). 21699 ACCELERATION TYPES SUPPORTED BY OPTIMIZATION PROTOCOLS: 21701 protocol 0 1 comment 21708 protocol 0 1 comment 21709 V x without acceleration it is so slooooooooow 21713 NOTE: this function should be called before optimization. Attempt to call 21714 it during algorithm iterations may result in unexpected behavior. 21716 NOTE: attempt to call this function with unsupported protocol/acceleration 21717 combination will result in exception being thrown. 
NOTE(review): extraction-garbled span -- the extractor dropped
minlmsetacctype()'s "ae_int_t acctype," / "ae_state *_state)" signature lines,
the braces, and the early-return / if-else scaffolding around the AccType=0
and AccType=1 branches visible below (restore from pristine ALGLIB sources;
AccType=2 handling, if any, is not visible here -- do not infer it).
For AccType=1 the max model age is 2*N under finite differences (algomode==0)
and the small constant minlm_smallmodelage otherwise, matching the
"up to 2*N iterations" note in the doc comment above. The minlmiteration()
reverse-communication driver whose header starts at 21756 continues past the
end of this chunk.
21720 Copyright 14.10.2010 by Bochkanov Sergey 21721 *************************************************************************/ 21722 void minlmsetacctype(minlmstate* state, 21728 ae_assert((acctype==0||acctype==1)||acctype==2, "MinLMSetAccType: incorrect AccType!
", _state); 21735 state->maxmodelage = 0; 21736 state->makeadditers = ae_false; 21741 ae_assert(state->hasfi, "MinLMSetAccType: AccType=1 is incompatible with current protocol!
", _state); 21742 if( state->algomode==0 ) 21744 state->maxmodelage = 2*state->n; 21748 state->maxmodelage = minlm_smallmodelage; 21750 state->makeadditers = ae_false; 21756 /************************************************************************* 21759 1. Depending on function used to create state structure, this algorithm 21760 may accept Jacobian and/or Hessian and/or gradient. According to the 21761 said above, there are several versions of this function, which accept 21762 different sets of callbacks. 21764 This flexibility opens way to subtle errors - you may create state with 21765 MinLMCreateFGH() (optimization using Hessian), but call function which 21766 does not accept Hessian. So when algorithm will request Hessian, there 21767 will be no callback to call. In this case exception will be thrown. 21769 Be careful to avoid such errors because there is no way to find them at 21770 compile time - you can see them at runtime only. 21773 Copyright 10.03.2009 by Bochkanov Sergey 21774 *************************************************************************/ 21775 ae_bool minlmiteration(minlmstate* state, ae_state *_state) 21791 * Reverse communication preparations 21792 * I know it looks ugly, but it works the same way 21793 * anywhere from C++ to Python. 
21795 * This code initializes locals by: 21796 * * random values determined during code 21797 * generation - on first subroutine call 21798 * * values from previous call - on subsequent calls 21800 if( state->rstate.stage>=0 ) 21802 n = state->rstate.ia.ptr.p_int[0]; 21803 m = state->rstate.ia.ptr.p_int[1]; 21804 iflag = state->rstate.ia.ptr.p_int[2]; 21805 i = state->rstate.ia.ptr.p_int[3]; 21806 k = state->rstate.ia.ptr.p_int[4]; 21807 bflag = state->rstate.ba.ptr.p_bool[0]; 21808 v = state->rstate.ra.ptr.p_double[0]; 21809 s = state->rstate.ra.ptr.p_double[1]; 21810 t = state->rstate.ra.ptr.p_double[2]; 21824 if( state->rstate.stage==0 ) 21828 if( state->rstate.stage==1 ) 21832 if( state->rstate.stage==2 ) 21836 if( state->rstate.stage==3 ) 21840 if( state->rstate.stage==4 ) 21844 if( state->rstate.stage==5 ) 21848 if( state->rstate.stage==6 ) 21852 if( state->rstate.stage==7 ) 21856 if( state->rstate.stage==8 ) 21860 if( state->rstate.stage==9 ) 21864 if( state->rstate.stage==10 ) 21868 if( state->rstate.stage==11 ) 21872 if( state->rstate.stage==12 ) 21876 if( state->rstate.stage==13 ) 21880 if( state->rstate.stage==14 ) 21884 if( state->rstate.stage==15 ) 21888 if( state->rstate.stage==16 ) 21892 if( state->rstate.stage==17 ) 21896 if( state->rstate.stage==18 ) 21910 state->repiterationscount = 0; 21911 state->repterminationtype = 0; 21912 state->repfuncidx = -1; 21913 state->repvaridx = -1; 21914 state->repnfunc = 0; 21915 state->repnjac = 0; 21916 state->repngrad = 0; 21917 state->repnhess = 0; 21918 state->repncholesky = 0; 21921 * check consistency of constraints, 21922 * enforce feasibility of the solution 21925 if( !enforceboundaryconstraints(&state->xbase, &state->bndl, &state->havebndl, &state->bndu, &state->havebndu, n, 0, _state) ) 21927 state->repterminationtype = -3; 21931 minqpsetbc(&state->qpstate, &state->bndl, &state->bndu, _state); 21934 * Check, that transferred derivative value is right 21936 minlm_clearrequestfields(state, _state); 21937 
if( !(state->algomode==1&&ae_fp_greater(state->teststep,0)) ) 21941 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 21942 state->needfij = ae_true; 21949 ae_assert((state->havebndl.ptr.p_bool[i]&&ae_fp_less_eq(state->bndl.ptr.p_double[i],state->x.ptr.p_double[i]))||!state->havebndl.ptr.p_bool[i], "MinLM:
internal error(State.X is out of bounds)
", _state); 21950 ae_assert((state->havebndu.ptr.p_bool[i]&&ae_fp_less_eq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i]))||!state->havebndu.ptr.p_bool[i], "MinLMIteration:
internal error(State.X is out of bounds)
", _state); 21951 v = state->x.ptr.p_double[i]; 21952 state->x.ptr.p_double[i] = v-state->teststep*state->s.ptr.p_double[i]; 21953 if( state->havebndl.ptr.p_bool[i] ) 21955 state->x.ptr.p_double[i] = ae_maxreal(state->x.ptr.p_double[i], state->bndl.ptr.p_double[i], _state); 21957 state->xm1 = state->x.ptr.p_double[i]; 21958 state->rstate.stage = 0; 21961 ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 21962 ae_v_move(&state->gm1.ptr.p_double[0], 1, &state->j.ptr.pp_double[0][i], state->j.stride, ae_v_len(0,m-1)); 21963 state->x.ptr.p_double[i] = v+state->teststep*state->s.ptr.p_double[i]; 21964 if( state->havebndu.ptr.p_bool[i] ) 21966 state->x.ptr.p_double[i] = ae_minreal(state->x.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 21968 state->xp1 = state->x.ptr.p_double[i]; 21969 state->rstate.stage = 1; 21972 ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 21973 ae_v_move(&state->gp1.ptr.p_double[0], 1, &state->j.ptr.pp_double[0][i], state->j.stride, ae_v_len(0,m-1)); 21974 state->x.ptr.p_double[i] = (state->xm1+state->xp1)/2; 21975 if( state->havebndl.ptr.p_bool[i] ) 21977 state->x.ptr.p_double[i] = ae_maxreal(state->x.ptr.p_double[i], state->bndl.ptr.p_double[i], _state); 21979 if( state->havebndu.ptr.p_bool[i] ) 21981 state->x.ptr.p_double[i] = ae_minreal(state->x.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 21983 state->rstate.stage = 2; 21986 ae_v_move(&state->fc1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 21987 ae_v_move(&state->gc1.ptr.p_double[0], 1, &state->j.ptr.pp_double[0][i], state->j.stride, ae_v_len(0,m-1)); 21988 state->x.ptr.p_double[i] = v; 21989 for(k=0; k<=m-1; k++) 21991 if( !derivativecheck(state->fm1.ptr.p_double[k], state->gm1.ptr.p_double[k], state->fp1.ptr.p_double[k], state->gp1.ptr.p_double[k], state->fc1.ptr.p_double[k], state->gc1.ptr.p_double[k], state->xp1-state->xm1, _state) ) 21993 state->repfuncidx = k; 
21994 state->repvaridx = i; 21995 state->repterminationtype = -7; 22003 state->needfij = ae_false; 22007 * Initial report of current point 22009 * Note 1: we rewrite State.X twice because 22010 * user may accidentally change it after first call. 22012 * Note 2: we set NeedF or NeedFI depending on what 22013 * information about function we have. 22019 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22020 minlm_clearrequestfields(state, _state); 22025 state->needf = ae_true; 22026 state->rstate.stage = 3; 22029 state->needf = ae_false; 22032 ae_assert(state->hasfi, "MinLM:
internal error 2!
", _state); 22033 state->needfi = ae_true; 22034 state->rstate.stage = 4; 22037 state->needfi = ae_false; 22038 v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22041 state->repnfunc = state->repnfunc+1; 22042 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22043 minlm_clearrequestfields(state, _state); 22044 state->xupdated = ae_true; 22045 state->rstate.stage = 5; 22048 state->xupdated = ae_false; 22052 * Prepare control variables 22055 state->lambdav = -ae_maxrealnumber; 22056 state->modelage = state->maxmodelage+1; 22057 state->deltaxready = ae_false; 22058 state->deltafready = ae_false; 22063 * We move through it until either: 22064 * * one of the stopping conditions is met 22065 * * we decide that stopping conditions are too stringent 22066 * and break from cycle 22076 * First, we have to prepare quadratic model for our function. 22077 * We use BFlag to ensure that model is prepared; 22078 * if it is false at the end of this block, something went wrong. 22080 * We may either calculate brand new model or update old one. 22082 * Before this block we have: 22083 * * State.XBase - current position. 22084 * * State.DeltaX - if DeltaXReady is True 22085 * * State.DeltaF - if DeltaFReady is True 22087 * After this block is over, we will have: 22088 * * State.XBase - base point (unchanged) 22089 * * State.FBase - F(XBase) 22090 * * State.GBase - linear term 22091 * * State.QuadraticModel - quadratic term 22092 * * State.LambdaV - current estimate for lambda 22094 * We also clear DeltaXReady/DeltaFReady flags 22095 * after initialization is done. 22098 if( !(state->algomode==0||state->algomode==1) ) 22104 * Calculate f[] and Jacobian 22106 if( !(state->modelage>state->maxmodelage||!(state->deltaxready&&state->deltafready)) ) 22112 * Refresh model (using either finite differences or analytic Jacobian) 22114 if( state->algomode!=0 ) 22120 * Optimization using F values only. 
22121 * Use finite differences to estimate Jacobian. 22123 ae_assert(state->hasfi, "MinLMIteration:
internal error when estimating Jacobian (no
f[])
", _state); 22132 * We guard X[k] from leaving [BndL,BndU]. 22133 * In case BndL=BndU, we assume that derivative in this direction is zero. 22135 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22136 state->x.ptr.p_double[k] = state->x.ptr.p_double[k]-state->s.ptr.p_double[k]*state->diffstep; 22137 if( state->havebndl.ptr.p_bool[k] ) 22139 state->x.ptr.p_double[k] = ae_maxreal(state->x.ptr.p_double[k], state->bndl.ptr.p_double[k], _state); 22141 if( state->havebndu.ptr.p_bool[k] ) 22143 state->x.ptr.p_double[k] = ae_minreal(state->x.ptr.p_double[k], state->bndu.ptr.p_double[k], _state); 22145 state->xm1 = state->x.ptr.p_double[k]; 22146 minlm_clearrequestfields(state, _state); 22147 state->needfi = ae_true; 22148 state->rstate.stage = 6; 22151 state->repnfunc = state->repnfunc+1; 22152 ae_v_move(&state->fm1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22153 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22154 state->x.ptr.p_double[k] = state->x.ptr.p_double[k]+state->s.ptr.p_double[k]*state->diffstep; 22155 if( state->havebndl.ptr.p_bool[k] ) 22157 state->x.ptr.p_double[k] = ae_maxreal(state->x.ptr.p_double[k], state->bndl.ptr.p_double[k], _state); 22159 if( state->havebndu.ptr.p_bool[k] ) 22161 state->x.ptr.p_double[k] = ae_minreal(state->x.ptr.p_double[k], state->bndu.ptr.p_double[k], _state); 22163 state->xp1 = state->x.ptr.p_double[k]; 22164 minlm_clearrequestfields(state, _state); 22165 state->needfi = ae_true; 22166 state->rstate.stage = 7; 22169 state->repnfunc = state->repnfunc+1; 22170 ae_v_move(&state->fp1.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22171 v = state->xp1-state->xm1; 22172 if( ae_fp_neq(v,0) ) 22175 ae_v_moved(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fp1.ptr.p_double[0], 1, ae_v_len(0,m-1), v); 22176 ae_v_subd(&state->j.ptr.pp_double[0][k], state->j.stride, &state->fm1.ptr.p_double[0], 1, 
ae_v_len(0,m-1), v); 22180 for(i=0; i<=m-1; i++) 22182 state->j.ptr.pp_double[i][k] = 0; 22190 * Calculate F(XBase) 22192 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22193 minlm_clearrequestfields(state, _state); 22194 state->needfi = ae_true; 22195 state->rstate.stage = 8; 22198 state->needfi = ae_false; 22199 state->repnfunc = state->repnfunc+1; 22200 state->repnjac = state->repnjac+1; 22205 state->modelage = 0; 22210 * Obtain f[] and Jacobian 22212 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22213 minlm_clearrequestfields(state, _state); 22214 state->needfij = ae_true; 22215 state->rstate.stage = 9; 22218 state->needfij = ae_false; 22219 state->repnfunc = state->repnfunc+1; 22220 state->repnjac = state->repnjac+1; 22225 state->modelage = 0; 22231 * State.J contains Jacobian or its current approximation; 22232 * refresh it using secant updates: 22234 * f(x0+dx) = f(x0) + J*dx, 22235 * J_new = J_old + u*h' 22237 * u = (f_new - f_old - J_old*h)/(h'h) 22239 * We can explicitly generate h and u, but it is 22240 * preferential to do in-place calculations. Only 22241 * I-th row of J_old is needed to calculate u[I], 22242 * so we can update J row by row in one pass. 22244 * NOTE: we expect that State.XBase contains new point, 22245 * State.FBase contains old point, State.DeltaX and 22246 * State.DeltaY contain updates from last step. 22248 ae_assert(state->deltaxready&&state->deltafready, "MinLMIteration: uninitialized DeltaX/DeltaF
", _state); 22249 t = ae_v_dotproduct(&state->deltax.ptr.p_double[0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22250 ae_assert(ae_fp_neq(t,0), "MinLM:
internal error (T=0)
", _state); 22251 for(i=0; i<=m-1; i++) 22253 v = ae_v_dotproduct(&state->j.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22254 v = (state->deltaf.ptr.p_double[i]-v)/t; 22255 ae_v_addd(&state->j.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1), v); 22257 ae_v_move(&state->fi.ptr.p_double[0], 1, &state->fibase.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22258 ae_v_add(&state->fi.ptr.p_double[0], 1, &state->deltaf.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22261 * Increase model age 22263 state->modelage = state->modelage+1; 22267 * Generate quadratic model: 22269 * = (f0 + J*dx)'(f0 + J*dx) 22270 * = f0^2 + dx'J'f0 + f0*J*dx + dx'J'J*dx 22271 * = f0^2 + 2*f0*J*dx + dx'J'J*dx 22273 * Note that we calculate 2*(J'J) instead of J'J because 22274 * our quadratic model is based on Tailor decomposition, 22275 * i.e. it has 0.5 before quadratic term. 22277 rmatrixgemm(n, n, m, 2.0, &state->j, 0, 0, 1, &state->j, 0, 0, 0, 0.0, &state->quadraticmodel, 0, 0, _state); 22278 rmatrixmv(n, m, &state->j, 0, 0, 1, &state->fi, 0, &state->gbase, 0, _state); 22279 ae_v_muld(&state->gbase.ptr.p_double[0], 1, ae_v_len(0,n-1), 2); 22280 v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22282 ae_v_move(&state->fibase.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22285 * set control variables 22289 if( state->algomode!=2 ) 22293 ae_assert(!state->hasfi, "MinLMIteration:
internal error (HasFI is True
in Hessian-based
mode)
", _state); 22298 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22299 minlm_clearrequestfields(state, _state); 22300 state->needfgh = ae_true; 22301 state->rstate.stage = 10; 22304 state->needfgh = ae_false; 22305 state->repnfunc = state->repnfunc+1; 22306 state->repngrad = state->repngrad+1; 22307 state->repnhess = state->repnhess+1; 22308 rmatrixcopy(n, n, &state->h, 0, 0, &state->quadraticmodel, 0, 0, _state); 22309 ae_v_move(&state->gbase.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22310 state->fbase = state->f; 22313 * set control variables 22316 state->modelage = 0; 22318 ae_assert(bflag, "MinLM:
internal integrity
check failed!
", _state); 22319 state->deltaxready = ae_false; 22320 state->deltafready = ae_false; 22323 * If Lambda is not initialized, initialize it using quadratic model 22325 if( ae_fp_less(state->lambdav,0) ) 22327 state->lambdav = 0; 22328 for(i=0; i<=n-1; i++) 22330 state->lambdav = ae_maxreal(state->lambdav, ae_fabs(state->quadraticmodel.ptr.pp_double[i][i], _state)*ae_sqr(state->s.ptr.p_double[i], _state), _state); 22332 state->lambdav = 0.001*state->lambdav; 22333 if( ae_fp_eq(state->lambdav,0) ) 22335 state->lambdav = 1; 22340 * Test stopping conditions for function gradient 22342 if( ae_fp_greater(minlm_boundedscaledantigradnorm(state, &state->xbase, &state->gbase, _state),state->epsg) ) 22346 if( state->modelage!=0 ) 22352 * Model is fresh, we can rely on it and terminate algorithm 22354 state->repterminationtype = 4; 22359 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22360 state->f = state->fbase; 22361 minlm_clearrequestfields(state, _state); 22362 state->xupdated = ae_true; 22363 state->rstate.stage = 11; 22366 state->xupdated = ae_false; 22374 * Model is not fresh, we should refresh it and test 22375 * conditions once more 22377 state->modelage = state->maxmodelage+1; 22383 * Find value of Levenberg-Marquardt damping parameter which: 22384 * * leads to positive definite damped model 22385 * * within bounds specified by StpMax 22386 * * generates step which decreases function value 22388 * After this block IFlag is set to: 22389 * * -3, if constraints are infeasible 22390 * * -2, if model update is needed (either Lambda growth is too large 22391 * or step is too short, but we can't rely on model and stop iterations) 22392 * * -1, if model is fresh, Lambda have grown too large, termination is needed 22393 * * 0, if everything is OK, continue iterations 22395 * State.Nu can have any value on enter, but after exit it is set to 1.0 22405 * Do we need model update? 
22407 if( state->modelage>0&&ae_fp_greater_eq(state->nu,minlm_suspiciousnu) ) 22414 * Setup quadratic solver and solve quadratic programming problem. 22415 * After problem is solved we'll try to bound step by StpMax 22416 * (Lambda will be increased if step size is too large). 22418 * We use BFlag variable to indicate that we have to increase Lambda. 22419 * If it is False, we will try to increase Lambda and move to new iteration. 22422 minqpsetstartingpointfast(&state->qpstate, &state->xbase, _state); 22423 minqpsetoriginfast(&state->qpstate, &state->xbase, _state); 22424 minqpsetlineartermfast(&state->qpstate, &state->gbase, _state); 22425 minqpsetquadratictermfast(&state->qpstate, &state->quadraticmodel, ae_true, 0.0, _state); 22426 for(i=0; i<=n-1; i++) 22428 state->tmp0.ptr.p_double[i] = state->quadraticmodel.ptr.pp_double[i][i]+state->lambdav/ae_sqr(state->s.ptr.p_double[i], _state); 22430 minqprewritediagonal(&state->qpstate, &state->tmp0, _state); 22431 minqpoptimize(&state->qpstate, _state); 22432 minqpresultsbuf(&state->qpstate, &state->xdir, &state->qprep, _state); 22433 if( state->qprep.terminationtype>0 ) 22437 * successful solution of QP problem 22439 ae_v_sub(&state->xdir.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22440 v = ae_v_dotproduct(&state->xdir.ptr.p_double[0], 1, &state->xdir.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22441 if( ae_isfinite(v, _state) ) 22443 v = ae_sqrt(v, _state); 22444 if( ae_fp_greater(state->stpmax,0)&&ae_fp_greater(v,state->stpmax) ) 22458 * Either problem is non-convex (increase LambdaV) or constraints are inconsistent 22460 ae_assert(state->qprep.terminationtype==-3||state->qprep.terminationtype==-5, "MinLM: unexpected completion code from QP solver
", _state); 22461 if( state->qprep.terminationtype==-3 ) 22473 * try to increase lambda to make matrix positive definite and continue. 22475 if( !minlm_increaselambda(&state->lambdav, &state->nu, _state) ) 22484 * Step in State.XDir and it is bounded by StpMax. 22486 * We should check stopping conditions on step size here. 22487 * DeltaX, which is used for secant updates, is initialized here. 22489 * This code is a bit tricky because sometimes XDir<>0, but 22490 * it is so small that XDir+XBase==XBase (in finite precision 22491 * arithmetics). So we set DeltaX to XBase, then 22492 * add XDir, and then subtract XBase to get exact value of 22495 * Step length is estimated using DeltaX. 22497 * NOTE: stopping conditions are tested 22498 * for fresh models only (ModelAge=0) 22500 ae_v_move(&state->deltax.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22501 ae_v_add(&state->deltax.ptr.p_double[0], 1, &state->xdir.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22502 ae_v_sub(&state->deltax.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22503 state->deltaxready = ae_true; 22505 for(i=0; i<=n-1; i++) 22507 v = v+ae_sqr(state->deltax.ptr.p_double[i]/state->s.ptr.p_double[i], _state); 22509 v = ae_sqrt(v, _state); 22510 if( ae_fp_greater(v,state->epsx) ) 22514 if( state->modelage!=0 ) 22520 * Step is too short, model is fresh and we can rely on it. 22523 state->repterminationtype = 2; 22528 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22529 state->f = state->fbase; 22530 minlm_clearrequestfields(state, _state); 22531 state->xupdated = ae_true; 22532 state->rstate.stage = 12; 22535 state->xupdated = ae_false; 22543 * Step is suspiciously short, but model is not fresh 22544 * and we can't rely on it. 
22552 * Let's evaluate new step: 22553 * a) if we have Fi vector, we evaluate it using rcomm, and 22554 * then we manually calculate State.F as sum of squares of Fi[] 22555 * b) if we have F value, we just evaluate it through rcomm interface 22557 * We prefer (a) because we may need Fi vector for additional 22560 ae_assert(state->hasfi||state->hasf, "MinLM:
internal error 2!
", _state); 22561 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22562 ae_v_add(&state->x.ptr.p_double[0], 1, &state->xdir.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22563 minlm_clearrequestfields(state, _state); 22564 if( !state->hasfi ) 22568 state->needfi = ae_true; 22569 state->rstate.stage = 13; 22572 state->needfi = ae_false; 22573 v = ae_v_dotproduct(&state->fi.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22575 ae_v_move(&state->deltaf.ptr.p_double[0], 1, &state->fi.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22576 ae_v_sub(&state->deltaf.ptr.p_double[0], 1, &state->fibase.ptr.p_double[0], 1, ae_v_len(0,m-1)); 22577 state->deltafready = ae_true; 22580 state->needf = ae_true; 22581 state->rstate.stage = 14; 22584 state->needf = ae_false; 22586 state->repnfunc = state->repnfunc+1; 22587 if( ae_fp_greater_eq(state->f,state->fbase) ) 22591 * Increase lambda and continue 22593 if( !minlm_increaselambda(&state->lambdav, &state->nu, _state) ) 22602 * We've found our step! 22609 ae_assert(iflag>=-3&&iflag<=0, "MinLM:
internal integrity
check failed!
", _state); 22612 state->repterminationtype = -3; 22618 state->modelage = state->maxmodelage+1; 22627 * Levenberg-Marquardt step is ready. 22628 * Compare predicted vs. actual decrease and decide what to do with lambda. 22630 * NOTE: we expect that State.DeltaX contains direction of step, 22631 * State.F contains function value at new point. 22633 ae_assert(state->deltaxready, "MinLM: deltaX is not ready
", _state); 22635 for(i=0; i<=n-1; i++) 22637 v = ae_v_dotproduct(&state->quadraticmodel.ptr.pp_double[i][0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22638 t = t+state->deltax.ptr.p_double[i]*state->gbase.ptr.p_double[i]+0.5*state->deltax.ptr.p_double[i]*v; 22640 state->predicteddecrease = -t; 22641 state->actualdecrease = -(state->f-state->fbase); 22642 if( ae_fp_less_eq(state->predicteddecrease,0) ) 22646 v = state->actualdecrease/state->predicteddecrease; 22647 if( ae_fp_greater_eq(v,0.1) ) 22651 if( minlm_increaselambda(&state->lambdav, &state->nu, _state) ) 22657 * Lambda is too large, we have to break iterations. 22659 state->repterminationtype = 7; 22664 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22665 state->f = state->fbase; 22666 minlm_clearrequestfields(state, _state); 22667 state->xupdated = ae_true; 22668 state->rstate.stage = 15; 22671 state->xupdated = ae_false; 22677 if( ae_fp_greater(v,0.5) ) 22679 minlm_decreaselambda(&state->lambdav, &state->nu, _state); 22683 * Accept step, report it and 22684 * test stopping conditions on iterations count and function decrease. 22686 * NOTE: we expect that State.DeltaX contains direction of step, 22687 * State.F contains function value at new point. 22689 * NOTE2: we should update XBase ONLY. In the beginning of the next 22690 * iteration we expect that State.FIBase is NOT updated and 22691 * contains old value of a function vector. 
22693 ae_v_add(&state->xbase.ptr.p_double[0], 1, &state->deltax.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22698 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22699 minlm_clearrequestfields(state, _state); 22700 state->xupdated = ae_true; 22701 state->rstate.stage = 16; 22704 state->xupdated = ae_false; 22706 state->repiterationscount = state->repiterationscount+1; 22707 if( state->repiterationscount>=state->maxits&&state->maxits>0 ) 22709 state->repterminationtype = 5; 22711 if( state->modelage==0 ) 22713 if( ae_fp_less_eq(ae_fabs(state->f-state->fbase, _state),state->epsf*ae_maxreal(1, ae_maxreal(ae_fabs(state->f, _state), ae_fabs(state->fbase, _state), _state), _state)) ) 22715 state->repterminationtype = 1; 22718 if( state->repterminationtype<=0 ) 22728 * Report: XBase contains new point, F contains function value at new point 22730 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22731 minlm_clearrequestfields(state, _state); 22732 state->xupdated = ae_true; 22733 state->rstate.stage = 17; 22736 state->xupdated = ae_false; 22741 state->modelage = state->modelage+1; 22746 * Lambda is too large, we have to break iterations. 
22748 state->repterminationtype = 7; 22753 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xbase.ptr.p_double[0], 1, ae_v_len(0,n-1)); 22754 state->f = state->fbase; 22755 minlm_clearrequestfields(state, _state); 22756 state->xupdated = ae_true; 22757 state->rstate.stage = 18; 22760 state->xupdated = ae_false; 22770 state->rstate.ia.ptr.p_int[0] = n; 22771 state->rstate.ia.ptr.p_int[1] = m; 22772 state->rstate.ia.ptr.p_int[2] = iflag; 22773 state->rstate.ia.ptr.p_int[3] = i; 22774 state->rstate.ia.ptr.p_int[4] = k; 22775 state->rstate.ba.ptr.p_bool[0] = bflag; 22776 state->rstate.ra.ptr.p_double[0] = v; 22777 state->rstate.ra.ptr.p_double[1] = s; 22778 state->rstate.ra.ptr.p_double[2] = t; 22783 /************************************************************************* 22784 Levenberg-Marquardt algorithm results 22787 State - algorithm state 22790 X - array[0..N-1], solution 22791 Rep - optimization report; 22792 see comments for this structure for more info. 22795 Copyright 10.03.2009 by Bochkanov Sergey 22796 *************************************************************************/ 22797 void minlmresults(minlmstate* state, 22798 /* Real */ ae_vector* x, 22803 ae_vector_clear(x); 22804 _minlmreport_clear(rep); 22806 minlmresultsbuf(state, x, rep, _state); 22810 /************************************************************************* 22811 Levenberg-Marquardt algorithm results 22813 Buffered implementation of MinLMResults(), which uses pre-allocated buffer 22814 to store X[]. If buffer size is too small, it resizes buffer. It is 22815 intended to be used in the inner cycles of performance critical algorithms 22816 where array reallocation penalty is too large to be ignored. 
22819 Copyright 10.03.2009 by Bochkanov Sergey 22820 *************************************************************************/ 22821 void minlmresultsbuf(minlmstate* state, 22822 /* Real */ ae_vector* x, 22828 if( x->cnt<state->n ) 22830 ae_vector_set_length(x, state->n, _state); 22832 ae_v_move(&x->ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 22833 rep->iterationscount = state->repiterationscount; 22834 rep->terminationtype = state->repterminationtype; 22835 rep->funcidx = state->repfuncidx; 22836 rep->varidx = state->repvaridx; 22837 rep->nfunc = state->repnfunc; 22838 rep->njac = state->repnjac; 22839 rep->ngrad = state->repngrad; 22840 rep->nhess = state->repnhess; 22841 rep->ncholesky = state->repncholesky; 22845 /************************************************************************* 22846 This subroutine restarts LM algorithm from new point. All optimization 22847 parameters are left unchanged. 22849 This function allows to solve multiple optimization problems (which 22850 must have same number of dimensions) without object reallocation penalty. 22853 State - structure used for reverse communication previously 22854 allocated with MinLMCreateXXX call. 22855 X - new starting point. 22858 Copyright 30.07.2010 by Bochkanov Sergey 22859 *************************************************************************/ 22860 void minlmrestartfrom(minlmstate* state, 22861 /* Real */ ae_vector* x, 22866 ae_assert(x->cnt>=state->n, "MinLMRestartFrom: Length(X)<N!
", _state); 22867 ae_assert(isfinitevector(x, state->n, _state), "MinLMRestartFrom: X contains infinite
or NaN values!
", _state); 22868 ae_v_move(&state->xbase.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 22869 ae_vector_set_length(&state->rstate.ia, 4+1, _state); 22870 ae_vector_set_length(&state->rstate.ba, 0+1, _state); 22871 ae_vector_set_length(&state->rstate.ra, 2+1, _state); 22872 state->rstate.stage = -1; 22873 minlm_clearrequestfields(state, _state); 22877 /************************************************************************* 22878 This is obsolete function. 22880 Since ALGLIB 3.3 it is equivalent to MinLMCreateVJ(). 22883 Copyright 30.03.2009 by Bochkanov Sergey 22884 *************************************************************************/ 22885 void minlmcreatevgj(ae_int_t n, 22887 /* Real */ ae_vector* x, 22892 _minlmstate_clear(state); 22894 minlmcreatevj(n, m, x, state, _state); 22898 /************************************************************************* 22899 This is obsolete function. 22901 Since ALGLIB 3.3 it is equivalent to MinLMCreateFJ(). 22904 Copyright 30.03.2009 by Bochkanov Sergey 22905 *************************************************************************/ 22906 void minlmcreatefgj(ae_int_t n, 22908 /* Real */ ae_vector* x, 22913 _minlmstate_clear(state); 22915 minlmcreatefj(n, m, x, state, _state); 22919 /************************************************************************* 22920 This function is considered obsolete since ALGLIB 3.1.0 and is present for 22921 backward compatibility only. We recommend to use MinLMCreateVJ, which 22922 provides similar, but more consistent and feature-rich interface. 22925 Copyright 30.03.2009 by Bochkanov Sergey 22926 *************************************************************************/ 22927 void minlmcreatefj(ae_int_t n, 22929 /* Real */ ae_vector* x, 22934 _minlmstate_clear(state); 22936 ae_assert(n>=1, "MinLMCreateFJ: N<1!
", _state); 22937 ae_assert(m>=1, "MinLMCreateFJ: M<1!
", _state); 22938 ae_assert(x->cnt>=n, "MinLMCreateFJ: Length(X)<N!
", _state); 22939 ae_assert(isfinitevector(x, n, _state), "MinLMCreateFJ: X contains infinite
or NaN values!
", _state); 22944 state->teststep = 0; 22947 state->algomode = 1; 22948 state->hasf = ae_true; 22949 state->hasfi = ae_false; 22950 state->hasg = ae_false; 22955 minlm_lmprepare(n, m, ae_true, state, _state); 22956 minlmsetacctype(state, 0, _state); 22957 minlmsetcond(state, 0, 0, 0, 0, _state); 22958 minlmsetxrep(state, ae_false, _state); 22959 minlmsetstpmax(state, 0, _state); 22960 minlmrestartfrom(state, x, _state); 22964 /************************************************************************* 22965 This subroutine turns on verification of the user-supplied analytic 22967 * user calls this subroutine before optimization begins 22968 * MinLMOptimize() is called 22969 * prior to actual optimization, for each function Fi and each component 22970 of parameters being optimized X[j] algorithm performs following steps: 22971 * two trial steps are made to X[j]-TestStep*S[j] and X[j]+TestStep*S[j], 22972 where X[j] is j-th parameter and S[j] is a scale of j-th parameter 22973 * if needed, steps are bounded with respect to constraints on X[] 22974 * Fi(X) is evaluated at these trial points 22975 * we perform one more evaluation in the middle point of the interval 22976 * we build cubic model using function values and derivatives at trial 22977 points and we compare its prediction with actual value in the middle 22979 * in case difference between prediction and actual value is higher than 22980 some predetermined threshold, algorithm stops with completion code -7; 22981 Rep.VarIdx is set to index of the parameter with incorrect derivative, 22982 Rep.FuncIdx is set to index of the function. 22983 * after verification is over, algorithm proceeds to the actual optimization. 22985 NOTE 1: verification needs N (parameters count) Jacobian evaluations. It 22986 is very costly and you should use it only for low dimensional 22987 problems, when you want to be sure that you've correctly 22988 calculated analytic derivatives. 
You should not use it in the 22989 production code (unless you want to check derivatives provided 22990 by some third party). 22992 NOTE 2: you should carefully choose TestStep. Value which is too large 22993 (so large that function behaviour is significantly non-cubic) will 22994 lead to false alarms. You may use different step for different 22995 parameters by means of setting scale with MinLMSetScale(). 22997 NOTE 3: this function may lead to false positives. In case it reports that 22998 I-th derivative was calculated incorrectly, you may decrease test 22999 step and try one more time - maybe your function changes too 23000 sharply and your step is too large for such rapidly chanding 23004 State - structure used to store algorithm state 23005 TestStep - verification step: 23006 * TestStep=0 turns verification off 23007 * TestStep>0 activates verification 23010 Copyright 15.06.2012 by Bochkanov Sergey 23011 *************************************************************************/ 23012 void minlmsetgradientcheck(minlmstate* state, 23018 ae_assert(ae_isfinite(teststep, _state), "MinLMSetGradientCheck: TestStep contains NaN
or Infinite
", _state); 23019 ae_assert(ae_fp_greater_eq(teststep,0), "MinLMSetGradientCheck: invalid argument TestStep(TestStep<0)
", _state); 23020 state->teststep = teststep; 23024 /************************************************************************* 23025 Prepare internal structures (except for RComm). 23027 Note: M must be zero for FGH mode, non-zero for V/VJ/FJ/FGJ mode. 23028 *************************************************************************/ 23029 static void minlm_lmprepare(ae_int_t n, 23044 ae_vector_set_length(&state->g, n, _state); 23048 ae_matrix_set_length(&state->j, m, n, _state); 23049 ae_vector_set_length(&state->fi, m, _state); 23050 ae_vector_set_length(&state->fibase, m, _state); 23051 ae_vector_set_length(&state->deltaf, m, _state); 23052 ae_vector_set_length(&state->fm1, m, _state); 23053 ae_vector_set_length(&state->fp1, m, _state); 23054 ae_vector_set_length(&state->fc1, m, _state); 23055 ae_vector_set_length(&state->gm1, m, _state); 23056 ae_vector_set_length(&state->gp1, m, _state); 23057 ae_vector_set_length(&state->gc1, m, _state); 23061 ae_matrix_set_length(&state->h, n, n, _state); 23063 ae_vector_set_length(&state->x, n, _state); 23064 ae_vector_set_length(&state->deltax, n, _state); 23065 ae_matrix_set_length(&state->quadraticmodel, n, n, _state); 23066 ae_vector_set_length(&state->xbase, n, _state); 23067 ae_vector_set_length(&state->gbase, n, _state); 23068 ae_vector_set_length(&state->xdir, n, _state); 23069 ae_vector_set_length(&state->tmp0, n, _state); 23072 * prepare internal L-BFGS 23074 for(i=0; i<=n-1; i++) 23076 state->x.ptr.p_double[i] = 0; 23078 minlbfgscreate(n, ae_minint(minlm_additers, n, _state), &state->x, &state->internalstate, _state); 23079 minlbfgssetcond(&state->internalstate, 0.0, 0.0, 0.0, ae_minint(minlm_additers, n, _state), _state); 23082 * Prepare internal QP solver 23084 minqpcreate(n, &state->qpstate, _state); 23085 minqpsetalgocholesky(&state->qpstate, _state); 23088 * Prepare boundary constraints 23090 ae_vector_set_length(&state->bndl, n, _state); 23091 ae_vector_set_length(&state->bndu, n, _state); 23092 
ae_vector_set_length(&state->havebndl, n, _state); 23093 ae_vector_set_length(&state->havebndu, n, _state); 23094 for(i=0; i<=n-1; i++) 23096 state->bndl.ptr.p_double[i] = _state->v_neginf; 23097 state->havebndl.ptr.p_bool[i] = ae_false; 23098 state->bndu.ptr.p_double[i] = _state->v_posinf; 23099 state->havebndu.ptr.p_bool[i] = ae_false; 23103 * Prepare scaling matrix 23105 ae_vector_set_length(&state->s, n, _state); 23106 for(i=0; i<=n-1; i++) 23108 state->s.ptr.p_double[i] = 1.0; 23113 /************************************************************************* 23114 Clears request fileds (to be sure that we don't forgot to clear something) 23115 *************************************************************************/ 23116 static void minlm_clearrequestfields(minlmstate* state, ae_state *_state) 23120 state->needf = ae_false; 23121 state->needfg = ae_false; 23122 state->needfgh = ae_false; 23123 state->needfij = ae_false; 23124 state->needfi = ae_false; 23125 state->xupdated = ae_false; 23129 /************************************************************************* 23130 Increases lambda, returns False when there is a danger of overflow 23131 *************************************************************************/ 23132 static ae_bool minlm_increaselambda(double* lambdav, 23144 lnlambda = ae_log(*lambdav, _state); 23145 lnlambdaup = ae_log(minlm_lambdaup, _state); 23146 lnnu = ae_log(*nu, _state); 23147 lnmax = ae_log(ae_maxrealnumber, _state); 23148 if( ae_fp_greater(lnlambda+lnlambdaup+lnnu,0.25*lnmax) ) 23152 if( ae_fp_greater(lnnu+ae_log(2, _state),lnmax) ) 23156 *lambdav = *lambdav*minlm_lambdaup*(*nu); 23163 /************************************************************************* 23164 Decreases lambda, but leaves it unchanged when there is danger of underflow. 
23165 *************************************************************************/ 23166 static void minlm_decreaselambda(double* lambdav, 23173 if( ae_fp_less(ae_log(*lambdav, _state)+ae_log(minlm_lambdadown, _state),ae_log(ae_minrealnumber, _state)) ) 23175 *lambdav = ae_minrealnumber; 23179 *lambdav = *lambdav*minlm_lambdadown; 23184 /************************************************************************* 23185 Returns norm of bounded scaled anti-gradient. 23187 Bounded antigradient is a vector obtained from anti-gradient by zeroing 23188 components which point outwards: 23190 v[i]=0 if ((-g[i]<0)and(x[i]=bndl[i])) or 23191 ((-g[i]>0)and(x[i]=bndu[i])) 23192 v[i]=-g[i]*s[i] otherwise, where s[i] is a scale for I-th variable 23194 This function may be used to check a stopping criterion. 23197 Copyright 14.01.2011 by Bochkanov Sergey 23198 *************************************************************************/ 23199 static double minlm_boundedscaledantigradnorm(minlmstate* state, 23200 /* Real */ ae_vector* x, 23201 /* Real */ ae_vector* g, 23212 for(i=0; i<=n-1; i++) 23214 v = -g->ptr.p_double[i]*state->s.ptr.p_double[i]; 23215 if( state->havebndl.ptr.p_bool[i] ) 23217 if( ae_fp_less_eq(x->ptr.p_double[i],state->bndl.ptr.p_double[i])&&ae_fp_less(-g->ptr.p_double[i],0) ) 23222 if( state->havebndu.ptr.p_bool[i] ) 23224 if( ae_fp_greater_eq(x->ptr.p_double[i],state->bndu.ptr.p_double[i])&&ae_fp_greater(-g->ptr.p_double[i],0) ) 23229 result = result+ae_sqr(v, _state); 23231 result = ae_sqrt(result, _state); 23236 ae_bool _minlmstate_init(void* _p, ae_state *_state, ae_bool make_automatic) 23238 minlmstate *p = (minlmstate*)_p; 23239 ae_touch_ptr((void*)p); 23240 if( !ae_vector_init(&p->x, 0, DT_REAL, _state, make_automatic) ) 23242 if( !ae_vector_init(&p->fi, 0, DT_REAL, _state, make_automatic) ) 23244 if( !ae_matrix_init(&p->j, 0, 0, DT_REAL, _state, make_automatic) ) 23246 if( !ae_matrix_init(&p->h, 0, 0, DT_REAL, _state, make_automatic) ) 23248 if( 
!ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic) ) 23250 if( !ae_vector_init(&p->xbase, 0, DT_REAL, _state, make_automatic) ) 23252 if( !ae_vector_init(&p->fibase, 0, DT_REAL, _state, make_automatic) ) 23254 if( !ae_vector_init(&p->gbase, 0, DT_REAL, _state, make_automatic) ) 23256 if( !ae_matrix_init(&p->quadraticmodel, 0, 0, DT_REAL, _state, make_automatic) ) 23258 if( !ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic) ) 23260 if( !ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic) ) 23262 if( !ae_vector_init(&p->havebndl, 0, DT_BOOL, _state, make_automatic) ) 23264 if( !ae_vector_init(&p->havebndu, 0, DT_BOOL, _state, make_automatic) ) 23266 if( !ae_vector_init(&p->s, 0, DT_REAL, _state, make_automatic) ) 23268 if( !ae_vector_init(&p->xdir, 0, DT_REAL, _state, make_automatic) ) 23270 if( !ae_vector_init(&p->deltax, 0, DT_REAL, _state, make_automatic) ) 23272 if( !ae_vector_init(&p->deltaf, 0, DT_REAL, _state, make_automatic) ) 23274 if( !_rcommstate_init(&p->rstate, _state, make_automatic) ) 23276 if( !ae_vector_init(&p->choleskybuf, 0, DT_REAL, _state, make_automatic) ) 23278 if( !ae_vector_init(&p->tmp0, 0, DT_REAL, _state, make_automatic) ) 23280 if( !ae_vector_init(&p->fm1, 0, DT_REAL, _state, make_automatic) ) 23282 if( !ae_vector_init(&p->fp1, 0, DT_REAL, _state, make_automatic) ) 23284 if( !ae_vector_init(&p->fc1, 0, DT_REAL, _state, make_automatic) ) 23286 if( !ae_vector_init(&p->gm1, 0, DT_REAL, _state, make_automatic) ) 23288 if( !ae_vector_init(&p->gp1, 0, DT_REAL, _state, make_automatic) ) 23290 if( !ae_vector_init(&p->gc1, 0, DT_REAL, _state, make_automatic) ) 23292 if( !_minlbfgsstate_init(&p->internalstate, _state, make_automatic) ) 23294 if( !_minlbfgsreport_init(&p->internalrep, _state, make_automatic) ) 23296 if( !_minqpstate_init(&p->qpstate, _state, make_automatic) ) 23298 if( !_minqpreport_init(&p->qprep, _state, make_automatic) ) 23304 ae_bool _minlmstate_init_copy(void* _dst, void* _src, ae_state *_state, 
ae_bool make_automatic) 23306 minlmstate *dst = (minlmstate*)_dst; 23307 minlmstate *src = (minlmstate*)_src; 23310 dst->diffstep = src->diffstep; 23311 dst->epsg = src->epsg; 23312 dst->epsf = src->epsf; 23313 dst->epsx = src->epsx; 23314 dst->maxits = src->maxits; 23315 dst->xrep = src->xrep; 23316 dst->stpmax = src->stpmax; 23317 dst->maxmodelage = src->maxmodelage; 23318 dst->makeadditers = src->makeadditers; 23319 if( !ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic) ) 23322 if( !ae_vector_init_copy(&dst->fi, &src->fi, _state, make_automatic) ) 23324 if( !ae_matrix_init_copy(&dst->j, &src->j, _state, make_automatic) ) 23326 if( !ae_matrix_init_copy(&dst->h, &src->h, _state, make_automatic) ) 23328 if( !ae_vector_init_copy(&dst->g, &src->g, _state, make_automatic) ) 23330 dst->needf = src->needf; 23331 dst->needfg = src->needfg; 23332 dst->needfgh = src->needfgh; 23333 dst->needfij = src->needfij; 23334 dst->needfi = src->needfi; 23335 dst->xupdated = src->xupdated; 23336 dst->algomode = src->algomode; 23337 dst->hasf = src->hasf; 23338 dst->hasfi = src->hasfi; 23339 dst->hasg = src->hasg; 23340 if( !ae_vector_init_copy(&dst->xbase, &src->xbase, _state, make_automatic) ) 23342 dst->fbase = src->fbase; 23343 if( !ae_vector_init_copy(&dst->fibase, &src->fibase, _state, make_automatic) ) 23345 if( !ae_vector_init_copy(&dst->gbase, &src->gbase, _state, make_automatic) ) 23347 if( !ae_matrix_init_copy(&dst->quadraticmodel, &src->quadraticmodel, _state, make_automatic) ) 23349 if( !ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic) ) 23351 if( !ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic) ) 23353 if( !ae_vector_init_copy(&dst->havebndl, &src->havebndl, _state, make_automatic) ) 23355 if( !ae_vector_init_copy(&dst->havebndu, &src->havebndu, _state, make_automatic) ) 23357 if( !ae_vector_init_copy(&dst->s, &src->s, _state, make_automatic) ) 23359 dst->lambdav = src->lambdav; 23361 dst->modelage = src->modelage; 23362 
if( !ae_vector_init_copy(&dst->xdir, &src->xdir, _state, make_automatic) ) 23364 if( !ae_vector_init_copy(&dst->deltax, &src->deltax, _state, make_automatic) ) 23366 if( !ae_vector_init_copy(&dst->deltaf, &src->deltaf, _state, make_automatic) ) 23368 dst->deltaxready = src->deltaxready; 23369 dst->deltafready = src->deltafready; 23370 dst->teststep = src->teststep; 23371 dst->repiterationscount = src->repiterationscount; 23372 dst->repterminationtype = src->repterminationtype; 23373 dst->repfuncidx = src->repfuncidx; 23374 dst->repvaridx = src->repvaridx; 23375 dst->repnfunc = src->repnfunc; 23376 dst->repnjac = src->repnjac; 23377 dst->repngrad = src->repngrad; 23378 dst->repnhess = src->repnhess; 23379 dst->repncholesky = src->repncholesky; 23380 if( !_rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic) ) 23382 if( !ae_vector_init_copy(&dst->choleskybuf, &src->choleskybuf, _state, make_automatic) ) 23384 if( !ae_vector_init_copy(&dst->tmp0, &src->tmp0, _state, make_automatic) ) 23386 dst->actualdecrease = src->actualdecrease; 23387 dst->predicteddecrease = src->predicteddecrease; 23388 dst->xm1 = src->xm1; 23389 dst->xp1 = src->xp1; 23390 if( !ae_vector_init_copy(&dst->fm1, &src->fm1, _state, make_automatic) ) 23392 if( !ae_vector_init_copy(&dst->fp1, &src->fp1, _state, make_automatic) ) 23394 if( !ae_vector_init_copy(&dst->fc1, &src->fc1, _state, make_automatic) ) 23396 if( !ae_vector_init_copy(&dst->gm1, &src->gm1, _state, make_automatic) ) 23398 if( !ae_vector_init_copy(&dst->gp1, &src->gp1, _state, make_automatic) ) 23400 if( !ae_vector_init_copy(&dst->gc1, &src->gc1, _state, make_automatic) ) 23402 if( !_minlbfgsstate_init_copy(&dst->internalstate, &src->internalstate, _state, make_automatic) ) 23404 if( !_minlbfgsreport_init_copy(&dst->internalrep, &src->internalrep, _state, make_automatic) ) 23406 if( !_minqpstate_init_copy(&dst->qpstate, &src->qpstate, _state, make_automatic) ) 23408 if( !_minqpreport_init_copy(&dst->qprep, 
&src->qprep, _state, make_automatic) ) 23414 void _minlmstate_clear(void* _p) 23416 minlmstate *p = (minlmstate*)_p; 23417 ae_touch_ptr((void*)p); 23418 ae_vector_clear(&p->x); 23419 ae_vector_clear(&p->fi); 23420 ae_matrix_clear(&p->j); 23421 ae_matrix_clear(&p->h); 23422 ae_vector_clear(&p->g); 23423 ae_vector_clear(&p->xbase); 23424 ae_vector_clear(&p->fibase); 23425 ae_vector_clear(&p->gbase); 23426 ae_matrix_clear(&p->quadraticmodel); 23427 ae_vector_clear(&p->bndl); 23428 ae_vector_clear(&p->bndu); 23429 ae_vector_clear(&p->havebndl); 23430 ae_vector_clear(&p->havebndu); 23431 ae_vector_clear(&p->s); 23432 ae_vector_clear(&p->xdir); 23433 ae_vector_clear(&p->deltax); 23434 ae_vector_clear(&p->deltaf); 23435 _rcommstate_clear(&p->rstate); 23436 ae_vector_clear(&p->choleskybuf); 23437 ae_vector_clear(&p->tmp0); 23438 ae_vector_clear(&p->fm1); 23439 ae_vector_clear(&p->fp1); 23440 ae_vector_clear(&p->fc1); 23441 ae_vector_clear(&p->gm1); 23442 ae_vector_clear(&p->gp1); 23443 ae_vector_clear(&p->gc1); 23444 _minlbfgsstate_clear(&p->internalstate); 23445 _minlbfgsreport_clear(&p->internalrep); 23446 _minqpstate_clear(&p->qpstate); 23447 _minqpreport_clear(&p->qprep); 23451 void _minlmstate_destroy(void* _p) 23453 minlmstate *p = (minlmstate*)_p; 23454 ae_touch_ptr((void*)p); 23455 ae_vector_destroy(&p->x); 23456 ae_vector_destroy(&p->fi); 23457 ae_matrix_destroy(&p->j); 23458 ae_matrix_destroy(&p->h); 23459 ae_vector_destroy(&p->g); 23460 ae_vector_destroy(&p->xbase); 23461 ae_vector_destroy(&p->fibase); 23462 ae_vector_destroy(&p->gbase); 23463 ae_matrix_destroy(&p->quadraticmodel); 23464 ae_vector_destroy(&p->bndl); 23465 ae_vector_destroy(&p->bndu); 23466 ae_vector_destroy(&p->havebndl); 23467 ae_vector_destroy(&p->havebndu); 23468 ae_vector_destroy(&p->s); 23469 ae_vector_destroy(&p->xdir); 23470 ae_vector_destroy(&p->deltax); 23471 ae_vector_destroy(&p->deltaf); 23472 _rcommstate_destroy(&p->rstate); 23473 ae_vector_destroy(&p->choleskybuf); 23474 
ae_vector_destroy(&p->tmp0); 23475 ae_vector_destroy(&p->fm1); 23476 ae_vector_destroy(&p->fp1); 23477 ae_vector_destroy(&p->fc1); 23478 ae_vector_destroy(&p->gm1); 23479 ae_vector_destroy(&p->gp1); 23480 ae_vector_destroy(&p->gc1); 23481 _minlbfgsstate_destroy(&p->internalstate); 23482 _minlbfgsreport_destroy(&p->internalrep); 23483 _minqpstate_destroy(&p->qpstate); 23484 _minqpreport_destroy(&p->qprep); 23488 ae_bool _minlmreport_init(void* _p, ae_state *_state, ae_bool make_automatic) 23490 minlmreport *p = (minlmreport*)_p; 23491 ae_touch_ptr((void*)p); 23496 ae_bool _minlmreport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 23498 minlmreport *dst = (minlmreport*)_dst; 23499 minlmreport *src = (minlmreport*)_src; 23500 dst->iterationscount = src->iterationscount; 23501 dst->terminationtype = src->terminationtype; 23502 dst->funcidx = src->funcidx; 23503 dst->varidx = src->varidx; 23504 dst->nfunc = src->nfunc; 23505 dst->njac = src->njac; 23506 dst->ngrad = src->ngrad; 23507 dst->nhess = src->nhess; 23508 dst->ncholesky = src->ncholesky; 23513 void _minlmreport_clear(void* _p) 23515 minlmreport *p = (minlmreport*)_p; 23516 ae_touch_ptr((void*)p); 23520 void _minlmreport_destroy(void* _p) 23522 minlmreport *p = (minlmreport*)_p; 23523 ae_touch_ptr((void*)p); 23529 /************************************************************************* 23530 Obsolete function, use MinLBFGSSetPrecDefault() instead. 23533 Copyright 13.10.2010 by Bochkanov Sergey 23534 *************************************************************************/ 23535 void minlbfgssetdefaultpreconditioner(minlbfgsstate* state, 23540 minlbfgssetprecdefault(state, _state); 23544 /************************************************************************* 23545 Obsolete function, use MinLBFGSSetCholeskyPreconditioner() instead. 
23548 Copyright 13.10.2010 by Bochkanov Sergey 23549 *************************************************************************/ 23550 void minlbfgssetcholeskypreconditioner(minlbfgsstate* state, 23551 /* Real */ ae_matrix* p, 23557 minlbfgssetpreccholesky(state, p, isupper, _state); 23561 /************************************************************************* 23562 This is obsolete function which was used by previous version of the BLEIC 23563 optimizer. It does nothing in the current version of BLEIC. 23566 Copyright 28.11.2010 by Bochkanov Sergey 23567 *************************************************************************/ 23568 void minbleicsetbarrierwidth(minbleicstate* state, 23577 /************************************************************************* 23578 This is obsolete function which was used by previous version of the BLEIC 23579 optimizer. It does nothing in the current version of BLEIC. 23582 Copyright 28.11.2010 by Bochkanov Sergey 23583 *************************************************************************/ 23584 void minbleicsetbarrierdecay(minbleicstate* state, 23593 /************************************************************************* 23594 Obsolete optimization algorithm. 23595 Was replaced by MinBLEIC subpackage. 23598 Copyright 25.03.2010 by Bochkanov Sergey 23599 *************************************************************************/ 23600 void minasacreate(ae_int_t n, 23601 /* Real */ ae_vector* x, 23602 /* Real */ ae_vector* bndl, 23603 /* Real */ ae_vector* bndu, 23604 minasastate* state, 23609 _minasastate_clear(state); 23611 ae_assert(n>=1, "MinASA: N too small!
", _state); 23612 ae_assert(x->cnt>=n, "MinCGCreate: Length(X)<N!
", _state); 23613 ae_assert(isfinitevector(x, n, _state), "MinCGCreate: X contains infinite
or NaN values!
", _state); 23614 ae_assert(bndl->cnt>=n, "MinCGCreate: Length(BndL)<N!
", _state); 23615 ae_assert(isfinitevector(bndl, n, _state), "MinCGCreate: BndL contains infinite
or NaN values!
", _state); 23616 ae_assert(bndu->cnt>=n, "MinCGCreate: Length(BndU)<N!
", _state); 23617 ae_assert(isfinitevector(bndu, n, _state), "MinCGCreate: BndU contains infinite
or NaN values!
", _state); 23618 for(i=0; i<=n-1; i++) 23620 ae_assert(ae_fp_less_eq(bndl->ptr.p_double[i],bndu->ptr.p_double[i]), "MinASA: inconsistent bounds!
", _state); 23621 ae_assert(ae_fp_less_eq(bndl->ptr.p_double[i],x->ptr.p_double[i]), "MinASA: infeasible X!
", _state); 23622 ae_assert(ae_fp_less_eq(x->ptr.p_double[i],bndu->ptr.p_double[i]), "MinASA: infeasible X!
", _state); 23629 minasasetcond(state, 0, 0, 0, 0, _state); 23630 minasasetxrep(state, ae_false, _state); 23631 minasasetstpmax(state, 0, _state); 23632 minasasetalgorithm(state, -1, _state); 23633 ae_vector_set_length(&state->bndl, n, _state); 23634 ae_vector_set_length(&state->bndu, n, _state); 23635 ae_vector_set_length(&state->ak, n, _state); 23636 ae_vector_set_length(&state->xk, n, _state); 23637 ae_vector_set_length(&state->dk, n, _state); 23638 ae_vector_set_length(&state->an, n, _state); 23639 ae_vector_set_length(&state->xn, n, _state); 23640 ae_vector_set_length(&state->dn, n, _state); 23641 ae_vector_set_length(&state->x, n, _state); 23642 ae_vector_set_length(&state->d, n, _state); 23643 ae_vector_set_length(&state->g, n, _state); 23644 ae_vector_set_length(&state->gc, n, _state); 23645 ae_vector_set_length(&state->work, n, _state); 23646 ae_vector_set_length(&state->yk, n, _state); 23647 minasarestartfrom(state, x, bndl, bndu, _state); 23651 /************************************************************************* 23652 Obsolete optimization algorithm. 23653 Was replaced by MinBLEIC subpackage. 23656 Copyright 02.04.2010 by Bochkanov Sergey 23657 *************************************************************************/ 23658 void minasasetcond(minasastate* state, 23667 ae_assert(ae_isfinite(epsg, _state), "MinASASetCond: EpsG is not finite number!
", _state); 23668 ae_assert(ae_fp_greater_eq(epsg,0), "MinASASetCond: negative EpsG!
", _state); 23669 ae_assert(ae_isfinite(epsf, _state), "MinASASetCond: EpsF is not finite number!
", _state); 23670 ae_assert(ae_fp_greater_eq(epsf,0), "MinASASetCond: negative EpsF!
", _state); 23671 ae_assert(ae_isfinite(epsx, _state), "MinASASetCond: EpsX is not finite number!
", _state); 23672 ae_assert(ae_fp_greater_eq(epsx,0), "MinASASetCond: negative EpsX!
", _state); 23673 ae_assert(maxits>=0, "MinASASetCond: negative MaxIts!
", _state); 23674 if( ((ae_fp_eq(epsg,0)&&ae_fp_eq(epsf,0))&&ae_fp_eq(epsx,0))&&maxits==0 ) 23678 state->epsg = epsg; 23679 state->epsf = epsf; 23680 state->epsx = epsx; 23681 state->maxits = maxits; 23685 /************************************************************************* 23686 Obsolete optimization algorithm. 23687 Was replaced by MinBLEIC subpackage. 23690 Copyright 02.04.2010 by Bochkanov Sergey 23691 *************************************************************************/ 23692 void minasasetxrep(minasastate* state, ae_bool needxrep, ae_state *_state) 23696 state->xrep = needxrep; 23700 /************************************************************************* 23701 Obsolete optimization algorithm. 23702 Was replaced by MinBLEIC subpackage. 23705 Copyright 02.04.2010 by Bochkanov Sergey 23706 *************************************************************************/ 23707 void minasasetalgorithm(minasastate* state, 23713 ae_assert(algotype>=-1&&algotype<=1, "MinASASetAlgorithm: incorrect AlgoType!
", _state); 23718 state->cgtype = algotype; 23722 /************************************************************************* 23723 Obsolete optimization algorithm. 23724 Was replaced by MinBLEIC subpackage. 23727 Copyright 02.04.2010 by Bochkanov Sergey 23728 *************************************************************************/ 23729 void minasasetstpmax(minasastate* state, double stpmax, ae_state *_state) 23733 ae_assert(ae_isfinite(stpmax, _state), "MinASASetStpMax: StpMax is not finite!
", _state); 23734 ae_assert(ae_fp_greater_eq(stpmax,0), "MinASASetStpMax: StpMax<0!
", _state); 23735 state->stpmax = stpmax; 23739 /************************************************************************* 23742 Copyright 20.03.2009 by Bochkanov Sergey 23743 *************************************************************************/ 23744 ae_bool minasaiteration(minasastate* state, ae_state *_state) 23760 * Reverse communication preparations 23761 * I know it looks ugly, but it works the same way 23762 * anywhere from C++ to Python. 23764 * This code initializes locals by: 23765 * * random values determined during code 23766 * generation - on first subroutine call 23767 * * values from previous call - on subsequent calls 23769 if( state->rstate.stage>=0 ) 23771 n = state->rstate.ia.ptr.p_int[0]; 23772 i = state->rstate.ia.ptr.p_int[1]; 23773 mcinfo = state->rstate.ia.ptr.p_int[2]; 23774 diffcnt = state->rstate.ia.ptr.p_int[3]; 23775 b = state->rstate.ba.ptr.p_bool[0]; 23776 stepfound = state->rstate.ba.ptr.p_bool[1]; 23777 betak = state->rstate.ra.ptr.p_double[0]; 23778 v = state->rstate.ra.ptr.p_double[1]; 23779 vv = state->rstate.ra.ptr.p_double[2]; 23788 stepfound = ae_false; 23793 if( state->rstate.stage==0 ) 23797 if( state->rstate.stage==1 ) 23801 if( state->rstate.stage==2 ) 23805 if( state->rstate.stage==3 ) 23809 if( state->rstate.stage==4 ) 23813 if( state->rstate.stage==5 ) 23817 if( state->rstate.stage==6 ) 23821 if( state->rstate.stage==7 ) 23825 if( state->rstate.stage==8 ) 23829 if( state->rstate.stage==9 ) 23833 if( state->rstate.stage==10 ) 23837 if( state->rstate.stage==11 ) 23841 if( state->rstate.stage==12 ) 23845 if( state->rstate.stage==13 ) 23849 if( state->rstate.stage==14 ) 23862 state->repterminationtype = 0; 23863 state->repiterationscount = 0; 23864 state->repnfev = 0; 23865 state->debugrestartscount = 0; 23867 ae_v_move(&state->xk.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 23868 for(i=0; i<=n-1; i++) 23870 if( 
ae_fp_eq(state->xk.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->xk.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 23872 state->ak.ptr.p_double[i] = 0; 23876 state->ak.ptr.p_double[i] = 1; 23880 state->curalgo = 0; 23883 * Calculate F/G, initialize algorithm 23885 mincomp_clearrequestfields(state, _state); 23886 state->needfg = ae_true; 23887 state->rstate.stage = 0; 23890 state->needfg = ae_false; 23899 mincomp_clearrequestfields(state, _state); 23900 state->xupdated = ae_true; 23901 state->rstate.stage = 1; 23904 state->xupdated = ae_false; 23906 if( ae_fp_less_eq(mincomp_asaboundedantigradnorm(state, _state),state->epsg) ) 23908 state->repterminationtype = 4; 23912 state->repnfev = state->repnfev+1; 23917 * At the beginning of new iteration: 23918 * * CurAlgo stores current algorithm selector 23919 * * State.XK, State.F and State.G store current X/F/G 23920 * * State.AK stores current set of active constraints 23931 if( state->curalgo!=0 ) 23944 * Determine Dk = proj(xk - gk)-xk 23946 for(i=0; i<=n-1; i++) 23948 state->d.ptr.p_double[i] = boundval(state->xk.ptr.p_double[i]-state->g.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state)-state->xk.ptr.p_double[i]; 23952 * Armijo line search. 23953 * * exact search with alpha=1 is tried first, 23954 * 'exact' means that we evaluate f() EXACTLY at 23955 * bound(x-g,bndl,bndu), without intermediate floating 23956 * point operations. 23957 * * alpha<1 are tried if explicit search wasn't successful 23958 * Result is placed into XN. 23960 * Two types of search are needed because we can't 23961 * just use second type with alpha=1 because in finite 23962 * precision arithmetics (x1-x0)+x0 may differ from x1. 
23963 * So while x1 is correctly bounded (it lie EXACTLY on 23964 * boundary, if it is active), (x1-x0)+x0 may be 23967 v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->g.ptr.p_double[0], 1, ae_v_len(0,n-1)); 23969 state->finit = state->f; 23970 if( !(ae_fp_less_eq(mincomp_asad1norm(state, _state),state->stpmax)||ae_fp_eq(state->stpmax,0)) ) 23976 * Try alpha=1 step first 23978 for(i=0; i<=n-1; i++) 23980 state->x.ptr.p_double[i] = boundval(state->xk.ptr.p_double[i]-state->g.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 23982 mincomp_clearrequestfields(state, _state); 23983 state->needfg = ae_true; 23984 state->rstate.stage = 2; 23987 state->needfg = ae_false; 23988 state->repnfev = state->repnfev+1; 23989 stepfound = ae_fp_less_eq(state->f,state->finit+mincomp_gpaftol*state->dginit); 23992 stepfound = ae_false; 24000 * we are at the boundary(ies) 24002 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24008 * alpha=1 is too large, try smaller values 24011 linminnormalized(&state->d, &state->stp, n, _state); 24012 state->dginit = state->dginit/state->stp; 24013 state->stp = mincomp_gpadecay*state->stp; 24014 if( ae_fp_greater(state->stpmax,0) ) 24016 state->stp = ae_minreal(state->stp, state->stpmax, _state); 24024 ae_v_move(&state->x.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24025 ae_v_addd(&state->x.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1), v); 24026 mincomp_clearrequestfields(state, _state); 24027 state->needfg = ae_true; 24028 state->rstate.stage = 3; 24031 state->needfg = ae_false; 24032 state->repnfev = state->repnfev+1; 24033 if( ae_fp_less_eq(state->stp,mincomp_stpmin) ) 24037 if( ae_fp_less_eq(state->f,state->finit+state->stp*mincomp_gpaftol*state->dginit) ) 24041 state->stp = state->stp*mincomp_gpadecay; 24044 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24046 
state->repiterationscount = state->repiterationscount+1; 24055 mincomp_clearrequestfields(state, _state); 24056 state->xupdated = ae_true; 24057 state->rstate.stage = 4; 24060 state->xupdated = ae_false; 24064 * Calculate new set of active constraints. 24065 * Reset counter if active set was changed. 24066 * Prepare for the new iteration 24068 for(i=0; i<=n-1; i++) 24070 if( ae_fp_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 24072 state->an.ptr.p_double[i] = 0; 24076 state->an.ptr.p_double[i] = 1; 24079 for(i=0; i<=n-1; i++) 24081 if( ae_fp_neq(state->ak.ptr.p_double[i],state->an.ptr.p_double[i]) ) 24083 state->acount = -1; 24087 state->acount = state->acount+1; 24088 ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24089 ae_v_move(&state->ak.ptr.p_double[0], 1, &state->an.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24092 * Stopping conditions 24094 if( !(state->repiterationscount>=state->maxits&&state->maxits>0) ) 24100 * Too many iterations 24102 state->repterminationtype = 5; 24107 mincomp_clearrequestfields(state, _state); 24108 state->xupdated = ae_true; 24109 state->rstate.stage = 5; 24112 state->xupdated = ae_false; 24117 if( ae_fp_greater(mincomp_asaboundedantigradnorm(state, _state),state->epsg) ) 24123 * Gradient is small enough 24125 state->repterminationtype = 4; 24130 mincomp_clearrequestfields(state, _state); 24131 state->xupdated = ae_true; 24132 state->rstate.stage = 6; 24135 state->xupdated = ae_false; 24140 v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24141 if( ae_fp_greater(ae_sqrt(v, _state)*state->stp,state->epsx) ) 24147 * Step size is too small, no further improvement is 24150 state->repterminationtype = 2; 24155 mincomp_clearrequestfields(state, _state); 24156 state->xupdated = ae_true; 24157 state->rstate.stage = 7; 24160 state->xupdated = ae_false; 24165 if( 
ae_fp_greater(state->finit-state->f,state->epsf*ae_maxreal(ae_fabs(state->finit, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) 24171 * F(k+1)-F(k) is small enough 24173 state->repterminationtype = 1; 24178 mincomp_clearrequestfields(state, _state); 24179 state->xupdated = ae_true; 24180 state->rstate.stage = 8; 24183 state->xupdated = ae_false; 24190 * Decide - should we switch algorithm or not 24192 if( mincomp_asauisempty(state, _state) ) 24194 if( ae_fp_greater_eq(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state)) ) 24196 state->curalgo = 1; 24201 state->mu = state->mu*mincomp_asarho; 24206 if( state->acount==mincomp_n1 ) 24208 if( ae_fp_greater_eq(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state)) ) 24210 state->curalgo = 1; 24219 state->k = state->k+1; 24227 if( state->curalgo!=1 ) 24233 * first, check that there are non-active constraints. 24234 * move to GPA algorithm, if all constraints are active 24237 for(i=0; i<=n-1; i++) 24239 if( ae_fp_neq(state->ak.ptr.p_double[i],0) ) 24247 state->curalgo = 0; 24254 state->fold = state->f; 24255 ae_v_move(&state->xk.ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24256 for(i=0; i<=n-1; i++) 24258 state->dk.ptr.p_double[i] = -state->g.ptr.p_double[i]*state->ak.ptr.p_double[i]; 24259 state->gc.ptr.p_double[i] = state->g.ptr.p_double[i]*state->ak.ptr.p_double[i]; 24268 * Store G[k] for later calculation of Y[k] 24270 for(i=0; i<=n-1; i++) 24272 state->yk.ptr.p_double[i] = -state->gc.ptr.p_double[i]; 24276 * Make a CG step in direction given by DK[]: 24277 * * calculate step. Step projection into feasible set 24278 * is used. 
It has several benefits: a) step may be 24279 * found with usual line search, b) multiple constraints 24280 * may be activated with one step, c) activated constraints 24281 * are detected in a natural way - just compare x[i] with 24283 * * update active set, set B to True, if there 24284 * were changes in the set. 24286 ae_v_move(&state->d.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24287 ae_v_move(&state->xn.ptr.p_double[0], 1, &state->xk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24288 state->mcstage = 0; 24290 linminnormalized(&state->d, &state->stp, n, _state); 24291 if( ae_fp_neq(state->laststep,0) ) 24293 state->stp = state->laststep; 24295 mcsrch(n, &state->xn, &state->f, &state->gc, &state->d, &state->stp, state->stpmax, mincomp_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 24297 if( state->mcstage==0 ) 24303 * preprocess data: bound State.XN so it belongs to the 24304 * feasible set and store it in the State.X 24306 for(i=0; i<=n-1; i++) 24308 state->x.ptr.p_double[i] = boundval(state->xn.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 24314 mincomp_clearrequestfields(state, _state); 24315 state->needfg = ae_true; 24316 state->rstate.stage = 9; 24319 state->needfg = ae_false; 24322 * postprocess data: zero components of G corresponding to 24323 * the active constraints 24325 for(i=0; i<=n-1; i++) 24327 if( ae_fp_eq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 24329 state->gc.ptr.p_double[i] = 0; 24333 state->gc.ptr.p_double[i] = state->g.ptr.p_double[i]; 24336 mcsrch(n, &state->xn, &state->f, &state->gc, &state->d, &state->stp, state->stpmax, mincomp_gtol, &mcinfo, &state->nfev, &state->work, &state->lstate, &state->mcstage, _state); 24340 for(i=0; i<=n-1; i++) 24344 * XN contains unprojected result, project it, 24345 * save copy to X (will be used for progress reporting) 24347 
state->xn.ptr.p_double[i] = boundval(state->xn.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state); 24350 * update active set 24352 if( ae_fp_eq(state->xn.ptr.p_double[i],state->bndl.ptr.p_double[i])||ae_fp_eq(state->xn.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 24354 state->an.ptr.p_double[i] = 0; 24358 state->an.ptr.p_double[i] = 1; 24360 if( ae_fp_neq(state->an.ptr.p_double[i],state->ak.ptr.p_double[i]) ) 24362 diffcnt = diffcnt+1; 24364 state->ak.ptr.p_double[i] = state->an.ptr.p_double[i]; 24366 ae_v_move(&state->xk.ptr.p_double[0], 1, &state->xn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24367 state->repnfev = state->repnfev+state->nfev; 24368 state->repiterationscount = state->repiterationscount+1; 24377 mincomp_clearrequestfields(state, _state); 24378 state->xupdated = ae_true; 24379 state->rstate.stage = 10; 24382 state->xupdated = ae_false; 24386 * Update info about step length 24388 v = ae_v_dotproduct(&state->d.ptr.p_double[0], 1, &state->d.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24389 state->laststep = ae_sqrt(v, _state)*state->stp; 24392 * Check stopping conditions. 
24394 if( ae_fp_greater(mincomp_asaboundedantigradnorm(state, _state),state->epsg) ) 24400 * Gradient is small enough 24402 state->repterminationtype = 4; 24407 mincomp_clearrequestfields(state, _state); 24408 state->xupdated = ae_true; 24409 state->rstate.stage = 11; 24412 state->xupdated = ae_false; 24417 if( !(state->repiterationscount>=state->maxits&&state->maxits>0) ) 24423 * Too many iterations 24425 state->repterminationtype = 5; 24430 mincomp_clearrequestfields(state, _state); 24431 state->xupdated = ae_true; 24432 state->rstate.stage = 12; 24435 state->xupdated = ae_false; 24440 if( !(ae_fp_greater_eq(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state))&&diffcnt==0) ) 24446 * These conditions (EpsF/EpsX) are explicitly or implicitly 24447 * related to the current step size and influenced 24448 * by changes in the active constraints. 24450 * For these reasons they are checked only when we don't 24451 * want to 'unstick' at the end of the iteration and there 24452 * were no changes in the active set. 24454 * NOTE: consition |G|>=Mu*|D1| must be exactly opposite 24455 * to the condition used to switch back to GPA. 
At least 24456 * one inequality must be strict, otherwise infinite cycle 24457 * may occur when |G|=Mu*|D1| (we DON'T test stopping 24458 * conditions and we DON'T switch to GPA, so we cycle 24461 if( ae_fp_greater(state->fold-state->f,state->epsf*ae_maxreal(ae_fabs(state->fold, _state), ae_maxreal(ae_fabs(state->f, _state), 1.0, _state), _state)) ) 24467 * F(k+1)-F(k) is small enough 24469 state->repterminationtype = 1; 24474 mincomp_clearrequestfields(state, _state); 24475 state->xupdated = ae_true; 24476 state->rstate.stage = 13; 24479 state->xupdated = ae_false; 24484 if( ae_fp_greater(state->laststep,state->epsx) ) 24490 * X(k+1)-X(k) is small enough 24492 state->repterminationtype = 2; 24497 mincomp_clearrequestfields(state, _state); 24498 state->xupdated = ae_true; 24499 state->rstate.stage = 14; 24502 state->xupdated = ae_false; 24510 * Check conditions for switching 24512 if( ae_fp_less(mincomp_asaginorm(state, _state),state->mu*mincomp_asad1norm(state, _state)) ) 24514 state->curalgo = 0; 24519 if( mincomp_asauisempty(state, _state)||diffcnt>=mincomp_n2 ) 24521 state->curalgo = 1; 24525 state->curalgo = 0; 24533 * Line search may result in: 24534 * * maximum feasible step being taken (already processed) 24535 * * point satisfying Wolfe conditions 24536 * * some kind of error (CG is restarted by assigning 0.0 to Beta) 24542 * Standard Wolfe conditions are satisfied: 24543 * * calculate Y[K] and BetaK 24545 ae_v_add(&state->yk.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24546 vv = ae_v_dotproduct(&state->yk.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24547 v = ae_v_dotproduct(&state->gc.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24548 state->betady = v/vv; 24549 v = ae_v_dotproduct(&state->gc.ptr.p_double[0], 1, &state->yk.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24550 state->betahs = v/vv; 24551 if( state->cgtype==0 ) 24553 betak = state->betady; 24555 if( state->cgtype==1 ) 24557 
betak = ae_maxreal(0, ae_minreal(state->betady, state->betahs, _state), _state); 24564 * Something is wrong (may be function is too wild or too flat). 24566 * We'll set BetaK=0, which will restart CG algorithm. 24567 * We can stop later (during normal checks) if stopping conditions are met. 24570 state->debugrestartscount = state->debugrestartscount+1; 24572 ae_v_moveneg(&state->dn.ptr.p_double[0], 1, &state->gc.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24573 ae_v_addd(&state->dn.ptr.p_double[0], 1, &state->dk.ptr.p_double[0], 1, ae_v_len(0,n-1), betak); 24574 ae_v_move(&state->dk.ptr.p_double[0], 1, &state->dn.ptr.p_double[0], 1, ae_v_len(0,n-1)); 24577 * update other information 24579 state->fold = state->f; 24580 state->k = state->k+1; 24594 state->rstate.ia.ptr.p_int[0] = n; 24595 state->rstate.ia.ptr.p_int[1] = i; 24596 state->rstate.ia.ptr.p_int[2] = mcinfo; 24597 state->rstate.ia.ptr.p_int[3] = diffcnt; 24598 state->rstate.ba.ptr.p_bool[0] = b; 24599 state->rstate.ba.ptr.p_bool[1] = stepfound; 24600 state->rstate.ra.ptr.p_double[0] = betak; 24601 state->rstate.ra.ptr.p_double[1] = v; 24602 state->rstate.ra.ptr.p_double[2] = vv; 24607 /************************************************************************* 24608 Obsolete optimization algorithm. 24609 Was replaced by MinBLEIC subpackage. 24612 Copyright 20.03.2009 by Bochkanov Sergey 24613 *************************************************************************/ 24614 void minasaresults(minasastate* state, 24615 /* Real */ ae_vector* x, 24620 ae_vector_clear(x); 24621 _minasareport_clear(rep); 24623 minasaresultsbuf(state, x, rep, _state); 24627 /************************************************************************* 24628 Obsolete optimization algorithm. 24629 Was replaced by MinBLEIC subpackage. 
24632 Copyright 20.03.2009 by Bochkanov Sergey 24633 *************************************************************************/ 24634 void minasaresultsbuf(minasastate* state, 24635 /* Real */ ae_vector* x, 24642 if( x->cnt<state->n ) 24644 ae_vector_set_length(x, state->n, _state); 24646 ae_v_move(&x->ptr.p_double[0], 1, &state->x.ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 24647 rep->iterationscount = state->repiterationscount; 24648 rep->nfev = state->repnfev; 24649 rep->terminationtype = state->repterminationtype; 24650 rep->activeconstraints = 0; 24651 for(i=0; i<=state->n-1; i++) 24653 if( ae_fp_eq(state->ak.ptr.p_double[i],0) ) 24655 rep->activeconstraints = rep->activeconstraints+1; 24661 /************************************************************************* 24662 Obsolete optimization algorithm. 24663 Was replaced by MinBLEIC subpackage. 24666 Copyright 30.07.2010 by Bochkanov Sergey 24667 *************************************************************************/ 24668 void minasarestartfrom(minasastate* state, 24669 /* Real */ ae_vector* x, 24670 /* Real */ ae_vector* bndl, 24671 /* Real */ ae_vector* bndu, 24676 ae_assert(x->cnt>=state->n, "MinASARestartFrom: Length(X)<N!
", _state); 24677 ae_assert(isfinitevector(x, state->n, _state), "MinASARestartFrom: X contains infinite
or NaN values!
", _state); 24678 ae_assert(bndl->cnt>=state->n, "MinASARestartFrom: Length(BndL)<N!
", _state); 24679 ae_assert(isfinitevector(bndl, state->n, _state), "MinASARestartFrom: BndL contains infinite
or NaN values!
", _state); 24680 ae_assert(bndu->cnt>=state->n, "MinASARestartFrom: Length(BndU)<N!
", _state); 24681 ae_assert(isfinitevector(bndu, state->n, _state), "MinASARestartFrom: BndU contains infinite
or NaN values!
", _state); 24682 ae_v_move(&state->x.ptr.p_double[0], 1, &x->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 24683 ae_v_move(&state->bndl.ptr.p_double[0], 1, &bndl->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 24684 ae_v_move(&state->bndu.ptr.p_double[0], 1, &bndu->ptr.p_double[0], 1, ae_v_len(0,state->n-1)); 24685 state->laststep = 0; 24686 ae_vector_set_length(&state->rstate.ia, 3+1, _state); 24687 ae_vector_set_length(&state->rstate.ba, 1+1, _state); 24688 ae_vector_set_length(&state->rstate.ra, 2+1, _state); 24689 state->rstate.stage = -1; 24690 mincomp_clearrequestfields(state, _state); 24694 /************************************************************************* 24695 Returns norm of bounded anti-gradient. 24697 Bounded antigradient is a vector obtained from anti-gradient by zeroing 24698 components which point outwards: 24700 v[i]=0 if ((-g[i]<0)and(x[i]=bndl[i])) or 24701 ((-g[i]>0)and(x[i]=bndu[i])) 24702 v[i]=-g[i] otherwise 24704 This function may be used to check a stopping criterion. 24707 Copyright 20.03.2009 by Bochkanov Sergey 24708 *************************************************************************/ 24709 static double mincomp_asaboundedantigradnorm(minasastate* state, 24718 for(i=0; i<=state->n-1; i++) 24720 v = -state->g.ptr.p_double[i]; 24721 if( ae_fp_eq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i])&&ae_fp_less(-state->g.ptr.p_double[i],0) ) 24725 if( ae_fp_eq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i])&&ae_fp_greater(-state->g.ptr.p_double[i],0) ) 24729 result = result+ae_sqr(v, _state); 24731 result = ae_sqrt(result, _state); 24736 /************************************************************************* 24737 Returns norm of GI(x). 24739 GI(x) is a gradient vector whose components associated with active 24740 constraints are zeroed. 
It differs from bounded anti-gradient because 24741 components of GI(x) are zeroed independently of sign(g[i]), and 24742 anti-gradient's components are zeroed with respect to both constraint and 24746 Copyright 20.03.2009 by Bochkanov Sergey 24747 *************************************************************************/ 24748 static double mincomp_asaginorm(minasastate* state, ae_state *_state) 24755 for(i=0; i<=state->n-1; i++) 24757 if( ae_fp_neq(state->x.ptr.p_double[i],state->bndl.ptr.p_double[i])&&ae_fp_neq(state->x.ptr.p_double[i],state->bndu.ptr.p_double[i]) ) 24759 result = result+ae_sqr(state->g.ptr.p_double[i], _state); 24762 result = ae_sqrt(result, _state); 24767 /************************************************************************* 24768 Returns norm(D1(State.X)) 24770 For a meaning of D1 see 'NEW ACTIVE SET ALGORITHM FOR BOX CONSTRAINED 24771 OPTIMIZATION' by WILLIAM W. HAGER AND HONGCHAO ZHANG. 24774 Copyright 20.03.2009 by Bochkanov Sergey 24775 *************************************************************************/ 24776 static double mincomp_asad1norm(minasastate* state, ae_state *_state) 24783 for(i=0; i<=state->n-1; i++) 24785 result = result+ae_sqr(boundval(state->x.ptr.p_double[i]-state->g.ptr.p_double[i], state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i], _state)-state->x.ptr.p_double[i], _state); 24787 result = ae_sqrt(result, _state); 24792 /************************************************************************* 24793 Returns True, if U set is empty. 24795 * State.X is used as point, 24796 * State.G - as gradient, 24797 * D is calculated within function (because State.D may have different 24798 meaning depending on current optimization algorithm) 24800 For a meaning of U see 'NEW ACTIVE SET ALGORITHM FOR BOX CONSTRAINED 24801 OPTIMIZATION' by WILLIAM W. HAGER AND HONGCHAO ZHANG. 
24804 Copyright 20.03.2009 by Bochkanov Sergey 24805 *************************************************************************/ 24806 static ae_bool mincomp_asauisempty(minasastate* state, ae_state *_state) 24815 d = mincomp_asad1norm(state, _state); 24816 d2 = ae_sqrt(d, _state); 24819 for(i=0; i<=state->n-1; i++) 24821 if( ae_fp_greater_eq(ae_fabs(state->g.ptr.p_double[i], _state),d2)&&ae_fp_greater_eq(ae_minreal(state->x.ptr.p_double[i]-state->bndl.ptr.p_double[i], state->bndu.ptr.p_double[i]-state->x.ptr.p_double[i], _state),d32) ) 24831 /************************************************************************* 24832 Clears request fileds (to be sure that we don't forgot to clear something) 24833 *************************************************************************/ 24834 static void mincomp_clearrequestfields(minasastate* state, 24839 state->needfg = ae_false; 24840 state->xupdated = ae_false; 24844 ae_bool _minasastate_init(void* _p, ae_state *_state, ae_bool make_automatic) 24846 minasastate *p = (minasastate*)_p; 24847 ae_touch_ptr((void*)p); 24848 if( !ae_vector_init(&p->bndl, 0, DT_REAL, _state, make_automatic) ) 24850 if( !ae_vector_init(&p->bndu, 0, DT_REAL, _state, make_automatic) ) 24852 if( !ae_vector_init(&p->ak, 0, DT_REAL, _state, make_automatic) ) 24854 if( !ae_vector_init(&p->xk, 0, DT_REAL, _state, make_automatic) ) 24856 if( !ae_vector_init(&p->dk, 0, DT_REAL, _state, make_automatic) ) 24858 if( !ae_vector_init(&p->an, 0, DT_REAL, _state, make_automatic) ) 24860 if( !ae_vector_init(&p->xn, 0, DT_REAL, _state, make_automatic) ) 24862 if( !ae_vector_init(&p->dn, 0, DT_REAL, _state, make_automatic) ) 24864 if( !ae_vector_init(&p->d, 0, DT_REAL, _state, make_automatic) ) 24866 if( !ae_vector_init(&p->work, 0, DT_REAL, _state, make_automatic) ) 24868 if( !ae_vector_init(&p->yk, 0, DT_REAL, _state, make_automatic) ) 24870 if( !ae_vector_init(&p->gc, 0, DT_REAL, _state, make_automatic) ) 24872 if( !ae_vector_init(&p->x, 0, DT_REAL, _state, 
make_automatic) ) 24874 if( !ae_vector_init(&p->g, 0, DT_REAL, _state, make_automatic) ) 24876 if( !_rcommstate_init(&p->rstate, _state, make_automatic) ) 24878 if( !_linminstate_init(&p->lstate, _state, make_automatic) ) 24884 ae_bool _minasastate_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 24886 minasastate *dst = (minasastate*)_dst; 24887 minasastate *src = (minasastate*)_src; 24889 dst->epsg = src->epsg; 24890 dst->epsf = src->epsf; 24891 dst->epsx = src->epsx; 24892 dst->maxits = src->maxits; 24893 dst->xrep = src->xrep; 24894 dst->stpmax = src->stpmax; 24895 dst->cgtype = src->cgtype; 24897 dst->nfev = src->nfev; 24898 dst->mcstage = src->mcstage; 24899 if( !ae_vector_init_copy(&dst->bndl, &src->bndl, _state, make_automatic) ) 24901 if( !ae_vector_init_copy(&dst->bndu, &src->bndu, _state, make_automatic) ) 24903 dst->curalgo = src->curalgo; 24904 dst->acount = src->acount; 24906 dst->finit = src->finit; 24907 dst->dginit = src->dginit; 24908 if( !ae_vector_init_copy(&dst->ak, &src->ak, _state, make_automatic) ) 24910 if( !ae_vector_init_copy(&dst->xk, &src->xk, _state, make_automatic) ) 24912 if( !ae_vector_init_copy(&dst->dk, &src->dk, _state, make_automatic) ) 24914 if( !ae_vector_init_copy(&dst->an, &src->an, _state, make_automatic) ) 24916 if( !ae_vector_init_copy(&dst->xn, &src->xn, _state, make_automatic) ) 24918 if( !ae_vector_init_copy(&dst->dn, &src->dn, _state, make_automatic) ) 24920 if( !ae_vector_init_copy(&dst->d, &src->d, _state, make_automatic) ) 24922 dst->fold = src->fold; 24923 dst->stp = src->stp; 24924 if( !ae_vector_init_copy(&dst->work, &src->work, _state, make_automatic) ) 24926 if( !ae_vector_init_copy(&dst->yk, &src->yk, _state, make_automatic) ) 24928 if( !ae_vector_init_copy(&dst->gc, &src->gc, _state, make_automatic) ) 24930 dst->laststep = src->laststep; 24931 if( !ae_vector_init_copy(&dst->x, &src->x, _state, make_automatic) ) 24934 if( !ae_vector_init_copy(&dst->g, &src->g, _state, 
make_automatic) ) 24936 dst->needfg = src->needfg; 24937 dst->xupdated = src->xupdated; 24938 if( !_rcommstate_init_copy(&dst->rstate, &src->rstate, _state, make_automatic) ) 24940 dst->repiterationscount = src->repiterationscount; 24941 dst->repnfev = src->repnfev; 24942 dst->repterminationtype = src->repterminationtype; 24943 dst->debugrestartscount = src->debugrestartscount; 24944 if( !_linminstate_init_copy(&dst->lstate, &src->lstate, _state, make_automatic) ) 24946 dst->betahs = src->betahs; 24947 dst->betady = src->betady; 24952 void _minasastate_clear(void* _p) 24954 minasastate *p = (minasastate*)_p; 24955 ae_touch_ptr((void*)p); 24956 ae_vector_clear(&p->bndl); 24957 ae_vector_clear(&p->bndu); 24958 ae_vector_clear(&p->ak); 24959 ae_vector_clear(&p->xk); 24960 ae_vector_clear(&p->dk); 24961 ae_vector_clear(&p->an); 24962 ae_vector_clear(&p->xn); 24963 ae_vector_clear(&p->dn); 24964 ae_vector_clear(&p->d); 24965 ae_vector_clear(&p->work); 24966 ae_vector_clear(&p->yk); 24967 ae_vector_clear(&p->gc); 24968 ae_vector_clear(&p->x); 24969 ae_vector_clear(&p->g); 24970 _rcommstate_clear(&p->rstate); 24971 _linminstate_clear(&p->lstate); 24975 void _minasastate_destroy(void* _p) 24977 minasastate *p = (minasastate*)_p; 24978 ae_touch_ptr((void*)p); 24979 ae_vector_destroy(&p->bndl); 24980 ae_vector_destroy(&p->bndu); 24981 ae_vector_destroy(&p->ak); 24982 ae_vector_destroy(&p->xk); 24983 ae_vector_destroy(&p->dk); 24984 ae_vector_destroy(&p->an); 24985 ae_vector_destroy(&p->xn); 24986 ae_vector_destroy(&p->dn); 24987 ae_vector_destroy(&p->d); 24988 ae_vector_destroy(&p->work); 24989 ae_vector_destroy(&p->yk); 24990 ae_vector_destroy(&p->gc); 24991 ae_vector_destroy(&p->x); 24992 ae_vector_destroy(&p->g); 24993 _rcommstate_destroy(&p->rstate); 24994 _linminstate_destroy(&p->lstate); 24998 ae_bool _minasareport_init(void* _p, ae_state *_state, ae_bool make_automatic) 25000 minasareport *p = (minasareport*)_p; 25001 ae_touch_ptr((void*)p); 25006 ae_bool 
_minasareport_init_copy(void* _dst, void* _src, ae_state *_state, ae_bool make_automatic) 25008 minasareport *dst = (minasareport*)_dst; 25009 minasareport *src = (minasareport*)_src; 25010 dst->iterationscount = src->iterationscount; 25011 dst->nfev = src->nfev; 25012 dst->terminationtype = src->terminationtype; 25013 dst->activeconstraints = src->activeconstraints; 25018 void _minasareport_clear(void* _p) 25020 minasareport *p = (minasareport*)_p; 25021 ae_touch_ptr((void*)p); 25025 void _minasareport_destroy(void* _p) 25027 minasareport *p = (minasareport*)_p; 25028 ae_touch_ptr((void*)p); ae_bool isactivesetchanged
void minlmcreatevj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
void _minqpstate_clear(void *_p)
minbleicreport & operator=(const minbleicreport &rhs)
void minlmsetcond(const minlmstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
struct alglib_impl::ae_state ae_state
void minbleicsetprecdefault(const minbleicstate &state)
ae_bool _mincgstate_init(void *_p, ae_state *_state, ae_bool make_automatic)
void mincgsetstpmax(mincgstate *state, double stpmax, ae_state *_state)
alglib_impl::minlbfgsstate * c_ptr()
void calculatestepbound(ae_vector *x, ae_vector *d, double alpha, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_int_t *variabletofreeze, double *valuetofreeze, double *maxsteplen, ae_state *_state)
void minlbfgssetstpmax(minlbfgsstate *state, double stpmax, ae_state *_state)
ae_bool ae_fp_greater_eq(double v1, double v2)
void trimprepare(double f, double *threshold, ae_state *_state)
void minqpsetscale(const minqpstate &state, const real_1d_array &s)
void mincgsetxrep(mincgstate *state, ae_bool needxrep, ae_state *_state)
void rmatrixsyrk(const ae_int_t n, const ae_int_t k, const double alpha, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t optypea, const double beta, const real_2d_array &c, const ae_int_t ic, const ae_int_t jc, const bool isupper)
minlbfgsreport & operator=(const minlbfgsreport &rhs)
ae_int_t & terminationtype
void mincgresultsbuf(const mincgstate &state, real_1d_array &x, mincgreport &rep)
void rmatrixmv(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, const ae_int_t opa, const real_1d_array &x, const ae_int_t ix, real_1d_array &y, const ae_int_t iy)
void minbleicsetprecdiag(const minbleicstate &state, const real_1d_array &d)
void ae_v_moved(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n, double alpha)
alglib_impl::mincgstate * c_ptr()
void minlbfgssetscale(minlbfgsstate *state, ae_vector *s, ae_state *_state)
ae_bool _minbleicreport_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
ae_bool ismaintermchanged
void minbleicsetgradientcheck(const minbleicstate &state, const double teststep)
void mincgsetprecdiag(const mincgstate &state, const real_1d_array &d)
void minlmcreatev(ae_int_t n, ae_int_t m, ae_vector *x, double diffstep, minlmstate *state, ae_state *_state)
void mincgsetprecdefault(mincgstate *state, ae_state *_state)
alglib_impl::minasareport * c_ptr()
void minlbfgssetgradientcheck(const minlbfgsstate &state, const double teststep)
void minbleiccreate(const ae_int_t n, const real_1d_array &x, minbleicstate &state)
ae_bool cqmconstrainedoptimum(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void minbleicresultsbuf(minbleicstate *state, ae_vector *x, minbleicreport *rep, ae_state *_state)
bool minasaiteration(const minasastate &state)
ae_bool ae_is_symmetric(ae_matrix *a)
ae_bool isfinitertrmatrix(ae_matrix *x, ae_int_t n, ae_bool isupper, ae_state *_state)
void ae_v_muld(double *vdst, ae_int_t stride_dst, ae_int_t n, double alpha)
void mincgcreatef(ae_int_t n, ae_vector *x, double diffstep, mincgstate *state, ae_state *_state)
void mincgsetprecdiag(mincgstate *state, ae_vector *d, ae_state *_state)
ae_bool _minlmreport_init(void *_p, ae_state *_state, ae_bool make_automatic)
double ae_fabs(double x, ae_state *state)
void minbleicsetxrep(const minbleicstate &state, const bool needxrep)
ae_bool derivativecheck(double f0, double df0, double f1, double df1, double f, double df, double width, ae_state *_state)
ae_int_t numberofchangedconstraints(ae_vector *x, ae_vector *xprev, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_state *_state)
ae_int_t & iterationscount
void minlbfgssetcholeskypreconditioner(minlbfgsstate *state, ae_matrix *p, ae_bool isupper, ae_state *_state)
void mincgsetxrep(const mincgstate &state, const bool needxrep)
void cqmsetq(convexquadraticmodel *s, ae_matrix *q, ae_vector *r, ae_int_t k, double theta, ae_state *_state)
void minlmsetxrep(minlmstate *state, ae_bool needxrep, ae_state *_state)
void * ae_malloc(size_t size, ae_state *state)
void minlbfgssetprecdefault(minlbfgsstate *state, ae_state *_state)
void mincgsetstpmax(const mincgstate &state, const double stpmax)
double cqmeval(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
ae_bool _mincgstate_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
bool minbleiciteration(const minbleicstate &state)
void mincgresults(mincgstate *state, ae_vector *x, mincgreport *rep, ae_state *_state)
void minlmsetbc(minlmstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
union alglib_impl::ae_matrix::@12 ptr
void minlbfgssetdefaultpreconditioner(minlbfgsstate *state, ae_state *_state)
void cqmsetactiveset(convexquadraticmodel *s, ae_vector *x, ae_vector *activeset, ae_state *_state)
void minasaresults(const minasastate &state, real_1d_array &x, minasareport &rep)
void mincgoptimize(mincgstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr), void *ptr)
_mincgstate_owner & operator=(const _mincgstate_owner &rhs)
void ae_frame_make(ae_state *state, ae_frame *tmp)
ae_bool _minlbfgsreport_init(void *_p, ae_state *_state, ae_bool make_automatic)
void minqpsetbc(const minqpstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
bool mincgiteration(const mincgstate &state)
ae_int_t & debugfeasgpaits
minlmstate & operator=(const minlmstate &rhs)
ae_int_t & outeriterationscount
void minlbfgssetpreccholesky(const minlbfgsstate &state, const real_2d_array &p, const bool isupper)
void minlmcreatefgh(const ae_int_t n, const real_1d_array &x, minlmstate &state)
ae_bool _minlbfgsreport_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void minasaoptimize(minasastate &state, void(*grad)(const real_1d_array &x, double &func, real_1d_array &grad, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr), void *ptr)
void _minbleicstate_clear(void *_p)
virtual ~_minlmreport_owner()
void minlbfgssetxrep(const minlbfgsstate &state, const bool needxrep)
void minqpsetalgobleic(const minqpstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minlbfgssetprecdiag(const minlbfgsstate &state, const real_1d_array &d)
void cqmadx(convexquadraticmodel *s, ae_vector *x, ae_vector *y, ae_state *_state)
ae_bool apservisfinitematrix(ae_matrix *x, ae_int_t m, ae_int_t n, ae_state *_state)
void mincgrestartfrom(const mincgstate &state, const real_1d_array &x)
void bvectorsetlengthatleast(ae_vector *x, ae_int_t n, ae_state *_state)
void minqpsetorigin(minqpstate *state, ae_vector *xorigin, ae_state *_state)
ae_bool _minqpstate_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void minlbfgscreatef(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlbfgsstate &state)
void cqmsetb(convexquadraticmodel *s, ae_vector *b, ae_state *_state)
void minqpsetquadraticterm(minqpstate *state, ae_matrix *a, ae_bool isupper, ae_state *_state)
void snnlsinit(ae_int_t nsmax, ae_int_t ndmax, ae_int_t nrmax, snnlssolver *s, ae_state *_state)
void minbleicsetbarrierwidth(const minbleicstate &state, const double mu)
void filterdirection(ae_vector *d, ae_vector *x, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_vector *s, ae_int_t nmain, ae_int_t nslack, double droptol, ae_state *_state)
void mincgcreate(ae_int_t n, ae_vector *x, mincgstate *state, ae_state *_state)
ae_bool _minbleicstate_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void minbleicsetbarrierdecay(minbleicstate *state, double mudecay, ae_state *_state)
void minlmresultsbuf(const minlmstate &state, real_1d_array &x, minlmreport &rep)
alglib_impl::mincgreport * p_struct
void mincgsetcgtype(const mincgstate &state, const ae_int_t cgtype)
ae_bool minlmiteration(minlmstate *state, ae_state *_state)
minasareport & operator=(const minasareport &rhs)
void cqmsetd(convexquadraticmodel *s, ae_vector *d, double tau, ae_state *_state)
_minqpreport_owner & operator=(const _minqpreport_owner &rhs)
ae_bool _minbleicreport_init(void *_p, ae_state *_state, ae_bool make_automatic)
ae_bool _minqpreport_init(void *_p, ae_state *_state, ae_bool make_automatic)
void minqpsetlc(minqpstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
virtual ~_minasastate_owner()
void minlbfgsrestartfrom(minlbfgsstate *state, ae_vector *x, ae_state *_state)
void ae_state_clear(ae_state *state)
const alglib_impl::ae_matrix * c_ptr() const
void minlmoptimize(minlmstate &state, void(*fvec)(const real_1d_array &x, real_1d_array &fi, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr), void *ptr)
ae_bool ae_fp_eq(double v1, double v2)
void cqmevalx(convexquadraticmodel *s, ae_vector *x, double *r, double *noise, ae_state *_state)
minlmreport & operator=(const minlmreport &rhs)
void minlmrestartfrom(minlmstate *state, ae_vector *x, ae_state *_state)
void _minlmstate_clear(void *_p)
void minbleicresultsbuf(const minbleicstate &state, real_1d_array &x, minbleicreport &rep)
void minlbfgssetdefaultpreconditioner(const minlbfgsstate &state)
void minlbfgssetprecscale(const minlbfgsstate &state)
void minqpcreate(const ae_int_t n, minqpstate &state)
void minbleicsetgradientcheck(minbleicstate *state, double teststep, ae_state *_state)
ae_bool _minlmstate_init(void *_p, ae_state *_state, ae_bool make_automatic)
alglib_impl::mincgstate * p_struct
alglib_impl::minbleicreport * p_struct
void minasasetstpmax(minasastate *state, double stpmax, ae_state *_state)
ae_bool ae_matrix_init_copy(ae_matrix *dst, ae_matrix *src, ae_state *state, ae_bool make_automatic)
ae_bool ae_matrix_init(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_datatype datatype, ae_state *state, ae_bool make_automatic)
void cqmdropa(convexquadraticmodel *s, ae_state *_state)
void _minasastate_clear(void *_p)
void minqpsetbc(minqpstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void ae_matrix_destroy(ae_matrix *dst)
void minbleicsetcond(const minbleicstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minlmcreatefgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
void minbleicrestartfrom(minbleicstate *state, ae_vector *x, ae_state *_state)
void _minqpreport_clear(void *_p)
ql0001_ & k(htemp+1),(cvec+1),(atemp+1),(bj+1),(bl+1),(bu+1),(x+1),(clamda+1), &iout, infoqp, &zero,(w+1), &lenw,(iw+1), &leniw, &glob_grd.epsmac
ae_bool issecondarytermchanged
void ae_v_add(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n)
void mincgsetgradientcheck(const mincgstate &state, const double teststep)
ae_int_t & activeconstraints
void cqmrewritedensediagonal(convexquadraticmodel *s, ae_vector *z, ae_state *_state)
void mincgsetscale(mincgstate *state, ae_vector *s, ae_state *_state)
void minlmcreatefgh(ae_int_t n, ae_vector *x, minlmstate *state, ae_state *_state)
ae_bool _minlmstate_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void minqpsetalgocholesky(const minqpstate &state)
ae_bool _minqpstate_init(void *_p, ae_state *_state, ae_bool make_automatic)
ae_bool _minbleicstate_init(void *_p, ae_state *_state, ae_bool make_automatic)
virtual ~_minbleicreport_owner()
ae_bool minbleiciteration(minbleicstate *state, ae_state *_state)
ae_bool minlbfgsiteration(minlbfgsstate *state, ae_state *_state)
_minlmstate_owner & operator=(const _minlmstate_owner &rhs)
void _minbleicreport_clear(void *_p)
ae_int_t & iterationscount
void minasarestartfrom(minasastate *state, ae_vector *x, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
void _convexquadraticmodel_clear(void *_p)
void threshold(double *phi, unsigned long nvox, double limit)
void minlbfgssetstpmax(const minlbfgsstate &state, const double stpmax)
void mincgsuggeststep(const mincgstate &state, const double stp)
void minlmsetstpmax(minlmstate *state, double stpmax, ae_state *_state)
void minqpresultsbuf(minqpstate *state, ae_vector *x, minqpreport *rep, ae_state *_state)
void mincgresults(const mincgstate &state, real_1d_array &x, mincgreport &rep)
void minasaresults(minasastate *state, ae_vector *x, minasareport *rep, ae_state *_state)
void minbleicresults(minbleicstate *state, ae_vector *x, minbleicreport *rep, ae_state *_state)
virtual ~_minlbfgsreport_owner()
void trimfunction(double *f, ae_vector *g, ae_int_t n, double threshold, ae_state *_state)
ae_int_t ae_v_len(ae_int_t a, ae_int_t b)
ae_int_t & iterationscount
void ae_vector_destroy(ae_vector *dst)
void _minlbfgsreport_clear(void *_p)
void minbleicoptimize(minbleicstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr), void *ptr)
void minlbfgssetprecscale(minlbfgsstate *state, ae_state *_state)
void minlmsetacctype(minlmstate *state, ae_int_t acctype, ae_state *_state)
void minbleicsetprecdiag(minbleicstate *state, ae_vector *d, ae_state *_state)
ae_bool _convexquadraticmodel_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
ae_bool enforceboundaryconstraints(ae_vector *x, ae_vector *bl, ae_vector *havebl, ae_vector *bu, ae_vector *havebu, ae_int_t nmain, ae_int_t nslack, ae_state *_state)
minqpreport & operator=(const minqpreport &rhs)
void minqpresults(const minqpstate &state, real_1d_array &x, minqpreport &rep)
ae_int_t & inneriterationscount
double cqmxtadx2(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void minasasetxrep(const minasastate &state, const bool needxrep)
void _minlbfgsstate_clear(void *_p)
mincgreport & operator=(const mincgreport &rhs)
void minlbfgscreate(ae_int_t n, ae_int_t m, ae_vector *x, minlbfgsstate *state, ae_state *_state)
void minlbfgscreatef(ae_int_t n, ae_int_t m, ae_vector *x, double diffstep, minlbfgsstate *state, ae_state *_state)
void minbleicsetscale(const minbleicstate &state, const real_1d_array &s)
alglib_impl::minqpstate * c_ptr()
void ae_v_move(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n)
void minlmcreatefj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
ae_bool _minqpreport_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
_mincgreport_owner & operator=(const _mincgreport_owner &rhs)
void minlbfgssetscale(const minlbfgsstate &state, const real_1d_array &s)
void minlmcreatefgj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
void minlbfgsoptimize(minlbfgsstate &state, void(*func)(const real_1d_array &x, double &func, void *ptr), void(*rep)(const real_1d_array &x, double func, void *ptr), void *ptr)
alglib_impl::minlmstate * p_struct
void mincgcreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, mincgstate &state)
void minlbfgsrestartfrom(const minlbfgsstate &state, const real_1d_array &x)
alglib_impl::minlbfgsstate * p_struct
void ae_vector_clear(ae_vector *dst)
void minlbfgssetpreccholesky(minlbfgsstate *state, ae_matrix *p, ae_bool isupper, ae_state *_state)
void mincgsetprecscale(const mincgstate &state)
void rmatrixcopy(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t ia, const ae_int_t ja, real_2d_array &b, const ae_int_t ib, const ae_int_t jb)
void _convexquadraticmodel_destroy(void *_p)
ae_bool islineartermchanged
_minasastate_owner & operator=(const _minasastate_owner &rhs)
ae_bool _mincgreport_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
ae_bool ae_fp_less(double v1, double v2)
alglib_impl::minlmreport * p_struct
alglib_impl::minqpreport * c_ptr()
void minbleicsetxrep(minbleicstate *state, ae_bool needxrep, ae_state *_state)
void mincgresultsbuf(mincgstate *state, ae_vector *x, mincgreport *rep, ae_state *_state)
void minqpsetstartingpoint(minqpstate *state, ae_vector *x, ae_state *_state)
void minqpoptimize(const minqpstate &state)
double safeminposrv(double x, double y, double v, ae_state *_state)
void _minasareport_clear(void *_p)
virtual ~_minbleicstate_owner()
void minbleicsetprecscale(const minbleicstate &state)
ae_bool _minlmreport_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
ae_int_t & terminationtype
void minlbfgssetxrep(minlbfgsstate *state, ae_bool needxrep, ae_state *_state)
void minbleicsetlc(minbleicstate *state, ae_matrix *c, ae_vector *ct, ae_int_t k, ae_state *_state)
void minbleicsetbarrierwidth(minbleicstate *state, double mu, ae_state *_state)
void minbleicsetprecdefault(minbleicstate *state, ae_state *_state)
void minqpsetalgocholesky(minqpstate *state, ae_state *_state)
void minlbfgssetcond(minlbfgsstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
virtual ~_minlbfgsstate_owner()
void mincgsetcond(const mincgstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
quaternion_type< T > normalize(quaternion_type< T > q)
ae_bool ae_fp_neq(double v1, double v2)
ae_int_t & terminationtype
ae_bool isfinitevector(ae_vector *x, ae_int_t n, ae_state *_state)
void rvectorsetlengthatleast(ae_vector *x, ae_int_t n, ae_state *_state)
ae_int_t & debugfeasqpits
void minbleicsetbc(minbleicstate *state, ae_vector *bndl, ae_vector *bndu, ae_state *_state)
alglib_impl::minlmreport * c_ptr()
__host__ __device__ float length(float2 v)
void minlbfgsresultsbuf(minlbfgsstate *state, ae_vector *x, minlbfgsreport *rep, ae_state *_state)
void mincgcreate(const ae_int_t n, const real_1d_array &x, mincgstate &state)
ae_bool _minasareport_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void ae_touch_ptr(void *p)
minbleicstate & operator=(const minbleicstate &rhs)
void minbleicresults(const minbleicstate &state, real_1d_array &x, minbleicreport &rep)
minlbfgsstate & operator=(const minlbfgsstate &rhs)
void minasarestartfrom(const minasastate &state, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu)
double ae_maxreal(double m1, double m2, ae_state *state)
void direction(const MultidimArray< double > &orMap, MultidimArray< double > &qualityMap, double lambda, int size, MultidimArray< double > &dirMap, int x, int y)
void minlmcreatevj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
void minasacreate(const ae_int_t n, const real_1d_array &x, const real_1d_array &bndl, const real_1d_array &bndu, minasastate &state)
void minqpresults(minqpstate *state, ae_vector *x, minqpreport *rep, ae_state *_state)
ae_int_t & terminationtype
ae_bool ae_vector_set_length(ae_vector *dst, ae_int_t newsize, ae_state *state)
void ae_v_sub(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n)
void mincgsuggeststep(mincgstate *state, double stp, ae_state *_state)
void minasasetalgorithm(minasastate *state, ae_int_t algotype, ae_state *_state)
bool rmatrixsvd(const real_2d_array &a, const ae_int_t m, const ae_int_t n, const ae_int_t uneeded, const ae_int_t vtneeded, const ae_int_t additionalmemory, real_1d_array &w, real_2d_array &u, real_2d_array &vt)
void minbleicsetstpmax(const minbleicstate &state, const double stpmax)
alglib_impl::minqpreport * p_struct
_minlmreport_owner & operator=(const _minlmreport_owner &rhs)
void _minlmreport_clear(void *_p)
void minasasetcond(const minasastate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
void minlmsetcond(minlmstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
ae_int_t postprocessboundedstep(ae_vector *x, ae_vector *xprev, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_int_t variabletofreeze, double valuetofreeze, double steptaken, double maxsteplen, ae_state *_state)
_minbleicstate_owner & operator=(const _minbleicstate_owner &rhs)
struct alglib_impl::ae_vector ae_vector
ae_bool _apbuffers_init(void *_p, ae_state *_state, ae_bool make_automatic)
ae_bool _minasastate_init(void *_p, ae_state *_state, ae_bool make_automatic)
const alglib_impl::ae_vector * c_ptr() const
bool minlbfgsiteration(const minlbfgsstate &state)
void mincgsetcond(mincgstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
double ae_minreal(double m1, double m2, ae_state *state)
ae_bool _mincgreport_init(void *_p, ae_state *_state, ae_bool make_automatic)
alglib_impl::minlbfgsreport * p_struct
void minqpsetquadratictermsparse(const minqpstate &state, const sparsematrix &a, const bool isupper)
ae_bool _minasareport_init(void *_p, ae_state *_state, ae_bool make_automatic)
void cqmscalevector(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void minlmsetgradientcheck(const minlmstate &state, const double teststep)
void minlmsetxrep(const minlmstate &state, const bool needxrep)
alglib_impl::minasastate * c_ptr()
void mincgsetprecdefault(const mincgstate &state)
alglib_impl::mincgreport * c_ptr()
void minlmcreatevgj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
void minlmsetscale(const minlmstate &state, const real_1d_array &s)
void minlmcreatevgj(ae_int_t n, ae_int_t m, ae_vector *x, minlmstate *state, ae_state *_state)
struct alglib_impl::ae_matrix ae_matrix
double cqmdebugconstrainedevale(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void minbleicsetstpmax(minbleicstate *state, double stpmax, ae_state *_state)
ae_int_t & iterationscount
ae_int_t & inneriterationscount
ae_int_t & terminationtype
alglib_impl::minlmstate * c_ptr()
void ae_state_init(ae_state *state)
void minbleicsetscale(minbleicstate *state, ae_vector *s, ae_state *_state)
virtual ~minlbfgsreport()
alglib_impl::minqpstate * p_struct
void rmatrixrighttrsm(const ae_int_t m, const ae_int_t n, const real_2d_array &a, const ae_int_t i1, const ae_int_t j1, const bool isupper, const bool isunit, const ae_int_t optype, const real_2d_array &x, const ae_int_t i2, const ae_int_t j2)
void rmatrixsetlengthatleast(ae_matrix *x, ae_int_t m, ae_int_t n, ae_state *_state)
double ae_sqrt(double x, ae_state *state)
void mincgsetprecscale(mincgstate *state, ae_state *_state)
void ae_assert(ae_bool cond, const char *msg, ae_state *state)
union alglib_impl::ae_vector::@11 ptr
ae_bool _minlbfgsstate_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void minlbfgssetgradientcheck(minlbfgsstate *state, double teststep, ae_state *_state)
void minqpsetquadraticterm(const minqpstate &state, const real_2d_array &a, const bool isupper)
ae_bool _minasastate_init_copy(void *_dst, void *_src, ae_state *_state, ae_bool make_automatic)
void cqmseta(convexquadraticmodel *s, ae_matrix *a, ae_bool isupper, double alpha, ae_state *_state)
void ae_v_moveneg(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n)
void minqpsetlc(const minqpstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
_minqpstate_owner & operator=(const _minqpstate_owner &rhs)
ae_bool findfeasiblepoint(ae_vector *x, ae_vector *bndl, ae_vector *havebndl, ae_vector *bndu, ae_vector *havebndu, ae_int_t nmain, ae_int_t nslack, ae_matrix *ce, ae_int_t k, double epsi, ae_int_t *qpits, ae_int_t *gpaits, ae_state *_state)
alglib_impl::minbleicreport * c_ptr()
void mincgsetscale(const mincgstate &state, const real_1d_array &s)
const char *volatile error_msg
_minlbfgsreport_owner & operator=(const _minlbfgsreport_owner &rhs)
ae_bool spdmatrixcholeskyrec(ae_matrix *a, ae_int_t offs, ae_int_t n, ae_bool isupper, ae_vector *tmp, ae_state *_state)
void minlmrestartfrom(const minlmstate &state, const real_1d_array &x)
void minlbfgssetcholeskypreconditioner(const minlbfgsstate &state, const real_2d_array &p, const bool isupper)
void minlmcreatev(const ae_int_t n, const ae_int_t m, const real_1d_array &x, const double diffstep, minlmstate &state)
void minlmresults(minlmstate *state, ae_vector *x, minlmreport *rep, ae_state *_state)
void minbleicsetbarrierdecay(const minbleicstate &state, const double mudecay)
void minasasetstpmax(const minasastate &state, const double stpmax)
#define ae_machineepsilon
void minasacreate(ae_int_t n, ae_vector *x, ae_vector *bndl, ae_vector *bndu, minasastate *state, ae_state *_state)
void minbleiccreatef(ae_int_t n, ae_vector *x, double diffstep, minbleicstate *state, ae_state *_state)
_minbleicreport_owner & operator=(const _minbleicreport_owner &rhs)
virtual ~_mincgreport_owner()
void minbleicsetbc(const minbleicstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
void tagsortbuf(ae_vector *a, ae_int_t n, ae_vector *p1, ae_vector *p2, apbuffers *buf, ae_state *_state)
void minbleiccreate(ae_int_t n, ae_vector *x, minbleicstate *state, ae_state *_state)
virtual ~_minqpstate_owner()
_minlbfgsstate_owner & operator=(const _minlbfgsstate_owner &rhs)
void minbleicsetprecscale(minbleicstate *state, ae_state *_state)
void minlbfgsresultsbuf(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep)
minqpstate & operator=(const minqpstate &rhs)
ae_bool _convexquadraticmodel_init(void *_p, ae_state *_state, ae_bool make_automatic)
void minqpsetquadratictermsparse(minqpstate *state, sparsematrix *a, ae_bool isupper, ae_state *_state)
ae_int_t & iterationscount
ae_bool _minlbfgsstate_init(void *_p, ae_state *_state, ae_bool make_automatic)
void minqpsetlinearterm(minqpstate *state, ae_vector *b, ae_state *_state)
void ae_v_subd(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n, double alpha)
ae_bool ae_vector_init(ae_vector *dst, ae_int_t size, ae_datatype datatype, ae_state *state, ae_bool make_automatic)
void minasasetxrep(minasastate *state, ae_bool needxrep, ae_state *_state)
void minlmcreatefj(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlmstate &state)
ae_int_t ae_maxint(ae_int_t m1, ae_int_t m2, ae_state *state)
void minasasetcond(minasastate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
alglib_impl::minasareport * p_struct
void cqmgradunconstrained(convexquadraticmodel *s, ae_vector *x, ae_vector *g, ae_state *_state)
ae_bool ae_isfinite(double x, ae_state *state)
double ae_sqr(double x, ae_state *state)
alglib_impl::minbleicstate * c_ptr()
void minasaresultsbuf(minasastate *state, ae_vector *x, minasareport *rep, ae_state *_state)
void minlmresults(const minlmstate &state, real_1d_array &x, minlmreport &rep)
void minqpsetalgobleic(minqpstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
void minlbfgsresults(const minlbfgsstate &state, real_1d_array &x, minlbfgsreport &rep)
ae_bool minasaiteration(minasastate *state, ae_state *_state)
void minlmresultsbuf(minlmstate *state, ae_vector *x, minlmreport *rep, ae_state *_state)
void mincgsetcgtype(mincgstate *state, ae_int_t cgtype, ae_state *_state)
void minasasetalgorithm(const minasastate &state, const ae_int_t algotype)
void mincgsetgradientcheck(mincgstate *state, double teststep, ae_state *_state)
void ae_v_addd(double *vdst, ae_int_t stride_dst, const double *vsrc, ae_int_t stride_src, ae_int_t n, double alpha)
alglib_impl::minlbfgsreport * c_ptr()
ae_bool ae_vector_init_copy(ae_vector *dst, ae_vector *src, ae_state *state, ae_bool make_automatic)
ae_int_t & outeriterationscount
ae_bool ae_fp_less_eq(double v1, double v2)
void minqpcreate(ae_int_t n, minqpstate *state, ae_state *_state)
void minlbfgscreate(const ae_int_t n, const ae_int_t m, const real_1d_array &x, minlbfgsstate &state)
void minlmsetgradientcheck(minlmstate *state, double teststep, ae_state *_state)
bool minlmiteration(const minlmstate &state)
virtual ~_minasareport_owner()
virtual ~_minqpreport_owner()
void minbleicsetlc(const minbleicstate &state, const real_2d_array &c, const integer_1d_array &ct, const ae_int_t k)
void minlbfgssetcond(const minlbfgsstate &state, const double epsg, const double epsf, const double epsx, const ae_int_t maxits)
ae_bool mincgiteration(mincgstate *state, ae_state *_state)
alglib_impl::ae_int_t ae_int_t
void _mincgreport_clear(void *_p)
void cqminit(ae_int_t n, convexquadraticmodel *s, ae_state *_state)
alglib_impl::minasastate * p_struct
void minqpsetstartingpoint(const minqpstate &state, const real_1d_array &x)
double cqmdebugconstrainedevalt(convexquadraticmodel *s, ae_vector *x, ae_state *_state)
void minlmsetacctype(const minlmstate &state, const ae_int_t acctype)
void mincgrestartfrom(mincgstate *state, ae_vector *x, ae_state *_state)
void ae_frame_leave(ae_state *state)
virtual ~_minlmstate_owner()
alglib_impl::sparsematrix * c_ptr()
void minqpoptimize(minqpstate *state, ae_state *_state)
void minlmsetstpmax(const minlmstate &state, const double stpmax)
virtual ~minbleicreport()
void ae_matrix_clear(ae_matrix *dst)
void minbleiccreatef(const ae_int_t n, const real_1d_array &x, const double diffstep, minbleicstate &state)
ae_bool ae_fp_greater(double v1, double v2)
void minqpsetorigin(const minqpstate &state, const real_1d_array &xorigin)
_minasareport_owner & operator=(const _minasareport_owner &rhs)
void minlbfgsresults(minlbfgsstate *state, ae_vector *x, minlbfgsreport *rep, ae_state *_state)
ae_bool ae_matrix_set_length(ae_matrix *dst, ae_int_t rows, ae_int_t cols, ae_state *state)
void fblscholeskysolve(ae_matrix *cha, double sqrtscalea, ae_int_t n, ae_bool isupper, ae_vector *xb, ae_vector *tmp, ae_state *_state)
void minlmsetscale(minlmstate *state, ae_vector *s, ae_state *_state)
ql0001_ & zero(ctemp+1),(cvec+1),(a+1),(b+1),(bl+1),(bu+1),(x+1),(w+1), &iout, ifail, &zero,(w+3), &lwar2,(iw+1), &leniw, &glob_grd.epsmac
void minbleicsetcond(minbleicstate *state, double epsg, double epsf, double epsx, ae_int_t maxits, ae_state *_state)
void minqpresultsbuf(const minqpstate &state, real_1d_array &x, minqpreport &rep)
void minasaresultsbuf(const minasastate &state, real_1d_array &x, minasareport &rep)
void projectgradientintobc(ae_vector *x, ae_vector *g, ae_vector *bl, ae_vector *havebl, ae_vector *bu, ae_vector *havebu, ae_int_t nmain, ae_int_t nslack, ae_state *_state)
void minlbfgssetprecdefault(const minlbfgsstate &state)
ae_int_t ae_minint(ae_int_t m1, ae_int_t m2, ae_state *state)
void minqpsetscale(minqpstate *state, ae_vector *s, ae_state *_state)
alglib_impl::minbleicstate * p_struct
minasastate & operator=(const minasastate &rhs)
ae_int_t & terminationtype
double ae_v_dotproduct(const double *v0, ae_int_t stride0, const double *v1, ae_int_t stride1, ae_int_t n)
check(nparam, nf, nfsr, &Linfty, nineq, nineqn, neq, neqn, ncsrl, ncsrn, mode, &modem, eps, bgbnd, param)
void minlmsetbc(const minlmstate &state, const real_1d_array &bndl, const real_1d_array &bndu)
void minlbfgssetprecdiag(minlbfgsstate *state, ae_vector *d, ae_state *_state)
void minbleicrestartfrom(const minbleicstate &state, const real_1d_array &x)
mincgstate & operator=(const mincgstate &rhs)
void _mincgstate_clear(void *_p)
void snnlssetproblem(snnlssolver *s, ae_matrix *a, ae_vector *b, ae_int_t ns, ae_int_t nd, ae_int_t nr, ae_state *_state)
void minqpsetlinearterm(const minqpstate &state, const real_1d_array &b)