30 void checkNLOPTerror(nlopt_result errortype)
34 case -1: FILE_LOG(logERROR) <<
"NLOPT: General failure";
break;
35 case -2: FILE_LOG(logERROR) <<
"NLOPT: Invalid arguments. Check bounds.";
break;
36 case -3: FILE_LOG(logERROR) <<
"NLOPT: Out of memory";
break;
37 case -4: FILE_LOG(logERROR) <<
"NLOPT Warning: Potential roundoff error. " 38 <<
"In general, this can be ignored.";
break;
39 case -5: FILE_LOG(logERROR) <<
"NLOPT: Force stop.";
break;
/// Signature of the objective callbacks passed to NLOPT.
/// Matches nlopt_func from the NLOPT C API: the callback receives the
/// dimension, the point, an optional gradient output buffer, and an
/// opaque user-data pointer, and returns the objective value.
typedef double (*eval_func)(unsigned int n,
                            const double *x,
                            double *gradient,   /* NULL if not needed */
                            void *func_data);
51 double run_nlopt(nlopt::algorithm algo, eval_func fpointer,
52 vectord& Xnext,
int maxf,
const std::vector<double>& vd,
53 const std::vector<double>& vu,
void* objPointer)
56 size_t n = Xnext.size();
57 nlopt::opt opt (algo,n);
59 std::vector<double> xstd(n);
60 opt.set_lower_bounds(vd);
61 opt.set_upper_bounds(vu);
62 opt.set_min_objective(fpointer, objPointer);
63 opt.set_maxeval(maxf);
67 opt.set_ftol_rel(1e-12);
68 opt.set_ftol_abs(1e-12);
70 std::copy(Xnext.begin(),Xnext.end(),xstd.begin());
74 opt.optimize(xstd, fmin);
76 catch (nlopt::roundoff_limited& e)
78 FILE_LOG(logDEBUG) <<
"NLOPT Warning: Potential roundoff error. " 79 <<
"In general, this can be ignored.";
82 std::copy(xstd.begin(),xstd.end(),Xnext.begin());
87 NLOPT_Optimization::NLOPT_Optimization(
RBOptimizable* rbo,
size_t dim):
92 setLimits(zvectord(dim),svectord(dim,1.0));
95 NLOPT_Optimization::NLOPT_Optimization(
RGBOptimizable* rgbo,
size_t dim):
100 setLimits(zvectord(dim),svectord(dim,1.0));
103 NLOPT_Optimization::~NLOPT_Optimization()
105 if(rbobj != NULL)
delete rbobj;
106 if(rgbobj != NULL)
delete rgbobj;
111 assert(mDown.size() == Xnext.size());
112 assert(mUp.size() == Xnext.size());
113 const size_t n = Xnext.size();
115 for (
size_t i = 0; i < n; ++i)
117 if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
119 FILE_LOG(logDEBUG) << Xnext;
120 throw std::invalid_argument(
"Local trial withour proper" 125 nlopt::algorithm algo = nlopt::LN_BOBYQA;
127 void* objPointer =
static_cast<void *
>(rbobj);
128 const size_t nIter = 20;
138 vectord start = Xnext;
140 double fmin = run_nlopt(algo,fpointer,Xnext,nIter,
141 mDown,mUp,objPointer);
143 FILE_LOG(logDEBUG) <<
"Near trial " << nIter <<
"|" 144 << start <<
"-> " << Xnext <<
" f() ->" << fmin;
152 assert(mDown.size() == Xnext.size());
153 assert(mUp.size() == Xnext.size());
158 size_t n = Xnext.size();
160 int maxf1 = maxEvals*n;
162 const double coef_local = 0.1;
166 for (
size_t i = 0; i < n; ++i)
168 if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
170 Xnext(i)=(mDown[i]+mUp[i])/2.0;
175 nlopt::algorithm algo;
179 algo = nlopt::GN_DIRECT_L;
181 objPointer =
static_cast<void *
>(rbobj);
184 algo = nlopt::GN_DIRECT_L;
185 maxf2 =
static_cast<int>(
static_cast<double>(maxf1)*coef_local);
188 objPointer =
static_cast<void *
>(rbobj);
191 algo = nlopt::LN_BOBYQA;
193 objPointer =
static_cast<void *
>(rbobj);
196 algo = nlopt::LD_LBFGS;
198 objPointer =
static_cast<void *
>(rgbobj);
201 throw std::invalid_argument(
"Inner optimization algorithm" 205 if (objPointer == NULL)
207 throw std::invalid_argument(
"Wrong object model " 208 "(gradient/no gradient)");
211 fmin = run_nlopt(algo,fpointer,Xnext,maxf1,
212 mDown,mUp,objPointer);
214 FILE_LOG(logDEBUG) <<
"1st opt " << maxf1 <<
"-> " << Xnext
215 <<
" f() ->" << fmin;
219 for (
size_t i = 0; i < n; ++i)
221 if (Xnext(i)-mDown[i] < 0.0001)
224 FILE_LOG(logDEBUG) <<
"Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
226 if (mUp[i] - Xnext(i) < 0.0001)
229 FILE_LOG(logDEBUG) <<
"Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
236 fmin = run_nlopt(nlopt::LN_COBYLA,fpointer,Xnext,maxf2,
237 mDown,mUp,objPointer);
238 FILE_LOG(logDEBUG) <<
"2nd opt " << maxf2 <<
"-> " << Xnext
239 <<
" f() ->" << fmin;
247 double *grad,
void *my_func_data)
251 std::copy(x,x+n,vx.begin());
253 void *objPointer = my_func_data;
256 return OPTIMIZER->evaluate(vx);
260 double *grad,
void *my_func_data)
264 std::copy(x,x+n,vx.begin());
266 void *objPointer = my_func_data;
270 vectord vgrad = zvectord(n);
271 double f = OPTIMIZER->evaluate(vx,vgrad);
272 if (grad && n) std::copy(vgrad.begin(),vgrad.end(),grad);
const size_t MAX_INNER_EVALUATIONS
Used per dimension.
double localTrialAround(vectord &Xnext)
Try some local optimization around a point.
Namespace of the library interface.
Global exploration, local refinement (hand tuned)
C++ wrapper of the NLOPT library.
Modules and helper macros for logging.
double run(vectord &Xnext)
Launch the inner optimization algorithm.
static double evaluate_nlopt_grad(unsigned int n, const double *x, double *grad, void *my_func_data)
Wrapper of inner optimization to be evaluated by NLOPT.
static double evaluate_nlopt(unsigned int n, const double *x, double *grad, void *my_func_data)
Wrapper of inner optimization to be evaluated by NLOPT.