inneroptimization.cpp
/*
-------------------------------------------------------------------------
   This file is part of BayesOpt, an efficient C++ library for
   Bayesian optimization.

   Copyright (C) 2011-2015 Ruben Martinez-Cantin <rmcantin@unizar.es>

   BayesOpt is free software: you can redistribute it and/or modify it
   under the terms of the GNU Affero General Public License as published by
   the Free Software Foundation, either version 3 of the License, or
   (at your option) any later version.

   BayesOpt is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU Affero General Public License for more details.

   You should have received a copy of the GNU Affero General Public License
   along with BayesOpt. If not, see <http://www.gnu.org/licenses/>.
------------------------------------------------------------------------
*/
#include <cmath>
#include <nlopt.hpp>
#include "bayesopt/parameters.h"
#include "log.hpp"
#include "inneroptimization.hpp"

namespace bayesopt
{
  void checkNLOPTerror(nlopt_result errortype)
  {
    switch(errortype)
      {
      case -1: FILE_LOG(logERROR) << "NLOPT: General failure"; break;
      case -2: FILE_LOG(logERROR) << "NLOPT: Invalid arguments. Check bounds."; break;
      case -3: FILE_LOG(logERROR) << "NLOPT: Out of memory"; break;
      case -4: FILE_LOG(logERROR) << "NLOPT Warning: Potential roundoff error. "
                                  << "In general, this can be ignored."; break;
      case -5: FILE_LOG(logERROR) << "NLOPT: Force stop."; break;
      default: ;
      }
  }

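  // Inner evaluation budget per dimension; run() scales it by the problem size.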
  const size_t MAX_INNER_EVALUATIONS = 500;

  typedef double (*eval_func)(unsigned int n, const double *x,
                              double *gradient, /* NULL if not needed */
                              void *func_data);

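  // Thin wrapper over the nlopt::opt C++ API: copy the uBlas vector into a
  // std::vector, optimize inside the box [vd,vu], and copy the optimum back.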
  double run_nlopt(nlopt::algorithm algo, eval_func fpointer,
                   vectord& Xnext, int maxf, const std::vector<double>& vd,
                   const std::vector<double>& vu, void* objPointer)
  {
    double fmin = 0.0;
    size_t n = Xnext.size();
    nlopt::opt opt(algo, n);

    std::vector<double> xstd(n);
    opt.set_lower_bounds(vd);
    opt.set_upper_bounds(vu);
    opt.set_min_objective(fpointer, objPointer);
    opt.set_maxeval(maxf);

    // It seems BOBYQA can be unstable if the same point is tested
    // over and over. NLOPT bug?
    opt.set_ftol_rel(1e-12);
    opt.set_ftol_abs(1e-12);

    std::copy(Xnext.begin(), Xnext.end(), xstd.begin());

    try
      {
        opt.optimize(xstd, fmin);
      }
    catch (nlopt::roundoff_limited& e)
      {
        FILE_LOG(logDEBUG) << "NLOPT Warning: Potential roundoff error. "
                           << "In general, this can be ignored.";
      }

    std::copy(xstd.begin(), xstd.end(), Xnext.begin());
    return fmin;
  }

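  // Both constructors default the search box to the unit hypercube [0,1]^dim;
  // callers can override it later through setLimits().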
  NLOPT_Optimization::NLOPT_Optimization(RBOptimizable* rbo, size_t dim):
    mDown(dim), mUp(dim)
  {
    rbobj = new RBOptimizableWrapper(rbo);  rgbobj = NULL;
    alg = DIRECT;  maxEvals = MAX_INNER_EVALUATIONS;
    setLimits(zvectord(dim), svectord(dim, 1.0));
  }

  NLOPT_Optimization::NLOPT_Optimization(RGBOptimizable* rgbo, size_t dim):
    mDown(dim), mUp(dim)
  {
    rbobj = NULL;  rgbobj = new RGBOptimizableWrapper(rgbo);
    alg = DIRECT;  maxEvals = MAX_INNER_EVALUATIONS;
    setLimits(zvectord(dim), svectord(dim, 1.0));
  }

  NLOPT_Optimization::~NLOPT_Optimization()
  {
    if (rbobj != NULL) delete rbobj;
    if (rgbobj != NULL) delete rgbobj;
  }

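  // Run a short local search (BOBYQA) around an already evaluated point;
  // throws if the starting point falls outside the bounding box.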
  double NLOPT_Optimization::localTrialAround(vectord& Xnext)
  {
    assert(mDown.size() == Xnext.size());
    assert(mUp.size() == Xnext.size());
    const size_t n = Xnext.size();

    for (size_t i = 0; i < n; ++i)
      {
        if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
          {
            FILE_LOG(logDEBUG) << Xnext;
            throw std::invalid_argument("Local trial without proper"
                                        " initial point.");
          }
      }

    nlopt::algorithm algo = nlopt::LN_BOBYQA;
    eval_func fpointer = &(NLOPT_Optimization::evaluate_nlopt);
    void* objPointer = static_cast<void *>(rbobj);
    const size_t nIter = 20;
    // std::vector<double> vd(n);
    // std::vector<double> vu(n);

    // for (size_t i = 0; i < n; ++i)
    //   {
    //     vd[i] = Xnext(i) - 0.01;
    //     vu[i] = Xnext(i) + 0.01;
    //   }

    vectord start = Xnext;

    double fmin = run_nlopt(algo, fpointer, Xnext, nIter,
                            mDown, mUp, objPointer);

    FILE_LOG(logDEBUG) << "Near trial " << nIter << "|"
                       << start << "-> " << Xnext << " f() ->" << fmin;

    return fmin;
  }

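  // Main inner optimization: one global or local pass with the selected
  // algorithm; with COMBINED, a fraction of the budget is reserved for a
  // second, purely local refinement pass.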
  double NLOPT_Optimization::run(vectord &Xnext)
  {
    assert(mDown.size() == Xnext.size());
    assert(mUp.size() == Xnext.size());

    eval_func fpointer;
    void *objPointer;

    size_t n = Xnext.size();
    double fmin = 1;
    int maxf1 = maxEvals*n;
    int maxf2 = 0;    // For a second pass
    const double coef_local = 0.1;
    //int ierror;

    // If Xnext is outside the bounding box, it may be uninitialized:
    // fall back to the center of the box.
    for (size_t i = 0; i < n; ++i)
      {
        if (Xnext(i) < mDown[i] || Xnext(i) > mUp[i])
          {
            Xnext(i) = (mDown[i]+mUp[i])/2.0;
          }
      }

    // nlopt_opt opt;
    nlopt::algorithm algo;
    switch(alg)
      {
      case DIRECT:      // Pure global. No gradient
        algo = nlopt::GN_DIRECT_L;
        fpointer = &(NLOPT_Optimization::evaluate_nlopt);
        objPointer = static_cast<void *>(rbobj);
        break;
      case COMBINED:    // Combined local-global (90% DIRECT -> 10% local). No gradient
        algo = nlopt::GN_DIRECT_L;
        maxf2 = static_cast<int>(static_cast<double>(maxf1)*coef_local);
        maxf1 -= maxf2;   // That way, the number of evaluations is the same in all methods.
        fpointer = &(NLOPT_Optimization::evaluate_nlopt);
        objPointer = static_cast<void *>(rbobj);
        break;
      case BOBYQA:      // Pure local. No gradient
        algo = nlopt::LN_BOBYQA;
        fpointer = &(NLOPT_Optimization::evaluate_nlopt);
        objPointer = static_cast<void *>(rbobj);
        break;
      case LBFGS:       // Pure local. Gradient based
        algo = nlopt::LD_LBFGS;
        fpointer = &(NLOPT_Optimization::evaluate_nlopt_grad);
        objPointer = static_cast<void *>(rgbobj);
        break;
      default:
        throw std::invalid_argument("Inner optimization algorithm"
                                    " not supported");
      }

    if (objPointer == NULL)
      {
        throw std::invalid_argument("Wrong object model "
                                    "(gradient/no gradient)");
      }

    fmin = run_nlopt(algo, fpointer, Xnext, maxf1,
                     mDown, mUp, objPointer);

    FILE_LOG(logDEBUG) << "1st opt " << maxf1 << "-> " << Xnext
                       << " f() ->" << fmin;
    if (maxf2)
      {
        // If the point is exactly at the limit, we may have trouble.
        for (size_t i = 0; i < n; ++i)
          {
            if (Xnext(i) - mDown[i] < 0.0001)
              {
                Xnext(i) += 0.0001;
                FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
              }
            if (mUp[i] - Xnext(i) < 0.0001)
              {
                Xnext(i) -= 0.0001;
                FILE_LOG(logDEBUG) << "Hacking point for BOBYQA. THIS SHOULD NOT HAPPEN";
              }
          }

        // BOBYQA may fail at this point. Could it be that EI is not twice differentiable?
        // fmin = run_nlopt(nlopt::LN_BOBYQA,fpointer,Xnext,maxf2,
        //                  mDown,mUp,objPointer);
        fmin = run_nlopt(nlopt::LN_COBYLA, fpointer, Xnext, maxf2,
                         mDown, mUp, objPointer);
        FILE_LOG(logDEBUG) << "2nd opt " << maxf2 << "-> " << Xnext
                           << " f() ->" << fmin;
      }

    return fmin;
  } // run

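  // NLOPT callback for the derivative-free case: unpack the raw array into a
  // vectord and forward it to the wrapped RBOptimizable criterion.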
  double NLOPT_Optimization::evaluate_nlopt(unsigned int n, const double *x,
                                            double *grad, void *my_func_data)
  {
    vectord vx(n);
    std::copy(x, x+n, vx.begin());

    void *objPointer = my_func_data;
    RBOptimizableWrapper* OPTIMIZER = static_cast<RBOptimizableWrapper*>(objPointer);

    return OPTIMIZER->evaluate(vx);
  } /* evaluate_nlopt */

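  // Gradient-based variant: also copies the analytical gradient into NLOPT's
  // buffer when one is requested (grad != NULL).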
  double NLOPT_Optimization::evaluate_nlopt_grad(unsigned int n, const double *x,
                                                 double *grad, void *my_func_data)
  {
    vectord vx(n);
    std::copy(x, x+n, vx.begin());

    void *objPointer = my_func_data;
    RGBOptimizableWrapper* OPTIMIZER = static_cast<RGBOptimizableWrapper*>(objPointer);

    vectord vgrad = zvectord(n);
    double f = OPTIMIZER->evaluate(vx, vgrad);
    if (grad && n) std::copy(vgrad.begin(), vgrad.end(), grad);

    return f;
  } /* evaluate_nlopt_grad */

} // namespace bayesopt
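The helper run_nlopt above is a thin layer over NLOPT's C++ interface. The following standalone sketch (my own illustration, not part of the BayesOpt sources; file name and build line are hypothetical) shows the same pattern in isolation: minimizing a quadratic with the derivative-free LN_BOBYQA algorithm, with the box bounds, evaluation cap, and tolerance settings that run_nlopt configures.

// sphere_nlopt.cpp -- illustrative sketch only; assumes NLOPT is installed.
// Typical build: g++ sphere_nlopt.cpp -lnlopt -lm
#include <nlopt.hpp>
#include <iostream>
#include <vector>

// Objective in the signature NLOPT expects (same shape as eval_func above).
// For derivative-free algorithms such as BOBYQA, grad is NULL.
double sphere(unsigned n, const double* x, double* grad, void* /*func_data*/)
{
  double f = 0.0;
  for (unsigned i = 0; i < n; ++i)
    {
      f += x[i] * x[i];
      if (grad) grad[i] = 2.0 * x[i];  // only used by gradient-based methods
    }
  return f;
}

int main()
{
  const unsigned n = 2;
  nlopt::opt opt(nlopt::LN_BOBYQA, n);

  // Box bounds, playing the role of mDown/mUp in NLOPT_Optimization.
  opt.set_lower_bounds(std::vector<double>(n, -1.0));
  opt.set_upper_bounds(std::vector<double>(n,  1.0));

  opt.set_min_objective(sphere, NULL);
  opt.set_maxeval(100);       // evaluation budget, like maxf in run_nlopt
  opt.set_ftol_rel(1e-12);    // same stopping tolerances as run_nlopt
  opt.set_ftol_abs(1e-12);

  std::vector<double> x(n, 0.5);  // starting point inside the box
  double fmin = 0.0;
  try
    {
      opt.optimize(x, fmin);
    }
  catch (nlopt::roundoff_limited&)
    {
      // Usually benign; see checkNLOPTerror above.
    }

  std::cout << "fmin = " << fmin << " at (" << x[0] << ", " << x[1] << ")\n";
  return 0;
}

Note that the same C callback signature serves both the gradient-free and gradient-based paths; NLOPT signals which one it needs by passing a NULL or non-NULL grad pointer, which is exactly how evaluate_nlopt and evaluate_nlopt_grad divide the work above.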