#include <cmath>
#include <cstring>
#include <algorithm>
#include <boost/shared_array.hpp>
#include <RDGeneral/Invariant.h>
// RDKit::Snapshot and RDKit::SnapshotVect come from the RDKit trajectory code

namespace BFGSOpt {
const double FUNCTOL = 1e-4;   //!< Default tolerance for function convergence in the minimizer
const double MOVETOL = 1e-7;   //!< Default tolerance for x changes in the minimizer
const int MAXITS = 200;        //!< Default maximum number of iterations
const double EPS = 3e-8;       //!< Default gradient tolerance in the minimizer
const double TOLX = 4. * EPS;  //!< Default direction vector tolerance in the minimizer
const double MAXSTEP = 100.0;  //!< Default maximum step size in the minimizer
//! Do a Quasi-Newton minimization along a line.
template <typename EnergyFunctor>
void linearSearch(unsigned int dim, double *oldPt, double oldVal, double *grad,
                  double *dir, double *newPt, double &newVal,
                  EnergyFunctor func, double maxStep, int &resCode) {
  const unsigned int MAX_ITER_LINEAR_SEARCH = 1000;
  double sum = 0.0, slope = 0.0, test = 0.0, lambda = 0.0;
  double lambda2 = 0.0, lambdaMin = 0.0, tmpLambda = 0.0, val2 = 0.0;
  resCode = -1;  // pessimistic default; cleared on success
  // get the length of the direction vector:
  for (unsigned int i = 0; i < dim; i++) sum += dir[i] * dir[i];
  sum = sqrt(sum);

  // rescale the direction if we're trying to move too far:
  if (sum > maxStep) {
    for (unsigned int i = 0; i < dim; i++) dir[i] *= maxStep / sum;
  }
  // make sure the direction points downhill (has some component along -grad):
  for (unsigned int i = 0; i < dim; i++) {
    slope += dir[i] * grad[i];
  }
  if (slope >= 0.0) {
    return;
  }
  // find the smallest step size worth taking:
  for (unsigned int i = 0; i < dim; i++) {
    double temp = fabs(dir[i]) / std::max(fabs(oldPt[i]), 1.0);
    if (temp > test) test = temp;
  }
  lambdaMin = MOVETOL / test;

  // start with the full Newton step:
  lambda = 1.0;
  unsigned int it = 0;
  while (it < MAX_ITER_LINEAR_SEARCH) {
    // get the new position and its function value:
    for (unsigned int i = 0; i < dim; i++) {
      newPt[i] = oldPt[i] + lambda * dir[i];
    }
    newVal = func(newPt);

    if (newVal - oldVal <= FUNCTOL * lambda * slope) {
      // we've converged on the function:
      resCode = 0;
      return;
    }
    // if we made it this far, we need to backtrack:
    if (it == 0) {
      // it's the first step; use the quadratic approximation:
      tmpLambda = -slope / (2.0 * (newVal - oldVal - slope));
    } else {
      // fit a cubic through the two most recent (lambda, value) pairs:
      double rhs1 = newVal - oldVal - lambda * slope;
      double rhs2 = val2 - oldVal - lambda2 * slope;
      double a = (rhs1 / (lambda * lambda) - rhs2 / (lambda2 * lambda2)) /
                 (lambda - lambda2);
      double b = (-lambda2 * rhs1 / (lambda * lambda) +
                  lambda * rhs2 / (lambda2 * lambda2)) /
                 (lambda - lambda2);
      if (a == 0.0) {
        // the cubic degenerates to a quadratic:
        tmpLambda = -slope / (2.0 * b);
      } else {
        double disc = b * b - 3 * a * slope;
        if (disc < 0.0) {
          tmpLambda = 0.5 * lambda;
        } else if (b <= 0.0) {
          tmpLambda = (-b + sqrt(disc)) / (3.0 * a);
        } else {
          tmpLambda = -slope / (b + sqrt(disc));
        }
      }
      if (tmpLambda > 0.5 * lambda) {
        tmpLambda = 0.5 * lambda;  // never grow the step by more than half
      }
    }
    lambda2 = lambda;
    val2 = newVal;
    lambda = std::max(tmpLambda, 0.1 * lambda);
    if (lambda < lambdaMin) {
      resCode = 1;  // step underflow; the position is restored below
      break;
    }
    ++it;
  }
  // nothing was accomplished; restore the old position:
  for (unsigned int i = 0; i < dim; i++) {
    newPt[i] = oldPt[i];
  }
}
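// A note on the backtracking in linearSearch above: writing
// g(lambda) = func(oldPt + lambda * dir), we know g(0) = oldVal and
// g'(0) = slope. The first backtrack uses the quadratic model through
// those two facts plus (1, newVal). Later backtracks fit the cubic
//   g(lambda) ~= a*lambda^3 + b*lambda^2 + slope*lambda + oldVal
// through the two most recent trials (lambda, newVal) and (lambda2, val2);
// rhs1 and rhs2 are the corresponding residuals, and solving the 2x2
// system gives a and b. Setting g'(lambda) = 0 yields
//   tmpLambda = (-b + sqrt(b*b - 3*a*slope)) / (3*a);
// the b > 0 branch uses the algebraically equivalent form
// -slope / (b + sqrt(disc)) to avoid cancellation. The step is then
// clamped into [0.1 * lambda, 0.5 * lambda].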
#define CLEANUP()        \
  {                      \
    delete[] grad;       \
    delete[] dGrad;      \
    delete[] hessDGrad;  \
    delete[] newPos;     \
    delete[] xi;         \
    delete[] invHessian; \
  }

//! Do a BFGS minimization of a function.
/*!
  \param dim          dimensionality of the space
  \param pos          the starting position; overwritten with the result
  \param gradTol      tolerance for gradient convergence
  \param numIters     used to return the number of iterations required
  \param funcVal      used to return the value at the minimum
  \param func         the function to minimize
  \param gradFunc     calculates the gradient of func
  \param snapshotFreq a snapshot of the trajectory is stored every
                      snapshotFreq iterations (0 = no snapshots)
  \param snapshotVect destination for the snapshots; may be NULL
  \param funcTol      tolerance for function convergence (currently unused)
  \param maxIts       maximum number of iterations allowed

  \return 0 on convergence, 1 if maxIts was exceeded
*/
template <typename EnergyFunctor, typename GradientFunctor>
int minimize(unsigned int dim, double *pos, double gradTol,
             unsigned int &numIters, double &funcVal, EnergyFunctor func,
             GradientFunctor gradFunc, unsigned int snapshotFreq,
             RDKit::SnapshotVect *snapshotVect, double funcTol = TOLX,
             unsigned int maxIts = MAXITS) {
  RDUNUSED_PARAM(funcTol);
  PRECONDITION(pos, "bad input array");
  PRECONDITION(gradTol > 0, "bad tolerance");

  double sum, maxStep, fp;
  double *grad, *dGrad, *hessDGrad;
  double *newPos, *xi;
  double *invHessian;

  grad = new double[dim];
  dGrad = new double[dim];
  hessDGrad = new double[dim];
  newPos = new double[dim];
  xi = new double[dim];
  invHessian = new double[dim * dim];
  snapshotFreq = std::min(snapshotFreq, maxIts);
  // evaluate the function and gradient at the starting position:
  fp = func(pos);
  gradFunc(pos, grad);

  sum = 0.0;
  memset(invHessian, 0, dim * dim * sizeof(double));
  for (unsigned int i = 0; i < dim; i++) {
    unsigned int itab = i * dim;
    // initialize the inverse Hessian to the identity:
    invHessian[itab + i] = 1.0;
    // the initial line direction is -grad:
    xi[i] = -grad[i];
    sum += pos[i] * pos[i];
  }
  // pick an initial maximum step size:
  maxStep = MAXSTEP * std::max(sqrt(sum), static_cast<double>(dim));
  for (unsigned int iter = 1; iter <= maxIts; iter++) {
    numIters = iter;
    int status;

    // do the line search:
    linearSearch(dim, pos, fp, grad, xi, newPos, funcVal, func, maxStep,
                 status);
    CHECK_INVARIANT(status >= 0, "bad direction in linearSearch");

    // save the function value for the next pass:
    fp = funcVal;

    // set the direction of this line and save the gradient:
    double test = 0.0;
    for (unsigned int i = 0; i < dim; i++) {
      xi[i] = newPos[i] - pos[i];
      pos[i] = newPos[i];
      double temp = fabs(xi[i]) / std::max(fabs(pos[i]), 1.0);
      if (temp > test) test = temp;
      dGrad[i] = grad[i];
    }
    // converged on the position change?
    if (test < TOLX) {
      if (snapshotVect && snapshotFreq) {
        RDKit::Snapshot s(boost::shared_array<double>(newPos), fp);
        snapshotVect->push_back(s);
        newPos = NULL;
      }
      CLEANUP();
      return 0;
    }
    // update the gradient:
    double gradScale = gradFunc(pos, grad);

    // scale-invariant gradient convergence test:
    //   max_i |grad_i| * max(|pos_i|, 1) / max(funcVal * gradScale, 1) < gradTol
    test = 0.0;
    double term = std::max(funcVal * gradScale, 1.0);
    for (unsigned int i = 0; i < dim; i++) {
      double temp = fabs(grad[i]) * std::max(fabs(pos[i]), 1.0);
      test = std::max(test, temp);
      dGrad[i] = grad[i] - dGrad[i];
    }
    test /= term;
    if (test < gradTol) {
      if (snapshotVect && snapshotFreq) {
        RDKit::Snapshot s(boost::shared_array<double>(newPos), fp);
        snapshotVect->push_back(s);
        newPos = NULL;
      }
      CLEANUP();
      return 0;
    }
    // compute hessian * dGrad:
    double fac = 0, fae = 0, sumDGrad = 0, sumXi = 0;
    for (unsigned int i = 0; i < dim; i++) {
#if 0
      // straightforward indexed version:
      unsigned int itab = i * dim;
      hessDGrad[i] = 0.0;
      for (unsigned int j = 0; j < dim; j++) {
        hessDGrad[i] += invHessian[itab + j] * dGrad[j];
      }
#else
      // faster pointer-walking version of the same loop:
      double *ivh = &(invHessian[i * dim]);
      double &hdgradi = hessDGrad[i];
      double *dgj = dGrad;
      hdgradi = 0.0;
      for (unsigned int j = 0; j < dim; ++j, ++ivh, ++dgj) {
        hdgradi += *ivh * *dgj;
      }
#endif
      fac += dGrad[i] * xi[i];
      fae += dGrad[i] * hessDGrad[i];
      sumDGrad += dGrad[i] * dGrad[i];
      sumXi += xi[i] * xi[i];
    }
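    // In BFGS terms, with step s = xi and gradient change y = dGrad:
    //   fac = s.y, fae = y.(H y), hessDGrad = H y,
    // and the update is skipped unless s.y > sqrt(EPS * |y|^2 * |s|^2),
    // i.e. unless the curvature condition holds well enough. The update
    // applied below is the standard BFGS update of the inverse Hessian,
    //   H <- H + (s s^T)/(s.y) - (H y)(H y)^T/(y.(H y)) + (y.(H y)) u u^T,
    // where u = s/(s.y) - (H y)/(y.(H y)) is built in place in dGrad.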
    if (fac > sqrt(EPS * sumDGrad * sumXi)) {
      fac = 1.0 / fac;
      double fad = 1.0 / fae;
      for (unsigned int i = 0; i < dim; i++) {
        dGrad[i] = fac * xi[i] - fad * hessDGrad[i];
      }
      for (unsigned int i = 0; i < dim; i++) {
        unsigned int itab = i * dim;
#if 0
        // straightforward indexed version:
        for (unsigned int j = i; j < dim; j++) {
          invHessian[itab + j] += fac * xi[i] * xi[j] -
                                  fad * hessDGrad[i] * hessDGrad[j] +
                                  fae * dGrad[i] * dGrad[j];
          invHessian[j * dim + i] = invHessian[itab + j];
        }
#else
        // faster pointer-walking version of the same loop:
        double pxi = fac * xi[i], hdgi = fad * hessDGrad[i],
               dgi = fae * dGrad[i];
        double *pxj = &(xi[i]), *hdgj = &(hessDGrad[i]), *dgj = &(dGrad[i]);
        for (unsigned int j = i; j < dim; ++j, ++pxj, ++hdgj, ++dgj) {
          invHessian[itab + j] += pxi * *pxj - hdgi * *hdgj + dgi * *dgj;
          invHessian[j * dim + i] = invHessian[itab + j];
        }
#endif
      }
    }
    // generate the next direction to move: xi = -invHessian * grad
    for (unsigned int i = 0; i < dim; i++) {
      unsigned int itab = i * dim;
      xi[i] = 0.0;
#if 0
      // straightforward indexed version:
      for (unsigned int j = 0; j < dim; j++) {
        xi[i] -= invHessian[itab + j] * grad[j];
      }
#else
      // faster pointer-walking version of the same loop:
      double &pxi = xi[i], *ivh = &(invHessian[itab]), *gj = grad;
      for (unsigned int j = 0; j < dim; ++j, ++ivh, ++gj) {
        pxi -= *ivh * *gj;
      }
#endif
    }
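    // xi now holds the quasi-Newton direction -H * grad; when H is a good
    // approximation of the inverse Hessian this is (close to) the Newton
    // step, and linearSearch guards against steps that are too long or
    // do not decrease the function enough.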
    if (snapshotVect && snapshotFreq && !(iter % snapshotFreq)) {
      RDKit::Snapshot s(boost::shared_array<double>(newPos), fp);
      snapshotVect->push_back(s);
      // the shared_array now owns newPos; allocate a fresh buffer:
      newPos = new double[dim];
    }
  }
  // ran out of iterations:
  CLEANUP();
  return 1;
}
//! Do a BFGS minimization of a function (overload without snapshot support).
template <typename EnergyFunctor, typename GradientFunctor>
int minimize(unsigned int dim, double *pos, double gradTol,
             unsigned int &numIters, double &funcVal, EnergyFunctor func,
             GradientFunctor gradFunc, double funcTol = TOLX,
             unsigned int maxIts = MAXITS) {
  return minimize(dim, pos, gradTol, numIters, funcVal, func, gradFunc, 0,
                  NULL, funcTol, maxIts);
}
}  // namespace BFGSOpt
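// ------------------------------------------------------------------
// Illustrative usage sketch (not part of the header): minimize the
// quadratic f(x) = (x0 - 1)^2 + (x1 + 2)^2 with its analytic gradient.
// The functor names (QuadEnergy, QuadGradient) are hypothetical; any
// callables with these signatures work. The value returned by the
// gradient functor is used to scale the gradient-convergence test.
#include <iostream>

struct QuadEnergy {
  double operator()(double *p) const {
    return (p[0] - 1.0) * (p[0] - 1.0) + (p[1] + 2.0) * (p[1] + 2.0);
  }
};

struct QuadGradient {
  double operator()(double *p, double *grad) const {
    grad[0] = 2.0 * (p[0] - 1.0);
    grad[1] = 2.0 * (p[1] + 2.0);
    return 1.0;  // gradient scale factor
  }
};

int main() {
  double pos[2] = {10.0, -7.0};
  unsigned int numIters = 0;
  double funcVal = 0.0;
  int res = BFGSOpt::minimize(2, pos, 1e-6, numIters, funcVal, QuadEnergy(),
                              QuadGradient());
  std::cout << "status " << res << ", " << numIters << " iterations, f = "
            << funcVal << " at (" << pos[0] << ", " << pos[1] << ")\n";
  return 0;
}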