Actual source code: projectedarmijo.c

#include "projectedarmijo.h"

#define REPLACE_FIFO 1
#define REPLACE_MRU  2
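/* Memory replacement policies: REPLACE_FIFO overwrites the stored values
   cyclically; REPLACE_MRU overwrites the entry that defined the maximum
   when the reference value was computed. */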

#define REFERENCE_MAX 1
#define REFERENCE_AVE 2
#define REFERENCE_MEAN 3
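/* Reference value policies: REFERENCE_MAX uses the largest stored value;
   REFERENCE_AVE uses the average of the stored values, but no less than the
   entry at the current index; REFERENCE_MEAN uses the smaller of the maximum
   and the mean of the last reference and the entry at the current index. */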

static int TaoSetOptions_ProjectedArmijo(TAO_SOLVER,void*);
static int TaoView_ProjectedArmijo(TAO_SOLVER,void*);
static int TaoApply_ProjectedArmijo(TAO_SOLVER,TaoVec*,TaoVec*,TaoVec*,TaoVec*,
                           double*,double*,int*,void*);
static int TaoDestroy_ProjectedArmijo(TAO_SOLVER,void*);

/* ---------------------------------------------------------- */
static int TaoDestroy_ProjectedArmijo(TAO_SOLVER tao, void*ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  if (ls->work != TAO_NULL) {
    delete ls->work;
  }
  if (ls->GG != TAO_NULL) {
    delete ls->GG;
  }

  if (ls->memory != TAO_NULL) {
    info = TaoFree(ls->memory); CHKERRQ(info);
    ls->memory = TAO_NULL;
  }
  info = TaoFree(ls); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/*------------------------------------------------------------*/
static int TaoSetOptions_ProjectedArmijo(TAO_SOLVER tao, void*ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  info = TaoOptionsHead("Projected Armijo linesearch options");CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_alpha", "initial reference constant", "", ls->alpha, &ls->alpha, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_beta", "decrease constant", "", ls->beta, &ls->beta, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_sigma", "acceptance constant", "", ls->sigma, &ls->sigma, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_memory_size", "number of historical elements", "", ls->memorySize, &ls->memorySize, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_minimum_step", "minimum acceptable step", "", ls->minimumStep, &ls->minimumStep, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_reference_policy", "policy for updating reference value", "", ls->referencePolicy, &ls->referencePolicy, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_replacement_policy", "policy for updating memory", "", ls->replacementPolicy, &ls->replacementPolicy, 0); CHKERRQ(info);
  info = TaoOptionsTail();CHKERRQ(info);
  TaoFunctionReturn(0);
}
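
/* Example (illustrative values) of setting the options registered above from
   the command line:
     -tao_projected_armijo_sigma 1.0e-4 -tao_projected_armijo_beta 0.5
     -tao_projected_armijo_memory_size 10 -tao_projected_armijo_reference_policy 1 */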
/*------------------------------------------------------------*/

static int TaoView_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;

  info=TaoPrintDouble(tao,"  Projected Armijo linesearch: alpha=%g",ls->alpha);CHKERRQ(info);
  info=TaoPrintDouble(tao," beta=%g ",ls->beta);CHKERRQ(info);
  info=TaoPrintDouble(tao,"sigma=%g ",ls->sigma);CHKERRQ(info);
  info=TaoPrintDouble(tao,"minstep=%g,",ls->minimumStep);CHKERRQ(info);
  info=TaoPrintInt(tao,"memsize=%d\n",ls->memorySize);CHKERRQ(info);

  TaoFunctionReturn(0);
}
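
/* TaoApply_PreProjectedArmijo - Checks the linesearch parameters, allocates
   the nonmonotone memory if needed and seeds it with alpha*f on the first
   iteration, and computes the reference value (and the index of the maximum
   entry) according to the selected reference policy. */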

static int TaoApply_PreProjectedArmijo(TAO_SOLVER tao, TAO_PROJECTEDARMIJO *ls,
                                       double f, double step,
                                       double *ref, int *idx, int *info2)
{
  int info, i;

  TaoFunctionBegin;

  *info2 = 0;

  // Check linesearch parameters
  if (step < 0) {
    info = PetscLogInfo((tao, "TaoApply_ProjectedArmijo:Line search error: step (%g) < 0\n", step)); CHKERRQ(info);
    *info2 = -1;
    TaoFunctionReturn(0);
  } else if (ls->alpha < 1) {
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: alpha (%g) < 1\n", ls->alpha)); CHKERRQ(info);
    *info2 = -2;
    TaoFunctionReturn(0);
  } else if ((ls->beta <= 0) || (ls->beta >= 1)) {
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: beta (%g) invalid\n", ls->beta)); CHKERRQ(info);
    *info2 = -3;
    TaoFunctionReturn(0);
  } else if ((ls->sigma <= 0) || (ls->sigma >= 0.5)) {
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: sigma (%g) invalid\n", ls->sigma)); CHKERRQ(info);
    *info2 = -4;
    TaoFunctionReturn(0);
  } else if (ls->minimumStep <= 0) {
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: minimum_step (%g) <= 0\n", ls->minimumStep)); CHKERRQ(info);
    *info2 = -5;
    TaoFunctionReturn(0);
  } else if (ls->memorySize < 1) {
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: memory_size (%d) < 1\n", ls->memorySize)); CHKERRQ(info);
    *info2 = -6;
    TaoFunctionReturn(0);
  } else if ((ls->referencePolicy != REFERENCE_MAX) &&
             (ls->referencePolicy != REFERENCE_AVE) &&
             (ls->referencePolicy != REFERENCE_MEAN)){
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: reference_policy invalid\n")); CHKERRQ(info);
    *info2 = -7;
    TaoFunctionReturn(0);
  } else if ((ls->replacementPolicy != REPLACE_FIFO) &&
             (ls->replacementPolicy != REPLACE_MRU)) {
    info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:Line search error: replacement_policy invalid\n")); CHKERRQ(info);
    *info2 = -8;
    TaoFunctionReturn(0);
  }

  // Check to see if the memory has been allocated.  If not, allocate
  // the historical array and populate it with the initial function
  // values.

  if (ls->memory == TAO_NULL) {
    info = TaoMalloc(sizeof(double)*ls->memorySize, &ls->memory);CHKERRQ(info);
    info = PetscLogObjectMemory(tao, sizeof(double)*ls->memorySize); CHKERRQ(info);
  }

  if (tao->iter == 0) {
    for (i = 0; i < ls->memorySize; i++) {
      ls->memory[i] = ls->alpha*(f);
    }

    ls->current = 0;
    ls->lastReference = ls->memory[0];
  }

  // Calculate reference value (MAX)
  *ref = ls->memory[0];
  *idx = 0;

  for (i = 1; i < ls->memorySize; i++) {
    if (ls->memory[i] > *ref) {
      *ref = ls->memory[i];
      *idx = i;
    }
  }

  if (ls->referencePolicy == REFERENCE_AVE) {
    *ref = 0;
    for (i = 0; i < ls->memorySize; i++) {
      *ref += ls->memory[i];
    }
    *ref = *ref / ls->memorySize;
    *ref = TaoMax(*ref, ls->memory[ls->current]);
  } else if (ls->referencePolicy == REFERENCE_MEAN) {
    *ref = TaoMin(*ref, 0.5*(ls->lastReference + ls->memory[ls->current]));
  }

  TaoFunctionReturn(0);
}
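
/* TaoApply_PostProjectedArmijo - Flags termination when the step has fallen
   below the minimum allowed value; otherwise records the reference value and
   stores the accepted function value in the nonmonotone memory according to
   the selected replacement policy. */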

static int TaoApply_PostProjectedArmijo(TAO_SOLVER tao, TAO_PROJECTEDARMIJO *ls,
                                        double f, double step,
                                        double ref, int idx, int *info2)
{
  int info;
  TaoFunctionBegin;

  *info2 = 0;

  // Check termination
  if (step < ls->minimumStep) {
    info = PetscLogInfo((tao, "TaoApply_ProjectedArmijo:Step is at lower bound.\n")); CHKERRQ(info);
    *info2 = 1;
    TaoFunctionReturn(0);
  }

  // Successful termination, update memory
  ls->lastReference = ref;
  if (ls->replacementPolicy == REPLACE_FIFO) {
    ls->memory[ls->current++] = f;
    if (ls->current >= ls->memorySize) {
      ls->current = 0;
    }
  } else {
    ls->current = idx;
    ls->memory[idx] = f;
  }
  TaoFunctionReturn(0);
}

/* ---------------------------------------------------------- */
/* @ TaoApply_ProjectedArmijo - This routine performs a linesearch. It
   backtracks until the (nonmonotone) Projected Armijo conditions are satisfied.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
.  G - gradient of merit function evaluated at X
.  W - work vector
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  G - gradient of merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.   0 - the line search succeeds; the sufficient decrease
   condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-   -1 -  step < 0

   positive number if the line search otherwise terminates
+    1 -  Step is at the lower bound, stepmin.
@ */
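
/* The loop below implements the projected Armijo test: with
   W = min(max(X + t*S, L), U) the trial point projected onto the bounds and
   innerd = (X - W)'G, the step t is accepted when f(W) <= ref - sigma*innerd. */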

static int TaoApply_ProjectedArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G,
                                    TaoVec *S, TaoVec *W,
                                    double *f, double *step,
                                    int *info2, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  TaoVec *GG, *L, *U, *work;
  double ref, innerd, t;
  int idx, info;
  TaoTruth flag;

  TaoFunctionBegin;

  info = TaoApply_PreProjectedArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

  if (ls->work != TAO_NULL) {
    info = X->Compatible(ls->work, &flag); CHKERRQ(info);
    if (flag == TAO_FALSE) {
      info = TaoVecDestroy(ls->work); CHKERRQ(info);
      ls->work = TAO_NULL;
    }
  }

  if (ls->work == TAO_NULL) {
    info = G->Clone(&(ls->work)); CHKERRQ(info);
  }
  if (ls->GG == TAO_NULL) {
    info = G->Clone(&(ls->GG)); CHKERRQ(info);
  }
  GG = ls->GG;

  info = TaoGetVariableBounds(tao, &L, &U); CHKERRQ(info);
  work = ls->work;

  const double sigma = ls->sigma;
  const double beta = ls->beta;

  t = *step;
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);
    info = W->PointwiseMaximum(W, L); CHKERRQ(info);
    info = W->PointwiseMinimum(W, U); CHKERRQ(info);

    info = work->Waxpby(1.0, X, -1.0, W); CHKERRQ(info);
    info = work->Dot(G, &innerd); CHKERRQ(info);

    if (innerd > 0) {
      // Calculate function at new iterate
      //      info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
      info = TaoComputeMeritFunctionGradient(tao, W, f, GG); CHKERRQ(info);

      // Check descent condition
      if (*f <= ref - sigma*innerd) {
        break;
      }
    }

    t *= beta;
  }

  info = TaoApply_PostProjectedArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = G->CopyFrom(GG); CHKERRQ(info);
  info = X->CopyFrom(W); CHKERRQ(info);
  //info = TaoComputeMeritFunctionGradient(tao, X, f, G); CHKERRQ(info);

  // Finish computations
  info = PetscLogInfo((tao,"TaoApply_ProjectedArmijo:step = %10.4f\n",*step)); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/* ---------------------------------------------------------- */
/* @ TaoApply_NDProjectedArmijo - This routine performs a linesearch. It
   backtracks until the (nonmonotone) Projected Armijo conditions are
   satisfied.  This is a modified version for a nondifferentiable function.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.   0 - the line search succeeds; the sufficient decrease
   condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-   -1 -  step < 0

   positive number if the line search otherwise terminates
+    1 -  Step is at the lower bound, stepmin.
@ */
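
/* The loop below implements the nondifferentiable variant of the test: with
   W = min(max(X + t*S, L), U) the projected trial point, the step t is
   accepted when f(W) <= (1 - sigma*t)*ref. */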

static int TaoApply_NDProjectedArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G,
                                      TaoVec *S, TaoVec *W,
                                      double *f, double *step,
                                      int *info2, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  TaoVec *L, *U;
  double ref, t;
  int idx, info;

  TaoFunctionBegin;

  info = TaoApply_PreProjectedArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

  info = TaoGetVariableBounds(tao, &L, &U); CHKERRQ(info);

  const double sigma = ls->sigma;
  const double beta = ls->beta;

  t = *step;
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);
    info = W->PointwiseMaximum(W, L); CHKERRQ(info);
    info = W->PointwiseMinimum(W, U); CHKERRQ(info);

    // Calculate function at new iterate
    //    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    info = TaoComputeMeritFunctionGradient(tao, W, f, G); CHKERRQ(info);

    // Check descent condition
    if (*f <= (1 - sigma*t)*ref) {
      break;
    }

    t *= beta;
  }

  info = TaoApply_PostProjectedArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  //    info = TaoComputeMeritFunctionGradient(tao, X, f, G); CHKERRQ(info);

  // Finish computations
  info = PetscLogInfo((tao,"TaoApply_NDProjectedArmijo:step = %10.4f\n",*step)); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/* ---------------------------------------------------------- */
/*@
   TaoCreateProjectedArmijoLineSearch - Create a non-monotone projected linesearch

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
     First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
     pages 1-3, 1966.
   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
     Equations," Journal of Optimization Theory and Applications, volume 81,
     pages 53-71, 1994.
   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
     for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
     pages 707-716, 1986.
   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
     Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
     pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   bounds-constrained optimization.  This routine is used within the
   following TAO solvers: feasible semismooth with linesearch (tao_ssfls).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateProjectedArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_PROJECTEDARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_PROJECTEDARMIJO,&ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao,sizeof(TAO_PROJECTEDARMIJO)); CHKERRQ(info);

  ls->GG = TAO_NULL;
  ls->work = TAO_NULL;
  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao,0,
                          TaoSetOptions_ProjectedArmijo,
                          TaoApply_ProjectedArmijo,
                          TaoView_ProjectedArmijo,
                          TaoDestroy_ProjectedArmijo,
                          (void *) ls);CHKERRQ(info);

  TaoFunctionReturn(0);
}
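
/* Usage sketch (illustrative, not part of this file): a bound-constrained
   solver attaches the line search once during setup and then invokes it each
   outer iteration through the solver's generic line search hook.  The apply
   wrapper named TaoLineSearchApply below is an assumption used only for
   illustration; the argument layout mirrors TaoApply_ProjectedArmijo above.

     info = TaoCreateProjectedArmijoLineSearch(tao); CHKERRQ(info);
     ...
     step = 1.0;
     // X iterate, G gradient, S search direction, W work vector
     info = TaoLineSearchApply(tao, X, G, S, W, &f, &step, &lsflag);  // hypothetical wrapper
     CHKERRQ(info);
*/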

/* ---------------------------------------------------------- */
/*@
   TaoCreateNDProjectedArmijoLineSearch - Create a non-monotone projected linesearch
     for a nondifferentiable function

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
     First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
     pages 1-3, 1966.
   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
     Equations," Journal of Optimization Theory and Applications, volume 81,
     pages 53-71, 1994.
   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
     for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
     pages 707-716, 1986.
   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
     Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
     pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   bounds-constrained optimization.  This routine is used within the
   following TAO solvers: feasible semismooth with linesearch (tao_ssfls).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateNDProjectedArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_PROJECTEDARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_PROJECTEDARMIJO,&ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao,sizeof(TAO_PROJECTEDARMIJO));CHKERRQ(info);

  ls->GG = TAO_NULL;
  ls->work = TAO_NULL;
  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao,0,
                          TaoSetOptions_ProjectedArmijo,
                          TaoApply_NDProjectedArmijo,
                          TaoView_ProjectedArmijo,
                          TaoDestroy_ProjectedArmijo,
                          (void *) ls);CHKERRQ(info);

  TaoFunctionReturn(0);
}