Actual source code: cg_prp.c

/*$Id: cg_prp.c 1.21 05/05/10 15:21:38-05:00 sarich@zorak.(none) $*/

#include "cg.h"                           /*I "tao_solver.h" I*/

static int TaoSolve_CG_PRP(TAO_SOLVER tao, void *solver)
{
  TAO_CG *cg = (TAO_CG *) solver;
  TaoVec*    X,*G; /* solution vector, gradient vector */
  TaoVec*    Gprev=cg->Gprev;
  TaoVec*    DX=cg->DX, *Work=cg->Work;
  int    iter=0,lsflag=0,info;
  double gnorm2Prev,gdx,gDotgprev,f,gnorm,gnorm2,step=0;
  TaoTerminateReason reason;

  TaoFunctionBegin;
  info=TaoGetSolution(tao,&X);CHKERRQ(info);
  info=TaoGetGradient(tao,&G);CHKERRQ(info);

  info = TaoComputeMeritFunctionGradient(tao,X,&f,G);CHKERRQ(info);
  info = G->Norm2(&gnorm);  CHKERRQ(info);

  info = DX->SetToZero(); CHKERRQ(info);
  info = Gprev->CopyFrom(G); CHKERRQ(info);

  cg->beta=0; cg->restarts=0;
  gnorm2 = gnorm*gnorm; gnorm2Prev = gnorm2;

  /* Enter loop */
  while (1){

    /* Test for convergence */
    info = TaoMonitor(tao,iter++,f,gnorm,0.0,step,&reason);CHKERRQ(info);
    if (reason!=TAO_CONTINUE_ITERATING) break;

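    /* PRP formula: beta_k = (g_k' g_k - g_k' g_{k-1}) / (g_{k-1}' g_{k-1}) */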
    info = G->Dot(Gprev,&gDotgprev); CHKERRQ(info);
    cg->beta=( gnorm2-gDotgprev )/(gnorm2Prev);

    if (cg->beta<0){

      info = PetscLogInfo((tao,"TaoCG: Negative beta.  Restart CG at iterate %d with gradient direction.\n",iter)); CHKERRQ(info);
      cg->restarts++; cg->beta=0.0;
      info = DX->Axpby(-1.0,G,cg->beta); CHKERRQ(info);
      gdx=-gnorm2;

    } else {

      info = DX->Axpby(-1.0,G,cg->beta); CHKERRQ(info);

      info = DX->Dot(G,&gdx); CHKERRQ(info);
      if (gdx>=0){ /* If not a descent direction, use gradient */
        info = DX->ScaleCopyFrom(-1.0,G); CHKERRQ(info);
        gdx=-gnorm2;
        cg->restarts++; cg->beta=0.0;
        info = PetscLogInfo((tao,"TaoCG: Not Descent Direction: Restart CG at iterate %d with gradient direction.\n",iter)); CHKERRQ(info);
      }

      if ( fabs(gDotgprev)/(gnorm2) > cg->eta){
        /* Consecutive gradients are not sufficiently orthogonal; restart with the gradient direction */
        cg->beta=0;
        gdx=-gnorm2;
        info = DX->Axpby(-1.0,G,0.0); CHKERRQ(info);
        info = PetscLogInfo((tao,"TaoCG: Restart CG at iterate %d with gradient direction.\n",iter)); CHKERRQ(info);
      }
    }

    /* Line Search */
    gnorm2Prev = gnorm2;  step=1.0;
    info = Gprev->CopyFrom(G); CHKERRQ(info);
    info = TaoLineSearchApply(tao,X,G,DX,Work,&f,&step,&lsflag);CHKERRQ(info);
    info = G->Norm2(&gnorm);CHKERRQ(info);
    gnorm2 = gnorm*gnorm;
  }

  TaoFunctionReturn(0);
}

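For reference, the direction update in the loop above amounts to the PRP+ scheme: beta_k = (g_k' g_k - g_k' g_{k-1}) / ||g_{k-1}||^2, with a restart in the steepest-descent direction whenever beta_k is negative, the resulting direction fails the descent test, or consecutive gradients lose orthogonality (|g_k' g_{k-1}| / ||g_k||^2 > eta). Below is a minimal standalone sketch of that update on plain C arrays; the helper names (cg_dot, cg_prp_update) and the flat-array interface are illustrative only and are not part of the TAO API.

#include <math.h>

/* Illustrative sketch only (not part of cg_prp.c): one PRP+ direction
   update for dense vectors of length n. */
static double cg_dot(const double *a, const double *b, int n)
{
  double s = 0.0;
  for (int i = 0; i < n; i++) s += a[i]*b[i];
  return s;
}

/* Overwrite d with the new search direction, given the current gradient g
   and the previous gradient gprev; eta plays the role of cg->eta above. */
static void cg_prp_update(double *d, const double *g, const double *gprev,
                          int n, double eta)
{
  double gnorm2     = cg_dot(g,g,n);
  double gnorm2Prev = cg_dot(gprev,gprev,n);
  double gDotgprev  = cg_dot(g,gprev,n);
  double beta       = (gnorm2 - gDotgprev)/gnorm2Prev;   /* PRP formula */

  /* PRP+ safeguard and loss-of-orthogonality restart: fall back to -g */
  if (beta < 0.0 || fabs(gDotgprev)/gnorm2 > eta) beta = 0.0;

  for (int i = 0; i < n; i++) d[i] = -g[i] + beta*d[i];   /* d = -g + beta*d */

  /* If d is still not a descent direction, restart with -g */
  if (cg_dot(d,g,n) >= 0.0) {
    for (int i = 0; i < n; i++) d[i] = -g[i];
  }
}

In the solver above, each such update is followed by a More-Thuente line search along DX, which evaluates the function and gradient at the accepted point and returns the step length.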
/* ---------------------------------------------------------- */
/*
    TaoCreate_CG_PRP - Creates the data structure for the nonlinear CG method
    and sets the function pointers for all the routines it needs to call
    (TaoSolve_CG_PRP() etc.)

*/
int TaoCreate_CG_PRP(TAO_SOLVER tao)
{
  TAO_CG *cg;
  int    info;

  TaoFunctionBegin;

  info = TaoNew(TAO_CG,&cg); CHKERRQ(info);
  info = PetscLogObjectMemory(tao,sizeof(TAO_CG)); CHKERRQ(info);

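  /* Register the solve, setup/destroy, options, and view routines for this method */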
  info=TaoSetTaoSolveRoutine(tao,TaoSolve_CG_PRP,(void*)cg); CHKERRQ(info);
  info=TaoSetTaoSetUpDownRoutines(tao,TaoSetUp_CG,TaoDestroy_CG); CHKERRQ(info);
  info=TaoSetTaoOptionsRoutine(tao,TaoSetOptions_CG); CHKERRQ(info);
  info=TaoSetTaoViewRoutine(tao,TaoView_CG); CHKERRQ(info);

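  /* Default stopping criteria: iteration, tolerance, and function-evaluation limits */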
  info = TaoSetMaximumIterates(tao,2000); CHKERRQ(info);
  info = TaoSetTolerances(tao,1e-4,1e-4,0,0); CHKERRQ(info);
  info = TaoSetMaximumFunctionEvaluations(tao,4000); CHKERRQ(info);

  info = TaoCreateMoreThuenteLineSearch(tao,0,0.4); CHKERRQ(info);

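  /* Restart threshold: TaoSolve_CG_PRP restarts when |g'gprev|/||g||^2 > eta */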
  cg->eta = 0.5;

  TaoFunctionReturn(0);
}