ROL
ROL_TypeG_AugmentedLagrangianAlgorithm_Def.hpp
Go to the documentation of this file.
1// @HEADER
2// *****************************************************************************
3// Rapid Optimization Library (ROL) Package
4//
5// Copyright 2014 NTESS and the ROL contributors.
6// SPDX-License-Identifier: BSD-3-Clause
7// *****************************************************************************
8// @HEADER
9
10#ifndef ROL_TYPEG_AUGMENTEDLAGRANGIANALGORITHM_DEF_H
11#define ROL_TYPEG_AUGMENTEDLAGRANGIANALGORITHM_DEF_H
12
14
15namespace ROL {
16namespace TypeG {
17
// Constructor: configures the composite status test and reads every augmented
// Lagrangian option (penalty, tolerance-update, subproblem, scaling and
// verbosity parameters) from the incoming ParameterList.
// NOTE(review): this listing is a Doxygen text scrape — the leading number on
// each line is an extraction artifact, and original lines 19, 34 and 53 were
// dropped. Per the member index, the missing signature (orig. line 19) is
//   AugmentedLagrangianAlgorithm(ParameterList &list,
//                                const Ptr<Secant<Real>> &secant = nullPtr)
// Verify the other dropped lines against the repository copy of this file.
18template<typename Real>
20 : TypeG::Algorithm<Real>::Algorithm(), secant_(secant), list_(list), subproblemIter_(0) {
21 // Set status test
22 status_->reset();
23 status_->add(makePtr<ConstraintStatusTest<Real>>(list));
24
// Named default values used below (one = 1, p1 = 0.1, p9 = 0.9, ten = 10,
// oe8 = 1e8, oem8 = 1e-8).
25 Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
26 ParameterList& sublist = list.sublist("Step").sublist("Augmented Lagrangian");
27 useDefaultInitPen_ = sublist.get("Use Default Initial Penalty Parameter", true);
28 state_->searchSize = sublist.get("Initial Penalty Parameter", ten);
29 // Multiplier update parameters
30 scaleLagrangian_ = sublist.get("Use Scaled Augmented Lagrangian", false);
31 minPenaltyLowerBound_ = sublist.get("Penalty Parameter Reciprocal Lower Bound", p1);
32 penaltyUpdate_ = sublist.get("Penalty Parameter Growth Factor", ten);
33 maxPenaltyParam_ = sublist.get("Maximum Penalty Parameter", oe8);
// NOTE(review): original line 34 is missing from this scrape.
35 // Optimality tolerance update
36 optIncreaseExponent_ = sublist.get("Optimality Tolerance Update Exponent", one);
37 optDecreaseExponent_ = sublist.get("Optimality Tolerance Decrease Exponent", one);
38 optToleranceInitial_ = sublist.get("Initial Optimality Tolerance", one);
39 // Feasibility tolerance update
40 feasIncreaseExponent_ = sublist.get("Feasibility Tolerance Update Exponent", p1);
41 feasDecreaseExponent_ = sublist.get("Feasibility Tolerance Decrease Exponent", p9);
42 feasToleranceInitial_ = sublist.get("Initial Feasibility Tolerance", one);
43 // Subproblem information
44 print_ = sublist.get("Print Intermediate Optimization History", false);
45 maxit_ = sublist.get("Subproblem Iteration Limit", 1000);
46 subStep_ = sublist.get("Subproblem Step Type", "Trust Region");
47 HessianApprox_ = sublist.get("Level of Hessian Approximation", 0);
// Propagate the chosen step type and iteration limit into the stored copy of
// the list (list_), which is later used to build the Type-B subproblem solver.
48 list_.sublist("Step").set("Type",subStep_);
49 list_.sublist("Status Test").set("Iteration Limit",maxit_);
50 list_.sublist("Status Test").set("Use Relative Tolerances",false);
51 // Verbosity setting
52 verbosity_ = list.sublist("General").get("Output Level", 0);
// NOTE(review): original line 53 is missing from this scrape.
// Output Level > 2 forces intermediate-subproblem printing on.
54 print_ = (verbosity_ > 2 ? true : print_);
55 list_.sublist("General").set("Output Level",(print_ ? verbosity_ : 0));
56 // Outer iteration tolerances
57 outerFeasTolerance_ = list.sublist("Status Test").get("Constraint Tolerance", oem8);
58 outerOptTolerance_ = list.sublist("Status Test").get("Gradient Tolerance", oem8);
59 outerStepTolerance_ = list.sublist("Status Test").get("Step Tolerance", oem8);
60 useRelTol_ = list.sublist("Status Test").get("Use Relative Tolerances", false);
61 // Scaling
62 useDefaultScaling_ = sublist.get("Use Default Problem Scaling", true);
63 fscale_ = sublist.get("Objective Scaling", one);
64 cscale_ = sublist.get("Constraint Scaling", one);
65}
66
// initialize(): evaluates the augmented Lagrangian at the (projected) initial
// guess, computes default objective/constraint scalings, the initial penalty
// parameter and the intermediate stopping tolerances, then resets alobj with
// the initial multiplier l and penalty.
// NOTE(review): Doxygen scrape — leading numbers are artifacts. Original lines
// 68 and 72-74 (part of the signature) are missing; per the member index the
// full signature is
//   initialize(Vector<Real> &x, const Vector<Real> &g, const Vector<Real> &l,
//              const Vector<Real> &c, AugmentedLagrangianObjective<Real> &alobj,
//              BoundConstraint<Real> &bnd, Constraint<Real> &con,
//              std::ostream &outStream)
// Original lines 85, 123, 143 and 146 are also missing; in particular the
// statements at orig. lines 142 and 145 are truncated (their continuation
// lines — the second std::max arguments — were dropped).
67template<typename Real>
69 const Vector<Real> &g,
70 const Vector<Real> &l,
71 const Vector<Real> &c,
75 std::ostream &outStream ) {
// Ensure x is feasible; construct a bound-only projection if none was supplied.
76 hasPolyProj_ = true;
77 if (proj_ == nullPtr) {
78 proj_ = makePtr<PolyhedralProjection<Real>>(makePtrFromRef(bnd));
79 hasPolyProj_ = false;
80 }
81 proj_->project(x,outStream);
82
83 const Real one(1), TOL(1.e-2);
84 Real tol = std::sqrt(ROL_EPSILON<Real>());
86
87 // Initialize the algorithm state
88 state_->nfval = 0;
89 state_->ncval = 0;
90 state_->ngrad = 0;
91
92 // Compute objective value
93 alobj.update(x,UpdateType::Initial,state_->iter);
94 state_->value = alobj.getObjectiveValue(x,tol);
95 alobj.gradient(*state_->gradientVec,x,tol);
96
97 // Compute constraint violation
98 state_->constraintVec->set(*alobj.getConstraintVec(x,tol));
99 state_->cnorm = state_->constraintVec->norm();
100
101 // Update evaluation counters
102 state_->ncval += alobj.getNumberConstraintEvaluations();
103 state_->nfval += alobj.getNumberFunctionEvaluations();
104 state_->ngrad += alobj.getNumberGradientEvaluations();
105
106 // Compute problem scaling
// fscale_: reciprocal of the objective gradient norm (floored at 1).
// cscale_: reciprocal of the largest adjoint-Jacobian column norm, estimated
// by applying con.applyAdjointJacobian to each basis vector of the constraint
// space; falls back to 1 if basis()/dimension() is unsupported (throws).
107 if (useDefaultScaling_) {
108 fscale_ = one/std::max(one,alobj.getObjectiveGradient(x,tol)->norm());
109 try {
110 Ptr<Vector<Real>> ji = x.clone();
111 Real maxji(0), normji(0);
112 for (int i = 0; i < c.dimension(); ++i) {
113 con.applyAdjointJacobian(*ji,*c.basis(i),x,tol);
114 normji = ji->norm();
115 maxji = std::max(normji,maxji);
116 }
117 cscale_ = one/std::max(one,maxji);
118 }
119 catch (std::exception &e) {
120 cscale_ = one;
121 }
122 }
124
125 // Compute gradient of the lagrangian
// Projected-gradient stationarity measure; x is restored from iterateVec
// afterwards. NOTE(review): assumes state_->iterateVec holds a copy of x
// (possibly set on a dropped line, e.g. orig. 85 or 123) — confirm against
// the repository file.
126 x.axpy(-one,state_->gradientVec->dual());
127 proj_->project(x,outStream);
128 x.axpy(-one/std::min(fscale_,cscale_),*state_->iterateVec);
129 state_->gnorm = x.norm();
130 x.set(*state_->iterateVec);
131
132 // Compute initial penalty parameter
// Default penalty balances scaled objective magnitude against squared scaled
// infeasibility, clamped to [1e-8, 1e-2 * maxPenaltyParam_].
133 if (useDefaultInitPen_) {
134 const Real oem8(1e-8), oem2(1e-2), two(2), ten(10);
135 state_->searchSize = std::max(oem8,
136 std::min(ten*std::max(one,std::abs(fscale_*state_->value))
137 / std::max(one,std::pow(cscale_*state_->cnorm,two)),oem2*maxPenaltyParam_));
138 }
139 // Initialize intermediate stopping tolerances
140 if (useRelTol_) outerOptTolerance_ *= state_->gnorm;
141 minPenaltyReciprocal_ = std::min(one/state_->searchSize,minPenaltyLowerBound_);
// NOTE(review): continuation of this statement (orig. line 143, the second
// std::max argument) was dropped by the scrape.
142 optTolerance_ = std::max<Real>(TOL*outerOptTolerance_,
144 optTolerance_ = std::min<Real>(optTolerance_,TOL*state_->gnorm);
// NOTE(review): continuation of this statement (orig. line 146, the second
// std::max argument) was dropped by the scrape.
145 feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
147
148 // Set data
149 alobj.reset(l,state_->searchSize);
150
151 if (verbosity_ > 1) {
152 outStream << std::endl;
153 outStream << "Augmented Lagrangian Initialize" << std::endl;
154 outStream << "Objective Scaling: " << fscale_ << std::endl;
155 outStream << "Constraint Scaling: " << cscale_ << std::endl;
156 outStream << "Penalty Parameter: " << state_->searchSize << std::endl;
157 outStream << std::endl;
158 }
159}
160
// run(): outer augmented Lagrangian loop. Repeatedly (i) solves the
// bound-constrained subproblem for the current multiplier/penalty with a
// Type-B solver, (ii) refreshes iterate, step, value, constraint and gradient
// statistics, and (iii) on subproblem convergence either performs a
// first-order multiplier update (when feasible enough) or increases the
// penalty parameter, tightening the intermediate tolerances either way.
// NOTE(review): Doxygen scrape — leading numbers are artifacts. Original
// lines 162, 165, 175, 186, 226, 228, 236, 238 and 246 were dropped; per the
// member index the full signature is
//   run(Vector<Real> &x, const Vector<Real> &g, Objective<Real> &obj,
//       BoundConstraint<Real> &bnd, Constraint<Real> &econ,
//       Vector<Real> &emul, const Vector<Real> &eres,
//       std::ostream &outStream)
// Verify all dropped lines against the repository copy of this file.
161template<typename Real>
163 const Vector<Real> &g,
164 Objective<Real> &obj,
166 Constraint<Real> &econ,
167 Vector<Real> &emul,
168 const Vector<Real> &eres,
169 std::ostream &outStream ) {
170 const Real one(1), oem2(1e-2);
171 Real tol(std::sqrt(ROL_EPSILON<Real>()));
172 // Initialize augmented Lagrangian data
// NOTE(review): the tail of this constructor call (orig. line 175) was
// dropped by the scrape.
173 AugmentedLagrangianObjective<Real> alobj(makePtrFromRef(obj),makePtrFromRef(econ),
174 state_->searchSize,g,eres,emul,
176 initialize(x,g,emul,eres,alobj,bnd,econ,outStream);
177 Ptr<TypeB::Algorithm<Real>> algo;
178
179 // Output
180 if (verbosity_ > 0) writeOutput(outStream,true);
181
182 while (status_->check(*state_)) {
183 // Solve unconstrained augmented Lagrangian subproblem
184 list_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
185 list_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
// NOTE(review): the assignment of `algo` (orig. line 186, presumably
// algo = TypeB::AlgorithmFactory<Real>(list_,secant_); cf. the index entry
// for AlgorithmFactory) was dropped by the scrape.
187 if (hasPolyProj_) algo->run(x,g,alobj,bnd,*proj_->getLinearConstraint(),
188 *proj_->getMultiplier(),*proj_->getResidual(),
189 outStream);
190 else algo->run(x,g,alobj,bnd,outStream);
191 subproblemIter_ = algo->getState()->iter;
192
193 // Compute step
194 state_->stepVec->set(x);
195 state_->stepVec->axpy(-one,*state_->iterateVec);
196 state_->snorm = state_->stepVec->norm();
197
198 // Update iteration information
199 state_->iter++;
200 state_->iterateVec->set(x);
201 state_->value = alobj.getObjectiveValue(x,tol);
202 state_->constraintVec->set(*alobj.getConstraintVec(x,tol));
203 state_->cnorm = state_->constraintVec->norm();
204 alobj.gradient(*state_->gradientVec,x,tol);
205 if (scaleLagrangian_) {
206 state_->gradientVec->scale(state_->searchSize);
207 }
// Projected-gradient stationarity measure; x is restored from iterateVec
// (just refreshed above) once gnorm has been taken.
208 x.axpy(-one/std::min(fscale_,cscale_),state_->gradientVec->dual());
209 proj_->project(x,outStream);
210 x.axpy(-one,*state_->iterateVec);
211 state_->gnorm = x.norm();
212 x.set(*state_->iterateVec);
213 //alobj.update(x,UpdateType::Accept,state_->iter);
214
215 // Update evaluation counters
216 state_->nfval += alobj.getNumberFunctionEvaluations();
217 state_->ngrad += alobj.getNumberGradientEvaluations();
218 state_->ncval += alobj.getNumberConstraintEvaluations();
219
220 // Update multipliers
// Only act when the subproblem solver reports convergence; otherwise keep the
// current multiplier/penalty and let the outer status test decide.
221 if ( algo->getState()->statusFlag == EXITSTATUS_CONVERGED ) {
222 minPenaltyReciprocal_ = std::min(one/state_->searchSize,minPenaltyLowerBound_);
223 if ( cscale_*state_->cnorm < feasTolerance_ ) {
// Feasible enough: first-order multiplier update emul += penalty*cscale*c(x).
// NOTE(review): the continuations of the two tolerance updates below
// (orig. lines 226 and 228, the second std::max arguments) were dropped.
224 emul.axpy(state_->searchSize*cscale_,state_->constraintVec->dual());
225 optTolerance_ = std::max(oem2*outerOptTolerance_,
227 feasTolerance_ = std::max(oem2*outerFeasTolerance_,
229 // Update Algorithm State
230 state_->snorm += state_->searchSize*cscale_*state_->cnorm;
231 state_->lagmultVec->set(emul);
232 }
233 else {
// Not feasible enough: grow the penalty (capped at maxPenaltyParam_).
// NOTE(review): the continuations of the two tolerance updates below
// (orig. lines 236 and 238) were dropped by the scrape.
234 state_->searchSize = std::min(penaltyUpdate_*state_->searchSize,maxPenaltyParam_);
235 optTolerance_ = std::max(oem2*outerOptTolerance_,
237 feasTolerance_ = std::max(oem2*outerFeasTolerance_,
239 }
240 alobj.reset(emul,state_->searchSize);
241 }
242
243 // Update Output
244 if (verbosity_ > 0) writeOutput(outStream,printHeader_);
245 }
// NOTE(review): original line 246 (likely the exit-status output) is missing
// from this scrape — confirm against the repository file.
247}
248
// writeHeader(): prints the column-legend block (only when verbosity_ > 1)
// followed by the one-line column header of the iterate table; saves and
// restores the stream's format flags around the left-justified setw output.
// NOTE(review): Doxygen scrape — the signature line (orig. 250) was dropped;
// per the member index it is
//   void writeHeader( std::ostream& os ) const   (override)
249template<typename Real>
251 std::ios_base::fmtflags osFlags(os.flags());
252 if(verbosity_>1) {
253 os << std::string(114,'-') << std::endl;
254 os << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
255 os << " iter - Number of iterates (steps taken)" << std::endl;
256 os << " fval - Objective function value" << std::endl;
257 os << " cnorm - Norm of the constraint violation" << std::endl;
258 os << " gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
259 os << " snorm - Norm of the step" << std::endl;
260 os << " penalty - Penalty parameter" << std::endl;
261 os << " feasTol - Feasibility tolerance" << std::endl;
262 os << " optTol - Optimality tolerance" << std::endl;
263 os << " #fval - Number of times the objective was computed" << std::endl;
264 os << " #grad - Number of times the gradient was computed" << std::endl;
265 os << " #cval - Number of times the constraint was computed" << std::endl;
266 os << " subIter - Number of iterations to solve subproblem" << std::endl;
267 os << std::string(114,'-') << std::endl;
268 }
269 os << " ";
270 os << std::setw(6) << std::left << "iter";
271 os << std::setw(15) << std::left << "fval";
272 os << std::setw(15) << std::left << "cnorm";
273 os << std::setw(15) << std::left << "gLnorm";
274 os << std::setw(15) << std::left << "snorm";
275 os << std::setw(10) << std::left << "penalty";
276 os << std::setw(10) << std::left << "feasTol";
277 os << std::setw(10) << std::left << "optTol";
278 os << std::setw(8) << std::left << "#fval";
279 os << std::setw(8) << std::left << "#grad";
280 os << std::setw(8) << std::left << "#cval";
281 os << std::setw(8) << std::left << "subIter";
282 os << std::endl;
283 os.flags(osFlags);
284}
285
286template<typename Real>
287void AugmentedLagrangianAlgorithm<Real>::writeName( std::ostream& os ) const {
288 std::ios_base::fmtflags osFlags(os.flags());
289 os << std::endl << "Augmented Lagrangian Solver (Type G, General Constraints)";
290 os << std::endl;
291 os << "Subproblem Solver: " << subStep_ << std::endl;
292 os.flags(osFlags);
293}
294
295template<typename Real>
296void AugmentedLagrangianAlgorithm<Real>::writeOutput( std::ostream& os, const bool print_header ) const {
297 std::ios_base::fmtflags osFlags(os.flags());
298 os << std::scientific << std::setprecision(6);
299 if ( state_->iter == 0 ) writeName(os);
300 if ( print_header ) writeHeader(os);
301 if ( state_->iter == 0 ) {
302 os << " ";
303 os << std::setw(6) << std::left << state_->iter;
304 os << std::setw(15) << std::left << state_->value;
305 os << std::setw(15) << std::left << state_->cnorm;
306 os << std::setw(15) << std::left << state_->gnorm;
307 os << std::setw(15) << std::left << "---";
308 os << std::scientific << std::setprecision(2);
309 os << std::setw(10) << std::left << state_->searchSize;
310 os << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
311 os << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
312 os << std::scientific << std::setprecision(6);
313 os << std::setw(8) << std::left << state_->nfval;
314 os << std::setw(8) << std::left << state_->ngrad;
315 os << std::setw(8) << std::left << state_->ncval;
316 os << std::setw(8) << std::left << "---";
317 os << std::endl;
318 }
319 else {
320 os << " ";
321 os << std::setw(6) << std::left << state_->iter;
322 os << std::setw(15) << std::left << state_->value;
323 os << std::setw(15) << std::left << state_->cnorm;
324 os << std::setw(15) << std::left << state_->gnorm;
325 os << std::setw(15) << std::left << state_->snorm;
326 os << std::scientific << std::setprecision(2);
327 os << std::setw(10) << std::left << state_->searchSize;
328 os << std::setw(10) << std::left << feasTolerance_;
329 os << std::setw(10) << std::left << optTolerance_;
330 os << std::scientific << std::setprecision(6);
331 os << std::setw(8) << std::left << state_->nfval;
332 os << std::setw(8) << std::left << state_->ngrad;
333 os << std::setw(8) << std::left << state_->ncval;
334 os << std::setw(8) << std::left << subproblemIter_;
335 os << std::endl;
336 }
337 os.flags(osFlags);
338}
339
340} // namespace TypeG
341} // namespace ROL
342
343#endif
virtual void initialize(const Vector< Real > &x)
Initialize temporary variables.
Provides the interface to evaluate the augmented Lagrangian.
void reset(const Vector< Real > &multiplier, const Real penaltyParameter)
void update(const Vector< Real > &x, UpdateType type, int iter=-1)
Real getObjectiveValue(const Vector< Real > &x, Real &tol)
void gradient(Vector< Real > &g, const Vector< Real > &x, Real &tol)
void setScaling(const Real fscale=1.0, const Real cscale=1.0)
const Ptr< const Vector< Real > > getObjectiveGradient(const Vector< Real > &x, Real &tol)
const Ptr< const Vector< Real > > getConstraintVec(const Vector< Real > &x, Real &tol)
Provides the interface to apply upper and lower bound constraints.
Provides an interface to check the status of optimization algorithms for problems with equality constraints.
Defines the general constraint operator interface.
virtual void applyAdjointJacobian(Vector< Real > &ajv, const Vector< Real > &v, const Vector< Real > &x, Real &tol)
Apply the adjoint of the constraint Jacobian at \(x\), \(c'(x)^*\), to the vector \(v\): computes \(\mathrm{ajv} = c'(x)^* v\).
Provides the interface to evaluate objective functions.
Provides interface for and implements limited-memory secant operators.
Algorithm()
Constructor, given a step and a status test.
void initialize(const Vector< Real > &x, const Vector< Real > &g, const Vector< Real > &mul, const Vector< Real > &c)
virtual void writeExitStatus(std::ostream &os) const
const Ptr< CombinedStatusTest< Real > > status_
Ptr< PolyhedralProjection< Real > > proj_
const Ptr< AlgorithmState< Real > > state_
void writeOutput(std::ostream &os, const bool print_header=false) const override
Print iterate status.
AugmentedLagrangianAlgorithm(ParameterList &list, const Ptr< Secant< Real > > &secant=nullPtr)
void writeName(std::ostream &os) const override
Print step name.
void run(Vector< Real > &x, const Vector< Real > &g, Objective< Real > &obj, BoundConstraint< Real > &bnd, Constraint< Real > &econ, Vector< Real > &emul, const Vector< Real > &eres, std::ostream &outStream=std::cout) override
Run algorithm on general constrained problems (Type-G). This is the primary Type-G interface.
void initialize(Vector< Real > &x, const Vector< Real > &g, const Vector< Real > &l, const Vector< Real > &c, AugmentedLagrangianObjective< Real > &alobj, BoundConstraint< Real > &bnd, Constraint< Real > &con, std::ostream &outStream=std::cout)
void writeHeader(std::ostream &os) const override
Print iterate header.
Defines the linear algebra or vector space interface.
virtual Real norm() const =0
Returns \( \|y\| \) where \( y = \mathtt{*this} \).
virtual void set(const Vector &x)
Set \( y \leftarrow x \) where \( y = \mathtt{*this} \).
virtual ROL::Ptr< Vector > clone() const =0
Clone to make a new (uninitialized) vector.
virtual int dimension() const
Return dimension of the vector space.
virtual ROL::Ptr< Vector > basis(const int i) const
Return i-th basis vector.
virtual void axpy(const Real alpha, const Vector &x)
Compute \( y \leftarrow \alpha x + y \) where \( y = \mathtt{*this} \).
Ptr< Algorithm< Real > > AlgorithmFactory(ParameterList &parlist, const Ptr< Secant< Real > > &secant=nullPtr)
Real ROL_EPSILON(void)
Platform-dependent machine epsilon.
Definition ROL_Types.hpp:57
@ EXITSTATUS_CONVERGED
Definition ROL_Types.hpp:84