ROL
ROL_TypeE_AugmentedLagrangianAlgorithm_Def.hpp
Go to the documentation of this file.
1// @HEADER
2// *****************************************************************************
3// Rapid Optimization Library (ROL) Package
4//
5// Copyright 2014 NTESS and the ROL contributors.
6// SPDX-License-Identifier: BSD-3-Clause
7// *****************************************************************************
8// @HEADER
9
10#ifndef ROL_TYPEE_AUGMENTEDLAGRANGIANALGORITHM_DEF_H
11#define ROL_TYPEE_AUGMENTEDLAGRANGIANALGORITHM_DEF_H
12
14
15namespace ROL {
16namespace TypeE {
17
// Constructor: reads the "Step" -> "Augmented Lagrangian" sublist to configure
// penalty growth, multiplier-update behavior, and subproblem stopping rules,
// installs a ConstraintStatusTest built from the full parameter list, and
// forwards the subproblem step type / iteration limit into the private copy
// list_ that later builds the unconstrained (TypeU) subproblem solver.
//
// NOTE(review): this text is a Doxygen dump — each line below still carries
// its original source line number fused to the front, and the constructor
// signature (orig. line 19) plus orig. lines 34 and 53 were lost in
// extraction (53 presumably sets printHeader_ — confirm against the pristine
// ROL source before editing code here).
18template<typename Real>
20 : TypeE::Algorithm<Real>::Algorithm(), secant_(secant), list_(list), subproblemIter_(0) {
21 // Set status test
22 status_->reset();
23 status_->add(makePtr<ConstraintStatusTest<Real>>(list));
24
25 Real one(1), p1(0.1), p9(0.9), ten(1.e1), oe8(1.e8), oem8(1.e-8);
26 ParameterList& sublist = list.sublist("Step").sublist("Augmented Lagrangian");
27 useDefaultInitPen_ = sublist.get("Use Default Initial Penalty Parameter", true);
28 state_->searchSize = sublist.get("Initial Penalty Parameter", ten);
29 // Multiplier update parameters
30 scaleLagrangian_ = sublist.get("Use Scaled Augmented Lagrangian", false);
31 minPenaltyLowerBound_ = sublist.get("Penalty Parameter Reciprocal Lower Bound", p1);
32 penaltyUpdate_ = sublist.get("Penalty Parameter Growth Factor", ten);
33 maxPenaltyParam_ = sublist.get("Maximum Penalty Parameter", oe8);
// (orig. line 34 missing from the dump)
35 // Optimality tolerance update
36 optIncreaseExponent_ = sublist.get("Optimality Tolerance Update Exponent", one);
37 optDecreaseExponent_ = sublist.get("Optimality Tolerance Decrease Exponent", one);
38 optToleranceInitial_ = sublist.get("Initial Optimality Tolerance", one);
39 // Feasibility tolerance update
40 feasIncreaseExponent_ = sublist.get("Feasibility Tolerance Update Exponent", p1);
41 feasDecreaseExponent_ = sublist.get("Feasibility Tolerance Decrease Exponent", p9);
42 feasToleranceInitial_ = sublist.get("Initial Feasibility Tolerance", one);
43 // Subproblem information
44 print_ = sublist.get("Print Intermediate Optimization History", false);
45 maxit_ = sublist.get("Subproblem Iteration Limit", 1000);
46 subStep_ = sublist.get("Subproblem Step Type", "Trust Region");
47 HessianApprox_ = sublist.get("Level of Hessian Approximation", 0);
// Push subproblem settings into the private list used by the TypeU solver.
48 list_.sublist("Step").set("Type",subStep_);
49 list_.sublist("Status Test").set("Iteration Limit",maxit_);
50 list_.sublist("Status Test").set("Use Relative Tolerances",false);
51 // Verbosity setting
52 verbosity_ = list.sublist("General").get("Output Level", 0);
// (orig. line 53 missing from the dump)
54 print_ = (verbosity_ > 2 ? true : print_);
// Suppress subproblem output unless intermediate history was requested.
55 list_.sublist("General").set("Output Level",(print_ ? verbosity_ : 0));
56 // Outer iteration tolerances
57 outerFeasTolerance_ = list.sublist("Status Test").get("Constraint Tolerance", oem8);
58 outerOptTolerance_ = list.sublist("Status Test").get("Gradient Tolerance", oem8);
59 outerStepTolerance_ = list.sublist("Status Test").get("Step Tolerance", oem8);
60 useRelTol_ = list.sublist("Status Test").get("Use Relative Tolerances", false);
61 // Scaling
62 useDefaultScaling_ = sublist.get("Use Default Problem Scaling", true);
63 fscale_ = sublist.get("Objective Scaling", one);
64 cscale_ = sublist.get("Constraint Scaling", one);
65}
66
// Initializes the outer-loop state: evaluates the augmented Lagrangian
// objective, its gradient, and the constraint at the initial iterate x;
// derives default objective/constraint scalings (fscale_, cscale_) from
// gradient and adjoint-Jacobian norms; chooses the initial penalty parameter;
// and seeds the intermediate optimality/feasibility tolerances used by the
// subproblem solves.
//
// NOTE(review): Doxygen-dump artifact — embedded line numbers, and several
// lines lost in extraction: the signature opening (orig. 68), orig. 72-73
// and 77, the guard at orig. 99 (presumably `if (useDefaultScaling_) {`,
// which the closing brace at orig. 114 matches), and the continuation lines
// of the tolerance formulas (orig. 131 and 134). Confirm against the
// pristine ROL source before editing code here.
67template<typename Real>
69 const Vector<Real> &g,
70 const Vector<Real> &l,
71 const Vector<Real> &c,
74 std::ostream &outStream ) {
75 const Real one(1), TOL(1.e-2);
76 Real tol = std::sqrt(ROL_EPSILON<Real>());
78
79 // Initialize the algorithm state
80 state_->nfval = 0;
81 state_->ncval = 0;
82 state_->ngrad = 0;
83
84 // Compute objective value
85 alobj.update(x,UpdateType::Initial,state_->iter);
86 state_->value = alobj.getObjectiveValue(x,tol);
87 alobj.gradient(*state_->gradientVec,x,tol);
88
89 // Compute constraint violation
90 state_->constraintVec->set(*alobj.getConstraintVec(x,tol));
91 state_->cnorm = state_->constraintVec->norm();
92
93 // Update evaluation counters
94 state_->ncval += alobj.getNumberConstraintEvaluations();
95 state_->nfval += alobj.getNumberFunctionEvaluations();
96 state_->ngrad += alobj.getNumberGradientEvaluations();
97
98 // Compute problem scaling
// fscale_: reciprocal of the objective gradient norm (floored at one).
100 fscale_ = one/std::max(one,alobj.getObjectiveGradient(x,tol)->norm());
// cscale_: reciprocal of the largest adjoint-Jacobian column norm, probed
// one constraint basis vector at a time; falls back to one if the probe
// is unsupported (e.g. basis() or dimension() throws).
101 try {
102 Ptr<Vector<Real>> ji = x.clone();
103 Real maxji(0), normji(0);
104 for (int i = 0; i < c.dimension(); ++i) {
105 con.applyAdjointJacobian(*ji,*c.basis(i),x,tol);
106 normji = ji->norm();
107 maxji = std::max(normji,maxji);
108 }
109 cscale_ = one/std::max(one,maxji);
110 }
111 catch (std::exception &e) {
112 cscale_ = one;
113 }
114 }
116
117 // Compute gradient of the lagrangian
118 state_->gnorm = state_->gradientVec->norm()/std::min(fscale_,cscale_);
119
120 // Compute initial penalty parameter
121 if (useRelTol_) outerOptTolerance_ *= state_->gnorm;
122 if (useDefaultInitPen_) {
// Heuristic: balance the scaled objective magnitude against the squared
// scaled infeasibility, clipped to [1e-8, 1e-2 * maxPenaltyParam_].
123 const Real oem8(1e-8), oem2(1e-2), two(2), ten(10);
124 state_->searchSize = std::max(oem8,
125 std::min(ten*std::max(one,std::abs(fscale_*state_->value))
126 / std::max(one,std::pow(cscale_*state_->cnorm,two)),oem2*maxPenaltyParam_));
127 }
128 // Initialize intermediate stopping tolerances
129 minPenaltyReciprocal_ = std::min(one/state_->searchSize,minPenaltyLowerBound_);
// (continuation of these two formulas, orig. lines 131/134, missing from the dump)
130 optTolerance_ = std::max<Real>(TOL*outerOptTolerance_,
132 optTolerance_ = std::min<Real>(optTolerance_,TOL*state_->gnorm);
133 feasTolerance_ = std::max<Real>(TOL*outerFeasTolerance_,
135
136 // Set data
// Seed the AL objective with the initial multiplier and penalty parameter.
137 alobj.reset(l,state_->searchSize);
138
139 if (verbosity_ > 1) {
140 outStream << std::endl;
141 outStream << "Augmented Lagrangian Initialize" << std::endl;
142 outStream << "Objective Scaling: " << fscale_ << std::endl;
143 outStream << "Constraint Scaling: " << cscale_ << std::endl;
144 outStream << std::endl;
145 }
146}
147
// Outer augmented Lagrangian loop: repeatedly minimizes the AL subproblem
// with an unconstrained (TypeU) solver, then either performs a first-order
// multiplier update (when the scaled constraint norm meets feasTolerance_)
// or grows the penalty parameter, tightening the intermediate tolerances
// each pass. On exit the multipliers are rescaled by cscale_.
//
// NOTE(review): Doxygen-dump artifact — embedded line numbers, and lines
// lost in extraction: the signature opening (orig. 149), the trailing
// constructor arguments at orig. 161, the solver construction at orig. 172
// (presumably algo = TypeU::AlgorithmFactory<Real>(list_,secant_); given
// the factory cross-reference), and the continuation lines of the tolerance
// updates (orig. 205, 208, 216, 218). Confirm against the pristine source.
148template<typename Real>
150 const Vector<Real> &g,
151 Objective<Real> &obj,
152 Constraint<Real> &econ,
153 Vector<Real> &emul,
154 const Vector<Real> &eres,
155 std::ostream &outStream ) {
156 const Real one(1), oem2(1e-2);
157 Real tol(std::sqrt(ROL_EPSILON<Real>()));
158 // Initialize augmented Lagrangian data
159 AugmentedLagrangianObjective<Real> alobj(makePtrFromRef(obj),makePtrFromRef(econ),
160 state_->searchSize,g,eres,emul,
162 initialize(x,g,emul,eres,alobj,econ,outStream);
163 Ptr<TypeU::Algorithm<Real>> algo;
164
165 // Output
166 if (verbosity_ > 0) writeOutput(outStream,true);
167
168 while (status_->check(*state_)) {
169 // Solve unconstrained augmented Lagrangian subproblem
170 list_.sublist("Status Test").set("Gradient Tolerance",optTolerance_);
171 list_.sublist("Status Test").set("Step Tolerance",1.e-6*optTolerance_);
173 algo->run(x,g,alobj,outStream);
174 subproblemIter_ = algo->getState()->iter;
175
176 // Compute step
177 state_->stepVec->set(x);
178 state_->stepVec->axpy(-one,*state_->iterateVec);
179 state_->snorm = state_->stepVec->norm();
180
181 // Update iteration information
182 state_->iter++;
183 state_->iterateVec->set(x);
184 state_->value = alobj.getObjectiveValue(x,tol);
185 state_->constraintVec->set(*alobj.getConstraintVec(x,tol));
186 state_->cnorm = state_->constraintVec->norm();
187 alobj.gradient(*state_->gradientVec,x,tol);
188 if (scaleLagrangian_) {
189 state_->gradientVec->scale(state_->searchSize);
190 }
191 state_->gnorm = state_->gradientVec->norm()/std::min(fscale_,cscale_);
192 //alobj.update(x,UpdateType::Accept,state_->iter);
193
194 // Update evaluation counters
195 state_->nfval += alobj.getNumberFunctionEvaluations();
196 state_->ngrad += alobj.getNumberGradientEvaluations();
197 state_->ncval += alobj.getNumberConstraintEvaluations();
198
199 // Update multipliers
200 minPenaltyReciprocal_ = std::min(one/state_->searchSize,minPenaltyLowerBound_);
201 if ( cscale_*state_->cnorm < feasTolerance_ ) {
// Feasible enough: first-order multiplier update lambda += r*c(x) (scaled),
// then tighten tolerances (continuations missing from the dump, orig. 205/208).
202 emul.axpy(state_->searchSize*cscale_,state_->constraintVec->dual());
203 if ( algo->getState()->statusFlag == EXITSTATUS_CONVERGED ) {
204 optTolerance_ = std::max(oem2*outerOptTolerance_,
206 }
207 feasTolerance_ = std::max(oem2*outerFeasTolerance_,
209 // Update Algorithm State
210 state_->snorm += state_->searchSize*cscale_*state_->cnorm;
211 state_->lagmultVec->set(emul);
212 }
213 else {
// Not feasible enough: grow the penalty (capped) and relax/reset tolerances
// (continuations missing from the dump, orig. 216/218).
214 state_->searchSize = std::min(penaltyUpdate_*state_->searchSize,maxPenaltyParam_);
215 optTolerance_ = std::max(oem2*outerOptTolerance_,
217 feasTolerance_ = std::max(oem2*outerFeasTolerance_,
219 }
220 alobj.reset(emul,state_->searchSize);
221
222 // Update Output
223 if (verbosity_ > 0) writeOutput(outStream,printHeader_);
224 }
// Undo the internal constraint scaling on the returned multipliers —
// presumably maps them back to the user's constraint scaling; confirm.
225 emul.scale(cscale_);
227}
228
229template<typename Real>
231 std::ios_base::fmtflags osFlags(os.flags());
232 if(verbosity_>1) {
233 os << std::string(114,'-') << std::endl;
234 os << "Augmented Lagrangian status output definitions" << std::endl << std::endl;
235 os << " iter - Number of iterates (steps taken)" << std::endl;
236 os << " fval - Objective function value" << std::endl;
237 os << " cnorm - Norm of the constraint violation" << std::endl;
238 os << " gLnorm - Norm of the gradient of the Lagrangian" << std::endl;
239 os << " snorm - Norm of the step" << std::endl;
240 os << " penalty - Penalty parameter" << std::endl;
241 os << " feasTol - Feasibility tolerance" << std::endl;
242 os << " optTol - Optimality tolerance" << std::endl;
243 os << " #fval - Number of times the objective was computed" << std::endl;
244 os << " #grad - Number of times the gradient was computed" << std::endl;
245 os << " #cval - Number of times the constraint was computed" << std::endl;
246 os << " subIter - Number of iterations to solve subproblem" << std::endl;
247 os << std::string(114,'-') << std::endl;
248 }
249 os << " ";
250 os << std::setw(6) << std::left << "iter";
251 os << std::setw(15) << std::left << "fval";
252 os << std::setw(15) << std::left << "cnorm";
253 os << std::setw(15) << std::left << "gLnorm";
254 os << std::setw(15) << std::left << "snorm";
255 os << std::setw(10) << std::left << "penalty";
256 os << std::setw(10) << std::left << "feasTol";
257 os << std::setw(10) << std::left << "optTol";
258 os << std::setw(8) << std::left << "#fval";
259 os << std::setw(8) << std::left << "#grad";
260 os << std::setw(8) << std::left << "#cval";
261 os << std::setw(8) << std::left << "subIter";
262 os << std::endl;
263 os.flags(osFlags);
264}
265
266template<typename Real>
267void AugmentedLagrangianAlgorithm<Real>::writeName( std::ostream& os ) const {
268 std::ios_base::fmtflags osFlags(os.flags());
269 os << std::endl << "Augmented Lagrangian Solver (Type E, Equality Constraints)";
270 os << std::endl;
271 os << "Subproblem Solver: " << subStep_ << std::endl;
272 os.flags(osFlags);
273}
274
275template<typename Real>
276void AugmentedLagrangianAlgorithm<Real>::writeOutput( std::ostream& os, const bool print_header ) const {
277 std::ios_base::fmtflags osFlags(os.flags());
278 os << std::scientific << std::setprecision(6);
279 if ( state_->iter == 0 ) writeName(os);
280 if ( print_header ) writeHeader(os);
281 if ( state_->iter == 0 ) {
282 os << " ";
283 os << std::setw(6) << std::left << state_->iter;
284 os << std::setw(15) << std::left << state_->value;
285 os << std::setw(15) << std::left << state_->cnorm;
286 os << std::setw(15) << std::left << state_->gnorm;
287 os << std::setw(15) << std::left << "---";
288 os << std::scientific << std::setprecision(2);
289 os << std::setw(10) << std::left << state_->searchSize;
290 os << std::setw(10) << std::left << std::max(feasTolerance_,outerFeasTolerance_);
291 os << std::setw(10) << std::left << std::max(optTolerance_,outerOptTolerance_);
292 os << std::scientific << std::setprecision(6);
293 os << std::setw(8) << std::left << state_->nfval;
294 os << std::setw(8) << std::left << state_->ngrad;
295 os << std::setw(8) << std::left << state_->ncval;
296 os << std::setw(8) << std::left << "---";
297 os << std::endl;
298 }
299 else {
300 os << " ";
301 os << std::setw(6) << std::left << state_->iter;
302 os << std::setw(15) << std::left << state_->value;
303 os << std::setw(15) << std::left << state_->cnorm;
304 os << std::setw(15) << std::left << state_->gnorm;
305 os << std::setw(15) << std::left << state_->snorm;
306 os << std::scientific << std::setprecision(2);
307 os << std::setw(10) << std::left << state_->searchSize;
308 os << std::setw(10) << std::left << feasTolerance_;
309 os << std::setw(10) << std::left << optTolerance_;
310 os << std::scientific << std::setprecision(6);
311 os << std::setw(8) << std::left << state_->nfval;
312 os << std::setw(8) << std::left << state_->ngrad;
313 os << std::setw(8) << std::left << state_->ncval;
314 os << std::setw(8) << std::left << subproblemIter_;
315 os << std::endl;
316 }
317 os.flags(osFlags);
318}
319
320} // namespace TypeE
321} // namespace ROL
322
323#endif
virtual void initialize(const Vector< Real > &x)
Initialize temporary variables.
Provides the interface to evaluate the augmented Lagrangian.
void reset(const Vector< Real > &multiplier, const Real penaltyParameter)
void update(const Vector< Real > &x, UpdateType type, int iter=-1)
Real getObjectiveValue(const Vector< Real > &x, Real &tol)
void gradient(Vector< Real > &g, const Vector< Real > &x, Real &tol)
void setScaling(const Real fscale=1.0, const Real cscale=1.0)
const Ptr< const Vector< Real > > getObjectiveGradient(const Vector< Real > &x, Real &tol)
const Ptr< const Vector< Real > > getConstraintVec(const Vector< Real > &x, Real &tol)
Provides an interface to check status of optimization algorithms for problems with equality constrain...
Defines the general constraint operator interface.
virtual void applyAdjointJacobian(Vector< Real > &ajv, const Vector< Real > &v, const Vector< Real > &x, Real &tol)
Apply the adjoint of the the constraint Jacobian at , , to vector .
Provides the interface to evaluate objective functions.
const Ptr< CombinedStatusTest< Real > > status_
Algorithm()
Constructor, given a step and a status test.
const Ptr< AlgorithmState< Real > > state_
Provides interface for and implements limited-memory secant operators.
void initialize(const Vector< Real > &x, const Vector< Real > &g, const Vector< Real > &mul, const Vector< Real > &c)
virtual void writeExitStatus(std::ostream &os) const
virtual void writeName(std::ostream &os) const override
Print step name.
virtual void writeHeader(std::ostream &os) const override
Print iterate header.
AugmentedLagrangianAlgorithm(ParameterList &list, const Ptr< Secant< Real > > &secant=nullPtr)
void initialize(Vector< Real > &x, const Vector< Real > &g, const Vector< Real > &l, const Vector< Real > &c, AugmentedLagrangianObjective< Real > &alobj, Constraint< Real > &con, std::ostream &outStream=std::cout)
virtual void writeOutput(std::ostream &os, const bool print_header=false) const override
Print iterate status.
virtual void run(Vector< Real > &x, const Vector< Real > &g, Objective< Real > &obj, Constraint< Real > &econ, Vector< Real > &emul, const Vector< Real > &eres, std::ostream &outStream=std::cout) override
Defines the linear algebra or vector space interface.
virtual void scale(const Real alpha)=0
Compute where .
virtual ROL::Ptr< Vector > clone() const =0
Clone to make a new (uninitialized) vector.
virtual int dimension() const
Return dimension of the vector space.
virtual ROL::Ptr< Vector > basis(const int i) const
Return i-th basis vector.
virtual void axpy(const Real alpha, const Vector &x)
Compute where .
Ptr< Algorithm< Real > > AlgorithmFactory(ParameterList &parlist, const Ptr< Secant< Real > > &secant=nullPtr)
Real ROL_EPSILON(void)
Platform-dependent machine epsilon.
Definition ROL_Types.hpp:57
@ EXITSTATUS_CONVERGED
Definition ROL_Types.hpp:84