MoochoPack: Framework for Large-Scale Optimization Algorithms (Version of the Day)
MoochoPack_NLPAlgoConfigIP.cpp
1 // @HEADER
2 // ***********************************************************************
3 //
4 // Moocho: Multi-functional Object-Oriented arCHitecture for Optimization
5 // Copyright (2003) Sandia Corporation
6 //
7 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
8 // license for use of this work by or on behalf of the U.S. Government.
9 //
10 // Redistribution and use in source and binary forms, with or without
11 // modification, are permitted provided that the following conditions are
12 // met:
13 //
14 // 1. Redistributions of source code must retain the above copyright
15 // notice, this list of conditions and the following disclaimer.
16 //
17 // 2. Redistributions in binary form must reproduce the above copyright
18 // notice, this list of conditions and the following disclaimer in the
19 // documentation and/or other materials provided with the distribution.
20 //
21 // 3. Neither the name of the Corporation nor the names of the
22 // contributors may be used to endorse or promote products derived from
23 // this software without specific prior written permission.
24 //
25 // THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
26 // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
29 // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30 // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31 // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32 // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33 // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34 // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35 // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 //
37 // Questions? Contact Roscoe A. Bartlett (rabartl@sandia.gov)
38 //
39 // ***********************************************************************
40 // @HEADER
41 
42 #include <assert.h>
43 
44 #include <sstream>
45 #include <typeinfo>
46 #include <iostream>
47 
48 #include "MoochoPack_NLPAlgoConfigIP.hpp"
49 #include "NLPInterfacePack_NLPBarrier.hpp"
50 #include "MoochoPack_NLPAlgo.hpp"
51 #include "MoochoPack_IpState.hpp"
52 #include "MoochoPack_NLPAlgoContainer.hpp"
53 #include "AbstractLinAlgPack_MatrixSymPosDefCholFactor.hpp" // rHL
54 //#include "ConstrainedOptPack_MatrixSymPosDefInvCholFactor.hpp" // .
55 #include "ConstrainedOptPack_MatrixSymPosDefLBFGS.hpp" // .
56 //#include "ConstrainedOptPack_MatrixHessianSuperBasicInitDiagonal.hpp" // | rHL (super basics)
57 #include "AbstractLinAlgPack_MatrixSymDiagStd.hpp" // |
58 
59 #include "NLPInterfacePack_NLPDirect.hpp"
60 #include "NLPInterfacePack_NLPVarReductPerm.hpp"
61 #include "NLPInterfacePack_CalcFiniteDiffProd.hpp"
62 
63 // line search
64 #include "ConstrainedOptPack_DirectLineSearchArmQuad_Strategy.hpp"
65 #include "ConstrainedOptPack_DirectLineSearchArmQuad_StrategySetOptions.hpp"
66 #include "ConstrainedOptPack_MeritFuncNLPL1.hpp"
67 #include "ConstrainedOptPack_MeritFuncNLPModL1.hpp"
68 
69 // Basis permutations and direct sparse solvers
70 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
71 #include "ConstrainedOptPack_DecompositionSystemVarReductPerm.hpp"
72 #endif
73 
74 #include "MoochoPack_MoochoAlgorithmStepNames.hpp"
75 
76 #include "MoochoPack_UpdateBarrierParameter_Step.hpp"
77 
78 #include "MoochoPack_PreEvalNewPointBarrier_Step.hpp"
79 #include "MoochoPack_PostEvalNewPointBarrier_Step.hpp"
80 #include "MoochoPack_ReducedGradientStd_Step.hpp"
81 //#include "MoochoPack_InitFinDiffReducedHessian_Step.hpp"
82 //#include "MoochoPack_InitFinDiffReducedHessian_StepSetOptions.hpp"
83 #include "MoochoPack_ReducedHessianSecantUpdateStd_Step.hpp"
84 #include "MoochoPack_ReducedHessianSecantUpdateBFGSFull_Strategy.hpp"
85 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_Strategy.hpp"
86 //#include "MoochoPack_ReducedHessianSecantUpdateBFGSProjected_StrategySetOptions.hpp"
87 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_Strategy.hpp"
88 //#include "MoochoPack_ReducedHessianSecantUpdateLPBFGS_StrategySetOptions.hpp"
89 #include "MoochoPack_BFGSUpdate_Strategy.hpp"
90 #include "MoochoPack_BFGSUpdate_StrategySetOptions.hpp"
91 #include "MoochoPack_QuasiNormalStepStd_Step.hpp"
92 #include "MoochoPack_CheckDescentQuasiNormalStep_Step.hpp"
93 #include "MoochoPack_CheckDecompositionFromPy_Step.hpp"
94 #include "MoochoPack_CheckDecompositionFromRPy_Step.hpp"
95 #include "MoochoPack_TangentialStepIP_Step.hpp"
96 //#include "MoochoPack_TangentialStepWithoutBounds_Step.hpp"
97 //#include "MoochoPack_TangentialStepWithInequStd_Step.hpp"
98 //#include "MoochoPack_TangentialStepWithInequStd_StepSetOptions.hpp"
99 //#include "MoochoPack_SetDBoundsStd_AddedStep.hpp"
100 #include "MoochoPack_QPFailureReinitReducedHessian_Step.hpp"
101 #include "MoochoPack_CalcDFromYPYZPZ_Step.hpp"
102 #include "MoochoPack_CalcD_vStep_Step.hpp"
103 
104 #include "MoochoPack_PreProcessBarrierLineSearch_Step.hpp"
105 #include "MoochoPack_PostProcessBarrierLineSearch_Step.hpp"
106 #include "MoochoPack_LineSearchFailureNewDecompositionSelection_Step.hpp"
107 #include "MoochoPack_LineSearchFilter_Step.hpp"
108 #include "MoochoPack_LineSearchFilter_StepSetOptions.hpp"
109 #include "MoochoPack_LineSearchFullStep_Step.hpp"
110 #include "MoochoPack_LineSearchDirect_Step.hpp"
111 //#include "MoochoPack_LineSearch2ndOrderCorrect_Step.hpp"
112 //#include "MoochoPack_LineSearch2ndOrderCorrect_StepSetOptions.hpp"
113 //#include "MoochoPack_FeasibilityStepReducedStd_Strategy.hpp"
114 //#include "MoochoPack_FeasibilityStepReducedStd_StrategySetOptions.hpp"
115 //#include "MoochoPack_QuasiRangeSpaceStepStd_Strategy.hpp"
116 //#include "MoochoPack_QuasiRangeSpaceStepTailoredApproach_Strategy.hpp"
117 //#include "MoochoPack_LineSearchWatchDog_Step.hpp"
118 //#include "MoochoPack_LineSearchWatchDog_StepSetOptions.hpp"
119 //#include "MoochoPack_LineSearchFullStepAfterKIter_Step.hpp"
120 //#include "MoochoPack_CalcLambdaIndepStd_AddedStep.hpp"
121 #include "MoochoPack_CalcReducedGradLagrangianStd_AddedStep.hpp"
122 #include "MoochoPack_CheckConvergenceStd_AddedStep.hpp"
123 #include "MoochoPack_CheckConvergenceIP_Strategy.hpp"
124 #include "MoochoPack_CheckSkipBFGSUpdateStd_StepSetOptions.hpp"
125 #include "MoochoPack_MeritFunc_PenaltyParamUpdate_AddedStepSetOptions.hpp"
126 #include "MoochoPack_MeritFunc_PenaltyParamUpdateMultFree_AddedStep.hpp"
127 //#include "MoochoPack_MeritFunc_PenaltyParamUpdateWithMult_AddedStep.hpp"
128 //#include "MoochoPack_MeritFunc_PenaltyParamsUpdateWithMult_AddedStep.hpp"
129 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStep.hpp"
130 //#include "MoochoPack_MeritFunc_ModifiedL1LargerSteps_AddedStepSetOptions.hpp"
131 //#include "MoochoPack_ActSetStats_AddedStep.hpp"
132 //#include "MoochoPack_NumFixedDepIndep_AddedStep.hpp"
133 #include "MoochoPack_UpdateReducedSigma_Step.hpp"
134 
135 #include "MoochoPack_quasi_newton_stats.hpp"
136 
137 // Misc utilities
138 #include "Teuchos_AbstractFactoryStd.hpp"
139 #include "Teuchos_dyn_cast.hpp"
140 #include "ReleaseResource_ref_count_ptr.hpp"
141 #include "Teuchos_Assert.hpp"
142 
143 // Stuff to read in options
144 #include "OptionsFromStreamPack_StringToIntMap.hpp"
145 #include "OptionsFromStreamPack_StringToBool.hpp"
146 
147 // Stuff for exact reduced hessian
148 //#include "MoochoPack_ReducedHessianExactStd_Step.hpp"
149 //#include "MoochoPack_CrossTermExactStd_Step.hpp"
150 //#include "MoochoPack_DampenCrossTermStd_Step.hpp"
151 
152 namespace {
153  const double INF_BASIS_COND_CHANGE_FRAC = 1e+20;
154 }
155 
156 namespace MoochoPack {
157 
158 //
159 // Here is where we define the default values for the algorithm. These
160 // should agree with what are in the Moocho.opt.NLPAlgoConfigIP file.
161 //
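// For reference, a minimal sketch (assumed syntax, not copied from any
// distributed Moocho.opt file) of the corresponding options group; the
// option names are the ones parsed in readin_options() below:
//
//   options_group NLPAlgoConfigIP {
//     quasi_newton                = AUTO;
//     hessian_initialization      = AUTO;
//     qp_solver                   = AUTO;
//     line_search_method          = FILTER;
//     merit_function_type         = AUTO;
//     l1_penalty_parameter_update = AUTO;
//   }
//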
162 NLPAlgoConfigIP::SOptionValues::SOptionValues()
163  :max_basis_cond_change_frac_(-1.0)
164  ,exact_reduced_hessian_(false)
165  ,quasi_newton_(QN_AUTO)
166  ,num_lbfgs_updates_stored_(-1)
167  ,lbfgs_auto_scaling_(true)
168  ,hessian_initialization_(INIT_HESS_AUTO)
169  ,qp_solver_type_(QP_AUTO)
170  ,reinit_hessian_on_qp_fail_(true)
171  ,line_search_method_(LINE_SEARCH_AUTO)
172  ,merit_function_type_(MERIT_FUNC_AUTO)
173  ,l1_penalty_param_update_(L1_PENALTY_PARAM_AUTO)
174  ,full_steps_after_k_(-1)
175 {}
176 
177 NLPAlgoConfigIP::NLPAlgoConfigIP()
178 {}
179 
180 NLPAlgoConfigIP::~NLPAlgoConfigIP()
181 {}
182 
183 // overridden from NLPAlgoConfig
184 
185 void NLPAlgoConfigIP::set_options( const options_ptr_t& options )
186 {
187  options_ = options;
188  decomp_sys_step_builder_.set_options(options);
189 }
190 
191 const NLPAlgoConfig::options_ptr_t&
192 NLPAlgoConfigIP::get_options() const
193 {
194  return options_;
195 }
196 
197 void NLPAlgoConfigIP::config_algo_cntr(
198  NLPAlgoContainer *algo_cntr
199  ,std::ostream *trase_out
200  )
201 {
202 
203  using Teuchos::RCP;
204  using Teuchos::dyn_cast;
206 
207  if(trase_out) {
208  *trase_out
209  << std::endl
210  << "*****************************************************************\n"
211  << "*** NLPAlgoConfigIP configuration ***\n"
212  << "*** ***\n"
213  << "*** Here, summary information about how the algorithm is ***\n"
214  << "*** configured is printed so that the user can see how the ***\n"
215  << "*** properties of the NLP and the set options influence ***\n"
216  << "*** how an algorithm is configured. ***\n"
217  << "*****************************************************************\n";
218  }
219 
220  // ////////////////////////////////////////////////////////////
221  // A. ???
222 
223  // /////////////////////////////////////////////////////////////////////////
224  // B. Create an algo object, give to algo_cntr, then give algo_cntr to algo
225 
226  if(trase_out)
227  *trase_out << "\n*** Creating the NLPAlgo algo object ...\n";
228 
229  typedef Teuchos::RCP<NLPAlgo> algo_ptr_t;
230  algo_ptr_t algo = Teuchos::rcp(new NLPAlgo);
231  TEUCHOS_TEST_FOR_EXCEPT( !( algo.get() ) );
232  algo_cntr->set_algo(algo);
233  algo->set_algo_cntr(algo_cntr);
234 
235  // /////////////////////////////////////////////
236  // C. Configure algo
237 
238  // /////////////////////////////////////////////////////
239  // C.0 Set the nlp and track objects
240 
241  if(trase_out)
242  *trase_out << "\n*** Setting the NLP and track objects to the algo object ...\n";
243 
244  algo->set_nlp( algo_cntr->get_nlp().get() );
245  algo->set_track( algo_cntr->get_track() );
246 
247  // ////////////////////////////////////////////////
248  // Determine what the options are:
249 
250  // Readin the options
251  if(options_.get()) {
252  readin_options( *options_, &uov_, trase_out );
253  }
254  else {
255  if(trase_out) {
256  *trase_out
257  << "\n*** Warning, no OptionsFromStream object was set so a default set"
258  " of options will be used!\n";
259  }
260  }
261 
262  NLP &nlp = algo->nlp();
263  nlp.initialize(algo->algo_cntr().check_results());
264  // Get the dimensions of the NLP
265  const size_type
266  n = nlp.n(),
267  m = nlp.m(),
268  r = m, // ToDo: Compute this for real!
269  //dof = n - r,
270  nb = nlp.num_bounded_x();
271 
272  // Process the NLP
273  NLPFirstOrder *nlp_foi = NULL;
274  NLPSecondOrder *nlp_soi = NULL;
275  NLPDirect *nlp_fod = NULL;
276  bool tailored_approach = false;
277  decomp_sys_step_builder_.process_nlp_and_options(
278  trase_out, nlp
279  ,&nlp_foi, &nlp_soi, &nlp_fod, &tailored_approach
280  );
281 
282  const int max_dof_quasi_newton_dense
283  = decomp_sys_step_builder_.current_option_values().max_dof_quasi_newton_dense_;
284 
285  // Make sure that we can handle this type of NLP currently
286  TEUCHOS_TEST_FOR_EXCEPTION(
287  m == 0, std::logic_error
288  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
289  "can not currently solve an unconstrained NLP!" );
290  TEUCHOS_TEST_FOR_EXCEPTION(
291  n == m, std::logic_error
292  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
293  "can not currently solve a square system of equations!" );
294 
295  // //////////////////////////////////////////////////////
296  // C.1. Sort out the options
297 
298  if(trase_out)
299  *trase_out
300  << "\n*** Sorting out some of the options given input options ...\n";
301 
302  if( tailored_approach ) {
303  // Change the options for the tailored approach.
304  if(trase_out) {
305  *trase_out
306  << "\nThis is a tailored approach NLP (NLPDirect) which forces the following options:\n"
307  << "merit_function_type = L1;\n"
308  << "l1_penalty_parameter_update = MULT_FREE;\n"
309  << "null_space_matrix = EXPLICIT;\n"
310  ;
311  }
312  cov_.merit_function_type_
313  = MERIT_FUNC_L1;
314  cov_.l1_penalty_param_update_
315  = L1_PENALTY_PARAM_MULT_FREE;
316  decomp_sys_step_builder_.current_option_values().null_space_matrix_type_
317  = DecompositionSystemStateStepBuilderStd::NULL_SPACE_MATRIX_EXPLICIT;
318  }
319 
320  if( !tailored_approach && uov_.merit_function_type_ != MERIT_FUNC_L1 ) {
321  if(trase_out) {
322  *trase_out
323  << "\nThe only merit function currently supported is:\n"
324  << "merit_function_type = L1;\n"
325  ;
326  }
327  cov_.merit_function_type_ = MERIT_FUNC_L1;
328  }
329 
330  // Decide what type of quasi newton update to use
331  switch( uov_.quasi_newton_ ) {
332  case QN_AUTO: {
333  if(trase_out)
334  *trase_out
335  << "\nquasi_newton == AUTO:"
336  << "\nnlp.num_bounded_x() == " << nlp.num_bounded_x() << ":\n";
337  //if( n - r > cov_.max_dof_quasi_newton_dense_ ) {
338  // if(trase_out)
339  // *trase_out
340  // << "n-r = " << n-r << " > max_dof_quasi_newton_dense = "
341  // << cov_.max_dof_quasi_newton_dense_ << ":\n"
342  // << "setting quasi_newton == LBFGS\n";
343  // cov_.quasi_newton_ = QN_LBFGS;
344  //}
345  //else {
346  if(trase_out)
347  *trase_out
348  << "n-r = " << n-r << " <= max_dof_quasi_newton_dense = "
349  << max_dof_quasi_newton_dense << ":\n"
350  << "setting quasi_newton == BFGS\n";
351  cov_.quasi_newton_ = QN_BFGS;
352  //}
353  break;
354  }
355  case QN_BFGS:
356  case QN_PBFGS:
357  case QN_LBFGS:
358  case QN_LPBFGS:
359  cov_.quasi_newton_ = uov_.quasi_newton_;
360  break;
361  default:
362  TEUCHOS_TEST_FOR_EXCEPT(true); // Invalid option!
363  }
364 
365  // ToDo: Sort out the rest of the options!
366 
367  // Set the default options that where not already set yet
368  set_default_options(uov_,&cov_,trase_out);
369 
370  // ToDo: Implement the 2nd order correction linesearch
371  if( cov_.line_search_method_ == LINE_SEARCH_2ND_ORDER_CORRECT ) {
372  if(trase_out)
373  *trase_out <<
374  "\nline_search_method == 2ND_ORDER_CORRECT:\n"
375  "Sorry, the second order correction linesearch is not updated yet!\n"
376  "setting line_search_method = DIRECT ...\n";
377  cov_.line_search_method_ = LINE_SEARCH_DIRECT;
378  }
379  if( cov_.line_search_method_ == LINE_SEARCH_WATCHDOG ) {
380  if(trase_out)
381  *trase_out <<
382  "\nline_search_method == WATCHDOG:\n"
383  "Sorry, the watchdog linesearch is not updated yet!\n"
384  "setting line_search_method = DIRECT ...\n";
385  cov_.line_search_method_ = LINE_SEARCH_DIRECT;
386  }
387 
388  // /////////////////////////////////////////////////////
389  // C.1. Create the decomposition system object
390 
391  typedef RCP<DecompositionSystem> decomp_sys_ptr_t;
392  decomp_sys_ptr_t decomp_sys;
393  decomp_sys_step_builder_.create_decomp_sys(
394  trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach
395  ,&decomp_sys
396  );
397 
398 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
399  RCP<DecompositionSystemVarReductPerm>
400  decomp_sys_perm = Teuchos::rcp_dynamic_cast<DecompositionSystemVarReductPerm>(decomp_sys);
401 #endif
402 
403  // /////////////////////////////////////////////////////
404  // C.2. Create and set the state object
405 
406  if(trase_out)
407  *trase_out
408  << "\n*** Creating the state object and setting up iteration quantity objects ...\n";
409 
410  {
411  //
412  // Create the state object with the vector spaces
413  //
414 
415  typedef RCP<IpState> state_ptr_t;
416  state_ptr_t
417  state = Teuchos::rcp(
418  new IpState(
419  decomp_sys
420  ,nlp.space_x()
421  ,nlp.space_c()
422  ,( tailored_approach
423  ? ( nlp_fod->var_dep().size()
424  ? nlp.space_x()->sub_space(nlp_fod->var_dep())->clone()
425  : Teuchos::null )
426  : decomp_sys->space_range() // could be NULL for BasisSystemPerm
427  )
428  ,( tailored_approach
429  ?( nlp_fod->var_indep().size()
430  ? nlp.space_x()->sub_space(nlp_fod->var_indep())->clone()
431  : Teuchos::null )
432  : decomp_sys->space_null() // could be NULL for BasisSystemPerm
433  )
434  )
435  );
436 
437  //
438  // Set the iteration quantities for the NLP matrix objects
439  //
440 
441  decomp_sys_step_builder_.add_iter_quantities(
442  trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
443  ,state
444  );
445 
447  // Set the iteration quantities for the barrier terms
449  state->set_iter_quant(
450  Vu_name
451  ,Teuchos::rcp(
452  new IterQuantityAccessContiguous<MatrixSymDiagStd>(
453  1,
454  Vu_name,
455  Teuchos::rcp( new Teuchos::AbstractFactoryStd<MatrixSymDiagStd,MatrixSymDiagStd,
456  MatrixSymDiagStd::PostMod>( nlp.space_x() )
457  )
458  )
459  )
460  );
461 
462  state->set_iter_quant(
463  Vl_name
464  ,Teuchos::rcp(
465  new IterQuantityAccessContiguous<MatrixSymDiagStd>(
466  1,
467  Vl_name,
468  Teuchos::rcp( new Teuchos::AbstractFactoryStd<MatrixSymDiagStd,MatrixSymDiagStd,
469  MatrixSymDiagStd::PostMod>( nlp.space_x() )
470  )
471  )
472  )
473  );
474 
475  state->set_iter_quant(
476  invXu_name
477  ,Teuchos::rcp(
478  new IterQuantityAccessContiguous<MatrixSymDiagStd>(
479  1,
480  invXu_name,
481  Teuchos::rcp( new Teuchos::AbstractFactoryStd<MatrixSymDiagStd,MatrixSymDiagStd,
482  MatrixSymDiagStd::PostMod>( nlp.space_x() )
483  )
484  )
485  )
486  );
487 
488  state->set_iter_quant(
489  invXl_name
490  ,Teuchos::rcp(
491  new IterQuantityAccessContiguous<MatrixSymDiagStd>(
492  1,
493  invXl_name,
494  Teuchos::rcp( new Teuchos::AbstractFactoryStd<MatrixSymDiagStd,MatrixSymDiagStd,
495  MatrixSymDiagStd::PostMod>( nlp.space_x() )
496  )
497  )
498  )
499  );
500 
501  state->set_iter_quant(
502  rHB_name
503  ,Teuchos::rcp(
504  new IterQuantityAccessContiguous<MatrixSymOp>(
505  1,
506  rHB_name,
507  Teuchos::rcp(
508  new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefCholFactor,MatrixSymPosDefCholFactor::PostMod>(
509  MatrixSymPosDefCholFactor::PostMod(
510  true // maintain_original
511  ,false // maintain_factor
512  ,true // allow_factor (always!)
513  )
514  )
515  )
516  )
517  )
518  );
519 
520  state->set_iter_quant(
521  B_name
522  ,Teuchos::rcp(
523  new IterQuantityAccessContiguous<MatrixSymOp>(
524  1,
525  B_name,
526  Teuchos::rcp(
527  new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefCholFactor,MatrixSymPosDefCholFactor::PostMod>(
528  MatrixSymPosDefCholFactor::PostMod(
529  true // maintain_original
530  ,false // maintain_factor
531  ,true // allow_factor (always!)
532  )
533  )
534  )
535  )
536  )
537  );
538 
539  state->set_iter_quant(
540  Sigma_name
541  ,Teuchos::rcp(
542  new IterQuantityAccessContiguous<MatrixSymDiagStd>(
543  1,
544  Sigma_name,
545  Teuchos::rcp( new Teuchos::AbstractFactoryStd<MatrixSymDiagStd,MatrixSymDiagStd,
546  MatrixSymDiagStd::PostMod>( nlp.space_x() )
547  )
548  )
549  )
550  );
551 
552  // These iteration quantities are defined in IpState,
553  // force their creation and resize them
554  dyn_cast< IterQuantityAccessContiguous<value_type> >(state->barrier_obj()).resize(2);
555  dyn_cast< IterQuantityAccessContiguous<VectorMutable> >(state->grad_barrier_obj()).resize(2);
556 
557  // Add reduced Hessian of the Lagrangian
558 
559  if( !cov_.exact_reduced_hessian_ ) {
560  RCP<Teuchos::AbstractFactory<MatrixSymOp> >
561  abstract_factory_rHL = Teuchos::rcp(
562  new Teuchos::AbstractFactoryStd<MatrixSymOp,MatrixSymPosDefCholFactor,MatrixSymPosDefCholFactor::PostMod>(
563  MatrixSymPosDefCholFactor::PostMod(
564  true // maintain_original
565  ,false // maintain_factor
566  ,true // allow_factor (always!)
567  )
568  )
569  );
570  state->set_iter_quant(
571  rHL_name
572  ,Teuchos::rcp(
573  new IterQuantityAccessContiguous<MatrixSymOp>(
574  1
575  ,rHL_name
576  ,abstract_factory_rHL
577  )
578  )
579  );
580  }
581  else {
582  TEUCHOS_TEST_FOR_EXCEPT(true); // ToDo: Add rHL for an exact reduced Hessian!
583  }
584 
585  //
586  // Set the NLP merit function
587  //
588 
589  if( cov_.line_search_method_ != LINE_SEARCH_NONE
590  && cov_.line_search_method_ != LINE_SEARCH_FILTER) {
591  RCP<Teuchos::AbstractFactory<MeritFuncNLP> >
592  merit_func_factory = Teuchos::null;
593  switch( cov_.merit_function_type_ ) {
594  case MERIT_FUNC_L1:
595  merit_func_factory = Teuchos::rcp(
596  new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPL1>());
597  break;
598  case MERIT_FUNC_MOD_L1:
599  case MERIT_FUNC_MOD_L1_INCR:
600  merit_func_factory = Teuchos::rcp(
601  new Teuchos::AbstractFactoryStd<MeritFuncNLP,MeritFuncNLPModL1>());
602  break;
603  default:
604  TEUCHOS_TEST_FOR_EXCEPT(true); // local programming error
605  }
606  state->set_iter_quant(
607  merit_func_nlp_name
608  ,Teuchos::rcp(
609  new IterQuantityAccessContiguous<MeritFuncNLP>(
610  1
611  ,merit_func_nlp_name
612  ,merit_func_factory
613  )
614  )
615  );
616  }
617 
618  if (cov_.line_search_method_ == LINE_SEARCH_FILTER)
619  {
620  // Add the filter iteration quantity
621  state->set_iter_quant(
622  FILTER_IQ_STRING
623  ,Teuchos::rcp(
624  new IterQuantityAccessContiguous<Filter_T>(1,FILTER_IQ_STRING)
625  )
626  );
627  }
628 
629  //
630  // Resize the number of storage locations (these can be changed later).
631  //
632  // Also, touch all of the value_type, index_type and vector iteration quantities
633  // that we know about so that when state.dump_iter_quant() is called, all of the
634  // iteration quantities will be included.
635  //
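  // resize(2) gives an iteration quantity two contiguous storage locations so
  // that both the current iterate k and the next iterate k+1 can be held at
  // once.  A step object would then access them through the IterQuantityAccess
  // interface, roughly as in this sketch (assuming the usual get_k()/set_k()
  // accessors):
  //
  //   const Vector &x_k   = state->x().get_k(0);    // read x at iteration k
  //   value_type   &f_kp1 = state->f().set_k(+1);   // create/overwrite f at k+1
  //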
636 
637  typedef IterQuantityAccessContiguous<value_type> IQ_scalar_cngs;
638  typedef IterQuantityAccessContiguous<VectorMutable> IQ_vector_cngs;
639 
640  dyn_cast<IQ_vector_cngs>(state->x()).resize(2);
641  dyn_cast<IQ_scalar_cngs>(state->f()).resize(2);
642  if(m) dyn_cast<IQ_vector_cngs>(state->c()).resize(2);
643  dyn_cast<IQ_vector_cngs>(state->Gf()).resize(2);
644  if(m && nlp_foi) state->Gc();
645 
646  if( m
647 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
648  && decomp_sys_perm.get() == NULL
649 #endif
650  ) state->py();
651  if(m) dyn_cast<IQ_vector_cngs>(state->Ypy()).resize(2);
652  if( m
653 #ifndef MOOCHO_NO_BASIS_PERM_DIRECT_SOLVERS
654  && decomp_sys_perm.get() == NULL
655 #endif
656  ) state->pz();
657  if(m) dyn_cast<IQ_vector_cngs>(state->Zpz()).resize(2);
658  dyn_cast<IQ_vector_cngs>(state->d()).resize(2);
659 
660  if( n > m ) {
661  dyn_cast<IQ_vector_cngs>(state->rGf()).resize(2);
662  state->w();
663  state->zeta();
664  state->qp_grad();
665  }
666  state->eta();
667 
668  dyn_cast<IQ_scalar_cngs>(state->barrier_parameter()).resize(2);
669 
670  dyn_cast<IQ_scalar_cngs>(state->alpha()).resize(2);
671  dyn_cast<IQ_scalar_cngs>(state->mu()).resize(2);
672  dyn_cast<IQ_scalar_cngs>(state->phi()).resize(2);
673 
674  dyn_cast<IQ_scalar_cngs>(state->opt_kkt_err()).resize(2);
675  dyn_cast<IQ_scalar_cngs>(state->feas_kkt_err()).resize(2);
676  if( n > m ) {
677  dyn_cast<IQ_vector_cngs>(state->rGL()).resize(2);
678  }
679  if(m) dyn_cast<IQ_vector_cngs>(state->lambda()).resize(2);
680  dyn_cast<IQ_vector_cngs>(state->nu()).resize(2);
681 
682  // Set the state object
683  algo->set_state( state );
684  }
685 
686  // /////////////////////////////////////////////////////
687  // C.3 Create and set the step objects
688 
689  if(trase_out)
690  *trase_out << "\n*** Creating and setting the step objects ...\n";
691 
692  {
693 
694  //
695  // Create some standard step objects that will be used by many different
696  // specific algorithms
697  //
698 
699  typedef RCP<AlgorithmStep> algo_step_ptr_t;
700 
701  // Create the EvalNewPoint step and associated objects
702  algo_step_ptr_t eval_new_point_step = Teuchos::null;
703  RCP<CalcFiniteDiffProd> calc_fd_prod = Teuchos::null;
704  RCP<VariableBoundsTester> bounds_tester = Teuchos::null;
705  RCP<NewDecompositionSelection_Strategy> new_decomp_selection_strategy = Teuchos::null;
706  decomp_sys_step_builder_.create_eval_new_point(
707  trase_out, nlp, nlp_foi, nlp_soi, nlp_fod, tailored_approach, decomp_sys
708  ,&eval_new_point_step, &calc_fd_prod, &bounds_tester, &new_decomp_selection_strategy
709  );
710 
711  // UpdateBarrierParameter_Step
712  Teuchos::RCP<UpdateBarrierParameter_Step> updateBarrierParameter_step = Teuchos::null;
713  updateBarrierParameter_step = Teuchos::rcp(new UpdateBarrierParameter_Step());
714  if(options_.get())
715  {
716  UpdateBarrierParameter_StepSetOptions options_setter(updateBarrierParameter_step.get());
717  options_setter.set_options(*options_);
718  }
719 
720  // PreEvalNewPointBarrier_Step
721  Teuchos::RCP<PreEvalNewPointBarrier_Step> preEvalNewPointBarrier_step = Teuchos::null;
722  preEvalNewPointBarrier_step = Teuchos::rcp(new PreEvalNewPointBarrier_Step());
723  if(options_.get())
724  {
725  PreEvalNewPointBarrier_StepSetOptions
726  options_setter(preEvalNewPointBarrier_step.get());
727  options_setter.set_options(*options_);
728  }
729 
730  // PostEvalNewPointBarrier_Step
731  algo_step_ptr_t postEvalNewPointBarrier_step = Teuchos::rcp(new PostEvalNewPointBarrier_Step());
732 
733  // ReducedGradient_Step
734  algo_step_ptr_t reduced_gradient_step = Teuchos::null;
735  if( !tailored_approach ) {
736  reduced_gradient_step = Teuchos::rcp(new ReducedGradientStd_Step());
737  }
738 
739  // RangeSpace_Step
740  algo_step_ptr_t quasi_normal_step_step = Teuchos::null;
741  if( !tailored_approach ) {
742  quasi_normal_step_step = Teuchos::rcp(new QuasiNormalStepStd_Step());
743  }
744 
745  // Check and change decomposition
746  algo_step_ptr_t check_decomp_from_py_step = Teuchos::null;
747  algo_step_ptr_t check_decomp_from_Rpy_step = Teuchos::null;
748  if( new_decomp_selection_strategy.get() && cov_.max_basis_cond_change_frac_ < INF_BASIS_COND_CHANGE_FRAC ) {
749  check_decomp_from_py_step = Teuchos::rcp(
750  new CheckDecompositionFromPy_Step(
751  new_decomp_selection_strategy
752  ,cov_.max_basis_cond_change_frac_
753  ) );
754  check_decomp_from_Rpy_step = Teuchos::rcp(
755  new CheckDecompositionFromRPy_Step(
756  new_decomp_selection_strategy
757  ,cov_.max_basis_cond_change_frac_
758  ) );
759  }
760 
761  // CheckDescentQuasiNormalStep
762  algo_step_ptr_t check_descent_quasi_normal_step_step = Teuchos::null;
763  if( algo->algo_cntr().check_results() ) {
764  check_descent_quasi_normal_step_step = Teuchos::rcp(new CheckDescentQuasiNormalStep_Step(calc_fd_prod));
765  }
766 
767  // ReducedGradient_Step
768  //algo_step_ptr_t reduced_gradient_step = Teuchos::null;
769  //if( !tailored_approach ) {
770  // reduced_gradient_step = Teuchos::rcp(new ReducedGradientStd_Step());
771  //}
772 
773  // CheckSkipBFGSUpdate
774  algo_step_ptr_t check_skip_bfgs_update_step = Teuchos::null;
775  if(!cov_.exact_reduced_hessian_) {
776  Teuchos::RCP<CheckSkipBFGSUpdateStd_Step>
777  step = Teuchos::rcp(new CheckSkipBFGSUpdateStd_Step());
778  if(options_.get()) {
779  CheckSkipBFGSUpdateStd_StepSetOptions
780  opt_setter( step.get() );
781  opt_setter.set_options( *options_ );
782  }
783  check_skip_bfgs_update_step = step;
784  }
785 
786  // ReducedHessian_Step
787  algo_step_ptr_t reduced_hessian_step = Teuchos::null;
788  {
789  // Get the strategy object that will perform the actual secant update.
790  RCP<ReducedHessianSecantUpdate_Strategy>
791  secant_update_strategy = Teuchos::null;
792  switch( cov_.quasi_newton_ )
793  {
794  case QN_BFGS:
795  case QN_PBFGS:
796  case QN_LBFGS:
797  case QN_LPBFGS:
798  {
799  // create and setup the actual BFGS strategy object
800  typedef RCP<BFGSUpdate_Strategy> bfgs_strategy_ptr_t;
801  bfgs_strategy_ptr_t
802  bfgs_strategy = Teuchos::rcp(new BFGSUpdate_Strategy);
803  if(options_.get()) {
804  BFGSUpdate_StrategySetOptions
805  opt_setter( bfgs_strategy.get() );
806  opt_setter.set_options( *options_ );
807  }
808  switch( cov_.quasi_newton_ ) {
809  case QN_BFGS:
810  case QN_LBFGS:
811  {
812  secant_update_strategy = Teuchos::rcp(new ReducedHessianSecantUpdateBFGSFull_Strategy(bfgs_strategy));
813  break;
814  }
815  case QN_PBFGS:
816  case QN_LPBFGS:
817  {
818  TEUCHOS_TEST_FOR_EXCEPTION(
819  true, std::logic_error
820  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
821  "The quasi_newton options of PBFGS and LPBFGS have not been updated yet!" );
822  break;
823  }
824  }
825  break;
826  }
827  default:
828  TEUCHOS_TEST_FOR_EXCEPT(true);
829  }
830 
831  // Finally build the step object
832  reduced_hessian_step = Teuchos::rcp(
833  new ReducedHessianSecantUpdateStd_Step( secant_update_strategy ) );
834  // Add the QuasiNewtonStats iteration quantity
835  algo->state().set_iter_quant(
836  quasi_newton_stats_name
837  ,Teuchos::rcp(new IterQuantityAccessContiguous<QuasiNewtonStats>(
838  1
839  ,quasi_newton_stats_name
840 #ifdef _MIPS_CXX
841  ,Teuchos::RCP<Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats> >(
842  new Teuchos::AbstractFactoryStd<QuasiNewtonStats,QuasiNewtonStats>())
843 #endif
844  )
845  ));
846  }
847 
848  // UpdateReducedSigma_Step
849  Teuchos::RCP<UpdateReducedSigma_Step> updateReducedSigma_step = Teuchos::null;
850  updateReducedSigma_step = Teuchos::rcp(new UpdateReducedSigma_Step());
851  if(options_.get())
852  {
853  UpdateReducedSigma_StepSetOptions
854  options_setter(updateReducedSigma_step.get());
855  options_setter.set_options(*options_);
856  }
857 
858  // NullSpace_Step
859  algo_step_ptr_t tangential_step = Teuchos::rcp(new TangentialStepIP_Step());
860  /* algo_step_ptr_t set_d_bounds_step = Teuchos::null;
861  algo_step_ptr_t tangential_step_step = Teuchos::null;
862  if( nb == 0 ) {
863  tangential_step_step = Teuchos::rcp(new TangentialStepWithoutBounds_Step());
864  }
865  else {
866  // Step object that sets bounds for QP subproblem
867  set_d_bounds_step = Teuchos::rcp(new SetDBoundsStd_AddedStep());
868  // QP Solver object
869  Teuchos::RCP<QPSolverRelaxed> qp_solver = Teuchos::null;
870  // ToDo: Create the QP solver!
871  // QP solver tester
872  Teuchos::RCP<QPSolverRelaxedTester>
873  qp_solver_tester = Teuchos::rcp(new QPSolverRelaxedTester());
874  if(options_.get()) {
875  QPSolverRelaxedTesterSetOptions
876  opt_setter( qp_solver_tester.get() );
877  opt_setter.set_options( *options_ );
878  }
879  // The null-space step
880  Teuchos::RCP<TangentialStepWithInequStd_Step>
881  tangential_step_with_inequ_step = Teuchos::rcp(
882  new TangentialStepWithInequStd_Step(
883  qp_solver, qp_solver_tester ) );
884  if(options_.get()) {
885  TangentialStepWithInequStd_StepSetOptions
886  opt_setter( tangential_step_with_inequ_step.get() );
887  opt_setter.set_options( *options_ );
888  }
889  tangential_step_step = tangential_step_with_inequ_step;
890  // Step for reinitialization reduced Hessian on QP failure
891  tangential_step_step = Teuchos::rcp(
892  new QPFailureReinitReducedHessian_Step(tangential_step_step)
893  );
894  }*/
895 
896  // CalcDFromYPYZPZ_Step
897  algo_step_ptr_t calc_d_from_Ypy_Zpy_step = Teuchos::null;
898  {
899  calc_d_from_Ypy_Zpy_step = Teuchos::rcp(new CalcDFromYPYZPZ_Step());
900  }
901 
902  // CalcD_vStep_Step
903  algo_step_ptr_t calc_d_v_step_step = Teuchos::rcp(new CalcD_vStep_Step());
904 
905  // build the barrier nlp decorator to be used by the line search
906  Teuchos::RCP<NLPBarrier> barrier_nlp = Teuchos::rcp(new NLPBarrier());
907  barrier_nlp->InitializeFromNLP( algo_cntr->get_nlp() );
908 
909  // PreProcessBarrierLineSearch_Step
910  algo_step_ptr_t preprocess_barrier_linesearch_step = Teuchos::rcp(new PreProcessBarrierLineSearch_Step(barrier_nlp));
911 
912  // PostProcessBarrierLineSearch_Step
913  algo_step_ptr_t postprocess_barrier_linesearch_step = Teuchos::rcp(new PostProcessBarrierLineSearch_Step(barrier_nlp));
914 
915  // CalcReducedGradLagrangianStd_AddedStep
916  algo_step_ptr_t calc_reduced_grad_lagr_step = Teuchos::null;
917  {
918  calc_reduced_grad_lagr_step = Teuchos::rcp(
919  new CalcReducedGradLagrangianStd_AddedStep() );
920  }
921 
922  // CheckConvergence_Step
923  algo_step_ptr_t check_convergence_step = Teuchos::null;
924  {
925  // Create the strategy object
926  RCP<CheckConvergenceIP_Strategy>
927  check_convergence_strategy = Teuchos::rcp(new CheckConvergenceIP_Strategy());
928 
929  if(options_.get())
930  {
931  CheckConvergence_StrategySetOptions
932  opt_setter( check_convergence_strategy.get() );
933  opt_setter.set_options( *options_ );
934  }
935 
936  Teuchos::RCP<CheckConvergenceStd_AddedStep>
937  _check_convergence_step = Teuchos::rcp(new CheckConvergenceStd_AddedStep(check_convergence_strategy));
938 
939  check_convergence_step = _check_convergence_step;
940  }
941 
942  // MeritFuncPenaltyParamUpdate_Step
943  algo_step_ptr_t merit_func_penalty_param_update_step = Teuchos::null;
944  if( cov_.line_search_method_ == LINE_SEARCH_FILTER ) {
945  // We don't need to update a penalty parameter for the filter method :-)
946  }
947  else if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
948  RCP<MeritFunc_PenaltyParamUpdate_AddedStep>
949  param_update_step = Teuchos::null;
950  switch( cov_.merit_function_type_ ) {
951  case MERIT_FUNC_L1: {
952  switch(cov_.l1_penalty_param_update_) {
953  case L1_PENALTY_PARAM_WITH_MULT:
954 // param_update_step
955 // = Teuchos::rcp(new MeritFunc_PenaltyParamUpdateWithMult_AddedStep());
956  TEUCHOS_TEST_FOR_EXCEPTION(
957  true, std::logic_error
958  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
959  "The l1_penalty_parameter_update option of WITH_MULT has not been updated yet!" );
960  break;
961  case L1_PENALTY_PARAM_MULT_FREE:
962  param_update_step
963  = Teuchos::rcp(new MeritFunc_PenaltyParamUpdateMultFree_AddedStep());
964  break;
965  default:
966  TEUCHOS_TEST_FOR_EXCEPT(true);
967  }
968  break;
969  }
970  case MERIT_FUNC_MOD_L1:
971  case MERIT_FUNC_MOD_L1_INCR:
972 // param_update_step = new MeritFunc_PenaltyParamsUpdateWithMult_AddedStep(
973 // Teuchos::rcp_implicit_cast<MeritFuncNLP>(merit_func) );
974  TEUCHOS_TEST_FOR_EXCEPTION(
975  true, std::logic_error
976  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
977  "The merit_function_type options of MODIFIED_L1 and MODIFIED_L1_INCR have not been updated yet!" );
978  break;
979  default:
980  TEUCHOS_TEST_FOR_EXCEPT(true); // local programming error
981  }
982  if(options_.get()) {
983  MeritFunc_PenaltyParamUpdate_AddedStepSetOptions
984  ppu_options_setter( param_update_step.get() );
985  ppu_options_setter.set_options( *options_ );
986  }
987  merit_func_penalty_param_update_step = param_update_step;
988  }
989 
990  // LineSearch_Step
991  algo_step_ptr_t line_search_full_step_step = Teuchos::null;
992  {
993  line_search_full_step_step = Teuchos::rcp(new LineSearchFullStep_Step(bounds_tester));
994  }
995 
996  // LineSearch_Step
997  algo_step_ptr_t line_search_step = Teuchos::null;
998  if( cov_.line_search_method_ != LINE_SEARCH_NONE ) {
999  RCP<DirectLineSearchArmQuad_Strategy>
1000  direct_line_search = Teuchos::rcp(new DirectLineSearchArmQuad_Strategy());
1001  if(options_.get()) {
1002  ConstrainedOptPack::DirectLineSearchArmQuad_StrategySetOptions
1003  ls_options_setter( direct_line_search.get(), "DirectLineSearchArmQuadSQPStep" );
1004  ls_options_setter.set_options( *options_ );
1005  }
1006  switch( cov_.line_search_method_ ) {
1007  case LINE_SEARCH_DIRECT: {
1008  line_search_step = Teuchos::rcp(new LineSearchDirect_Step(direct_line_search));
1009  break;
1010  }
1011  case LINE_SEARCH_2ND_ORDER_CORRECT: {
1012  TEUCHOS_TEST_FOR_EXCEPTION(
1013  true, std::logic_error
1014  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
1015  "The line_search_method option of 2ND_ORDER_CORRECT has not been updated yet!" );
1016  break;
1017  }
1018  case LINE_SEARCH_WATCHDOG: {
1019  TEUCHOS_TEST_FOR_EXCEPTION(
1020  true, std::logic_error
1021  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
1022  "The line_search_method option of WATCHDOG has not been updated yet!" );
1023  break;
1024  }
1025  case LINE_SEARCH_FILTER:
1026  {
1027  RCP<LineSearchFilter_Step>
1028  line_search_filter_step = Teuchos::rcp(new LineSearchFilter_Step(barrier_nlp, barrier_obj_name, grad_barrier_obj_name));
1029 
1030  if(options_.get())
1031  {
1032  LineSearchFilter_StepSetOptions options_setter(line_search_filter_step.get());
1033  options_setter.set_options(*options_);
1034  }
1035 
1036  line_search_step = line_search_filter_step;
1037  break;
1038  }
1039  }
1040  }
1041 
1042  // LineSearchFailure
1043  if( new_decomp_selection_strategy.get() ) {
1044  line_search_step = Teuchos::rcp(
1045  new LineSearchFailureNewDecompositionSelection_Step(
1046  line_search_step
1047  ,new_decomp_selection_strategy
1048  )
1049  );
1050  }
1051 
1052  //
1053  // Create the algorithm depending on the type of NLP we are trying to solve.
1054  //
1055 
1056  if( m == 0 ) {
1057  if( nb == 0 ) {
1058  //
1059  // Unconstrained NLP (m == 0, num_bounded_x == 0)
1060  //
1061  if(trase_out)
1062  *trase_out
1063  << "\nConfiguring an algorithm for an unconstrained "
1064  << "NLP (m == 0, num_bounded_x == 0) ...\n";
1065  TEUCHOS_TEST_FOR_EXCEPTION(
1066  m == 0 && nb == 0, std::logic_error
1067  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
1068  "Unconstrained NLPs are not supported yet!" );
1069  }
1070  else {
1071  //
1072  // Simple bound constrained NLP (m == 0, num_bounded_x > 0)
1073  //
1074  if(trase_out)
1075  *trase_out
1076  << "\nConfiguring an algorithm for a simple bound constrained "
1077  << "NLP (m == 0, num_bounded_x > 0) ...\n";
1078  TEUCHOS_TEST_FOR_EXCEPTION(
1079  m == 0 && nb > 0, std::logic_error
1080  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
1081  "Bound constrained NLPs are not supported yet!" );
1082  }
1083  }
1084  else if( n == m ) {
1085  //
1086  // System of Nonlinear equations (n == m)
1087  //
1088  if(trase_out)
1089  *trase_out
1090  << "\nConfiguring an algorithm for a system of nonlinear equations "
1091  << "NLP (n == m) ...\n";
1092  TEUCHOS_TEST_FOR_EXCEPTION(
1093  n == m, std::logic_error
1094  ,"NLPAlgoConfigIP::config_algo_cntr(...) : Error, "
1095  "Nonlinear equation (NLE) problems are not supported yet!" );
1096  TEUCHOS_TEST_FOR_EXCEPT(true); // ToDo: add the step objects for this algorithm
1097  }
1098  else if ( m > 0 || nb > 0 ) {
1099  //
1100  // General nonlinear NLP ( m > 0 )
1101  //
1102  if( nb == 0 ) {
1103  //
1104  // Nonlinear equality constrained NLP ( m > 0 && num_bounded_x == 0 )
1105  //
1106  if(trase_out)
1107  *trase_out
1108  << "\nConfiguring an algorithm for a nonlinear equality constrained "
1109  << "NLP ( m > 0 && num_bounded_x == 0) ...\n";
1110  }
1111  else {
1112  //
1113  // Nonlinear inequality constrained NLP ( num_bounded_x > 0 )
1114  //
1115  if(trase_out)
1116  *trase_out
1117  << "\nConfiguring an algorithm for a nonlinear generally constrained "
1118  << "NLP ( num_bounded_x > 0 ) ...\n";
1119  }
1120 
1121 
1122 
1124  // Add all the steps to the algorithm
1126 
1127  int step_num = 0;
1128  int assoc_step_num = 0;
1129 
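  // The insert_step() calls below assemble the ordered list of major algorithm
  // steps (each call appends a named step at position ++step_num), while
  // insert_assoc_step() attaches pre-/post-steps (IterationPack::PRE_STEP /
  // IterationPack::POST_STEP) to the major step at step_num.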
1130  // UpdateBarrierParameter
1131  //algo->insert_step( ++step_num, "UpdateBarrierParameter", updateBarrierParameter_step );
1132 
1133  // EvalNewPoint
1134  algo->insert_step( ++step_num, EvalNewPoint_name, eval_new_point_step );
1135 
1136  //* EvalNewPoint pre steps
1137  // PreEvalNewPointBarrier
1138  algo->insert_assoc_step( step_num, IterationPack::PRE_STEP, 1, "PreEvalNewPointBarrier", preEvalNewPointBarrier_step);
1139 
1140  //* EvalNewPoint post steps
1141  if( check_descent_quasi_normal_step_step.get() && tailored_approach && algo->algo_cntr().check_results() )
1142  {
1143  algo->insert_assoc_step(
1144  step_num
1145  ,IterationPack::POST_STEP
1146  ,++assoc_step_num
1147  ,"CheckDescentQuasiNormalStep"
1148  ,check_descent_quasi_normal_step_step
1149  );
1150  }
1151 
1152  // PostEvalNewPointBarrier
1153  algo->insert_assoc_step( step_num, IterationPack::POST_STEP, ++assoc_step_num, "PostEvalNewPointBarrier", postEvalNewPointBarrier_step);
1154  assoc_step_num = 0;
1155 
1156  // ReducedGradient
1157  if( !tailored_approach ) {
1158  algo->insert_step( ++step_num, ReducedGradient_name, reduced_gradient_step );
1159  }
1160 
1161  // CalcReducedGradLagrangian
1162  algo->insert_step( ++step_num, CalcReducedGradLagrangian_name, calc_reduced_grad_lagr_step );
1163 
1164  // CalcLagrangeMultDecomposed
1165  // Compute these here so that in case we converge we can report them
1166  if( !tailored_approach ) {
1167  // ToDo: Insert this step
1168  }
1169 
1170  // CheckConvergence
1171  algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
1172 
1173  //}
1174 
1175  // UpdateBarrierParameter
1176  algo->insert_step( ++step_num, "UpdateBarrierParameter", updateBarrierParameter_step );
1177 
1178  // QuasiNormalStep
1179  if( !tailored_approach ) {
1180  algo->insert_step( ++step_num, QuasiNormalStep_name, quasi_normal_step_step );
1181  assoc_step_num = 0;
1182  if( check_decomp_from_py_step.get() )
1183  algo->insert_assoc_step(
1184  step_num
1185  ,IterationPack::POST_STEP
1186  ,++assoc_step_num
1187  ,"CheckDecompositionFromPy"
1188  ,check_decomp_from_py_step
1189  );
1190  if( check_decomp_from_Rpy_step.get() )
1191  algo->insert_assoc_step(
1192  step_num
1193  ,IterationPack::POST_STEP
1194  ,++assoc_step_num
1195  ,"CheckDecompositionFromRPy"
1196  ,check_decomp_from_Rpy_step
1197  );
1198  if( check_descent_quasi_normal_step_step.get() )
1199  algo->insert_assoc_step(
1200  step_num
1201  ,IterationPack::POST_STEP
1202  ,++assoc_step_num
1203  ,"CheckDescentQuasiNormalStep"
1204  ,check_descent_quasi_normal_step_step
1205  );
1206  }
1207 
1208  // ReducedHessian
1209  algo->insert_step( ++step_num, ReducedHessian_name, reduced_hessian_step );
1210 
1211  // UpdateReducedSigma_Step
1212  algo->insert_step( ++step_num, "UpdateReducedSigma", updateReducedSigma_step);
1213 
1214  // TangentialStep
1215  algo->insert_step( ++step_num, "TangentialStepIP", tangential_step);
1216  // CalcDFromYPYZPZ
1217  algo->insert_step( ++step_num, CalcDFromYPYZPZ_name, calc_d_from_Ypy_Zpy_step );
1218 
1219  // CalcD_vStep_Step
1220  algo->insert_step( ++step_num, "CalcD_vStep_Step", calc_d_v_step_step );
1221 
1222  // PreProcessBarrierLineSearch_Step
1223  algo->insert_step( ++step_num, "PreProcessBarrierLineSearch_Step", preprocess_barrier_linesearch_step );
1224 
1225  // LineSearch
1226  if( cov_.line_search_method_ == LINE_SEARCH_NONE ) {
1227  algo->insert_step( ++step_num, LineSearch_name, line_search_full_step_step );
1228  }
1229  else {
1230  // Main line search step
1231  algo->insert_step( ++step_num, LineSearch_name, line_search_step );
1232  // Insert presteps
1233  int
1234  pre_step_i = 0;
1235  // (.-?) LineSearchFullStep
1236  //algo->insert_assoc_step(
1237  // step_num
1238  // ,IterationPack::PRE_STEP
1239  // ,++pre_step_i
1240  // ,"LineSearchFullStep"
1241  // ,line_search_full_step_step
1242  // );
1243  // (.-?) MeritFunc_PenaltyPramUpdate
1244  if(merit_func_penalty_param_update_step.get()) {
1245  algo->insert_assoc_step(
1246  step_num
1247  ,IterationPack::PRE_STEP
1248  ,++pre_step_i
1249  ,"MeritFunc_PenaltyParamUpdate"
1250  ,merit_func_penalty_param_update_step
1251  );
1252  }
1253  }
1254 
1255  // PostProcessBarrierLineSearch_Step
1256  algo->insert_step( ++step_num, "PostProcessBarrierLineSearch_Step", postprocess_barrier_linesearch_step );
1257 
1258  // CheckConvergence
1259  //algo->insert_step( ++step_num, CheckConvergence_name, check_convergence_step );
1260 
1261  }
1262  else {
1263  TEUCHOS_TEST_FOR_EXCEPT(true); // Error, this should not ever be called!
1264  }
1265  }
1266 
1267 }
1268 
1269 void NLPAlgoConfigIP::init_algo( NLPAlgoInterface *_algo )
1270 {
1271  using Teuchos::dyn_cast;
1272 
1273  TEUCHOS_TEST_FOR_EXCEPTION(
1274  _algo == NULL, std::invalid_argument
1275  ,"NLPAlgoConfigIP::init_algo(_algo) : Error, "
1276  "_algo can not be NULL" );
1277 
1278  NLPAlgo &algo = dyn_cast<NLPAlgo>(*_algo);
1279  NLPAlgoState &state = algo.rsqp_state();
1280  NLP &nlp = algo.nlp();
1281 
1282  algo.max_iter( algo.algo_cntr().max_iter() );
1283  algo.max_run_time( algo.algo_cntr().max_run_time() );
1284 
1285  // Reset the iteration count to zero
1286  state.k(0);
1287 
1288  // Get organized output of vectors and matrices even if setw is not used by Step objects.
1289  algo.track().journal_out()
1290  << std::setprecision(algo.algo_cntr().journal_print_digits())
1291  << std::scientific;
1292 
1293  // set the first step
1294  algo.do_step_first(1);
1295 
1296  // The rest of the algorithm should initialize itself
1297 }
1298 
1299 // private
1300 
1301 void NLPAlgoConfigIP::readin_options(
1302  const OptionsFromStreamPack::OptionsFromStream &options
1303  , SOptionValues *ov
1304  , std::ostream *trase_out
1305  )
1306 {
1307  namespace ofsp = OptionsFromStreamPack;
1308  using ofsp::OptionsFromStream;
1309  typedef OptionsFromStream::options_group_t options_group_t;
1310  using ofsp::StringToIntMap;
1311  using ofsp::StringToBool;
1312 
1313  TEUCHOS_TEST_FOR_EXCEPT( !( ov ) ); // only a local class error
1314 
1315  // Get the options group for "NLPAlgoConfigIP"
1316  const std::string opt_grp_name = "NLPAlgoConfigIP";
1317  const OptionsFromStream::options_group_t optgrp = options.options_group( opt_grp_name );
1318  if( OptionsFromStream::options_group_exists( optgrp ) ) {
1319 
1320  // Define map for options group "IpConfig".
1321  const int num_opts = 11;
1322  enum EIpConfig {
1323  MAX_BASIS_COND_CHANGE_FRAC
1324  ,EXACT_REDUCED_HESSIAN
1325  ,QUASI_NEWTON
1326  ,NUM_LBFGS_UPDATES_STORED
1327  ,LBFGS_AUTO_SCALING
1328  ,HESSIAN_INITIALIZATION
1329  ,QP_SOLVER
1330  ,REINIT_HESSIAN_ON_QP_FAIL
1331  ,LINE_SEARCH_METHOD
1332  ,MERIT_FUNCTION_TYPE
1333  ,L1_PENALTY_PARAM_UPDATE
1334  };
1335  const char* SIpConfig[num_opts] = {
1336  "max_basis_cond_change_frac"
1337  ,"exact_reduced_hessian"
1338  ,"quasi_newton"
1339  ,"num_lbfgs_updates_stored"
1340  ,"lbfgs_auto_scaling"
1341  ,"hessian_initialization"
1342  ,"qp_solver"
1343  ,"reinit_hessian_on_qp_fail"
1344  ,"line_search_method"
1345  ,"merit_function_type"
1346  ,"l1_penalty_parameter_update"
1347  };
1348  StringToIntMap map( opt_grp_name, num_opts, SIpConfig );
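  // Note that the EIpConfig enumerators above must stay in the same order as
  // the strings in SIpConfig[]; the StringToIntMap lookup below relies on that
  // one-to-one correspondence when the option index is cast back to EIpConfig.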
1349 
1350  options_group_t::const_iterator itr = optgrp.begin();
1351  for( ; itr != optgrp.end(); ++itr ) {
1352  switch( (EIpConfig)map( ofsp::option_name(itr) ) ) {
1353  case MAX_BASIS_COND_CHANGE_FRAC:
1354  ov->max_basis_cond_change_frac_ = std::atof( ofsp::option_value(itr).c_str() );
1355  break;
1356  case EXACT_REDUCED_HESSIAN:
1357  ov->exact_reduced_hessian_ = StringToBool( "exact_reduced_hessian", ofsp::option_value(itr).c_str() );
1358  break;
1359  case QUASI_NEWTON:
1360  {
1361  const std::string &opt_val = ofsp::option_value(itr);
1362  if( opt_val == "AUTO" )
1363  ov->quasi_newton_ = QN_AUTO;
1364  else if( opt_val == "BFGS" )
1365  ov->quasi_newton_ = QN_BFGS;
1366  else if( opt_val == "PBFGS" )
1367  ov->quasi_newton_ = QN_PBFGS;
1368  else if( opt_val == "LBFGS" )
1369  ov->quasi_newton_ = QN_LBFGS;
1370  else if( opt_val == "LPBFGS" )
1371  ov->quasi_newton_ = QN_LPBFGS;
1372  else
1373  TEUCHOS_TEST_FOR_EXCEPTION(
1374  true, std::invalid_argument
1375  ,"NLPAlgoConfigIP::readin_options(...) : "
1376  "Error, incorrect value for \"quasi_newton\". "
1377  "Only the options BFGS, PBFGS"
1378  ", LBFGS, LPBFGS and AUTO are available."
1379  );
1380  break;
1381  }
1382  case NUM_LBFGS_UPDATES_STORED:
1383  ov->num_lbfgs_updates_stored_ = std::atoi( ofsp::option_value(itr).c_str() );
1384  break;
1385  case LBFGS_AUTO_SCALING:
1386  ov->lbfgs_auto_scaling_
1387  = StringToBool( "lbfgs_auto_scaling", ofsp::option_value(itr).c_str() );
1388  break;
1389  case HESSIAN_INITIALIZATION:
1390  {
1391  const std::string &opt_val = ofsp::option_value(itr);
1392  if( opt_val == "IDENTITY" )
1393  ov->hessian_initialization_ = INIT_HESS_IDENTITY;
1394  else if( opt_val == "FINITE_DIFF_SCALE_IDENTITY" )
1395  ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_IDENTITY;
1396  else if( opt_val == "FINITE_DIFF_DIAGONAL" )
1397  ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL;
1398  else if( opt_val == "FINITE_DIFF_DIAGONAL_ABS" )
1399  ov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
1400  else if( opt_val == "AUTO" )
1401  ov->hessian_initialization_ = INIT_HESS_AUTO;
1402  else
1403  TEUCHOS_TEST_FOR_EXCEPTION(
1404  true, std::invalid_argument
1405  ,"NLPAlgoConfigIP::readin_options(...) : "
1406  "Error, incorrect value for \"hessian_initialization\" "
1407  ", Only options of IDENTITY, FINITE_DIFF_SCALE_IDENTITY,"
1408  " FINITE_DIFF_DIAGONAL, FINITE_DIFF_DIAGONAL_ABS and AUTO"
1409  " are available" );
1410  break;
1411  }
1412  case QP_SOLVER:
1413  {
1414  const std::string &qp_solver = ofsp::option_value(itr);
1415  if( qp_solver == "AUTO" ) {
1416  ov->qp_solver_type_ = QP_AUTO;
1417  } else if( qp_solver == "QPSOL" ) {
1418  ov->qp_solver_type_ = QP_QPSOL;
1419  } else if( qp_solver == "QPOPT" ) {
1420 #ifdef CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT
1421  ov->qp_solver_type_ = QP_QPOPT;
1422 #else
1423  TEUCHOS_TEST_FOR_EXCEPTION(
1424  true, std::invalid_argument
1425  ,"NLPAlgoConfigIP::readin_options(...) : QPOPT is not supported,"
1426  " must define CONSTRAINED_OPTIMIZATION_PACK_USE_QPOPT!" );
1427 #endif
1428  } else if( qp_solver == "QPKWIK" ) {
1429  ov->qp_solver_type_ = QP_QPKWIK;
1430  } else if( qp_solver == "QPSCHUR" ) {
1431  ov->qp_solver_type_ = QP_QPSCHUR;
1432  } else {
1433  TEUCHOS_TEST_FOR_EXCEPTION(
1434  true, std::invalid_argument
1435  ,"NLPAlgoConfigIP::readin_options(...) : "
1436  "Error, incorrect value for \"qp_solver\". "
1437  "Only the QP solvers QPOPT, QPSOL, QPKWIK, QPSCHUR and AUTO are available." );
1438  }
1439  break;
1440  }
1441  case REINIT_HESSIAN_ON_QP_FAIL:
1442  ov->reinit_hessian_on_qp_fail_ = StringToBool( "reinit_hessian_on_qp_fail", ofsp::option_value(itr).c_str() );
1443  break;
1444  case LINE_SEARCH_METHOD:
1445  {
1446  const std::string &option = ofsp::option_value(itr);
1447  if( option == "NONE" ) {
1448  ov->line_search_method_ = LINE_SEARCH_NONE;
1449  } else if( option == "DIRECT" ) {
1450  ov->line_search_method_ = LINE_SEARCH_DIRECT;
1451  } else if( option == "2ND_ORDER_CORRECT" ) {
1452  ov->line_search_method_ = LINE_SEARCH_2ND_ORDER_CORRECT;
1453  } else if( option == "WATCHDOG" ) {
1454  ov->line_search_method_ = LINE_SEARCH_WATCHDOG;
1455  } else if( option == "AUTO" ) {
1456  ov->line_search_method_ = LINE_SEARCH_AUTO;
1457  } else if( option == "FILTER" ) {
1458  ov->line_search_method_ = LINE_SEARCH_FILTER;
1459  } else {
1460  TEUCHOS_TEST_FOR_EXCEPTION(
1461  true, std::invalid_argument
1462  ,"NLPAlgoConfigIP::readin_options(...) : "
1463  "Error, incorrect value for \"line_search_method\".\n"
1464  "Only the options NONE, DIRECT, 2ND_ORDER_CORRECT, FILTER, WATCHDOG "
1465  "and AUTO are available." );
1466  }
1467  break;
1468  }
1469  case MERIT_FUNCTION_TYPE:
1470  {
1471  const std::string &option = ofsp::option_value(itr);
1472  if( option == "L1" )
1473  ov->merit_function_type_ = MERIT_FUNC_L1;
1474  else if( option == "MODIFIED_L1" )
1475  ov->merit_function_type_ = MERIT_FUNC_MOD_L1;
1476  else if( option == "MODIFIED_L1_INCR" )
1477  ov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
1478  else if( option == "AUTO" )
1479  ov->merit_function_type_ = MERIT_FUNC_AUTO;
1480  else
1481  TEUCHOS_TEST_FOR_EXCEPTION(
1482  true, std::invalid_argument
1483  ,"NLPAlgoConfigIP::readin_options(...) : "
1484  "Error, incorrect value for \"merit_function_type\".\n"
1485  "Only the options L1, MODIFIED_L1, MODIFIED_L1_INCR "
1486  "and AUTO are available." );
1487  break;
1488  }
1489  case L1_PENALTY_PARAM_UPDATE:
1490  {
1491  const std::string &option = ofsp::option_value(itr);
1492  if( option == "WITH_MULT" )
1493  ov->l1_penalty_param_update_
1494  = L1_PENALTY_PARAM_WITH_MULT;
1495  else if( option == "MULT_FREE" )
1496  ov->l1_penalty_param_update_
1497  = L1_PENALTY_PARAM_MULT_FREE;
1498  else if( option == "AUTO" )
1499  ov->l1_penalty_param_update_
1500  = L1_PENALTY_PARAM_AUTO;
1501  else
1502  TEUCHOS_TEST_FOR_EXCEPTION(
1503  true, std::invalid_argument
1504  ,"NLPAlgoConfigIP::readin_options(...) : "
1505  "Error, incorrect value for \"l1_penalty_parameter_update\".\n"
1506  "Only the options WITH_MULT, MULT_FREE and AUTO "
1507  "are available." );
1508  break;
1509  }
1510  default:
1511  TEUCHOS_TEST_FOR_EXCEPT(true); // this would be a local programming error only.
1512  }
1513  }
1514  }
1515  else {
1516  if(trase_out)
1517  *trase_out
1518  << "\n\n*** Warning! The options group \"NLPAlgoConfigIP\" was not found.\n"
1519  << "Using a default set of options instead ... \n";
1520  }
1521 }
1522 
1523 //
1524 // This is where some of the default options are set and the user is alerted to what their
1525 // value is.
1526 //
1527 void NLPAlgoConfigIP::set_default_options(
1528  const SOptionValues &uov
1529  ,SOptionValues *cov
1530  ,std::ostream *trase_out
1531  )
1532 {
1533  if(trase_out)
1534  *trase_out
1535  << "\n*** Setting option defaults for options not set by the user or determined some other way ...\n";
1536 
1537  if( cov->max_basis_cond_change_frac_ < 0.0 && uov.max_basis_cond_change_frac_ < 0.0 ) {
1538  if(trase_out)
1539  *trase_out
1540  << "\nmax_basis_cond_change_frac < 0 : setting max_basis_cond_change_frac = 1e+4 \n";
1541  cov->max_basis_cond_change_frac_ = 1e+4;
1542  }
1543  else {
1544  cov->max_basis_cond_change_frac_ = uov.max_basis_cond_change_frac_;
1545  }
1546  cov->exact_reduced_hessian_ = uov.exact_reduced_hessian_;
1547  if( cov->quasi_newton_ == QN_AUTO && uov.quasi_newton_ == QN_AUTO ) {
1548  if(trase_out)
1549  *trase_out
1550  << "\nquasi_newton == AUTO: setting quasi_newton = BFGS\n";
1551  cov->quasi_newton_ = QN_BFGS;
1552  }
1553  else if(cov->quasi_newton_ == QN_AUTO) {
1554  cov->quasi_newton_ = uov.quasi_newton_;
1555  }
1556  if( cov->num_lbfgs_updates_stored_ < 0 && uov.num_lbfgs_updates_stored_ < 0 ) {
1557  if(trase_out)
1558  *trase_out
1559  << "\nnum_lbfgs_updates_stored < 0 : setting num_lbfgs_updates_stored = 10\n";
1560  cov->num_lbfgs_updates_stored_ = 10;
1561  }
1562  else if(cov->num_lbfgs_updates_stored_ < 0) {
1563  cov->num_lbfgs_updates_stored_ = uov.num_lbfgs_updates_stored_;
1564  }
1565  cov->lbfgs_auto_scaling_ = uov.lbfgs_auto_scaling_;
1566  if( cov->hessian_initialization_ == INIT_HESS_AUTO && uov.hessian_initialization_ == INIT_HESS_AUTO ) {
1567  if(trase_out)
1568  *trase_out
1569  << "\nhessian_initialization == AUTO: setting hessian_initialization = FINITE_DIFF_DIAGONAL_ABS\n";
1570  cov->hessian_initialization_ = INIT_HESS_FIN_DIFF_SCALE_DIAGONAL_ABS;
1571  }
1572  else if(cov->hessian_initialization_ == INIT_HESS_AUTO) {
1573  cov->hessian_initialization_ = uov.hessian_initialization_;
1574  }
1575  if( cov->qp_solver_type_ == QP_AUTO && uov.qp_solver_type_ == QP_AUTO ) {
1576  if(trase_out)
1577  *trase_out
1578  << "\nqp_solver_type == AUTO: setting qp_solver_type = QPSCHUR\n";
1579  cov->qp_solver_type_ = QP_QPSCHUR;
1580  }
1581  else if(cov->qp_solver_type_ == QP_AUTO) {
1582  cov->qp_solver_type_ = uov.qp_solver_type_;
1583  }
1584  cov->reinit_hessian_on_qp_fail_ = uov.reinit_hessian_on_qp_fail_;
1585  if( cov->line_search_method_ == LINE_SEARCH_AUTO && uov.line_search_method_ == LINE_SEARCH_AUTO ) {
1586  if(trase_out)
1587  *trase_out
1588  << "\nline_search_method == AUTO: setting line_search_method = FILTER\n";
1589  cov->line_search_method_ = LINE_SEARCH_FILTER;
1590  }
1591  else if(cov->line_search_method_ == LINE_SEARCH_AUTO) {
1592  cov->line_search_method_ = uov.line_search_method_;
1593  }
1594  if( cov->merit_function_type_ == MERIT_FUNC_AUTO && uov.merit_function_type_ == MERIT_FUNC_AUTO ) {
1595  if(trase_out)
1596  *trase_out
1597  << "\nmerit_function_type == AUTO: setting merit_function_type = MODIFIED_L1_INCR\n";
1598  cov->merit_function_type_ = MERIT_FUNC_MOD_L1_INCR;
1599  }
1600  else if(cov->merit_function_type_ == MERIT_FUNC_AUTO) {
1601  cov->merit_function_type_ = uov.merit_function_type_;
1602  }
1603  if( cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO && uov.l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO ) {
1604  if(trase_out)
1605  *trase_out
1606  << "\nl1_penalty_param_update == AUTO: setting l1_penalty_param_update = MULT_FREE\n";
1607  cov->l1_penalty_param_update_ = L1_PENALTY_PARAM_MULT_FREE;
1608  }
1609  else if(cov->l1_penalty_param_update_ == L1_PENALTY_PARAM_AUTO) {
1610  cov->l1_penalty_param_update_ = uov.l1_penalty_param_update_;
1611  }
1612  if( cov->full_steps_after_k_ < 0 && uov.full_steps_after_k_ < 0 ) {
1613  if(trase_out)
1614  *trase_out
1615  << "\nfull_steps_after_k < 0 : the line search will never be turned off after so many iterations\n";
1616  }
1617  else {
1618  cov->full_steps_after_k_ = uov.full_steps_after_k_;
1619  }
1620  if(trase_out)
1621  *trase_out
1622  << "\n*** End setting default options\n";
1623 }
1624 
1625 } // end namespace MoochoPack