MoochoPack : Framework for Large-Scale Optimization Algorithms  Version of the Day
Options for NLPAlgoConfigIP.

The following are the contents of the file Moocho.opt.NLPAlgoConfigIP, which lists the options specific to the class MoochoPack::NLPAlgoConfigIP and the class objects that it configures.

*** Begin Moocho.opt.NLPAlgoConfigIP

********************************************************************
*** All of these options can be used with the NLPAlgoConfigIP
*** algorithm configuration class.  This file will be maintained and
*** will include every option that users can set.  Most of these
*** options the user will want to leave alone, but they are listed
*** here in any case.

**********************************************************
*** Options specific to the rSQP IP algorithm configuration
*** class NLPAlgoConfigIP
***
options_group NLPAlgoConfigIP {

*** Variable Reduction range/null space decomposition

*    max_basis_cond_change_frac = -1.0;  *** [default]
     *** (+-dbl) If < 0 then the solver will decide what value to use.
     *** Otherwise this is the maximum fractional change in the basis
     *** condition estimate (see the printed algorithm description) above
     *** which a new basis will be selected.  Example values:
     ***    -1 : Allow the solver to decide [default]
     ***     0 : Switch to a new basis every iteration (not a good idea)
     ***   100 : Switch to a new basis when the change is more than 100
     *** 1e+50 : (big number) Never switch to a new basis.

*** Reduced Hessian Approximations

*    exact_reduced_hessian = true; *** Use NLP Hessian info if available
*    exact_reduced_hessian = false; *** Use quasi_newton [default]
     *** If true, and if the NLP supports second order information
     *** (the Hessian of the Lagrangian HL), then the exact reduced Hessian
     *** rHL = Z'*HL*Z will be computed at each iteration.

*    quasi_newton = AUTO;   *** Let solver decide dynamically [default]
*    quasi_newton = BFGS;   *** Dense BFGS
*    quasi_newton = LBFGS;  *** Limited memory BFGS
     *** [exact_reduced_hessian == false]
     *** ToDo: Finish documentation!

*    max_dof_quasi_newton_dense = -1; *** [default]
     *** [quasi_newton == AUTO] (+-int)  This option is used to
     *** determine when the algorithm will switch from quasi_newton=LBFGS
     *** to quasi_newton=BFGS and from range_space_matrix=ORTHOGONAL
     *** to range_space_matrix=COORDINATE.  Example values:
     ***  -1 : (< 0) Let the solver decide dynamically [default]
     ***   0 : Always use limited memory LBFGS and COORDINATE.
     *** 500 : Use LBFGS when n-r >= 500 and dense BFGS when n-r < 500.
     ***       Use COORDINATE when (n-r)*r >= 500 and ORTHOGONAL when
     ***       (n-r)*r < 500.

*    num_lbfgs_updates_stored   = -1; *** [default]
     *** [quasi_newton == LBFGS] (+-int) If < 0 then let the solver decide,
     *** otherwise this is the maximum number of update vectors stored
     *** for limited memory LBFGS.

*    lbfgs_auto_scaling = true;  *** [default]
*    lbfgs_auto_scaling = false;
     *** [quasi_newton == LBFGS] If true, then auto scaling of the initial
     *** Hessian approximation will be used for LBFGS.

*** Line search methods

*    line_search_method = AUTO;               *** Let the solver decide dynamically [default]
*    line_search_method = NONE;               *** Take full steps at every iteration
*    line_search_method = DIRECT;             *** Use standard Armijo backtracking
*    line_search_method = FILTER;             *** Use the Filter line search method
     *** Options:
     *** AUTO : Let the solver decide dynamically what line search method to use (if any).
     *** NONE : Take full steps at every iteration.  For most problems this is a bad idea.
     ***     However, for some problems this can help, usually when starting close to the
     ***     solution.
     *** DIRECT : Use a standard Armijo backtracking line search at every iteration.
     *** 2ND_ORDER_CORRECT : Like DIRECT except corrections are computed before applying
     ***     the backtracking line search (see options_group LineSearch2ndOrderCorrect).
     ***     This can help greatly on some problems and can counteract the Maratos effect.
     *** FILTER : Use the filter line search.  Here we accept either a decrease in the
     ***     objective function or in the constraints.  See "Global and Local Convergence of
     ***     Line Search Filter Methods for Nonlinear Programming" by Waechter and Biegler.

*    merit_function_type = AUTO;              *** [line_search_method != NONE] Let solver decide
*    merit_function_type = L1;                *** [line_search_method != NONE] phi(x) = f(x) + mu*||c(x)||1
*    merit_function_type = MODIFIED_L1;       *** [line_search_method != NONE] phi(x) = f(x) + sum(mu(j),|cj(x)|,j)
*    merit_function_type = MODIFIED_L1_INCR;  *** [line_search_method != NONE] Like MODIFIED_L1 except mu(j) are
*                                             *** altered in order to take larger steps
*    l1_penalty_parameter_update = AUTO;      *** [merit_function_type == L1] let solver decide
*    l1_penalty_parameter_update = WITH_MULT; *** [merit_function_type == L1] Use Lagrange multipliers to update mu
*    l1_penalty_parameter_update = MULT_FREE; *** [merit_function_type == L1] Don't use Lagrange multipliers to update mu

}
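
*** Note: every option line in this file begins with '*' and is
*** therefore commented out; the values shown are the defaults.  To
*** actually set an option, copy the group into your options file
*** (typically Moocho.opt) and remove the leading '*'.  For example,
*** a hypothetical selection (illustrative only, not a recommendation
*** for all problems):
***
***   options_group NLPAlgoConfigIP {
***       quasi_newton       = LBFGS;
***       line_search_method = FILTER;
***   }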

***************************************************************
*** Options group for UpdateBarrierParameter_Step
***
*** These options control how the barrier parameter (mu)
*** is updated
***
options_group UpdateBarrierParameter {

*	tau_mu = 0.2;			*** linear decrease fraction for mu
*	theta_mu = 1.5;			*** superlinear decrease power for mu
*	tau_epsilon = 10; 		*** error tolerance fraction
*	theta_epsilon = 1.1;	*** error tolerance power
*	e_tol_max = 1000;		*** maximum error tolerance

}
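
*** Note: from the parameter names above, the barrier parameter is
*** presumably decreased by a monotone Fiacco-McCormick style rule of
*** the form
***
***    mu_kp1 = min( tau_mu * mu_k, mu_k^theta_mu )
***
*** (a linear cut by tau_mu far from the solution, superlinear with
*** power theta_mu near it), with the inner error tolerance scaled from
*** mu via tau_epsilon and theta_epsilon and capped at e_tol_max.  This
*** is a sketch inferred from the names; see the printed algorithm
*** description for the exact update rule.
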
***************************************************************
*** Options group for CalcFiniteDiffProd class
***
*** These options control how finite differences are computed
*** when testing and for other purposes.
***
options_group CalcFiniteDiffProd {

*    fd_method_order = FD_ORDER_ONE;          *** Use O(eps) one sided finite differences
*    fd_method_order = FD_ORDER_TWO;          *** Use O(eps^2) one sided finite differences
*    fd_method_order = FD_ORDER_TWO_CENTRAL;  *** Use O(eps^2) two sided central finite differences
*    fd_method_order = FD_ORDER_TWO_AUTO;     *** Uses FD_ORDER_TWO_CENTRAL or FD_ORDER_TWO
*    fd_method_order = FD_ORDER_FOUR;         *** Use O(eps^4) one sided finite differences
*    fd_method_order = FD_ORDER_FOUR_CENTRAL; *** Use O(eps^4) two sided central finite differences
*    fd_method_order = FD_ORDER_FOUR_AUTO;    *** [default] Uses FD_ORDER_FOUR_CENTRAL or FD_ORDER_FOUR
     *** Selects the finite differencing method to use.  Several different
     *** methods of different orders are available.  For more accuracy use a higher order
     *** method; for faster execution use a lower order method.

*    fd_step_select = FD_STEP_ABSOLUTE; *** [default] Use absolute step size fd_step_size
*    fd_step_select = FD_STEP_RELATIVE; *** Use relative step size fd_step_size * ||x||inf
     *** Determines how the actual finite difference step size that is used is selected.
     ***    FD_STEP_ABSOLUTE : The actual step size used is taken from fd_step_size or
     ***                       is determined by the implementation if fd_step_size < 0.0.
     ***                       Taking an absolute step size can result in inaccurate gradients
     ***                       for badly scaled NLPs.
     ***    FD_STEP_RELATIVE : The actual step size used is taken from fd_step_size or
     ***                       is determined by the implementation if fd_step_size < 0.0,
     ***                       and is then multiplied by ||x||inf.  Taking a relative step
     ***                       will not always result in accurate gradients and the user
     ***                       may have to experiment with fd_step_size some.

*    fd_step_size = -1.0; *** [default] Let the implementation decide
     *** Determines what finite difference step size to use.  If fd_step_select=FD_STEP_ABSOLUTE
     *** then this is the absolute step size that is used.  If fd_step_select=FD_STEP_RELATIVE
     *** then the actual step size used is fd_step_size * ||x||inf.  Some common values are:
     ***   < 0.0   : let the implementation decide.
     ***   1e-8    : Optimal step size for FD_ORDER_ONE for IEEE double and perfect scaling?
     ***   1e-5    : Optimal step size for FD_ORDER_TWOx for IEEE double and perfect scaling?
     ***   1e-3    : Optimal step size for FD_ORDER_FOURx for IEEE double and perfect scaling?

*    fd_step_size_min = -1.0; *** [default] Let the implementation decide.
     *** Determines the minimum step size that will be taken to compute the finite differences.
     *** This option is used to forbid the computation of a finite difference with a very small
     *** step size, as can be required by the variable bounds.  Computing finite difference
     *** derivatives with such small step sizes generally results in a lot of roundoff error.  If
     *** fd_step_size_min < 0.0, then the implementation will pick a default value that is
     *** smaller than the default value for fd_step_size.

*    fd_step_size_f = -1.0; *** [default] Let the implementation decide
     *** Determines what finite difference step size to use for the objective function
     *** f(x).  If fd_step_size_f < 0.0, then the selected value for fd_step_size will be
     *** used (see the options fd_step_size and fd_step_select).  This option allows
     *** fine-tuning of the finite difference computations.

*    fd_step_size_c = -1.0; *** [default] Let the implementation decide
     *** Determines what finite difference step size to use for the equality constraints
     *** c(x).  If fd_step_size_c < 0.0, then the selected value for fd_step_size will be
     *** used (see the options fd_step_size and fd_step_select).  This option allows
     *** fine-tuning of the finite difference computations.

*    fd_step_size_h = -1.0; *** [default] Let the implementation decide
     *** Determines what finite difference step size to use for the inequality constraints
     *** h(x).  If fd_step_size_h < 0.0, then the selected value for fd_step_size will be
     *** used (see the options fd_step_size and fd_step_select).  This option allows
     *** fine-tuning of the finite difference computations.

}
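
*** Note: as a concrete illustration (these are the standard textbook
*** formulas, not read out of the implementation), the directional
*** derivative d = Gf'*v would be approximated by, e.g.,
***
***    FD_ORDER_ONE         : d ~= ( f(x + h*v) - f(x) ) / h             with O(h) error
***    FD_ORDER_TWO_CENTRAL : d ~= ( f(x + h*v) - f(x - h*v) ) / (2*h)   with O(h^2) error
***
*** where h is the selected step size; the higher order methods add
*** evaluation points to cancel more terms of the Taylor error.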

***************************************************************
*** Options for EvalNewPointBarrier.
*** See options_group NLPFirstDerivTester
***
options_group EvalNewPointBarrier {

*    relative_bounds_push = 0.01;
*    absolute_bounds_push = 0.001;
     *** Determines how to push x within bounds if initial values are specified
     *** either outside the bounds or too close to the bounds:
     ***
     ***   xl_sb = min( xl + relative_bounds_push*(xu-xl),
     ***                xl + absolute_bounds_push )
     ***   xu_sb = max( xu - relative_bounds_push*(xu-xl),
     ***                xu - absolute_bounds_push )
     ***   if (xl_sb > xu_sb) then
     ***      x = xl + (xu-xl)/2
     ***   else if (x < xl_sb) then
     ***      x = xl_sb
     ***   else if (x > xu_sb) then
     ***      x = xu_sb
}

*********************************************************************
*** Options for the check of whether to skip the BFGS update.
***
*** [NLPAlgoConfigIP::exact_reduced_hessian == false]
***
options_group CheckSkipBFGSUpdateStd {
*    skip_bfgs_prop_const = 10.0; *** (+dbl)
}
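
*** Note: a BFGS update preserves positive definiteness only when the
*** curvature condition s'*y > 0 holds for the update vectors s and y,
*** so checks of this kind typically skip the update when s'*y is not
*** sufficiently positive relative to the sizes of s and y;
*** skip_bfgs_prop_const presumably controls how strict that
*** proportionality test is (an interpretation from the name, not a
*** detail read from the implementation).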

*********************************************************************
*** Options for the BFGS updating (dense or limited memory)
*** 
*** [NLPAlgoConfigIP::exact_reduced_hessian == false]
***
options_group BFGSUpdate {

*    rescale_init_identity = true;  *** [default]
*    rescale_init_identity = false;
     *** If true, then rescale the initial identity matrix at the 2nd iteration

*    use_dampening = true;  *** [default]
*    use_dampening = false;
     *** Use the dampened BFGS update

*    secant_testing          = DEFAULT;  *** Test secant condition if check_results==true (see above)
*    secant_testing          = TEST;     *** Always test secant condition
*    secant_testing          = NO_TEST;  *** Never test secant condition

*    secant_warning_tol      = 1e-14;
*    secant_error_tol        = 1e-10;

}
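
*** Note: the secant condition tested above is the standard quasi-Newton
*** relation (a general statement of the condition, not a detail read
*** from the implementation)
***
***    rHL_kp1 * s = y
***
*** where s is the change in the (reduced space) iterate and y the
*** corresponding change in the reduced gradient of the Lagrangian;
*** every BFGS update satisfies it by construction, so secant_testing
*** mainly guards against implementation and linear algebra errors,
*** and dampening modifies y to keep s'*y > 0 so that rHL stays
*** positive definite.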

*********************************************************************
*** Options for the updating of the reduced Sigma term
*** 
***
options_group UpdateReducedSigma {

*   update_method = always_explicit;
*   update_method = BFGS_primal;
*   update_method = BFGS_dual_no_correction;
*   update_method = BFGS_dual_explicit_correction; *** [default]
*   update_method = BFGS_dual_scaling_correction;
    *** These options determine exactly how the reduced Sigma
    *** term will be updated.
    ***
    *** always_explicit               : compute the full Z_k'*Sigma*Z_k at each step (expensive)
    *** BFGS_primal                   : a BFGS update of mu*X^-2 (exact at solution)
    *** BFGS_dual_no_correction       : update with Z_k'*Sigma*Z_k*pz
    ***                                   (no correction when mu changes)
    *** BFGS_dual_explicit_correction : same as above
    ***                                   (do an explicit calculation when mu changes)
    *** BFGS_dual_scaling_correction  : same as above
    ***                                   (scale by mu_kp1/mu_k when mu changes)

}
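
*** Note: for a log-barrier method, Sigma is presumably the diagonal
*** barrier Hessian contribution (roughly mu*X^-2 in the primal case,
*** as the BFGS_primal description above suggests), so Z_k'*Sigma*Z_k is
*** its projection onto the null space basis Z_k.  This interpretation
*** is inferred from the option descriptions above, not from the
*** implementation.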

*********************************************************************
*** Options for the convergence test.
***
*** See the printed step description (i.e. 'MoochoAlgo.out') for a
*** description of what these options do.
***
options_group CheckConvergenceStd {

*    scale_opt_error_by   = SCALE_BY_NORM_2_X;
*    scale_opt_error_by   = SCALE_BY_NORM_INF_X;
*    scale_opt_error_by   = SCALE_BY_ONE;        *** [default]

*    scale_feas_error_by   = SCALE_BY_NORM_2_X;
*    scale_feas_error_by   = SCALE_BY_NORM_INF_X;
*    scale_feas_error_by   = SCALE_BY_ONE;        *** [default]

*    scale_comp_error_by   = SCALE_BY_NORM_2_X;
*    scale_comp_error_by   = SCALE_BY_NORM_INF_X;
*    scale_comp_error_by   = SCALE_BY_ONE;        *** [default]

     *** Determines what all of the error measures are scaled by when checking convergence:
     *** SCALE_BY_NORM_2_X   : Scale the optimality conditions by 1/(1+||x_k||2)
     *** SCALE_BY_NORM_INF_X : Scale the optimality conditions by 1/(1+||x_k||inf)
     *** SCALE_BY_ONE        : Scale the optimality conditions by 1

*    scale_opt_error_by_Gf = true; *** [default]
*    scale_opt_error_by_Gf = false;
     *** Determines if the linear dependence of gradients measure (i.e. ||rGL_k||inf or
     *** ||GL_k||inf) is scaled by the gradient of the objective function or not.
     *** true  : Scale ||rGL_k||inf or ||GL_k||inf by an additional 1/(1+||Gf||inf)
     *** false : Scale ||rGL_k||inf or ||GL_k||inf by an additional 1 (i.e. no extra scaling)

}
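
*** For example (illustrative numbers only): with
*** scale_opt_error_by = SCALE_BY_NORM_INF_X and ||x_k||inf = 99, a raw
*** optimality error of 1e-4 would be tested as 1e-4/(1+99) = 1e-6, so
*** the scaled tests become easier to pass for larger iterates.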


********************************************************************
*** Options for the direct line search object that is used in all the
*** line search methods for the SQP step.
***
*** [NLPAlgoConfigIP::line_search_method != NONE]
***
options_group DirectLineSearchArmQuadSQPStep {
*    slope_frac       = 1.0e-4;
*    min_frac_step    = 0.1;
*    max_frac_step    = 0.5;
*    max_ls_iter      = 20;
}
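
*** Note: these parameters presumably play the standard roles in an
*** Armijo backtracking search with quadratic interpolation (an
*** interpretation from the names, not from the implementation): a step
*** length alpha is accepted when
***
***    phi(alpha) <= phi(0) + slope_frac * alpha * phi'(0)
***
*** and otherwise the next trial alpha is chosen within
*** [min_frac_step*alpha, max_frac_step*alpha], giving up after
*** max_ls_iter backtracks.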

*******************************************************************
*** Options for the watchdog line search.
*** 
*** [NLPAlgoConfigIP::line_search_method == WATCHDOG]
***
options_group LineSearchWatchDog {
*    opt_kkt_err_threshold  = 1e-3; *** (+dbl)
*    feas_kkt_err_threshold = 1e-3; *** (+dbl)
     *** Start the watchdog line search when opt_kkt_err_k < opt_kkt_err_threshold and
     *** feas_kkt_err_k < feas_kkt_err_threshold
}

*******************************************************************
*** Options for the second order correction line search.
*** 
*** [NLPAlgoConfigIP::line_search_method == 2ND_ORDER_CORRECT]
***
options_group LineSearch2ndOrderCorrect {

*    newton_olevel = PRINT_USE_DEFAULT;   *** O(?) output [default]
*    newton_olevel = PRINT_NOTHING;       *** No output
*    newton_olevel = PRINT_SUMMARY_INFO;  *** O(max_newton_iter) output
*    newton_olevel = PRINT_STEPS;         *** O(max_newton_iter) output
*    newton_olevel = PRINT_VECTORS;       *** O(max_newton_iter*n) output
     *** Determines the amount of output printed to the journal output stream.
     *** PRINT_USE_DEFAULT: Use the output level from the overall algorithm
     ***     to print comparable output (see journal_output_level).
     *** PRINT_NOTHING: Don't print anything (overrides the default print level).
     *** PRINT_SUMMARY_INFO: Print a nice little summary table showing the sizes of the
     ***    Newton steps used to compute a feasibility correction as well as what
     ***    progress is being made.
     *** PRINT_STEPS: Don't print a summary table; instead print more detail about
     ***    what computations are being performed, etc.
     *** PRINT_VECTORS: Print out the relevant vectors as well for the feasibility Newton
     ***     iterations.

*    constr_norm_threshold = 1.0; *** [default]
     *** (+dbl) Tolerance for ||c_k||inf below which a 2nd order correction step
     *** will be considered (see printed description).  Example values:
     ***    0.0: Never consider computing a correction.
     ***   1e-3: Consider a correction if and only if ||c_k||inf <= 1e-3
     ***  1e+50: (big number) Consider a correction regardless of ||c_k||inf.

*    constr_incr_ratio = 10.0; *** [default]
     *** (+dbl) Tolerance for ||c_kp1||inf/(1.0+||c_k||inf) below which a 2nd order
     *** correction step will be considered (see printed description).  Example values:
     ***    0.0: Consider computing a correction only if ||c_kp1||inf is zero.
     ***   10.0: Consider computing a correction if and only if
     ***         ||c_kp1||inf/(1.0+||c_k||inf) < 10.0.
     ***  1e+50: (big number) Consider a correction regardless of how big ||c_kp1||inf is.

*    after_k_iter = 0; *** [default]
     *** (+int) Number of SQP iterations before a 2nd order correction will be considered
     *** (see printed description).  Example values:
     ***        0: Consider computing a correction right away at the first iteration.
     ***        2: Consider computing a correction when k >= 2.
     ***   999999: (big number) Never consider a 2nd order correction.

*    forced_constr_reduction = LESS_X_D;
*    forced_constr_reduction = LESS_X; *** [default]
     *** Determines the amount of reduction required for c(x_k+d+w).
     ***   LESS_X_D: phi(c(x_k+d+w)) < forced_reduct_ratio * phi(c(x_k+d)) is all that is required.
     ***             As long as a feasible step can be computed, only one Newton
     ***             iteration should be required for this.
     ***   LESS_X:   phi(c(x_k+d+w)) < forced_reduct_ratio * phi(c(x_k)) is required.
     ***             In general, this may require several feasibility step calculations
     ***             and several Newton iterations.  Of course the maximum number of
     ***             Newton iterations may be exceeded before this is achieved.

*    forced_reduct_ratio = 1.0; *** [default]
     *** (+dbl) (<= 1) Fraction of the reduction in phi(c(x)) that is required.
     *** Example values:
     ***    0.0: The constraints must be fully converged and Newton iterations will be
     ***         performed until max_newton_iter is exceeded.
     ***    0.5: Require an extra 50% of the required reduction.
     ***    1.0: Don't require any extra reduction.

*    max_step_ratio = 1.0; *** [default]
     *** (+dbl) Maximum ratio of ||w^p||inf/||d||inf allowed for the correction step w^p before
     *** a line search along phi(c(x_k+d+b*w^p)) is performed.  The purpose of this parameter
     *** is to limit the number of line search iterations needed for each feasibility
     *** step and to keep the full w = sum(w^p,p=1...) from getting too big.  Example values:
     ***    0.0: Don't allow any correction step.
     ***    0.1: Allow ||w^p||inf/||d||inf <= 0.1.
     ***    1.0: Allow ||w^p||inf/||d||inf <= 1.0.
     ***  1e+50: (big number) Allow ||w^p||inf/||d||inf to be as big as possible.

*    max_newton_iter = 3; *** [default]
     *** (+int) Limits the number of Newton feasibility iterations (with line searches)
     *** allowed.  Example values:
     ***       0: Don't allow any Newton iterations (no 2nd order correction).
     ***       3: Allow 3 Newton iterations.
     ***  999999: Allow any number of Newton iterations (not a good idea).

}

*******************************************************************
*** Options for the filter search.
*** 
*** [NLPAlgoConfigIP::line_search_method == FILTER]
***
options_group LineSearchFilter {
*    gamma_theta      = 1e-5;
*    gamma_f          = 1e-5;
*    gamma_alpha      = 5e-2;
*    delta            = 1e-4;
*    s_theta          = 1.1;
*    s_f              = 2.3;	
*    theta_small_fact = 1e-4;
*    theta_max        = 1e10;
*    eta_f            = 1e-4;
*    back_track_frac  = 0.5;
}
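
*** Note: these names appear to follow the notation of the Waechter and
*** Biegler filter line search paper cited above; in that scheme a trial
*** point is acceptable when it sufficiently improves either the
*** constraint violation theta or the objective f, roughly
***
***    theta(x+) <= (1-gamma_theta)*theta(x)   or
***    f(x+)     <= f(x) - gamma_f*theta(x),
***
*** with back_track_frac the fraction by which the step length is cut
*** on rejection.  This mapping is inferred from the parameter names,
*** not from the implementation.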

*******************************************************************
*** Options for generating feasibility steps for reduced space.
*** 
*** [NLPAlgoConfigIP::line_search_method == 2ND_ORDER_CORRECT]
***
options_group FeasibilityStepReducedStd {
*    qp_objective = OBJ_MIN_FULL_STEP;
*    qp_objective = OBJ_MIN_NULL_SPACE_STEP;
*    qp_objective = OBJ_RSQP;
*    qp_testing   = QP_TEST_DEFAULT;
*    qp_testing   = QP_TEST;
*    qp_testing   = QP_NO_TEST;
}

******************************************************************
*** Options for the direct line search object for
*** the newton steps of the 2nd order correction (see above).
*** 
*** [NLPAlgoConfigIP::line_search_method == 2ND_ORDER_CORRECT]
***
options_group DirectLineSearchArmQuad2ndOrderCorrectNewton {
*    slope_frac       = 1.0e-4;
*    min_frac_step    = 0.1;
*    max_frac_step    = 0.5;
*    max_ls_iter      = 20;
}

******************************************************************
*** Change how the penalty parameters for the merit function
*** are adjusted.
*** 
*** [NLPAlgoConfigIP::line_search_method != NONE]
***
options_group MeritFuncPenaltyParamUpdate {
*    small_mu     = 1e-6;
*    min_mu_ratio = 1e-8;
*    mult_factor  = 7.5e-4;
*    kkt_near_sol = 1e-1;
}
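
*** Note: the L1 merit function phi(x) = f(x) + mu*||c(x)||1 is exact
*** only when the penalty parameter dominates the Lagrange multipliers
*** (mu > ||lambda||inf is the standard sufficient condition), which is
*** why updates such as l1_penalty_parameter_update = WITH_MULT above
*** base mu on the computed multipliers.  This is a general fact about
*** exact penalty functions, not a statement of the exact update
*** formula used here.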

*****************************************************************
*** Change how the penalty parameters for the modified
*** L1 merit function are increased.
***
*** [NLPAlgoConfigIP::line_search_method != NONE]
*** [NLPAlgoConfigIP::merit_function_type == MODIFIED_L1_INCR]
***
options_group MeritFuncModifiedL1LargerSteps {

*    after_k_iter                  = 3;
     *** (+int) Number of SQP iterations before considering increasing penalties.
     *** Set to 0 to start at the first iteration.

*    obj_increase_threshold        = 1e-4;
     *** (+-dbl) Consider increasing penalty parameters when the relative
     *** increase in the objective function is greater than this value.
     *** Set to a very large negative number (i.e. -1e+100) to always
     *** allow increasing the penalty parameters.

*    max_pos_penalty_increase      = 1.0;
     *** (+dbl) Ratio by which the penalty parameters are allowed to be increased.
     *** Set to a very large number (1e+100) to allow increasing the penalties
     *** to any value if it will help in taking a larger step.  This will
     *** in effect put all of the weight on the constraints and will force
     *** the algorithm to only minimize the infeasibilities and ignore
     *** optimality.

*    pos_to_neg_penalty_increase   = 1.0;  *** (+dbl)

*    incr_mult_factor              = 1e-4; *** (+dbl)

}

*** End Moocho.opt.NLPAlgoConfigIP