%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% Bibliographical database for the book %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% TRUST REGION METHODS %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% by A.R. Conn, N.I.M. Gould and Ph.L. Toint %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% (SIAM, Philadelphia, 2000) %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%% %%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Copyright: Andrew R. Conn, Nicholas I. M. Gould and Philippe L. Toint % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % This version: 18 II 2001 % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The following works have been cited in the above mentioned book. % Some are general references to background material, while others % are central to the development of the trust-region methods we have % covered. For those references directly relating to trust-region % methods, we have included a short summary of the work's contents. % We have deliberately not included any but the most relevant of the % literally thousands of citations to the Levenberg-Morrison-Marquardt % method. % % We would be delighted to receive any corrections or updates to this list. 
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% STRING definitions
%
% Journals
@string{AOR = "Annals of Operations Research"}
@string{BIT = "BIT"}
@string{COAP = "Computational Optimization and Applications"}
@string{COMPJ = "Computer Journal"}
@string{CSB = "Chinese Science Bulletin"}
@string{IJNME = "International Journal for Numerical Methods in Engineering"}
@string{IMAJNA = "IMA Journal of Numerical Analysis"}
@string{JIMA = "Journal of the Institute of Mathematics and its Applications"}
@string{JOTA = "Journal of Optimization Theory and Applications"}
@string{JCAM = "Journal of Computational and Applied Mathematics"}
@string{JCM = "Journal of Computational Mathematics"}
@string{LAA = "Linear Algebra and its Applications"}
@string{MC = "Mathematics of Computation"}
@string{MOR = "Mathematics of Operations Research"}
@string{MP = "Mathematical Programming"}
@string{MPA = "Mathematical Programming, Series~A"}
@string{MPB = "Mathematical Programming, Series~B"}
@string{MPS = "Mathematical Programming Studies"}
@string{NUMMATH = "Numerische Mathematik"}
@string{OMS = "Optimization Methods and Software"}
@string{ORSAC = "ORSA Journal on Computing"}
@string{RAIRO-OR = "RAIRO-Recherche Op\'{e}rationnelle---Operations Research"}
@string{RAIRO-MM = "RAIRO-Mathematical Modelling and Numerical Analysis---Mod\'{e}lisation Math\'{e}matique et Analyse Num\'{e}rique"}
@string{SIADM = "SIAM Journal on Algebraic and Discrete Methods"}
@string{SINUM = "SIAM Journal on Numerical Analysis"}
@string{SICON = "SIAM Journal on Control and Optimization"}
@string{SIOPT = "SIAM Journal on Optimization"}
@string{SIMAA = "SIAM Journal on Matrix Analysis and Applications"}
@string{SISSC = "SIAM Journal on Scientific and Statistical Computing"}
@string{SISC = "SIAM Journal on Scientific Computing"}
@string{SIAPM = "SIAM Journal on Applied Mathematics"}
@string{SIREV = "SIAM Review"}
@string{TOMS = "ACM Transactions on Mathematical Software"}
%
% Places
@string{ANL = "Argonne National Laboratory"}
@string{ANL-ADDRESS = "Argonne, Illinois, USA"}
@string{BELLLABS = "Bell Laboratories"}
@string{BELLLABS-ADDRESS = "Murray Hill, New Jersey, USA"}
@string{CAAM = "Department of Computational and Applied Mathematics, Rice University"}
@string{CRPC = "Center for Research on Parallel Computers"}
@string{COIMBRA = "Department of Mathematics, University of Coimbra"}
@string{COIMBRA-ADDRESS = "Coimbra, Portugal"}
@string{DAMTP = "Department of Applied Mathematics and Theoretical Physics, Cambridge University"}
@string{DAMTP-ADDRESS = "Cambridge, England"}
@string{DUNDEE = "Department of Mathematics, University of Dundee"}
@string{DUNDEE-ADDRESS = "Dundee, Scotland"}
@string{ICMSEC = "Institute of Computational Mathematics and Scientific/Engineering Computing, Chinese Academy of Sciences"}
@string{ICMSEC-ADDRESS = "Beijing, China"}
@string{RICE = "Department of Mathematical Sciences, Rice University"}
@string{RICE-ADDRESS = "Houston, Texas, USA"}
@string{ACRI-CORNELL = "Advanced Computing Research Institute, Cornell Theory Center"}
@string{CS-CORNELL = "Department of Computer Science, Cornell University"}
@string{CORNELL-ADDRESS = "Ithaca, New York, USA"}
@string{FUNDP = "Department of Mathematics, University of Namur"}
@string{FUNDP-ADDRESS = "Namur, Belgium"}
@string{HAMBURG = "Institute of Applied Mathematics, University of Hamburg"}
@string{HAMBURG-ADDRESS = "Hamburg, Germany"}
@string{HARWELL = "{AERE} {H}arwell Laboratory"}
@string{HARWELL-ADDRESS = "Harwell, Oxfordshire, England"}
@string{HATFIELD = "Numerical Optimization Center, Hatfield Polytechnic"}
@string{IBMWATSON = "T. J.
Watson Research Center"}
@string{IBMWATSON-ADDRESS = "Yorktown Heights, NY, USA"}
@string{ICASE = "Institute for Computer Applications in Science and Engineering"}
@string{ICASE-ADDRESS = "NASA Langley Research Center, Hampton, Virginia, USA"}
@string{MADISON = "Computer Sciences Department, University of Wisconsin"}
@string{MADISON-ADDRESS = "Madison, Wisconsin, USA"}
@string{MCCM = "Manchester Centre for Computational Mathematics"}
@string{MCCM-ADDRESS = "Manchester, England"}
@string{NIST = "Applied and Computational Mathematics Division"}
@string{NIST-ADDRESS = "National Institute of Standards and Technology, Gaithersburg, Maryland, USA"}
@string{NPL = "National Physical Laboratory"}
@string{NPL-ADDRESS = "London, England"}
@string{NWU = "Department of Electrical Engineering and Computer Science, Northwestern University"}
@string{NWU-ADDRESS = "Evanston, Illinois, USA"}
@string{OTC = "Optimization Technology Center, Argonne National Laboratory"}
@string{RAL = "Rutherford Appleton Laboratory"}
@string{RAL-ADDRESS = "Chilton, Oxfordshire, England"}
@string{STANFORD = "Department of Operations Research, Stanford University"}
@string{STANFORD-ADDRESS = "Stanford, California, USA"}
@string{MATHWATERLOO = "Faculty of Mathematics, University of Waterloo"}
@string{MATHWATERLOO-ADDRESS = "Waterloo, Ontario, Canada"}
@string{UNICAMP = "Department of Applied Mathematics, IMECC-UNICAMP"}
@string{UNICAMP-ADDRESS = "Campinas, Brasil"}
% Publishers
@string{ADW = "Addison-Wesley Publishing Company"}
@string{ADW-ADDRESS = "Reading, Massachusetts, USA"}
@string{AMS = "American Mathematical Society"}
@string{AMS-ADDRESS = "Providence, Rhode-Island, USA"}
@string{AP = "Academic Press"}
@string{AP-ADDRESS = "London"}
@string{CUP = "Cambridge University Press"}
@string{CUP-ADDRESS = "Cambridge, England"}
@string{FREEMAN = "W. H. Freeman and Company"}
@string{FREEMAN-ADDRESS = "New York and San Francisco"}
@string{KLUWER = "Kluwer Academic Publishers"}
@string{KLUWER-ADDRESS = "Dordrecht, The Netherlands"}
@string{LONGMAN = "Longman Scientific {\&} Technical"}
@string{LONGMAN-ADDRESS = "Harlow, Essex, England"}
@string{MACGH = "McGraw-Hill"}
@string{MACGH-ADDRESS = "New York, USA"}
@string{NH = "North Holland"}
@string{NH-ADDRESS = "Amsterdam, The Netherlands"}
@string{OUP = "Oxford University Press"}
@string{OUP-ADDRESS = "Oxford, England"}
@string{PH = "Prentice-Hall"}
@string{PH-ADDRESS = "Englewood Cliffs, New Jersey, USA"}
@string{WILEY = "J. Wiley and Sons"}
@string{WILEY-ADDRESS = "Chichester, England"}
@string{SIAM = "SIAM"}
@string{SIAM-ADDRESS = "Philadelphia, USA"}
@string{SPRINGER = "Springer Verlag"}
@string{SPRINGER-ADDRESS = "Heidelberg, Berlin, New York"}
@string{WSP = "World Scientific Publishers"}
@string{WSP-ADDRESS = "Singapore"}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% REFERENCES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% A %%%
@misc{Alex98,
  author = {N. M. Alexandrov},
  title = {A Trust-Region Algorithm for Bilevel Optimization},
  howpublished = {Presentation at the Optimization 98 Conference, Coimbra},
  year = 1998,
  abstract = {This paper concerns a trust-region method for solving nonlinear bilevel optimization problems. No special assumptions on structure, such as separability or convexity, are made. Approaches to bilevel optimization usually fall into one of three categories. One set converts the bilevel program into a single-level NLP by using the KKT conditions of the lower-level system as constraints for the upper-level problem. Another approximates problems of both levels by series of unconstrained problems resulting in double-penalty methods. The third category consists of descent methods on the upper level problem with the use of some gradient information from the lower-level problem. The first two approaches suffer from a number of difficulties.
The algorithm presented here is related to those of the third category and it can be extended to multilevel optimization. The development of the algorithm is motivated by applications in multidisciplinary optimization. The paper presents the algorithm, its analysis, and a number of numerical examples. It also contrasts the algorithm with a representative example of the first class---the collaborative optimization algorithm.}, summary = {A trust-region method for solving nonlinear bilevel optimization problems is presented without any special assumptions on structure, such as separability or convexity. The algorithm is related to descent methods on the upper level problem which use some gradient information from the lower-level problem, and can be extended to multilevel optimization. It is motivated by applications in multidisciplinary optimization. The algorithm, its analysis, and a number of numerical examples are discussed and contrasted with the collaborative optimization method.}} @techreport{AlexDenn94a, author = {N. M. Alexandrov and J. E. Dennis}, title = {Multilevel algorithms for nonlinear optimization}, institution = ICASE, address = ICASE-ADDRESS, number = {94-53}, year = 1994, abstract = {Multidisciplinary design optimization (MDO) gives rise to nonlinear optimization problems characterized by a large number of constraints that naturally occur in blocks. We propose a class of multilevel optimization methods motivated by the structure and number of constraints and by the expense of the derivative computations for MDO. The algorithms are an extension to the nonlinear programming problem of the successful class of local \citebb{Brow69}--\citebb{Bren73} algorithms for nonlinear equations. Our extensions allow the user to partition constraints into arbitrary blocks to fit the application, and they separately process each block and the objective function, restricted to certain subspaces. 
The methods use trust regions as a globalization strategy, and they have been shown to be globally convergent under reasonable assumptions. The multilevel algorithms can be applied to all classes of MDO formulations. Multilevel algorithms for solving nonlinear systems of equations are a special case of the multilevel optimization methods. In this case, they can be viewed as a trust-region globalization of the Brown-Brent class.}, summary = {Multidisciplinary design optimization (MDO) gives rise to nonlinear optimization problems with a large number of constraints that occur in blocks. A class of multilevel optimization methods motivated by their structure and number and by the expense of the derivative computations for MDO is proposed. The algorithms are an extension of the local \citebb{Brow69}--\citebb{Bren73} algorithms for nonlinear equations. They allow the user to partition constraints into arbitrary blocks to fit the application, and they separately process each block and the objective function, restricted to certain subspaces. The methods use trust regions as a globalization strategy, and are globally convergent.}} @techreport{AlexDenn94b, author = {N. M. Alexandrov and J. E. Dennis}, title = {Algorithms for bilevel optimization}, institution = ICASE, address = ICASE-ADDRESS, number = {94--77}, year = 1994, abstract = {General multilevel nonlinear optimization problems arise in design of complex systems and can be used as a means of regularization for multicriteria optimization problems. Here for clarity in displaying our ideas, we restrict ourselves to general bilevel optimization problems, and we present two solution approaches. Both approaches use a trust-region globalization strategy, and they can be easily extended to handle the general multilevel problem. We make no convexity assumptions, but we do assume that the problem has a nondegenerate feasible set. 
We consider necessary optimality conditions for the bilevel problem formulations and discuss results that can be extended to obtain multilevel optimization formulations with constraints at each level.}, summary = {Two solution approaches are presented for general bilevel optimization problems. Both use a trust-region globalization strategy, and can be extended to handle the general multilevel problem. No convexity assumptions are made, but it is assumed that the problem has a non-degenerate feasible set. Necessary optimality conditions for the bilevel problem formulations are considered and the results extended to obtain multilevel optimization formulations with constraints at each level.}} @techreport{AlexDenn99, author = {N. M. Alexandrov and J. E. Dennis}, title = {A Class of General Trust-Region Multilevel Algorithms for Nonlinear Constrained Optimization: Global Convergence Analysis}, institution = CRPC, address = RICE-ADDRESS, number = {TR99786-S}, year = 1999, abstract = {This paper presents a broad class of trust-region multilevel algorithms for solving large, nonlinear, equality constrained problems, as well as a global convergence analysis for the class. The work is motivated by engineering optimization problems with naturally occurring, densely or fully-coupled subproblem structure. The constraints are partitioned into blocks, the number and composition of which are determined by the application. At every iteration, a multilevel algorithm minimizes models of the reduced constraint blocks, followed by a reduced model of the objective function, in a sequence of subproblems, each of which yields a substep. The trial step is the sum of these substeps. The salient feature of the multilevel class is that there is no prescription on how the substeps must be computed. Instead, each substep is required to satisfy mild sufficient decrease and boundedness conditions on the restricted model that it minimizes. 
Within a single trial step computation, all substeps can be computed by different methods appropriate to the nature of each subproblem. This feature is important for the applications of interest in that it allows for a wide variety of step-choice rules. The trial step is evaluated via one of two merit functions that take into account the autonomy of subproblem processing. The multilevel procedure presented in this work is sequential. If a problem exhibits full or partial separability, or if separability is induced by introducing auxiliary variables, then the multilevel algorithms can easily be stated in parallel form. However, since this work is devoted to analysis, we consider the most general case, that of a fully coupled problem.}, summary = {A globally convergent class of trust-region multilevel algorithms for solving equality constrained problems is presented, motivated by engineering optimization problems with densely coupled subproblem structure. The constraints are partitioned into blocks. At every iteration, a multilevel algorithm minimizes models of the reduced constraint blocks, followed by a reduced model of the objective function, each yielding a substep. The trial step is the sum of these substeps. Each substep is required to satisfy sufficient decrease and boundedness conditions on its restricted model and can be computed by a specialized method. The trial step is evaluated via one of two merit functions that take into account the autonomy of subproblem processing.}} @article{AlexDennLewiTorc98, author = {N. M. Alexandrov and J. E. Dennis and R. M. Lewis and V. Torczon}, title = {A Trust Region Framework for Managing the Use of Approximation Models}, journal = {Structural Optimization}, volume = 15, number = 1, pages = {16--23}, year = 1998, abstract = {This paper presents an analytically robust, globally convergent approach to managing the use of approximation models of various fidelity in optimization. 
By robust global behavior we mean the mathematical assurance that the iterates produced by the optimization algorithm, started at an arbitrary initial iterate, will converge to a stationary point or local optimizer for the original problem. The approach we present is based on the trust region idea from nonlinear programming and is shown to be provably convergent to a solution of the original high-fidelity problem. The proposed method for managing approximations in engineering optimization suggests ways to decide when the fidelity, and thus the cost, of the approximations might be fruitfully increased or decreased in the course of the optimization iterations. The approach is quite general. We make no assumptions on the structure of the original problem, in particular, no assumptions of convexity and separability, and place only mild requirements on the approximations. The approximations used in the framework can be of any nature appropriate to an application; for instance, they can be represented by analyses, simulations, or simple algebraic models. This paper introduces the approach and outlines the convergence analysis.}, summary = {A robust, globally convergent approach to using approximation models of various fidelity is presented. It is based on trust regions and converges to a solution of the original high-fidelity problem. The method suggests ways to decide when the fidelity, and thus the cost, of the approximations might be altered in the course of the iterations. No assumptions on the structure of the original problem, such as convexity and separability, are made, and the requirements on the approximations are mild. These can be of any nature appropriate to an application; for instance, they can be represented by analyses, simulations, or simple algebraic models.}} @article{Aliz95, author = {F. 
Alizadeh}, title = {Interior point methods in semidefinite programming with applications to combinatorial optimization}, journal = SIOPT, volume = 5, number = 1, pages = {13--51}, year = 1995} @inproceedings{Alle95, author = {D. M. Allen}, title = {Tailoring nonlinear least squares algorithms for the analysis of compartment models}, booktitle = {Computationally Intensive Statistical Methods. Proceedings of the 26th Symposium on the Interface, Fairfax Station, VA, USA}, editor = {J. Sall and A. Lehman}, volume = 26, pages = {533--535}, year = 1995, abstract = {Compartment models are widely used in pharmacokinetics. Our objective is to fit compartment models to data. General numerical optimization methods frequently perform poorly for this purpose. A good book on numerical optimization, such as by J.E. Dennis and R.B. Schnabel, describes multiple techniques and discusses the advantages and disadvantages of each. In order to implement these methods in a software product, one must make a number of decisions. For example, should a line search method or a trust region method be used? How should variables on widely different scales be handled? In general, these are questions without clear-cut answers. Compartment models are defined by linear differential equations. Consequently, compartment models have a particular structure. We have tailored general optimization methods to exploit this structure. Through study and experimentation we have found workable answers to the questions posed above.}, summary = {Compartment models are defined by linear differential equations and are widely used in pharmacokinetics. It is shown how general optimization methods, including linesearch and trust-region methods can be tailored to exploit the special structure of such models.}} @article{AllgBohmPotrRhei86, author = {E. L. Allgower and K. B\"{o}hmer and F. A. Potra and W. C. 
Rheinboldt}, title = {A mesh-independence principle for operator equations and their discretizations}, journal = SINUM, volume = 23, number = 1, pages = {160--169}, year = 1986} @article{Amay85, author = {J. Amaya}, title = {On the convergence of curvilinear search algorithms in unconstrained optimization}, journal = {Operations Research Letters}, volume = 4, number = 1, pages = {31--34}, year = 1985, abstract = {The purpose of this paper is to unify the conditions under which curvilinear algorithms for unconstrained optimization converge. Particularly, two gradient path approximation algorithms and a trust-region curvilinear algorithm are examined in this context.}, summary = {The conditions under which curvilinear algorithms for unconstrained optimization converge are unified. Two gradient path approximation algorithms and a trust-region curvilinear algorithm are examined in this light.}} @book{AndeBaiBiscDemmDongDuCrGreeHammMcKeOstrSore95, author = {E. Anderson and Z. Bai and C. Bischof and J. Demmel and J. J. Dongarra and J. DuCroz and A. Greenbaum and S. Hammarling and A. McKenney and S. Ostrouchov and D. C. Sorensen}, title = {LAPACK Users' Guide}, publisher = SIAM, address = SIAM-ADDRESS, edition = {second}, year = 1995} @inproceedings{AndeGondMeszXu96, author = {E. D. Andersen and J. Gondzio and C. M\'{e}sz\'{a}ros and X. Xu}, title = {Implementation of interior point methods for large scale linear programming}, crossref = {Terl96}, pages = {189--252}} @article{AndrVice99, author = {D. A. Andrews and L. N. Vicente}, title = {Characterization of the Smoothness and Curvature of a Marginal Function for Trust-Region Problem}, journal = MP, volume = 84, number = 1, pages = {123--137}, year = 1999, abstract = {This paper studies the smoothness and curvature of a marginal function for a trust-region problem. In this problem, a quadratic function is minimized over an ellipsoid. The marginal function considered is obtained by perturbing the trust radius, i.e. 
by changing the size of the ellipsoid constraint. The values of the marginal function and of its first and second derivatives are explicitly calculated in all possible scenarios. A complete study of the smoothness and curvature of this marginal function is given. The main motivation for this work arises from an application in statistics.}, summary = {The smoothness and curvature of a marginal function for a scaled $\ell_2$ trust-region problem are studied. The marginal function is obtained by perturbing the trust radius. The values of the marginal function and of its first and second derivatives are explicitly calculated in all possible cases. A complete study of the smoothness and curvature is given. The work is motivated by an application in statistics.}} @techreport{AntoGior96, author = {F. Antonio and G. Giorgio}, title = {A Bundle type Dual-ascent Approach to Linear Multi-Commodity Min Cost Flow Problems}, institution = {Pisa University}, address = {Italy}, number = {TR96-01}, year = 1996, abstract = {We present a cost decomposition approach to Linear Multicommodity Min Cost Flow problems, based on dualizing the mutual capacity constraints: the resulting Lagrangian Dual is solved by means of a new, specialized Bundle type-dual ascent algorithm. Although decomposition approaches are generally believed not to be competitive, even for the solution of large-scale network structured problems, we present evidence based on extensive computational comparisons that a careful implementation of a decomposition algorithm can outperform several other approaches, especially on problems where the number of commodities is "large" w.r.t. the size of the graph. Our specialized Bundle algorithm is characterised by a new heuristic for the trust region parameter handling, and embeds a custom, fast Quadratic Programming solver that permits the implementation of a Lagrangian variables generation strategy. 
The Lagrangian Relaxation solver is capable of exploiting the structural properties of the single-commodity Min Cost Flow subproblems to avoid using a "generic" MCF solver whenever it is possible. The proposed approach can be easily extended to handle extra constraints or variants such as the Nonsimultaneous Multicommodity problem. In our computational experience, we also studied the impact on the relative efficiencies of the different approaches tested of some characteristics, such as the number of commodities w.r.t. the total size of the problem.}, summary = {A cost decomposition approach to Linear Multicommodity Min Cost Flow problems, based on dualizing the mutual capacity constraints, is presented: the resulting Lagrangian Dual is solved by means of a specialized Bundle type-dual ascent algorithm. Extensive computational comparisons show that a careful implementation of a decomposition algorithm can outperform several other approaches, especially on problems where the number of commodities is ``large'' with respect to the size of the graph. The specialized Bundle algorithm is characterised by a heuristic for handling the trust region parameter, and embeds a specialized, efficient quadratic programming solver that permits the implementation of a Lagrangian variables generation strategy. Numerical tests illustrate the relative efficiencies of the different approaches.}} @article{AriyLau92, author = {K. A. Ariyawansa and D. T. M. Lau}, title = {On the Updating Scheme in a Class of Collinear Scaling Algorithms for Sparse Minimization}, journal = JOTA, volume = 75, number = 1, pages = {183--193}, year = 1992, abstract = {Sorensen has proposed a class of algorithms for sparse unconstrained minimization where the sparsity pattern of the Cholesky factors of the Hessian is known. His updates at each iteration depend on the choice of a vector, and in this reference the question of choosing this vector is essentially left open. 
In this note, we propose a variational problem whose solution may be used to choose that vector. The major part of the computation of a solution of this variational problem is similar to the computation of a trust-region step in unconstrained optimization. Therefore, well-developed techniques available for the latter problem can be used to compute this vector and to perform the updating.}, summary = {The techniques for solving the trust-region subproblem are used for solving a variational problem arising in the updating of sparse Hessian approximations for large-scale unconstrained optimization.}} @techreport{AshcGrimLewi95, author = {C. Ashcraft and R. G. Grimes and J. G. Lewis}, title = {Accurate Symmetric Indefinite Linear Equation Solvers}, institution = {Boeing Computer Services}, address = {Seattle, Washington, USA}, year = 1995} @mastersthesis{Auer93, author = {G. Auer}, title = {Numerische {B}ehandlung von {T}rust {R}egion {P}roblemen}, school = {Technical University of Graz}, address = {Graz, Austria}, year = 1993} @article{Axel72, author = {O. Axelsson}, title = {A generalized {SSOR} method}, journal = BIT, volume = 12, pages = {443--467}, year = 1972} @book{Axel96, author = {O. Axelsson}, title = {Iterative Solution Methods}, publisher = CUP, address = CUP-ADDRESS, year = 1996} %%% B %%% @inproceedings{BandChenMads88, author = {J. W. Bandler and S. H. Chen and K. Madsen}, title = {An algorithm for one-sided $\ell_1$ optimization with application to circuit design centering}, booktitle = {Proceedings 1988 IEEE International Symposium on Circuits and Systems}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 2, pages = {1795--1798}, year = {1988}, abstract = {A highly efficient algorithm for one-sided nonlinear $\ell_1$ optimization combines a trust region Gauss-Newton method and a quasi-Newton method. 
The proposed method is used as an integral part of an approach to design centering and yield enhancement.}, summary = {A highly efficient algorithm for one-sided nonlinear $\ell_1$ optimization combines a trust-region Gauss-Newton method and a quasi-Newton method. The proposed method is used as an integral part of an approach to design centering and yield enhancement.}} @article{Bann94, author = {T. Bannert}, title = {A trust region algorithm for nonsmooth optimization}, journal = MP, volume = 67, number = 2, pages = {247--264}, year = 1994, abstract = {A trust region algorithm is proposed for minimizing the nonsmooth composite function $F(x)=h(f(x))$, where $f$ is smooth and $h$ is convex. The algorithm employs a smoothing function, which is closely related to \citebb{Flet70b}'s exact differentiable penalty functions. Global and local convergence results are given, considering convergence to a strongly unique minimizer and to a minimizer satisfying second-order sufficiency conditions.}, summary = {A trust-region algorithm is proposed for minimizing the non-smooth composite function $F(x)=h(f(x))$, where $f$ is smooth and $h$ is convex. It uses a smoothing function closely related to \citebb{Flet70b}'s exact differentiable penalty function. Global and local convergence to a strongly unique minimizer and to a minimizer satisfying second-order sufficiency conditions is considered.}} @article{BakrBandBiemChenMads98, author = {M. H. Bakr and J. W. Bandler and R. M. Biemacki and S. H. Chen and K. Madsen}, title = {A trust region aggressive space mapping algorithm for {EM} optimization}, journal = {IEEE Transactions on Microwave Theory and Techniques}, volume = 46, number = {12, part~2}, pages = {2412--2425}, year = 1998, abstract = {A new robust algorithm for EM optimization of microwave circuits is presented. The algorithm integrates a trust region methodology with aggressive space mapping (ASM). A new automated multipoint parameter extraction process is implemented. 
EM optimization of a double-folded stub filter and of an HTS filter illustrate our new results.}, summary = {A new robust algorithm for EM optimization of microwave circuits is presented. The algorithm integrates a trust region methodology with aggressive space mapping (ASM). A new automated multipoint parameter extraction process is implemented. EM optimization of a double-folded stub filter and of an HTS filter illustrate our new results.}} @article{BakrBandGeorMads99, author = {M. H. Bakr and J. W. Bandler and N. Georgieva and K. Madsen}, title = {A hybrid aggressive space mapping algorithm for {EM} optimization}, journal = {IEEE MTT-S International Microwave Symposium Digest}, volume = 1, pages = {265--268}, year = 1999, abstract = {We present a novel, hybrid aggressive space mapping (HASM) optimization algorithm. HASM is a hybrid approach exploiting both the trust region aggressive space mapping (TRASM) algorithm and direct optimization. It does not assume that the final space-mapped design is the true optimal design and is robust against severe misalignment between the coarse and the fine models. The algorithm is based on a novel lemma that enables smooth switching from the TRASM optimization to direct optimization and vice versa. The new algorithm has been tested on several microwave filters and transformers.}, summary = {A hybrid aggressive space mapping (HASM) optimization method, combining the trust region aggressive space mapping (TRASM) algorithm and direct optimization techniques, is given. No assumption that the final space-mapped design is the true optimal design is made, and the method is robust against severe misalignment between coarse and fine models. The algorithm is based on theoretical results that enable smooth switching from the TRASM optimization to direct optimization and vice versa. The algorithm is tested on several microwave filters and transformers.}} @article{BaraSand92, author = {R. Barakat and B. H. 
Sandler}, title = {Determination of the wave-front aberration function from measured values of the point-spread function---A 2-dimensional phase retrieval problem}, journal = {Journal of the Optical Society of America A-Optics Image Science and Vision}, volume = 9, number = 10, pages = {1715--1723}, year = 1992, abstract = {We outline a method for the determination of the unknown wave-front aberration function of an optical system from noisy measurements of the corresponding point-spread function. The problem is cast as a nonlinear unconstrained minimization problem, and trust region techniques are employed for its solution in conjunction with analytic evaluations of the Jacobian and Hessian matrices governing slope and curvature information. Some illustrative numerical results are presented and discussed.}, summary = {A method for the determination of the unknown wave-front aberration function of an optical system from noisy measurements of the corresponding point-spread function is considered. The problem is cast as a unconstrained nonlinear minimization problem. Trust-region techniques are employed for its solution, using analytic expressions of the Jacobian and Hessian matrices. Illustrative numerical results are discussed.}} @article{BarlTora95, author = {J. L. Barlow and G. Toraldo}, title = {The effect of diagonal scaling on projected gradient methods for bound constrained quadratic programming problems}, journal = OMS, volume = 5, number = 3, pages = {235--245}, year = 1995} @inproceedings{BartGoluSaun70, author = {R. H. Bartels and G. H. Golub and M. A. Saunders}, title = {Numerical techniques in mathematical programming}, crossref = {RoseMangRitt70}, pages = {123--176}} @article{BazaGood82, author = {M. S. Bazaraa and J. J. Goode}, title = {Sufficient conditions for a globally exact penalty-function without convexity}, journal = MPS, volume = 19, pages = {1--15}, year = 1982} @incollection{Beal67, author = {E. M. L. 
Beale}, title = {Numerical Methods}, booktitle = {Nonlinear programming}, editor = {J. Abadie}, publisher = NH, address = NH-ADDRESS, pages = {135--205}, year = 1967} @article{Bell90, author = {B. M. Bell}, title = {Global convergence of a semi-infinite optimization method}, journal = {Applied Mathematics and Optimization}, volume = 21, pages = {69--88}, year = 1990, abstract = {A new algorithm for minimizing locally Lipschitz functions using approximate function values is presented. It yields a method for minimizing semi-infinite exact penalty functions that parallels the trust-region methods used in composite nondifferentiable optimization. A finite method for approximating a semi-infinite exact penalty function is developed. A uniform implicit function theorem is established during this development. An implementation and test results for the approximate penalty function are included.}, summary = {An algorithm for minimizing locally Lipschitz functions using approximate function values is presented. It yields a method for minimizing semi-infinite exact penalty functions that parallels the trust-region methods used in composite non-differentiable optimization. A finite method for approximating a semi-infinite exact penalty function is developed. A uniform implicit function theorem is established during this development. An implementation and test results for the approximate penalty function are included.}} @article{BellRoga95, author = {M. Bellare and P. Rogaway}, title = {The complexity of approximating a nonlinear program}, journal = MP, volume = 69, number = 3, pages = {429--441}, year = 1995} @article{BenTTebo96, author = {A. Ben{-}Tal and M. Teboulle}, title = {Hidden convexity in some nonconvex quadratically constrained quadratic-programming}, journal = MP, volume = 72, number = 1, pages = {51--63}, year = 1996, abstract = {We consider the problem of minimizing an indefinite quadratic objective function subject to two-sided indefinite quadratic constraints. 
Under a suitable simultaneous diagonalization assumption (which trivially holds for trust-region type problems), we prove
Nemirovskii}, title = {Robust Truss Topology Design via Semidefinite Programming}, journal = SIOPT, volume = 7, number = 4, pages = {991--1016}, year = 1997} @article{BereCler97, author = {Y. Bereaux and J. R. Clermont}, title = {Numerical simulation of two- and three-dimensional complex flows of viscoelastic fluids using the stream-tube method}, journal = {Mathematics and Computers in Simulation}, volume = 44, number = 4, pages = {387--400}, year = 1997, abstract = {The present paper examines the stream-tube method in two-and three-dimensional duct flows. The analysis uses the concept of stream-tubes in a mapped computational domain of the physical domain, where streamlines are parallel and straight. The primary unknown of the problem includes the transformation between the two domains, together with the pressure. Mass conservation is automatically verified by the formulation. Memory-integral constitutive equations may be considered without the particle-tracking problem. The method is applied to flows in contractions and a three-dimensional flow involving a threefold rotational symmetry. Viscous and elastic liquids involving memory-integral equations are investigated in the flow simulations. The discretized schemes for the unknowns are presented and the relevant equations solved by using optimization procedures such as the Levenberg-Marquardt and trust-region methods.}, summary = {The stream-tube method in two-and three-dimensional duct flows is analyzed using the concept of stream-tubes in a mapped computational domain of the physical domain, where streamlines are parallel and straight. The primary unknown of the problem includes the transformation between the two domains and the pressure. Mass conservation is automatically verified by the formulation. Memory-integral constitutive equations may be considered without the particle-tracking problem. The method is applied to flows in contractions and a three-dimensional flow involving a threefold rotational symmetry. 
Viscous and elastic liquids involving memory-integral equations are investigated. The discretized
Gomes}, title = {Dynamical Control of Infeasibility in Nonlinearly Constrained Optimization}, howpublished = {Presentation at the Optimization 98 Conference, Coimbra}, year = 1998, abstract = {We present a new algorithm for nonconvex nonlinear programming problems with equality constraints in the form $\min f(x)$ subject to $h(x)=0$. The algorithm keeps some characteristics of feasible point methods, but is concerned with flexibilizing the equality restrictions. Essentially, the idea is to avoid both forcing the iterates to stay too near the feasible set and using a merit function. Each major iteration of the algorithm is divided in two phases. In the first, we seek a vertical step $d_v$ such that the infeasibility $x_c=x_k+d_v$ stays controlled, in the sense that $x_c$ satisfies $\|h(x_c)\|=O(\|g_p(x_c)\|)$, where $g_p(x_c)$ is the orthogonal projection of $\nabla f(x_c)$ in the tangent space to the restrictions. Frequently, $x_k$ satisfies this condition, so we can make $x_c=x_k$. The aim of the second phase is to find a horizontal step $d_h$ that reduces the Lagrangian and stays approximately tangent to the constraints. This is done by confining $x_c+d_h$ to a cylinder with radius $r=O(\|g_p(x_c)\|)$ around $h(x)=0$. The new algorithm is well-suited for large-scale problems. Implementation details, as well as some preliminary numerical results based on problems from the CUTE collection, are presented. We also prove global convergence results.}, summary = {An algorithm is presented for solving non-convex problems of the form $\min f(x)$ subject to $h(x)=0$. At each major iteration, one first seeks a normal step $d_v$ such that $x_c$ satisfies $\|h(x_c)\|=O(\|g_p(x_c)\|)$, where $g_p(x_c)$ is the orthogonal projection of $\nabla f(x_c)$ on the tangent space. A tangential step $d_h$ is then computed that reduces the Lagrangian and stays approximately tangent to the constraints. 
This is done by confining $x_c+d_h$ to a cylinder with radius $r=O(\|g_p(x_c)\|)$ around $h(x)=0$. Implementation details, as well as preliminary numerical results on problems from the {\sf CUTE} collection, are presented. Global convergence results are also proved.}} @article{Bier94, author = {M. Bierlaire}, title = {{HieLoW}: un logiciel d'estimation de mod\`eles Logit embo\^\i t\'es}, journal = {Cahiers du MET}, volume = 2, pages = {29--43}, month = {Novembre}, year = 1994} @techreport{Bier95a, author = {M. Bierlaire}, title = {A robust algorithm for the simultaneous estimation of hierarchical logit models}, institution = FUNDP, address = FUNDP-ADDRESS, type = {GRT Report}, number = {95/3}, year = 1995, abstract = {Estimating simultaneous hierarchical logit models is conditional to the availability of suitable algorithms. Powerful mathematical programs are necessary to maximize the associated non-linear, non-convex, log-likelihood function. Even if classical methods (e.g. Newton-Raphson) can be adapted for relatively simple cases, the need of an efficient and robust algorithm is justified to enable practioners to consider a wider class of models. The purpose of this paper is to analyze and to adapt to this context methodologies available in the optimization literature. An algorithm is proposed based on two major concepts from non-linear programming : \emph{a trust-region method}, that ensures robustness and global convergence, and \emph{a conjugate gradients iteration}, that can be used to solve the quadratic subproblems arising in the estimation process described in this paper. Numerical experiments are finally presented that indicate the power of the proposed algorithm and associated software.}, summary = {A trust-region method is proposed for the estimation of simultaneous hierarchical logit models, where the subproblem is solved by a truncated conjugate-gradients technique. 
Numerical experiments indicate the power of the proposed algorithm and associated software.}} @inproceedings{Bier98, author = {M. Bierlaire}, title = {Discrete Choice Models}, crossref = {LabbLapoTancToin98}, pages = {203--227}} @article{BierToin95, author = {M. Bierlaire and Ph. L. Toint}, title = {{MEUSE}: an Origin-Destination Estimator That Exploits Structure}, journal = {Transportation Research B}, volume = 29, number = 1, pages = {47--60}, year = 1995} @article{BierToinTuyt91, author = {M. Bierlaire and Ph. L. Toint and D. Tuyttens}, title = {On iterative algorithms for linear least squares problems with bound constraints}, journal = LAA, volume = 143, pages = {111--143}, year = 1991, abstract = {Three new iterative methods for the solution of the linear least squares problem with bound constraints are presented and their performance analyzed. The first is a modification of a method proposed by \citebb{Lots84}, while the two others are characterized by a technique allowing for fast active set changes, resulting in noticeable improvements in the speed with which constraints active at the solution are identified. The numerical efficiency of these algorithms is studied, with particular emphasis on the dependence on the starting point and the use of preconditioning for ill-conditioned problems.}, summary = {Three iterative methods for the solution of the linear least-squares problem with bound constraints are presented and their performance analyzed. The first is a modification of a method proposed by \citebb{Lots84}, while the others two allow fast active set changes. The numerical efficiency of these algorithms is studied.}} @inproceedings{Bigg72, author = {M. C. Biggs}, title = {Constrained Minimization Using Recursive Equality Quadratic Programming}, crossref = {Loot72}, pages = {411--428}} @article{Bigg87, author = {M. C. 
Bartholomew{-}Biggs}, title = {Recursive quadratic-programming methods based on the augmented {L}agrangian}, journal = MPS, volume = 31, pages = {21--41}, year = 1987} @article{BillFerr97, author = {S. C. Billups and M. C. Ferris}, title = {{QPCOMP}: A Quadratic Program Based Solver for Mixed Complementarity Problems}, journal = MP, volume = 76, number = 3, pages = {533--562}, year = 1997} @book{Bjor96, author = {{\AA}. Bj{\"o}rck}, title = {Numerical Methods for Least Squares Problems}, publisher = SIAM, address = SIAM-ADDRESS, year = 1996} @article{Bock96, author = {C. Bockmann}, title = {Curve-fitting and identification of physical spectra}, journal = JCAM, volume = 70, number = 2, pages = {207--224}, year = 1996, abstract = {A modification of the trust-region Gauss-Newton method for the identification of physical spectra is described and analysed. Local convergence results are presented.}, summary = {A modification of the trust-region Gauss-Newton method for the identification of physical spectra is described and analysed. Local convergence results are presented.}} @inproceedings{BockSchlSchu95, author = {H. G. Bock and J. P. Schl\"{o}der and V. H. Schulz}, title = {Numerik gro\ss er {D}ifferentiell-{A}lgebraischer {G}leichungen---{S}imulation und {O}ptimierung}, booktitle = {Proce\ss simulation}, editor = {H. Schuler}, publisher = {VCH Verlaggesellschaft}, address = {Weinheim, Germany}, pages = {35--80}, year = 1995} @article{Bofi95, author = {J. M. Bofill}, title = {A Conjugate-Gradient Algorithm with a Trust Region for Molecular-Geometry Optimization}, journal = {Journal of Molecular Modeling}, volume = 1, number = 1, pages = {11--17}, year = 1995, abstract = {An algorithm is presented for the optimization of molecular geometries and general non-quadratic functions using the nonlinear conjugate gradient method with a restricted step and a restart procedure. 
The algorithm only requires the evaluation of the energy function and its gradient and less memory storage is needed than for other conjugate gradient algorithms. Some numerical results are also presented and the efficiency and behaviour of the algorithm is compared with the standard conjugate gradient method. On the other hand we present comparisons of both conjugate gradient and variable metric methods with and without the trust-region technique. One of the main conclusions of the present work is that a trust region always improves the convergence of an optimization method. A sketch of the algorithm is also given.}, summary = {An algorithm is presented for the optimization of molecular geometries and general functions using the nonlinear conjugate-gradient method with a restricted step and a restart procedure. The algorithm requires less memory storage than other conjugate-gradient algorithms. Numerical results are presented, and the efficiency of the algorithm is compared with the standard conjugate-gradient method. A comparison of both conjugate-gradient and variable-metric methods with and without the trust-region technique is made. It is concluded that a trust region always improves the convergence. A sketch of the algorithm is given.}} @article{BoggByrdSchn87, author = {P. T. Boggs and R. H. Byrd and R. B. Schnabel}, title = {A stable and efficient algorithm for nonlinear orthogonal distance regression}, journal = SISSC, volume = 8, number = 6, pages = {1052--1078}, year = 1987, abstract = {One of the most widely used methodologies in scientific and engineering research is the fitting of equations to data by least squares. In cases where significant observation errors exist in the independent variables as well as the dependent variables, however, the ordinary least squares (OLS) approach, where all the error are attributed to the dependent variable, is often inappropriate. 
An alternate approach, suggested by several researchers, involves minimizing the sum of squared orthogonal distances between each data point and the curve described by the model equation. We refer to this as orthogonal distance regression (ODR). This paper describes a method for solving the orthogonal distance regression problem that is a direct analog of the trust region Levenberg-Marquardt algorithm. The number of unknowns involved is the number of model parameters plus the number of data points, often a very large number. By exploiting sparsity, however, our algorithm has a computational effort per step which is of the same order as required for the Levenberg-Marquardt method for ordinary least squares. We prove our algorithm to be globally and locally convergent, and perform computational tests that illustrate some differences between ODR and OLS.}, summary = {A method for solving the orthogonal distance regression problem is described that is an analog of the trust-region Levenberg-Morrison-Marquardt algorithm. The number of unknowns involved is the number of model parameters plus the number of data points, often a very large number. By exploiting sparsity, the computational effort per step is of the same order as that required for the Levenberg-Morrison-Marquardt method for ordinary least-squares. The algorithm is proved to be globally and locally convergent. Computational tests illustrate differences between orthogonal distance regression and ordinary least-squares.}} @article{BoggDonaByrdSchn89, author = {P. T. Boggs and J. R. Donaldson and R. H. Byrd and R. B. Schnabel}, title = {{ORDPACK} software for weighted orthogonal distance regression}, journal = TOMS, volume = 15, number = 4, pages = {348--364}, year = 1989, abstract = {In this paper, we describe ORDPACK, a software package for the weighted orthogonal distance regression problem. 
This software is an implementation of the algorithm described in \citebb{BoggByrdSchn87} for finding the parameters that minimize the sum of the squared weighted orthogonal distances from a set of observations to a curve or surface determined by the parameters. It can also be used to solve the ordinary nonlinear least squares problem. The weighted orthogonal distance regression procedure implemented is an efficient and stable trust region (Levenberg-Marquardt) procedure that exploits the structure of the problem so that the computational cost per iteration is equal to that for the same type of algorithm applied to the ordinary least squares problem. The package allows a general weighting scheme, provides for finite difference derivatives, and contains extensive error checking and report generating facilities.}, summary = {ORDPACK, a software package for the weighted orthogonal distance regression problem is described. This software is an implementation of the algorithm described in \citebb{BoggByrdSchn87} for finding the parameters that minimize the sum of the squared weighted orthogonal distances from a set of observations to a curve or surface determined by the parameters. It can also be used to solve the ordinary nonlinear least-squares problem. The package allows a general weighting scheme, provides for finite difference derivatives, and contains extensive error checking and report generating facilities.}} @techreport{BoggDomiRoge95, author = {P. T. Boggs and P. D. Domich and J. E. Rogers}, title = {An interior point method for general large-scale quadratic programming problems}, institution = NIST, address = NIST-ADDRESS, type = {Internal report}, number = {NISTIR 5406}, year = 1995} @article{BoggToll89, author = {P. T. Boggs and J. W. Tolle}, title = {A strategy for global convergence in a sequential quadratic programming algorithm}, journal = SINUM, volume = 26, number = 3, pages = {600-623}, year = 1989} @article{BoggToll95, author = {P. T. Boggs and J. W. 
Tolle}, title = {Sequential quadratic programming}, journal = {Acta Numerica}, volume = 4, pages = {1--51}, year = 1995} @techreport{BoggTollKear91, author = {P. T. Boggs and J. W. Tolle and A. J. Kearsley}, title = {A merit function for inequality constrained nonlinear programming problems}, institution = NIST, address = NIST-ADDRESS, type = {Internal report}, number = {NISTIR 4702}, year = 1991} @techreport{BoggTollKear94, author = {P. T. Boggs and J. W. Tolle and A. J. Kearsley}, title = {A practical algorithm for general large scale nonlinear optimization problems}, institution = NIST, address = NIST-ADDRESS, type = {Internal report}, number = {NISTIR 5407}, year = 1994} @article{BoggKearToll99, author = {P. T. Boggs and A. J. Kearsley and J. W. Tolle}, title = {A Practical Algorithm for General Large Scale Nonlinear Optimization Problems}, journal = SIOPT, volume = 9, number = 3, pages = {755--778}, year = 1999, abstract = {We provide an effective and efficient implementation of a sequential quadratic programming (SQP) algorithm for the general large scale nonlinear programming problem. In this algorithm the quadratic programming subproblems are solved by an interior point method that can be prematurely halted by a trust region constraint. Numerous computational enhancements to improve the numerical performance are presented. These include a dynamic procedure for adjusting the merit function parameter and procedures for adjusting the trust region radius. Numerical results and comparisons are presented. }, summary = {An effective and efficient implementation of a sequential quadratic programming (SQP) algorithm for the general large scale nonlinear programming problem is given. The quadratic programming subproblems are solved by an interior point method, and can be prematurely halted by a trust region constraint. Numerous computational enhancements to improve the numerical performance are presented. 
These include a dynamic procedure for adjusting the merit function parameter and procedures for adjusting the trust region radius. Numerical results and comparisons are presented.}} @article{BongConnGoulToin95, author = {I. Bongartz and A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {{\sf CUTE}: {C}onstrained and {U}nconstrained {T}esting {E}nvironment}, journal = TOMS, volume = 21, number = 1, pages = {123--160}, year = 1995} %abstract = {The purpose of this paper is to discuss the scope and % functionality of a versatile environment for testing small % and large-scale nonlinear optimization algorithms. % Although many of these facilities were originally produced % by the authors in conjunction with the software package % {\sf LANCELOT}, we believe that they will be useful in % their own right and should be available to researchers for % their development of optimization software. The tools are % available by anonymous ftp from a number of sources and % may, in many cases, be installed automatically. The scope % of a major collection of test problems written in the % standard input format (SIF) used by the {\sf LANCELOT} % software package is described. Recognising that most % software was not written with the SIF in mind, we provide % tools to assist in building an interface between this % input format and other optimization packages. These tools % already provide a link between the SIF and an number of % existing packages, including MINOS and OSL. In addition, % as each problem includes a specific classification that is % designed to be useful in identifying particular classes of % problems, facilities are provided to build and manage a % database of this information. There is a UNIX and C-shell % bias to many of the descriptions in the paper, since, for % the sake of simplicity, we do not illustrate everything in % its fullest generality. 
We trust that the majority of % potential users are sufficiently familar with UNIX that % these examples will not lead to undue confusion.}} @article{BonnBouh95, author = {J. F. Bonnans and M. Bouhtou}, title = {The trust region affine interior-point algorithm for convex and nonconvex quadratic-programming}, journal = RAIRO-OR, volume = 29, number = 2, pages = {195--217}, year = 1995, abstract = {We study from a theoretical and numerical point of view an interior point algorithm for quadratic QP using a trust region idea, formulated by \citebb{YeTse89}. We show that, under a nondegeneracy hypothesis the algorithm converges globally in the convex case. For a nonconvex problem, under a mild additional hypothesis, the sequence of points converges to a stationary point. We obtain also an asymptotic linear convergence rate for the cost that depends only on the dimension of the problem. Then we show that, provided some modifications are added to the basic algorithm, the method has a good numerical behaviour.}, summary = {A theoretical and numerical investigation of an interior point algorithm for quadratic programming using a trust-region scheme formulated by \citebb{YeTse89} is performed. Under a non-degeneracy hypothesis, the algorithm converges globally in the convex case. For a non-convex problem, the sequence of points converges to a stationary point under a mild additional assumption. An asymptotic linear convergence factor that depends only on the dimension of the problem is given. Provided simple modifications are made, the method behaves numerically well.}} @article{BonnGilbLemaSaga95, author = {J. F. Bonnans and J. Ch. Gilbert and C. Lemar\'{e}chal and C. A. Sagastiz\'{a}bal}, title = {A family of variable metric proximal methods}, journal = MPA, volume = 68, number = 1, pages = {15--47}, year = 1995} @techreport{BonnLaun92, author = {J. F. Bonnans and G. 
Launay}, title = {Implicit trust region algorithm for constrained optimization}, institution = {Institute Nationale Recherche Informatique et Automation}, address = {Le Chesnay, France}, year = 1992, abstract = {The authors study the convergence of sequential quadratic programming algorithms for the nonlinear programming problems. Assuming only that the direction is a stationary point of the current quadratic program they study the local convergence properties without strict complementarity. They obtain some global and superlinearly convergent algorithms. As a particular case they formulate an extension of Newton's method that is quadratically convergent to a point satisfying a strong sufficient second order condition.}, summary = {Convergence of sequential quadratic programming algorithms for the nonlinear programming problems is studied without assuming strict complementarity and global and superlinearly convergence results are obtained.}} @article{BonnLaun95, author = {J. F. Bonnans and G. Launay}, title = {Sequential Quadratic-Programming with Penalization of the Displacement}, journal = SIOPT, volume = 5, number = 4, pages = {792--812}, year = 1995, abstract = {In this paper, we study the convergence of a sequential quadratic programming algorithm for the nonlinear programming problem. The Hessian of the quadratic program is the sum of an approximation of the Lagrangian and of a multiple of the identity that allows us to penalize the displacement. Assuming only that direction is a stationary point of the current quadratic program we study the local convergence properties without strict complementarity. In particular, we use a very weak condition on the approximation of the Hessian to the Lagrangian. We obtain some global and superlinearly convergent algorithm under weak hypotheses. 
As a particular case we formulate an extension of Newton's method that is quadratically convergent to a point satisfying a strong sufficient second-order condition.}, summary = {The convergence of an SQP algorithm for nonlinear programming is considered. The Hessian of the QP is the sum of an approximation of the Lagrangian and of a multiple of the identity that penalizes the displacement. Assuming only that direction is a stationary point for the current QP, the algorithm is globally and superlinearly convergent without strict complementarity. As a particular case, an extension of Newton's method is given that is quadratically convergent to a point satisfying a strong sufficient second-order condition.}} @article{BonnPaniTitsZhou92, author = {J. F. Bonnans and E. Panier and A. L. Tits and J. L. Zhou}, title = {Avoiding the {M}aratos effect by means of a nonmonotone linesearch {II}. Inequality constrained problems---feasible iterates}, journal = SINUM, volume = 29, pages = {1187--1202}, year = 1992, abstract = {When solving inequality constrained optimization problems via Sequential Quadratic Programming (SQP), it is potentially advantageous to generate iterates that all satisfy the constraints: all quadratic programs encountered are then feasible and there is no need for a surrogate merit function. (Feasibility of the successive iterates is in fact required in many contexts such as in real-time applications or when the objective function is not defined outside the feasible set.) It has recently been shown that this, indeed, possible, by means of a suitable perturbation of the original SQP iteration, without loosing superlinear convergence. In this context, the well-known \citebb{Mara78} effect is coumpounded by the possible infeasibility of the full step of one even close to the solution. 
These difficulties have been accommodated by making use of a suitable modification of a ``bending'' technique proposed by \citebb{MaynPola82}, requiring evaluation of the constraints function at an auxiliary point at each iteration. In Part I of this two-part paper, it was shown that, when feasibility of the successive iterates is not required, the Maratos effect can be avoided by combining Mayne and Polak's technique with a non-monotone line search proposed by \citebb{GripLampLuci86} in the context of unconstrained optimization in such a way that, except possibly at a few early iterations, function evaluations are no longer performed at auxiliary points. In this second part, it is shown that feasibility can be restored without resorting to additional constraint evaluations, by adaptively estimating a bound on the second derivatives of the active constraints. Extension to constrained minimax problems is briefly discussed.}, summary = {When solving inequality constrained optimization problems via SQP, it may be advantageous to generate iterates that always satisfy the constraints. In this context, the Maratos effect is compounded by the possible infeasibility of a unit step even close to the solution. It is shown that feasibility can be restored without resorting to additional constraint evaluations, by adaptively estimating a bound on the second derivatives of the active constraints. The extension to constrained minimax problems is briefly discussed.}} @article{BonnPola97, author = {J. F. Bonnans and C. Pola}, title = {A Trust Region Interior Point Algorithm for Linearly Constrained Optimization}, journal = SIOPT, volume = 7, number = 3, pages = {717--731}, year = 1997, abstract = {We present an extension for nonlinear optimization under linear constraints of an algorithm for quadratic programming using a trust region idea, introduced by \citebb{YeTse89} and extended by \citebb{BonnBouh95}.
Due to the nonlinearity of the cost function, we use a linesearch in order to reduce the step if necessary. We prove that, under suitable hypotheses, the algorithm converges to a point satisfying the first-order optimality system, and we analyse under which conditions the unit stepsize will asymptotically be accepted.}, summary = {An extension of the trust-region quadratic programming algorithm of \citebb{YeTse89} and \citebb{BonnBouh95} to nonlinear optimization subject to linear constraints is given. A linesearch is used to reduce the step if necessary. Under suitable hypotheses, the algorithm converges to a first-order stationary point. Conditions under which the unit stepsize is asymptotically accepted are analysed.}} @article{BookDennFranSeraTorcTros99, author = {A. J. Booker and J. E. Dennis and P. D. Frank and D. B. Serafini and V. Torczon and M. W. Trosset}, title = {A Rigorous Framework for Optimization of Expensive Functions by Surrogates}, journal = {Structural Optimization}, volume = 17, number = 1, pages = {1--13}, year = 1999} % abstract = {The goal of the research reported on here is to develop % rigourous optimization algorithms to apply to some % engineering design problems for which direct application % of traditional optimization approaches is not practical. % This paper presents and analyzes a framework for % generating and managing a sequence of surrogate objective % functions to obtain convergence to a minimizer of an % expensive objective function subject to simple constraints. % The approach is widely applicable because it does not % require, or even approximate, derivatives of the objective. % Numerical results are presented for a 31-variable % helicopter rotor design example and for a standard % optimization test example. 
This is a brief description % of a portion of the Boeing/IBM/Rice University % collaboration, whose purpose is to develop effective % numerical methods for managing the use of approximation % concepts in design optimization.}, % abstract = {A framework is presented for generating and managing a % sequence of surrogate objective functions to obtain % convergence to a minimizer of an expensive objective % function subject to simple constraints. The approach is % widely applicable because it does not require, or even % approximate, derivatives of the objective. This is % especially useful in the context of engineering design % problems for which direct application of traditional % optimization approaches is not practical. Numerical % results are presented for a 31-variable helicopter % rotor design example and for a standard optimization % test example.}} @book{Boot64, author = {J. C. Boot}, title = {Quadratic Programming}, publisher = NH, address = NH-ADDRESS, year = 1964} @phdthesis{Borg94, author = {J. Borggaard}, title = {The sensitivity equation method for optimal design}, school = {Department of Mathematics, Virginia Polytechnic Institute and State University}, address = {Blacksburg, Virginia, USA}, year = 1994} @inproceedings{BorgBurn94, author = {J. Borggaard and J. Burns}, title = {A sensitivity equation approach to shape optimization in fluid flows}, booktitle = {Proceedings of the IMA Period of Concentration on Flow Control}, editor = {M. Gunzburger}, publisher = SPRINGER, address = SPRINGER-ADDRESS, pages = {49--78}, year = 1997, abstract = {In this paper we apply a sensitivity equation method to shape optimization problems. An algorithm is developed and tested on a problem of designing optimal forebody simulators for a 2D, inviscid supersonic flow. The algorithm uses a BFGS/Trust Region optimization scheme with sensitivities computed by numerically approximating the linear partial differential equations that determine the flow sensitivities.
Numerical examples are presented to illustrate the method.}, summary = {A sensitivity equation method is applied to shape optimization problems. An algorithm is developed and tested on a problem of designing optimal forebody simulators for a 2D, inviscid supersonic flow. The algorithm uses a BFGS/trust-region optimization scheme, with sensitivities computed by numerically approximating the linear partial differential equations that determine the flow sensitivities. Numerical examples are presented.}} @article{BorgBurn97, author = {J. Borggaard and J. Burns}, title = {A {PDE} sensitivity equation method for optimal aerodynamic design}, journal = {Journal of Computational Physics}, volume = 136, number = 2, pages = {366--384}, year = 1997, abstract = {The use of gradient based optimization algorithms in inverse design is well established as a practical approach to aerodynamic design. A typical procedure uses a simulation scheme to evaluate the objective function (from the approximate states) and its gradient, then passes this information to an optimization algorithm. Once the simulation scheme (CFD flow solver) has been selected and used to provide approximate function evaluations, there are several possible approaches to the problem of computing gradients. One popular method is to differentiate the simulation scheme and compute design sensitivities that are then used to obtain gradients. Although this black-box approach has many advantages in shape optimization problems, one must compute mesh sensitivities in order to compute the design sensitivity. In this paper, we present an alternative approach using the PDE sensitivity equation to develop algorithms for computing gradients. This approach has the advantage that mesh sensitivities need not be computed. Moreover, when it is possible to use the CFD scheme for both the forward problem and the sensitivity equation, then there are computational advantages. 
An apparent disadvantage of this approach is that it does not always produce consistent derivatives. However, for a proper combination of discretization schemes, one can show asymptotic consistency under mesh refinement, which is often sufficient to guarantee convergence of the optimal design algorithm. In particular, we show that when asymptotically consistent schemes are combined with a trust-region optimization algorithm, the resulting optimal design method converges. We denote this approach as the sensitivity equation method. The sensitivity equation method is presented, convergence results are given and the approach is illustrated on two optimal design problems involving shocks.}, summary = {An approach using the PDE sensitivity equation to develop algorithms for computing gradients in inverse design problems is considered, in which mesh sensitivities need not be computed. Moreover, it is advantageous when possible to use the CFD scheme for both the forward problem and the sensitivity equation. For a proper combination of discretization schemes, asymptotic consistency under mesh refinement is shown, which is often sufficient to guarantee convergence of the optimal design algorithm. In particular, when asymptotically consistent schemes are combined with a trust-region optimization algorithm, the resulting optimal design method converges. Such a method is described, convergence results are given, and the approach is illustrated on two optimal design problems involving shocks.}} @article{Borw82, author = {J. M. Borwein}, title = {Necessary and sufficient conditions for quadratic minimality}, journal = {Numerical Functional Analysis and Optimization}, volume = 5, pages = {127--140}, year = 1982} @article{Boua97, author = {A.
Bouaricha}, title = {Algorithm 765: {STENMIN}: a software package for large, sparse unconstrained optimization using tensor methods}, journal = TOMS, volume = 23, number = 1, pages = {81--90}, year = 1997, abstract = {We describe a new package for minimizing an unconstrained nonlinear function where the Hessian is large and sparse. The software allows the user to select between a tensor method and a standard method based upon a quadratic model. The tensor method models the objective function by a fourth-order model, where the third- and fourth-order terms are chosen such that the extra cost of forming and solving the model is small. The new contribution of this package consists of the incorporation of an entirely new way of minimizing the tensor model that makes it suitable for solving large, sparse optimization problems efficiently. The test results indicate that, in general, the tensor method is often more efficient and more reliable than the standard Newton method for solving large, sparse unconstrained optimization problems.}, summary = {A package is presented for minimizing an unconstrained nonlinear function where the Hessian is large and sparse. The software allows the user to select between a tensor method and a standard method based upon a quadratic model. The tensor method models the objective function by a fourth-order model, where the third- and fourth-order terms are chosen such that the extra cost of forming and solving the model is small. The contribution consists of the incorporation of a new way of minimizing the tensor model that is suitable for solving large, sparse problems. The test results indicate that the tensor method is often more efficient and more reliable than the standard Newton method.}} @article{BouaSchn97, author = {A. Bouaricha and R. B. 
Schnabel}, title = {Algorithm 768: {TENSOLVE}: a software package for solving systems of nonlinear equations and nonlinear least-squares problems using tensor methods}, journal = TOMS, volume = 23, number = 2, pages = {174--195}, year = 1997, abstract = {This article describes a modular software package for solving systems of nonlinear equations and nonlinear least-squares problems, using a new class of methods called tensor methods. It is intended for small- to medium-sized problems, say with up to 100 equations and unknowns, in cases where it is reasonable to calculate the Jacobian matrix or to approximate it by finite differences at each iteration. The software allows the user to choose between a tensor method and a standard method based on a linear model. The tensor method approximates $F(x)$ by a quadratic model, where the second-order term is chosen so that the model is hardly more expensive to form, store, or solve than the standard linear model. Moreover, the software provides two different global strategies: a line search approach and a two-dimensional trust region approach. Test results indicate that, in general, tensor methods are significantly more efficient and robust than standard methods on small-and medium-sized problems in iterations and function evaluations.}, summary = {A modular software package for solving systems of nonlinear equations and nonlinear least-squares problems, using tensor methods, is described. It is intended for small- to medium-sized problems for which it is reasonable to calculate the Jacobian or to approximate it by finite differences at each iteration. It allows the user to choose between a tensor method and a standard method based on a linear model. The tensor method approximates $F(x)$ by a quadratic model, where the second-order term is chosen so that the model is hardly more expensive to form, store, or solve than the linear model. The software provides both a linesearch and a two-dimensional trust-region approach. 
Test results indicate that tensor methods are significantly more efficient and robust than standard methods on small-and medium-sized problems in iterations and function evaluations.}} @article{BouaSchn98, author = {A. Bouaricha and R. B. Schnabel}, title = {Tensor methods for large sparse systems of nonlinear equations}, journal = MP, volume = 82, number = 3, pages = {377--412}, year = 1998, abstract = {This paper introduces tensor methods for solving large sparse systems of nonlinear equations. Tensor methods for nonlinear equations were developed in the context of solving small to medium-sized dense problems. They base each iteration on a quadratic model of the nonlinear equations, where the second-order term is selected so that the model requires no more derivative or function information per iteration than standard linear model-based methods, and hardly more storage or arithmetic operations per iteration. Computational experiments on small to medium-sized problems have shown tensor methods to be considerably more efficient than standard Newton based methods, with a particularly large advantage on singular problems. This paper considers the extension of this approach to solve large sparse problems. The key issue considered is how to make efficient use of sparsity in forming and solving the tensor model problem at each iteration. Accomplishing this turns out to require an entirely new way of solving the tensor model that successfully exploits the sparsity of the Jacobian, whether the Jacobian is nonsingular or singular. We develop such an approach and, based upon it, an efficient tensor method for solving large sparse systems of nonlinear equations. Test results indicate that this tensor method is significantly more efficient and robust than an efficient sparse Newton-based method, in terms of iterations, function evaluations, and execution time.}, summary = {This paper considers the extension of the tensor approach proposed in \citebb{BouaSchn97}. 
This requires a new way of solving the tensor model that exploits the sparsity of the Jacobian, whether singular or not.}} @article{BouaSchn99, author = {A. Bouaricha and R. B. Schnabel}, title = {Tensor methods for large sparse nonlinear least-squares}, journal = SISC, volume = 21, number = 4, pages = {1199--1221}, year = 1999} @book{BoydElGhFeroBala94, author = {S. Boyd and L. El{-}Ghaoui and E. Feron and V. Balakrishnan}, title = {Linear Matrix Inequalities in Systems and Control Theory}, publisher = SIAM, address = SIAM-ADDRESS, year = 1994} @techreport{Bran95, author = {M. A. Branch}, title = {Inexact reflective {N}ewton methods for large-scale optimization subject to bound constraints}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR 95-1543}, year = 1995, abstract = {This thesis addresses the problem of minimizing a large-scale nonlinear function subject to simple bound constraints. The most popular methods to handle bound constrained problems, active-set methods, introduce a combinatorial aspect to the problem. For these methods, the number of steps to converge may be related to the number of constraints. For large problems, this behavior is particularly detrimental. Reflective Newton methods avoid this problem by staying strictly within the constrained region. As a result, these methods have strong theoretical properties. Moreover, they behave experimentally like an unconstrained method: the number of steps to a solution is not strongly correlated with problem size. In this thesis, we discuss the reflective Newton approach and how it can be combined with inexact Newton techniques, within a subspace trust-region method, to efficiently solve large problems. Two algorithms are presented. The first uses a line search as its globalizing strategy. The second uses a strictly trust-region approach to globally converge to a local minimizer. Global convergence and rate of convergence results are established for both methods. 
We present computational evidence that using inexact Newton steps preserves the properties of the reflective Newton methods: the iteration counts are as low as when ``exact'' Newton steps are used. Also, both the inexact and exact methods are robust when the starting point is varied. Furthermore, the inexact reflective Newton methods have fast convergence when negative curvature is encountered, a trait not always shared by similar active-set type methods. The role of negative curvature is further explored by comparing the subspace trust-region approach to other common approximations to the full-space trust-region problem. On problems where only positive curvature is found, these trust-region methods differ little in the number of iterations to converge. However, for problems with negative curvature, the subspace method is more effective in capturing the negative curvature information, resulting in faster convergence. Finally a parallel implementation on the IBM SP2 is described and evaluated; the scalability and efficiency of this implementation are shown to be as good as the matrix-vector multiply routine it depends on.}, summary = {The problem of minimizing a large-scale nonlinear function subject to simple bound constraints using the reflective Newton approach is addressed, including how it can be combined with inexact Newton techniques within a subspace trust-region method. A linesearch and a trust-region algorithm are presented, that have fast convergence when negative curvature is encountered. The subspace trust-region approach is compared to other approximations to the trust-region subproblem. On problems where only positive curvature is found, these methods differ little in efficiency. For problems with negative curvature, the subspace method is more effective in capturing the negative curvature information, resulting in faster convergence. 
A parallel implementation on the IBM SP2 is evaluated, whose scalability and efficiency are as good as the matrix-vector multiply routine it depends on.}} @techreport{BranColeLi95, author = {M. A. Branch and T. F. Coleman and Y. Li}, title = {A subspace, interior, and conjugate gradient method for large-scale bound-constrained minimization problems}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR 95-1525}, year = 1995, abstract = {A subspace adaptation of the \citebb{ColeLi96b} trust region and interior method is proposed for solving large-scale bound-constrained minimization problems. This method can be implemented with either sparse Cholesky factorization or conjugate gradient computation. Under reasonable conditions the convergence properties of this subspace trust region method are as strong as those of its full-space version. Computational performance on various large-scale test problems are reported; advantages of our approach are demonstrated. Our experience indicates our proposed method represents an efficient way to solve large-scale bound-constrained minimization problems.}, summary = {A subspace adaptation of the method by \citebb{ColeLi96b} is proposed for solving large-scale bound-constrained minimization problems. This method can be implemented with either sparse Cholesky factorization or conjugate-gradients. The convergence properties of this subspace trust-region method are as strong as those of its full-space version. Computational performance on large-scale test problems illustrates its advantages.}} @book{ColeBranGrac99, author = {T. F. Coleman and M. A. Branch and A. Grace}, title = {Optimization Toolbox for Use with Matlab}, publisher = {The Math Works Inc.}, address = {Natick, Massachusetts, USA}, year = 1999} @article{Breg67, author = {L. M.
Bregman}, title = {The relaxation method for finding the common points of convex sets and its applications to the solution of problems in convex programming}, journal = {USSR Computational Mathematics and Mathematical Physics}, volume = 7, pages = {200--217}, year = 1967} @inproceedings{BreiShan93, author = {M. G. Breitfeld and D. F. Shanno}, title = {Preliminary computational experience with modified log-barrier functions for large-scale nonlinear programming}, crossref = {HageHearPard94}, pages = {45--66}} @article{BreiShan96, author = {M. G. Breitfeld and D. F. Shanno}, title = {Computational experience with penalty-barrier methods for nonlinear programming}, journal = AOR, volume = 62, pages = {439--463}, year = 1996} @article{Bren73, author = {R. P. Brent}, title = {Some efficient algorithms for solving systems of nonlinear equations}, journal = SINUM, volume = 10, number = 2, pages = {327--344}, year = 1973} @article{BrimLove91, author = {J. Brimberg and R. F. Love}, title = {Estimating travel distances by the weighted $\ell_p$ norm}, journal = {Naval Research Logistics}, volume = 38, pages = {241--259}, year = 1991 } @inproceedings{BroyAtti84, author = {C. G. Broyden and N. F. Attia}, title = {A smooth sequential penalty function method for nonlinear programming}, crossref = {BalaThom84}, pages = {237--245}} @article{BroyAtti88, author = {C. G. Broyden and N. F. Attia}, title = {Penalty functions, {N}ewton's method and quadratic programming}, journal = JOTA, volume = 58, number = 3, pages = {377--385}, year = 1988} @article{Brow69, author = {K. M. Brown}, title = {A quadratically convergent {N}ewton-like method based on {G}aussian elimination}, journal = SINUM, volume = 6, number = 4, pages = {560--569}, year = 1969} @article{Broy70, author = {C. G. Broyden}, title = {The Convergence Of A Class Of Double-rank Minimization Algorithms}, journal = JIMA, volume = 6, pages = {76--90}, year = 1970} @article{Buck78a, author = {A. G.
Buckley}, title = {A Combined Conjugate-Gradient Quasi-{N}ewton Minimization Algorithm}, journal = MP, volume = 15, pages = {200--210}, year = 1978} @article{Buck78b, author = {A. G. Buckley}, title = {Extending the relationship between the conjugate gradient and the {BFGS} algorithms}, journal = MP, volume = 15, pages = {343--348}, year = 1978} @article{BudiLeeSaxeFree96, author = {D. E. Budil and S. Lee and S. Saxena and J. H. Freed}, title = {Nonlinear-least-squares analysis of slow-motion {EPR}-spectra in one and 2 dimensions using a modified {L}evenberg-{M}arquardt algorithm}, journal = {Journal of Magnetic Resonance, Series A}, volume = 120, number = 2, pages = {155--189}, year = 1996, abstract = {The application of the model trust-region modification of the Levenberg-Marquardt minimization algorithm to the analysis of one-dimensional CW EPR and multidimensional Fourier-transform (FT) EPR spectra especially in the slow-motion regime is described. The dynamic parameters describing the slow motion are obtained from least-squares fitting of model calculations based on the stochastic Liouville equation (SLE) to experimental spectra, the trust-region approach is inherently more efficient than the standard Levenberg-Marquardt algorithm, and the efficiency of the procedure may be further increased by a separation-of-variables method in which a subset of fitting parameters is independently minimized at each iteration, thus reducing the number of parameters to be fitted by nonlinear least squares. A particularly useful application of this method occurs in the fitting of multicomponent spectra, for which it is possible to obtain the relative population of each component by the separation-of-variables method. 
These advantages, combined with recent improvements in the computational methods used to solve the SLE, have led to an order-of-magnitude reduction in computing time, and have made it possible to carry out interactive, real-time fitting on a laboratory workstation with a graphical interface. Examples of fits to experimental data will be given, including multicomponent CW EPR spectra as well as two- and three- dimensional FT EPR spectra, emphasis is placed on the analytic information available from the partial derivatives utilized in the algorithm, and how it may be used to estimate the condition and uniqueness of the fit, as well as to estimate confidence limits for the parameters in certain cases.}, summary = {The application of the model trust-region modification of the Levenberg-Morrison-Marquardt algorithm to the analysis of one-dimensional CW EPR and multidimensional Fourier-transform (FT) EPR spectra especially in the slow-motion regime is described. The dynamic parameters describing the motion are obtained from least-squares fitting of model calculations based on the stochastic Liouville equation (SLE) to experimental spectra. The trust-region approach is more efficient than the standard Levenberg-Morrison-Marquardt algorithm, and the efficiency of the procedure may be further increased by a separation-of-variables method in which a subset of fitting parameters is independently minimized at each iteration. An application is the fitting of multicomponent spectra, for which it is possible to obtain the relative population of each component by the separation-of-variables method. These advantages, combined with improvements in the computational solution of the SLE, have led to an order-of-magnitude reduction in computing time, and have made it possible to carry out interactive, real-time fitting on a laboratory workstation. 
Examples are given, including multicomponent CW EPR spectra as well as two- and three- dimensional FT EPR spectra.}} @inproceedings{BulsSillSaxe92, author = {A. B. Bulsari and M. Sillanpaa and H. Saxen}, title = {An expert system for continuous steel casting using neural networks}, booktitle = {Expert Systems in Mineral and Metal Processing. Proceedings of the IFAC Workshop}, editor = {J. L. Jamsa-Jounela and A. J. Niemi}, publisher = {Pergamon}, address = {Oxford, England}, pages = {155--159}, year = 1992, abstract = {Developing an expert system is often time consuming even after knowledge acquisition. Artificial neural networks offer an advantageous alternative to coding such knowledge in an expert system shell or writing a program for it. This paper illustrates the feasibility of using a feedforward neural network for knowledge storage and inferencing for an industrial problem. The inputs to the network were information about an incoming ladle of steel, and the output was about its suitability for successful continuous casting, giving an indication on whether problems would be encountered in the beginning and/or at the end of the casting. A trust-region optimisation method was used for training the networks, where the input-output relation of the nodes was given by a sigmoid function. This training method has been used successfully for other neural network problems and was found to be quite reliable and robust. By using a feedforward neural network as an expert system for predicting operational problems in the continuous steel casting process, some inconsistencies in the knowledge base were also revealed.}, summary = {A feedforward neural network for knowledge storage and inferencing is studied for an industrial problem. The inputs to the network receive information about an incoming ladle of steel, and the output predicts its suitability for successful continuous casting. A trust-region optimization method is used for training the network. 
This training method is found to be reliable and robust.}} @techreport{BultVial83, author = {J. P. Bulteau and J. P. Vial}, title = {Unconstrained Optimization by Approximation of a Projected Gradient Path}, institution = {CORE, UCL}, address = {Louvain-la-Neuve, Belgium}, type = {CORE Discussion Paper}, number = 8352, year = 1983, abstract = {In an earlier paper \citebb{BultVial87} discussed a general algorithm based on a one-dimensional search over a curvilinear path according to a trust-region scheme. This paper proposes a particular implementation of the general algorithm using as a particular path an approximation of the projected gradient path on a two dimensional space. This algorithm is endowed with attractive convergence properties. Newton and quasi-Newton like variants are discussed, with corresponding numerical experiments.}, summary = {\citebb{BultVial87} discuss a general algorithm based on a one-dimensional search over a curvilinear path according to a trust-region scheme. An implementation using an approximation of the projected gradient path on a two-dimensional space is given. This algorithm is endowed with attractive convergence properties. Newton and quasi-Newton like variants are discussed, with corresponding numerical experiments.}} @article{BultVial85, author = {J. P. Bulteau and J. P. Vial}, title = {A restricted trust region algorithm for unconstrained optimization}, journal = JOTA, volume = 47, number = 4, pages = {413--435}, year = 1985, abstract = {This paper proposes an efficient implementation of a trust-region-like algorithm. The trust region is restricted to an appropriately chosen two-dimensional subspace. Convergence properties are discussed and numerical results are reported.}, summary = {An efficient implementation of a trust-region method is proposed, in which the trust region is restricted to an appropriately chosen two-dimensional subspace. 
Convergence properties are discussed and numerical results are reported.}} @article{BultVial87, author = {J. P. Bulteau and J. P. Vial}, title = {Curvilinear path and trust region in unconstrained optimization---a convergence analysis}, journal = MPS, volume = 30, pages = {82--101}, year = 1987, abstract = {In this paper we propose a general algorithm for solving unconstrained optimization problems. The basic step of the algorithm consists in finding a "good" successor point to the current iterate by choosing it along a curvilinear path within a trust region. This scheme is due to \citebb{Powe70c} and has been applied by \citebb{Sore82} to a particular type of path. We give a series of properties that an arbitrary path should satisfy in order to achieve global convergence and fast asymptotical convergence. We review various paths that have been proposed in the literature and study the extent to which they satisfy our properties.}, summary = {A general algorithm for unconstrained optimization is proposed. Its basic step consists in finding a "good" successor point to the current iterate by choosing it along a curvilinear path within a trust region. Properties that an arbitrary path should satisfy in order to achieve global convergence and fast asymptotic convergence are given. Various paths that have been proposed in the literature are reviewed in this light.}} @article{Bunc74, author = {J. R. Bunch}, title = {Partial pivoting strategies for symmetric matrices}, journal = SINUM, volume = 11, pages = {521--528}, year = 1974} @article{BuncGayWels93, author = {D. S. Bunch and D. M. Gay and R. E. 
Welsch}, title = {Algorithm 717: subroutines for maximum likelihood and quasi-likelihood estimation of parameters in nonlinear regression models}, journal = TOMS, volume = 19, number = 1, pages = {109--130}, year = 1993, abstract = {The authors present FORTRAN 77 subroutines that solve statistical parameter estimation problems for general nonlinear models, e.g., nonlinear least-squares, maximum likelihood, maximum quasi-likelihood, generalized nonlinear least-squares, and some robust fitting problems. The accompanying test examples include members of the generalized linear model family, extensions using nonlinear predictors ('nonlinear GLIM'), and probabilistic choice models, such as linear-in-parameter multinomial probit models. The basic method, a generalization of the NL2SOL algorithm for nonlinear least-squares, employs a model/trust-region scheme for computing trial steps, exploits special structure by maintaining a secant approximation to the second-order part of the Hessian, and adaptively switches between a Gauss-Newton and an augmented Hessian approximation. Gauss-Newton steps are computed using a corrected seminormal equations approach. The subroutines include variants that handle simple bounds on the parameters, and that compute approximate regression diagnostics.}, summary = {FORTRAN 77 subroutines are presented that solve statistical parameter estimation problems for general nonlinear models, e.g., nonlinear least-squares, maximum likelihood, maximum quasi-likelihood, generalized nonlinear least-squares, and some robust fitting problems. The basic method, a generalization of the NL2SOL algorithm for nonlinear least-squares, employs a model/trust-region scheme for computing trial steps, maintains a secant approximation to the second-order part of the Hessian, and adaptively switches between Gauss-Newton and full Newton approximations. Gauss-Newton steps are computed using a corrected seminormal equations approach.
The subroutines include variants that handle simple bounds on the parameters, and that compute approximate regression diagnostics.}} @article{BuncKauf77, author = {J. R. Bunch and L. C. Kaufman}, title = {Some stable methods for calculating inertia and solving symmetric linear equations}, journal = MC, volume = 31, pages = {163--179}, year = 1977} @article{BuncKauf80, author = {J. R. Bunch and L. C. Kaufman}, title = {A computational method for the indefinite quadratic programming problem}, journal = LAA, volume = 34, pages = {341--370}, year = 1980} @article{BuncParl71, author = {J. R. Bunch and B. N. Parlett}, title = {Direct methods for solving symmetric indefinite systems of linear equations}, journal = SINUM, volume = 8, number = 4, pages = {639--655}, year = 1971} @article{Burk90, author = {J. V. Burke}, title = {On the identification of active constraints {II}: the nonconvex case}, journal = SINUM, volume = 27, number = 4, pages = {1081--1102}, year = 1990, abstract = {In this paper the results of \citebb{BurkMore88} on the identification of active constraints are extended to the nonconvex constrained nonlinear programming problem. The approach is motivated by the geometric structure of certain polyhedral convex ``linearization'' of the constraint region at each iteration. As in \citebb{BurkMore88} questions of constraint identification are couched in terms of the faces of these polyhedra. The main result employs a nondegeneracy condition due to \citebb{Dunn87} and the linear independence condition to obtain a characterization of those algorithms that identify the optimal active constraints in a finite number of iterations.
The role of the linear independence condition is carefully examined and it is argued that it is required within the context of the \citebb{Wils63}--\citebb{Han77}--\citebb{Powe78} sequential quadratic programming algorithm and \citebb{Flet87}'s $QL$ algorithm.}, summary = {The results of \citebb{BurkMore88}, on the identification of active constraints, are extended to non-convex constrained programming. The approach is motivated by the geometric structure of certain polyhedral convex ``linearization'' of the constraint region at each iteration. Questions of constraint identification are couched in terms of the faces of these polyhedra. The main result employs a non-degeneracy condition due to \citebb{Dunn87} and the linear independence condition to obtain a characterization of those algorithms that identify the optimal active constraints in a finite number of iterations. It is argued that the linear independence condition is required for the Wilson--Han--Powell SQP algorithm and for Fletcher's $QL$ algorithm.}} @article{Burk92, author = {J. V. Burke}, title = {A Robust Trust Region Method for Constrained Nonlinear Programming Problems}, journal = SIOPT, volume = 2, number = 2, pages = {324--347}, year = 1992, abstract = {Most of the published work on trust region algorithms for constrained optimization is derived from the original work of \citebb{Flet87} on trust region algorithms for nondifferentiable exact penalty functions. These methods are restricted to applications where a reasonable estimate of the magnitude of the optimal Kuhn-Tucker multiplier vector can be given. More recently an effort has been made to extend the trust region methodology to the sequential quadratic programming (SQP) algorithm of \citebb{Wils63}, \citebb{Han77} and \citebb{Powe78}. All of these extensions to the Wilson--Han--Powell SQP algorithm consider only the equality-constrained case and require strong global regularity hypotheses.
This paper presents a general framework for trust region algorithms for constrained problems that does not require such regularity hypotheses and allows very general constraints. The approach is modeled on the one given by Powell for convex composite optimization problems and is driven by linear subproblems that yield viable estimates for the value of an exact penalty parameter. These results are applied to the Wilson--Han--Powell SQP algorithm and Fletcher's S$\ell_1$QP algorithm. Local convergence results are also given.}, summary = {A general framework for trust-region algorithms for constrained problems is presented, that does not require strong regularity hypotheses and allows very general constraints. The approach is modeled on the one given by Powell for convex composite optimization problems and is driven by linear subproblems that yield viable estimates for the value of an exact penalty parameter. These results are applied to the Wilson-Han-Powell SQP algorithm and Fletcher's S$\ell_1$QP algorithm. Local convergence results are given.}} @article{BurkHan89, author = {J. V. Burke and S. P. Han}, title = {A robust sequential quadratic-programming method}, journal = MP, volume = 43, number = 3, pages = {277--303}, year = 1989} @article{BurkMore88, author = {J. V. Burke and J. J. Mor\'{e}}, title = {On the identification of active constraints}, journal = SINUM, volume = 25, number = 5, pages = {1197--1211}, year = 1988, abstract = {Nondegeneracy conditions that guarantee that the optimal active constraints are identified in a finite number of iterations are studied. Results of this type have only been established for a few algorithms, and then under restrictive hypothesis. The main result is a characterization of those algorithms that identify the optimal constraints in a finite number of iterations. 
This result is obtained with a non-degeneracy assumption which is equivalent, in the standard nonlinear programming problem, to the assumption that there is a set of strictly complementary Lagrange multipliers. As an important consequence of the authors' results the way that this characterization applies to gradient projection and sequential quadratic programming algorithms is shown.}, summary = {Non-degeneracy conditions that guarantee that the optimal active constraints are identified in a finite number of iterations are studied and a characterization of those algorithms that identify the optimal constraints in a finite number of iterations is derived. This result is obtained with a non-degeneracy assumption which is equivalent, in the standard nonlinear programming problem, to the assumption that there is a set of strictly complementary Lagrange multipliers. As an important consequence, the way that this characterization applies to gradient projection and sequential quadratic programming algorithms is shown.}} @article{BurkMoreTora90, author = {J. V. Burke and J. J. Mor\'{e} and G. Toraldo}, title = {Convergence properties of trust region methods for linear and convex constraints}, journal = MPA, volume = 47, number = 3, pages = {305--336}, year = 1990, abstract = {We develop a convergence theory for convex and linearly constrained trust region methods which only requires that the step between iterates produce a sufficient reduction in the trust region subproblem. Global convergence is established for a general convex minimization problem while local analysis is for linearly constrained problems. The main local result establishes that if the sequence converges to a nondegenerate stationary point then the active constraints at the solution are identified in a finite number of iterations. As a consequence of the identification properties, we develop rate of convergence results by assuming that the step is a truncated Newton method. 
Our development is mainly geometrical; this approach allows the development of a convergence theory without any linear independence assumptions.}, summary = {A convergence theory is developed for convex and linearly constrained trust-region methods which only requires that the step between iterates produce a sufficient reduction in the subproblem. Global convergence is established for a general convex problem while local analysis is for linearly constrained problems. It is shown that if the sequence converges to a non-degenerate stationary point then the active constraints at the solution are identified in a finite number of iterations. As a consequence, rate of convergence results are developed by assuming that the step is a truncated Newton method. This development is mainly geometrical; such an approach allows the development of a convergence theory without any linear independence assumptions.}} @article{BurkMore95, author = {J. V. Burke and J. J. Mor\'{e}}, title = {Exposing Constraints}, journal = SIOPT, volume = 4, number = 3, pages = {573--595}, year = 1994, abstract = {The development of algorithms and software for the solution of large-scale optimization problems has been the main motivation behind the research on the identification properties of optimization algorithms. The aim of an identification result for a linearly constrained problem is to show that if the sequence generated by an optimization algorithm converges to a stationary point, then there is a nontrivial face $F$ of the feasible set such that after a finite number of iterations, the iterates enter and remain in the face $F$. The paper develops the identification properties of linearly constrained optimization algorithms without any nondegeneracy or linear independence assumptions. The main result shows that the projected gradient converges to zero if and only if the iterates enter and remain in the face exposed by the negative gradient. 
This result generalizes results of \citebb{BurkMore88} for nondegenerate cases.}, summary = {The identification properties of linearly constrained optimization algorithms are developed without any non-degeneracy or linear independence assumptions. It is shown that the projected gradient converges to zero if and only if the iterates enter and remain in the face exposed by the negative gradient, which generalizes results of \citebb{BurkMore88} for non-degenerate cases.}} @misc{BurkWeig97, author = {J. V. Burke and A. Weigmann}, title = {Notes on Limited Memory BFGS Updating in A Trust-Region Framework}, institution = {Department of Mathematics, University of Washington}, address = {Seattle, Washington, USA}, year = 1997, abstract = {The limited memory BFGS method pioneered by Jorge Nocedal is usually implemented as a line search method where the search direction is computed from a BFGS approximation to the inverse of the Hessian. The advantage of inverse updating is that the search directions are obtained by a matrix-vector multiplication. Furthermore, experience shows that when the BFGS approximation is appropriately re-scaled (or re-sized) at each iteration, the line search stopping criteria are often satisfied for the first trial step. In this note it is observed that limited memory updates to the Hessian approximations can also be applied in the context of a trust-region algorithm with only modest increase in the linear algebra costs. This is true even though in the trust-region framework one maintains approximations to the Hessian rather than its inverse. The key to this observation is the compact form of the limited memory updates derived by Byrd, Nocedal and Schnabel (1994).
Numerical results on a few of the MINPACK-2 test problems indicate that an implementation that incorporates re-scaling directly into the trust-region updating procedure exhibits convergence behavior comparable to a standard implementation of the algorithm by \citebb{LiuNoce89}.}, summary = {A limited-memory BFGS method is described that uses re-scaling at each iteration and a trust-region technique to ensure convergence. The effects of a non-monotone technique as well as that of an implicit scheme for updating the trust-region radius are discussed. Numerical experiments are reported.}} @article{ButcJackMitt97, author = {J. C. Butcher and Z. Jackiewicz and H. D. Mittelmann}, title = {A nonlinear optimization approach to the construction of general linear methods of high order}, journal = JCAM, volume = 81, number = 2, pages = {181--196}, year = 1997, abstract = {We describe the construction of diagonally implicit multistage integration methods of order and stage order $p=q=7$ and $p=q=8$ for ordinary differential equations. These methods were obtained using state-of-the-art optimization methods, particularly variable-model trust-region least-squares algorithms.}, summary = {The construction of diagonally implicit multistage integration methods of order and stage order $p=q=7$ and $p=q=8$ for ordinary differential equations is described. These methods were obtained using variable-model trust-region least-squares algorithms.}} @article{Byrd90, author = {R. H. Byrd}, title = {On the Convergence of Constrained Optimization Methods with Accurate {H}essian Information on a Subspace}, journal = SINUM, volume = 27, number = 1, pages = {141--153}, year = 1990} @misc{Byrd99, author = {R. H.
Byrd}, title = {Step Computation in a Trust Region Interior Point Method}, howpublished = {Presentation at the First Workshop on Nonlinear Optimization ``Interior-Point and Filter Methods'', Coimbra, Portugal}, year = 1999, abstract = {In computing a step on an interior point method for nonlinear inequality constrained optimization, use of a trust region provides a unified structure for dealing with negative curvature and rank deficiency. However, in an interior point context, exactly solving the complete trust region subproblem step presents us with an intractable subproblem. Therefore, most practical methods use a cheap approximate solution to the trust region problem. Here, we consider several of these approximate methods, and point out that methods that are theoretically adequate can have serious drawbacks. We present some new approaches to this problem and argue that in many cases simpler is better. We also consider to what extent these approximate approaches provide the benefits promised by trust regions in the cases of negative curvature and rank deficiency.}, summary = {Approximate methods for computing a trust-region step in an interior point method for nonlinear inequality constrained optimization are considered, and some new approaches proposed. The extent to which they provide the benefits promised by trust regions in the cases of negative curvature and rank deficiency are also discussed.}} @article{ByrdNoce91, author = {R. H. Byrd and J. Nocedal}, title = {An analysis of reduced {H}essian methods for constrained optimization}, journal = MP, volume = 49, number = 3, pages = {285--323}, year = 1991} @techreport{ByrdGilbNoce96, author = {R. H. Byrd and J. Ch. Gilbert and J. 
Nocedal}, title = {A Trust Region Method Based on Interior Point Techniques for Nonlinear Programming}, institution = {INRIA}, address = {Rocquencourt, France}, number = 2896, year = 1996, abstract = {An algorithm for minimizing a nonlinear function subject to nonlinear equality and inequality constraints is described. It can be seen as an extension of primal interior point methods to non-convex optimization. The new algorithm applies sequential quadratic programming techniques to a sequence of barrier problems, and uses trust regions to ensure the robustness of the iteration and to allow the direct use of second order derivatives. An analysis of the convergence of the new method is presented.}, summary = {An algorithm for minimizing a nonlinear function subject to nonlinear equality and inequality constraints is described. It can be seen as an extension of primal interior-point methods to non-convex optimization. The algorithm applies SQP techniques to a sequence of barrier problems, and uses trust regions to ensure the robustness of the iteration and to allow the direct use of second order derivatives. A convergence analysis is presented.}} @article{ByrdHribNoce99, author = {R. H. Byrd and M. E. Hribar and J. Nocedal}, title = {An Interior Point Algorithm for Large Scale Nonlinear Programming}, journal = SIOPT, volume = 9, number = 4, pages = {877--900}, year = 2000, abstract = {We describe a new algorithm for solving large nonlinear programming problems. It incorporates within the interior point method two powerful tools for solving nonlinear problems: sequential quadratic programming (SQP) and trust region techniques. SQP ideas are used to efficiently handle nonlinearities in the constraints. Trust region strategies allow the algorithm to treat convex and non-convex problems uniformly, permit the direct use of second derivative information and provide a safeguard in the presence of nearly dependent constraint gradients.
Both primal and primal-dual versions of the algorithm are developed, and their performance is compared with that of LANCELOT on a set of large and difficult nonlinear problems.}, summary = {An algorithm for solving large nonlinear programming problems is described. It incorporates SQP and trust-region techniques within the interior-point method. SQP ideas are used to efficiently handle nonlinearities in the constraints. Trust-region strategies allow the algorithm to treat convex and non-convex problems uniformly, permit the direct use of second derivative information and provide a safeguard in the presence of nearly dependent constraint gradients. Both primal and primal-dual versions of the algorithm are developed, and their performance is compared with that of {\sf LANCELOT} on a set of large and difficult nonlinear problems.}} @techreport{ByrdNoceWalt00, author = {R. H. Byrd and J. Nocedal and R. A. Waltz}, title = {Feasible Interior Methods Using Slacks for Nonlinear Optimization}, institution = OTC, address = OTC-ADDRESS, number = 11, year = 2000, abstract = {A slack-based feasible interior point method is described which can be derived as a modification of infeasible methods. The modification is minor for most line search methods, but trust region methods require special attention. It is shown how the Cauchy point, which is often computed in trust region methods, must be modified so that the feasible method is effective for problems containing both equality and inequality constraints. The relationship between slack-based methods and traditional feasible methods is discussed. Numerical results showing the relative performance of feasible versus infeasible interior point methods are presented.}, summary = {A slack-based feasible interior-point method is described which can be derived as a modification of infeasible methods. The modification is minor for most line search methods, but trust-region methods require special attention. 
It is shown how the Cauchy point must be modified so that the feasible trust-region method is effective for problems containing both equality and inequality constraints. The relationship between slack-based methods and traditional feasible methods is discussed. Numerical results showing the relative performance of feasible versus infeasible interior-point methods are presented.}} @article{ByrdKhalSchn96, author = {R. H. Byrd and H. F. Khalfan and R. B. Schnabel}, title = {Analysis of a symmetric rank-one trust region method}, journal = SIOPT, volume = 6, number = 4, pages = {1025--1039}, year = 1996, abstract = {This paper analyzes a trust region version of the symmetric rank-one (SR1) method for unconstrained optimization and shows that the method has an $n+1$ step $q$-superlinear rate of convergence. The analysis makes neither of the assumptions of uniform linear independence of the iterates nor positive definiteness of the Hessian approximations that have been made in other recent analysis of SR1 methods. The trust region method that is analyzed is fairly standard, except that it includes the feature that the Hessian approximation is updated after all steps, including rejected steps. We also present computational results that show that this feature, safeguarded in a way that is consistent with the convergence analysis, does not harm the efficiency of the SR1 trust region method.}, summary = {Computational studies have considered the symmetric rank-one (SR1) method for unconstrained optimization and shown that the method has an $n+1$ step $q$-superlinear rate of convergence. The proposed analysis makes neither of the assumptions of uniform linear independence of the iterates nor positive definiteness of the Hessian approximations that have been made in former such analyses. The trust-region method is standard, but requires the Hessian approximation to be updated after all steps, including rejected ones.
Computational results indicate that this feature, safeguarded in a way that is consistent with the convergence analysis, does not harm the efficiency of the SR1 trust-region method.}} @article{ByrdLuNoceZhu95, author = {R. H. Byrd and P. Lu and J. Nocedal and C. Zhu}, title = {A limited memory algorithm for bound constrained optimization}, journal = SISC, volume = 16, number = 5, pages = {1190--1208}, year = 1995} @article{ByrdSchn86, author = {R. H. Byrd and R. B. Schnabel}, title = {Continuity of the null space basis and constrained optimization}, journal = MP, volume = 35, number = 1, pages = {32--41}, year = 1986} @article{ByrdSchnShul87, author = {R. H. Byrd and R. B. Schnabel and G. A. Shultz}, title = {A trust region algorithm for nonlinearly constrained optimization}, journal = SINUM, volume = 24, pages = {1152--1170}, year = 1987, abstract = {We present a trust region-based method for the general nonlinearly constrained optimization problem. The method works by iteratively minimizing a quadratic model of the Lagrangian subject to a possibly relaxed linearization of the problem constraints and a trust region constraint. The model minimization may be done approximately with a dogleg-type approach. We show that this method is globally convergent even if singular or indefinite Hessian approximations are made. A second order correction step that brings the iterates closer to the feasible set is described. If sufficiently precise Hessian information is used, this correction step allows us to prove that the method is also locally quadratically convergent, and that the limit satisfies the second order necessary conditions for constrained optimization. An example is given to show that, without this correction, a situation similar to the \citebb{Mara78} effect may occur where the iteration is unable to move away from a saddle point.}, summary = {A trust-region-based method is given for general nonlinearly constrained optimization problem. 
It iteratively minimizes a quadratic model of the Lagrangian subject to a possibly relaxed linearization of the problem constraints and a trust-region constraint. The model minimization may be done approximately with a dogleg-type approach. Global convergence is shown. A second order correction step is also described. If sufficiently precise Hessian information is used, this step ensures local quadratic convergence and satisfaction of the second order necessary conditions. An example shows that, without this correction, a situation similar to the \citebb{Mara78} effect may occur where the iteration is unable to move away from a saddle point.}} @article{ByrdSchnShul88, author = {R. H. Byrd and R. B. Schnabel and G. A. Shultz}, title = {Approximate solution of the trust region problem by minimization over two-dimensional subspaces}, journal = MP, volume = 40, number = 3, pages = {247--263}, year = 1988, abstract = {The trust region problem, minimization of a quadratic function subject to a spherical trust region constraint, occurs in many optimization algorithms. In a previous paper, the authors introduced an inexpensive approximate solution technique for this problem that involves the solution of a two-dimensional trust region problem. They showed that using this approximation in an unconstrained optimization algorithm leads to the same theoretical global and local convergence properties as are obtained using the exact solution to the trust region problem. This paper reports computational results showing that the two-dimensional minimization approach gives nearly optimal reductions in the $n$-dimensional quadratic model over a wide range of test cases. We also show that there is very little difference, in efficiency and reliability, between using the approximate or exact trust region step in solving standard test problems for unconstrained optimization.
These results may encourage the application of similar approximate trust region techniques in other contexts.}, summary = {Computational results are given, showing that the two-dimensional minimization approach of \citebb{ByrdSchnShul87} gives nearly optimal reductions in the $n$-dimensional quadratic model over a wide range of test cases. It is also shown that there is very little difference, in efficiency and reliability, between using the approximate or exact trust-region step when solving standard test problems for unconstrained optimization.}} @article{ByrdTapiZhan92, author = {R. H. Byrd and R. A. Tapia and Y. Zhang}, title = {An {SQP} Augmented {L}agrangian {BFGS} Algorithm for Constrained Optimization}, journal = SIOPT, volume = 2, number = 2, pages = {210--241}, year = 1992} %%% C %%% @article{CalaMore87, author = {P. H. Calamai and J. J. Mor\'{e}}, title = {Projected gradient methods for linearly constrained problems}, journal = MP, volume = 39, pages = {93--116}, year = 1987} @article{CarpLustMulvShan93, author = {T. J. Carpenter and I. J. Lustig and J. M. Mulvey and D. F. Shanno}, title = {Higher-Order Predictor-Corrector Interior Point Methods with Application to Quadratic Objectives}, journal = SIOPT, volume = 3, number = 4, pages = {696--725}, year = 1993} @phdthesis{Carr59, author = {C. W. Carroll}, title = {An operations research approach to the economic optimization of a kraft pulping process}, school = {Institute of Paper Chemistry}, address = {Appleton, Wisconsin, USA}, year = 1959} @article{Carr61, author = {C. W. Carroll}, title = {The Created Response Surface Technique for Optimizing Nonlinear Restrained Systems}, journal = {Operations Research}, volume = 9, number = 2, pages = {169--184}, year = 1961} @techreport{Cart86, author = {R. G.
Carter}, title = {Multi-Model Algorithms for Optimization}, institution = CAAM, address = RICE-ADDRESS, number = {TR86-3}, year = 1986, abstract = {A recent approach for the construction of nonlinear optimization software has been to allow an algorithm to choose between two possible models to the objective function at each iteration. The model switching algorithm NL2SOL of \citebb{DennGayWels81} and the hybrid algorithms of Al-Baali and Fletcher have proven highly effective in practice. Although not explicitly formulated as multi-model methods, many other algorithms implicitly perform a model switch under certain circumstances (e.g., resetting a secant model to the exact value of the Hessian). We present a trust region formulation for multi-model methods which allows the efficient incorporation of an arbitrary number of models. Global convergence can be shown for three classes of algorithms under different assumptions on the models. First, essentially any multi-model algorithm is globally convergent if each of the models is sufficiently well-behaved. Second, algorithms based on the central feature of the NL2SOL switching system are globally convergent if one model is well behaved and each other model obeys a ``sufficient predicted decrease'' condition. No requirement is made that these alternate models be quadratic. Third, algorithms of the second type which directly enforce the ``sufficient predicted decrease'' condition are globally convergent if a single model is sufficiently well behaved.}, summary = {A trust-region formulation for multi-model methods is presented which allows the efficient incorporation of an arbitrary number of models. Global convergence is established for three classes of algorithms under different assumptions on the models. Firstly, essentially any multi-model algorithm is globally convergent if each of the models is sufficiently well-behaved. 
Secondly, algorithms based on the central feature of the NL2SOL switching system are globally convergent if one model is well behaved and each other model obeys a ``sufficient predicted decrease'' condition. No requirement is made that these alternate models be quadratic. Finally, algorithms of the second type which directly enforce the ``sufficient predicted decrease'' condition are globally convergent if a single model is sufficiently well behaved.}} @techreport{Cart87, author = {R. G. Carter}, title = {Safeguarding {H}essian Approximations in Trust Region Algorithms}, institution = CAAM, address = RICE-ADDRESS, number = {TR87-12}, year = 1987, abstract = {In establishing global convergence results for trust region algorithms applied to unconstrained optimization, it is customary to assume either a uniform upper bound on the sequence of Hessian approximations or an upper bound linear in the iteration count. The former property has not been established for most commonly used secant updates, and the latter has only been established for some updates under the highly restrictive assumption of convexity. One purpose of the uniform upper bound assumption is to establish a technical condition we refer to as the \emph{uniform predicted decrease condition}. We show that this condition can also be obtained by milder assumptions, the simplest of which is a uniform upper bound on the sequence of Rayleigh quotients of the Hessian approximations \emph{in the gradient directions}. This in turn suggests both a simple procedure for detecting questionable Hessian approximations, and several natural procedures for {\em correcting} them when detected. In numerical testing, one of these procedures increased the reliability of the popular BFGS method by a factor of two (i.e., the procedure halved the number of test cases to fail to converge to a critical point in a reasonable number of iterations). 
Further, for those problems where both methods were successful, this safeguarding procedure actually improved the average efficiency of the BFGS by ten to twenty percent.}, summary = {It is shown that the assumptions on the Hessian approximations in a trust-region method for unconstrained optimization can be replaced by a uniform upper bound on the sequence of Rayleigh quotients of the Hessian approximations in the gradient directions. This suggests both a simple procedure for detecting questionable approximations, and several natural procedures for correcting them when detected. In numerical tests, one of these procedures increased the reliability of the BFGS method by a factor of two. For those problems where both the safeguarded and original methods were successful, this safeguarding procedure improved the average efficiency of the BFGS by ten to twenty percent.}} @article{Cart91, author = {R. G. Carter}, title = {On the global convergence of trust region methods using inexact gradient information}, journal = SINUM, volume = 28, number = 1, pages = {251--265}, year = 1991, abstract = {Trust region algorithms are an important class of methods that can be used to solve unconstrained optimization problems. Strong global convergence results are demonstrated for a class of methods where the gradient values are approximated rather than computed exactly, provided they obey a simple relative error condition. No requirement is made that gradients be recomputed to successively greater accuracy after unsuccessful iterations.}, summary = {Strong global convergence results are demonstrated for trust-region methods for unconstrained minimization where the gradient values are approximated rather than computed exactly, provided they obey a simple relative error condition. No requirement is made that gradients be recomputed to successively greater accuracy after unsuccessful iterations.}} @article{Cart93, author = {R. G. 
Carter}, title = {Numerical Experience with a class of Algorithms for nonlinear Optimization using inexact function and gradient information}, journal = SISSC, volume = 14, number = 2, pages = {368--388}, year = 1993, abstract = {For optimization problems associated with engineering design, parameter estimation, image reconstruction, and other optimization/simulation applications, low accuracy function and gradient values are frequently much less expensive to obtain than high accuracy values. The computational performance of trust region methods for nonlinear optimization is investigated for cases when high accuracy evaluations are unavailable or prohibitively expensive, and earlier theoretical predictions that such methods are convergent even with relative gradient errors of 0.5 or more is confirmed. The proper choice of the amount of accuracy to use in function and gradient evaluations can result in orders-of-magnitude savings in computational cost.}, summary = {The computational performance of trust-region methods for nonlinear optimization is investigated for cases when high accuracy evaluations of function and gradient are unavailable or prohibitively expensive, and theoretical predictions that such methods are convergent even with relative gradient errors of 0.5 or more is confirmed. The proper choice of the amount of accuracy to use in function and gradient evaluations can result in orders-of-magnitude savings in computational cost.}} @phdthesis{Case97, author = {L. Case}, title = {An $\ell_1$ penalty function approach to the nonlinear bilevel programming problem}, school = {University of Waterloo}, address = {Waterloo, Canada}, year = 1997, summary = {The nonlinear bilevel problem is a difficult constrained optimization problem where the variables are partitioned into two sets, $z$ and $y$. The feasibility conditions require that y is the solution of a separate optimization problem. 
The approach of this thesis replaces the original problem by stating the necessary conditions for a solution and determining a one level programming problem by using an exact penalty function to attempt to satisfy these conditions. The resulting non-convex, non-smooth problems are solved by a trust-region approach and specialized techniques are used to overcome difficulties arising from the non-differentiability. A unique method is developed to handle degeneracy. Proof of convergence to a minimum of the penalty function is given. Test results and an analysis of the solutions are included. }} @article{Cauc47, author = {A. Cauchy}, title = {M\'{e}thode g\'{e}n\'{e}rale pour la r\'{e}solution des syst\`{e}mes d'\'{e}quations simultan\'{e}es}, journal = {Comptes Rendus de l'Acad\'{e}mie des Sciences}, pages = {536--538}, year = 1847} @techreport{Celi85, author = {M. R. Celis}, title = {A Trust Region Strategy for Nonlinear Equality Constrained Optimization.}, institution = CAAM, address = RICE-ADDRESS, number = {TR85-4}, year = 1985, abstract = {Many current algorithms for nonlinear constrained optimization problems determine a search direction by solving a quadratic programming subproblem. The global convergence properties are addressed by using a line search technique and a merit function to modify the length of the step obtained from the quadratic program. In unconstrained optimization, trust region strategies have been very successful. In this thesis we present a new approach for equality constrained optimization problems based on a trust region strategy. The direction selected is not necessarily the solution of the standard quadratic programming subproblem.}, summary = {An approach for equality constrained optimization problems based on a trust-region strategy is presented. The direction selected is not necessarily the solution of the standard quadratic programming subproblem.}} @inproceedings{CeliDennTapi85, author = {M. R. Celis and J. E. Dennis and R. A. 
Tapia}, title = {A trust region strategy for nonlinear equality constrained optimization}, crossref = {BoggByrdSchn85}, pages = {71--82}, abstract = {Many current algorithms for nonlinear constrained optimization problems determine a direction by solving a quadratic programming subproblem. The global convergence properties are addressed by using a line search technique and a merit function to modify the length of the step obtained from the quadratic program. In unconstrained optimization, trust regions strategies have been very successful. In this paper, we present a new approach for equality constrained optimization problems based on a trust region strategy. The direction selected is not necessarily the solution of the standard quadratic programming subproblem.}, summary = {As for \citebb{Celi85}.}} @article{CesaAgreHelgJorgJens91, author = {A. Cesar and H. Agren and T. Helgaker and P. Jorgensen and H. J. A. Jensen}, title = {Excited-State Structures and Vibronic Spectra of {H}$_2${CO}$_+$, {HDCO}$_+$, and {D}$_2${CO}$_+$ Using Molecular Gradient and {H}essian Techniques}, journal = {Journal of Chemical Physics}, volume = 95, number = 8, pages = {5906--5917}, year = 1991, abstract = {We choose H$_2$CO$_+$ and its deuterated species to demonstrate the potential for using second-order multiconfigurational self-consistent field theory to optimize structures and calculate properties of ionized and excited states. We focus on the calculation of multidimensional vibronic spectra using only the local information of the potential hypersurface, viz. the molecular energy, gradient, and Hessian. 
Second-order multiconfigurational self-consistent field optimization on lowest excited states using the trust radius algorithm is found to give the same stable convergence as for neutral ground states, while for higher lying states, the problem of multidimensional potential crossings renders the calculations more difficult.}, summary = {H$_2$CO$_+$ and its deuterated species are chosen to demonstrate the potential for using second-order multiconfigurational self-consistent field theory to optimize structures and calculate properties of ionized and excited states. The focus is on calculation of multidimensional vibronic spectra using only the molecular energy, gradient, and Hessian of the potential hypersurface. Second-order multiconfigurational self-consistent field optimization on lowest excited states using the trust-region algorithm is found to give the same stable convergence as for neutral ground states. For higher lying states, the problem of multidimensional potential crossings renders the calculations more difficult.}} @article{ChabCrou84, author = {Y. Chabrillac and J.-P. Crouzeix}, title = {Definiteness and semidefiniteness of quadratic forms revisited}, journal = LAA, volume = 63, pages = {283--292}, year = 1984} @article{ChamPoweLemaPede82, author = {R. M. Chamberlain and M. J. D. Powell and C. Lemar\'{e}chal and H. C. Pedersen}, title = {The watchdog technique for forcing convergence in algorithms for constrained optimization}, journal = MPS, volume = 16, number = {MAR}, pages = {1--17}, year = 1982} % abstract = {Han proves that a line search objective function, which is % of a form that occurs in many algorithms for constrained % optimization, can be used to force convergence to a % Kuhn-Tucker point. We give an example, however, to show % that this line search objective function can prevent a % superlinear rate of convergence. 
If this situation occurs, % we find that it is advantageous to replace the line search % objective function by an estimate of the Lagrangian % function. Therefore a technique is proposed, which chooses % automatically between Han's line search function and the % Lagrangian function, in a way that gives superlinear % convergence. We call it the ``watchdog technique'', because % the conditions on the step-length of a line search are % restricted on some iterations by a monitor, in order to % retain global convergence to a Kuhn-Tucker point from a % poor initial estimate of the solution.}, % summary = {\citebb{Han77} proved that a line search objective % function, which is of a form that occurs in many algorithms % for constrained optimization, can be used to force % convergence to a Kuhn-Tucker point. An example is given % which shows that this linesearch objective function can % prevent a superlinear rate of convergence. If this % situation occurs, it is advantageous to replace the line % search objective function by an estimate of the Lagrangian % function. Therefore a technique is proposed, which chooses % automatically between Han's linesearch function and the % Lagrangian function, in a way that gives superlinear % convergence. This technique is known as the ``watchdog % technique'', because the conditions on the step-length of a % linesearch are restricted on some iterations by a monitor, % in order to retain global convergence to a Kuhn-Tucker % point from a poor initial estimate of the solution.}} @article{ChanOlkiCool92, author = {T. F. Chan and J. A. Olkin and D. W. Cooley}, title = {Solving quadratically constrained least squares using black box solvers}, journal = BIT, volume = 32, pages = {481--495}, year = 1992} @phdthesis{Chan78, author = {R. Chandra}, title = {Conjugate gradient methods for partial differential equations}, school = {Yale University}, address = {New Haven, USA}, year = 1978} @article{ChanCott80, author = {Y. Y. Chang and R. W.
Cottle}, title = {Least-index resolution of degeneracy in quadratic programming}, journal = MP, volume = 18, number = 2, pages = {127--137}, year = 1980} @article{Char78, author = {C. Charalambous}, title = {A lower bound for the controlling parameter of the exact penalty functions}, journal = MP, volume = 15, number = 3, pages = {278--290}, year = 1978} @article{Char79, author = {C. Charalambous}, title = {Acceleration of the least $p$-th algorithm for minimax optimization with engineering applications}, journal = MP, volume = 17, number = 1, pages = {270--297}, year = 1979} @article{CharConn75, author = {A. R. Conn and C. Charalambous}, title = {Optimization of Microwave Networks}, journal = {IEEE Transactions on Microwave Theory and Techniques}, volume = 23, number = 10, pages = {834--838}, year = 1975} @inproceedings{Chen95, author = {Z. Chen}, title = {A new trust region algorithm for optimization with simple bounds}, booktitle = {Operations Research and Its Applications. Proceedings of the First International Symposium, ISORA '95}, editor = {D. Z. Du and X. S. Zhang and K. Cheng}, publisher = {Beijing World Publishing}, address = {Beijing, China}, pages = {49--58}, year = 1995, abstract = {We present an algorithm of trust region type for minimizing a differentiable function of many variables with simple bounds. Under milder conditions, we prove the global convergence of the main algorithm. 
It is also proved that the correct active set is identified in a finite number of iterations with a strict complementarity condition, and so the proposed algorithm reduces to an unconstrained minimization method in a finite number of iterations, allowing a fast asymptotic rate of convergence.}, summary = {A globally convergent trust-region algorithm is presented for minimizing a differentiable function of many variables with simple bounds. It is proved that the correct active set is identified in a finite number of iterations under a strict complementarity condition, allowing a fast asymptotic rate of convergence.}} @incollection{Chen96, author = {Z. Chen}, title = {Some algorithms for a class of CDT subproblems}, booktitle = {Lecture Notes in Operations Research}, editor = {D. Du and X. Zhang and W. Wang}, publisher = {Beijing World Publishing}, address = {Beijing, China}, pages = {108--114}, year = 1996} @inproceedings{ChenDengZhan95, author = {L. Chen and N. Deng and J. Zhang}, title = {A trust region method with partial-update technique for unary optimization}, booktitle = {Operations Research and Its Applications. Proceedings of the First International Symposium, ISORA '95}, editor = {D. Z. Du and X. S. Zhang and K. Cheng}, publisher = {Beijing World Publishing}, address = {Beijing, China}, pages = {40--46}, year = 1995, abstract = {We propose a modified partial-update algorithm for solving unconstrained unary optimization problems based on trust region stabilization via indefinite dogleg curves. This algorithm only partially updates an approximation to the Hessian matrix in each iteration by utilizing limited times of rank-one updating of Bunch-Parlett factorization. In contrast with the original algorithms in \citebb{GoldWang93}, the algorithm not only converges globally, but also possesses a locally quadratic convergence rate.
Furthermore, our numerical experiments show that the new algorithm outperforms the trust region method which uses the partial update criteria suggested in the above paper.}, summary = {A modified partial-update algorithm for solving unconstrained unary optimization problems is proposed, based on trust-region stabilization via indefinite dogleg curves. This algorithm only partially updates an approximation to the Hessian matrix in each iteration by applying a limited number of rank-one updates to its Bunch-Parlett factorization. In contrast with the original algorithms proposed by \citebb{GoldWang93}, the algorithm not only converges globally, but also possesses a locally quadratic convergence rate. Furthermore, numerical experiments show improved performance.}} @techreport{ChenChenKanz97, author = {B. Chen and X. Chen and Ch. Kanzow}, title = {A penalized {F}ischer-{B}urmeister {NCP}-Function: theoretical Investigation and Numerical Results }, institution = HAMBURG, address = HAMBURG-ADDRESS, number = {A-126}, year = 1997} @article{ChenDengZhan98, author = {L. Chen and N. Deng and J. Zhang}, title = {Modified partial-update {N}ewton-type algorithms for unary optimization}, journal = JOTA, volume = 97, number = 2, pages = {385--406}, year = 1998, abstract = {In this paper, we propose two modified partial-update algorithms for solving unconstrained unary optimization problems based on trust-region stabilization via indefinite dogleg curves. The two algorithms partially update an approximation to the Hessian matrix in each iteration by utilizing a number of times the rank-one updating of the Bunch-Parlett factorization. In contrast with earlier algorithms, the two algorithms not only converge globally, but possess also a locally quadratic or superlinear convergence rate. Furthermore, our numerical experiments show that the new algorithms outperform the trust-region method which uses the partial update criteria suggested in Ref. 
1.}, summary = {Two modified partial-update algorithms for solving unconstrained unary optimization problems based on trust-region stabilization via indefinite dogleg curves are proposed. They both partially update an approximation to the Hessian matrix in each iteration by using the SR1 updating of the Bunch-Parlett factorization. They converge globally with a locally quadratic or superlinear convergence rate. Numerical experiments indicate that they outperform the trust-region method which uses some other partial update criteria.}} @article{ChenHigh98, author = {S. H. Cheng and N. J. Higham}, title = {A Modified {Cholesky} Algorithm Based on a Symmetric Indefinite Factorization}, journal = SIMAA, volume = 19, number = 4, pages = {1097--1110}, year = 1998} % Orphaned fields from an earlier @techreport version of ChenHigh98, kept for reference (ignored by BibTeX): % institution = MCCM, address = MCCM-ADDRESS, type = {Numerical Analysis Report}, number = {No. 289}, year = 1996 @article{ChenHan96, author = {Z. W. Chen and J. Y. Han}, title = {A trust region algorithm for optimization with nonlinear equality and linear inequality constraints}, journal = {Science in China Series A --- Mathematics Physics Astronomy}, volume = 39, number = 8, pages = {799--806}, year = 1996, abstract = {A new algorithm of trust region type is presented to minimize a differentiable function of many variables with nonlinear equality and linear inequality constraints. Under the milder conditions, the global convergence of the main algorithm is proved. Moreover, since any nonlinear inequality constraint can be converted into an equation by introducing a slack variable, the trust region method can be used in solving general nonlinear programming problems.}, summary = {A globally convergent trust-region algorithm is presented to minimize a smooth function of many variables with nonlinear equality and linear inequality constraints.}} @article{ChenMang96, author = {C. Chen and O. L.
Mangasarian}, title = {A class of smoothing functions for nonlinear and mixed complementarity problems}, journal = COAP, volume = 5, number = 2, pages = {97--138}, year = 1996} @article{ChenSyku96, author = {Y. B. Cheng and J. K. Sykulski}, title = {Automated design and optimization of electromechanical actuators}, journal = {International Journal of Numerical Modelling-Electronic Networks Devices and Fields}, volume = 9, number = {1--2}, pages = {59--69}, year = 1996, abstract = {The paper investigates various optmization techniques and their suitability for the magnetic design of electromechanical actuators. Selected algorithms, including Gauss-Newton, \citebb{Leve44}--\citebb{Marq63} and Trust region, are examined and compared using 18 test functions. The Levenberg-Marquardt method is chosen for its robustness and fast convergence, and incorporated into an automated CAD optimization system (EAMON), which interfaces an external optimizer to a general purpose finite element package; The EAMON program, which is user friendly with pull-down menus, searches for constrained shape design variables that fulfill prescribed performance criteria. The electromagnetic field analysis forms part of the optimization iterative cycle. Finally, two application examples are described. First, a DC solenoid actuator with truncated cone pole face is optimized to produce a user specified force-displacement characteristic. Secondly, an actuator solenoid is optimized to produce maximum energy per stroke.}, summary = {The suitability of various optimization techniques for the magnetic design of electromechanical actuators is examined. The Gauss-Newton, Levenberg-Morrison-Marquardt and trust-region algorithms are compared using 18 test functions. The Levenberg-Morrison-Marquardt method is chosen for its robustness and fast convergence, and incorporated into an automated CAD optimization system (EAMON), which interfaces an external optimizer to a general purpose finite element package. 
The user-friendly EAMON program searches for constrained shape design variables that fulfill prescribed performance criteria. The electromagnetic field analysis forms part of the optimization iterative cycle. As examples, a DC solenoid actuator with truncated cone pole face is optimized to produce a user-specified force-displacement characteristic, and an actuator solenoid is optimized to produce maximum energy per stroke.}} @article{ChenYuan99, author = {X. D. Chen and Y. Yuan}, title = {On local solutions of the CDT subproblem}, journal = SIOPT, note = {To appear.}, year = 1999, abstract = {In this paper, we discuss the distribution of the local solutions of the CDT subproblem which appears in some trust region algorithms for nonlinear optimization. We also give some examples to show the differences between the CDT subproblem and the single-ball constraint subproblem. These results show that the complexity of the CDT subproblem does not depend on the complexity of the structure of the dual plane, thus they provide the possibility to search the global minimizer in the dual plane.}, summary = {The distribution of the local solutions of the CDT subproblem which appears in some trust region algorithms for nonlinear optimization is discussed. Examples illustrate the differences between the CDT subproblem and the single-ball constraint subproblem. The complexity of the CDT subproblem is shown not to depend on the complexity of the structure of the dual plane, which opens the possibility of searching the global minimizer in this plane.}} @techreport{ChinFlet99, author = {C. M. Chin and R. Fletcher}, title = {Convergence Properties of SLP-filter Algorithms that use EQP steps}, institution = DUNDEE, address = DUNDEE-ADDRESS, type = {Numerical Analysis Report}, number = {(in preparation)}, year = 1999} @inproceedings{ChowChen94, author = {T. T. Chow and P. K. 
Chen}, title = {A new trust region global strategy for unconstrained optimization}, booktitle = {1994 International Computer Symposium Conference Proceedings. National Chiao Tung University, Hsinchu, Taiwan}, volume = 1, pages = {394--401}, year = 1994, abstract = {This paper introduces a new global strategy, the tensor dogleg method, for solving unconstrained optimization problems, especially using tensor methods. Tensor methods for unconstrained optimization were first introduced by R.B. Schnabel and T.T. Chow (1991). They adopted line search method and two trust region methods as global strategies, but these trust region methods were either inefficient or too complicated. Therefore, the software package, TENMIN, developed by T.T. Chow et al. employed only the line search method as the global strategy for the tensor methods. We tested several different versions of our tensor dogleg algorithm. Although the performance of each version of the algorithm differed slightly, most of them performed better than TENMIN. The final version of the tensor dogleg algorithm comprises eleven states. During our tests we found that the candidate steps generated by our algorithm were in the tensor step directions more often than in other directions. The test results indicate that our tensor dogleg algorithm performs better than not only the standard double dogleg algorithm with Newton steps but also the conventional line search method using tensor steps.}, summary = {This paper introduces the tensor dogleg method, a trust-region technique for solving unconstrained optimization problems, which seems to outperform the linesearch based TENMIN package.}} @book{Chva83, author = {V. Chv\'{a}tal}, title = {Linear Programming}, publisher = FREEMAN, address = FREEMAN-ADDRESS, year = 1983} @book{Clar83, author = {F. H. 
Clarke}, title = {Optimization and Nonsmooth Analysis}, publisher = WILEY, address = WILEY-ADDRESS, series = {Canadian Mathematical Society series in mathematics}, year = 1983, note = {Reprinted as \emph{Classics in Applied Mathematics 5}, SIAM, Philadelphia, USA, 1990}} @article{ClerDelaPhamYass91, author = {J. R. Clermont and M. E. Delalande and Pham Dinh, T. and A. Yassine}, title = {Analysis of Plane and Axisymmetrical Flows of Incompressible Fluids with the Steam Tube Method---Numerical-Simulation by Trust-Region Optimization Algorithm}, journal = {International Journal for Numerical Methods in Fluids}, volume = 13, number = 3, pages = {371--399}, year = 1991, abstract = {New concepts for the study of incompressible plane or axisymmetric flows are analysed by the stream tube method. Flows without eddies and pure vortex flows are considered in a transformed domain where the mapped streamlines are rectilinear or circular. The transformation between the physical domain and the computational domain is an unknown of the problem. In order to solve the non-linear set of relevant equations, we present a new algorithm based on a trust region technique which is effective for non-convex optimization problems. Experimental results show that the new algorithm is more robust compared to the Newton-Raphson method.}, summary = {Concepts for the study of incompressible plane or axisymmetric flows are analysed by the stream tube method. Flows without eddies and pure vortex flows are considered in a transformed domain where the mapped streamlines are rectilinear or circular. The transformation between the physical domain and the computational domain is an unknown of the problem. A trust-region algorithm is given for solving the relevant nonlinear set of equations, Experimental results show that it is more robust than the Newton-Raphson method.}} @inproceedings{ClinConnVanL82, author = {A. K. Cline and A. R. Conn and Van Loan, C. 
F.}, title = {Generalizing the {LINPACK} condition estimator}, crossref = {Henn82}, pages = {73--83}} @article{ClinMoleStewWilk79, author = {A. K. Cline and C. B. Moler and G. W. Stewart and J. H. Wilkinson}, title = {An estimate for the condition number of a matrix}, journal = SINUM, volume = 16, pages = {368--375}, year = 1979} @inproceedings{Cole94, author = {T. F. Coleman}, title = {Linearly Constrained Optimization and Projected Preconditioned Conjugate Gradients}, booktitle = {Proceedings of the Fifth SIAM Conference on Applied Linear Algebra}, editor = {J. Lewis}, publisher = SIAM, address = SIAM-ADDRESS, pages = {118--122}, year = 1994} @article{ColeConn80, author = {T. F. Coleman and A. R. Conn}, title = {Second-order conditions for an exact penalty function}, journal = MP, volume = 19, number = 2, pages = {178--185}, year = 1980} @article{ColeConn82, author = {T. F. Coleman and A. R. Conn}, title = {Non-linear programming via an exact penalty-function: asymptotic analysis}, journal = MP, volume = 24, number = 2, pages = {123--136}, year = 1982} @article{ColeConn82b, author = {T. F. Coleman and A. R. Conn}, title = {Non-linear programming via an exact penalty-function: global analysis}, journal = MP, volume = 24, number = 2, pages = {137--161}, year = 1982} @article{ColeFeny92, author = {T. F. Coleman and P. A. Fenyes}, title = {Partitioned quasi-{N}ewton methods for nonlinear equality constrained optimization}, journal = MP, volume = 53, number = 1, pages = {17--44}, year = 1992} @article{ColeHemp90, author = {T. F. Coleman and C. Hempel}, title = {Computing a trust region step for a penalty function}, journal = SISSC, volume = 11, number = 1, pages = {180--201}, year = 1990, abstract = {The problem of minimizing a quadratic function subject to an ellipsoidal constraint when the matrix involved is the Hessian of a penalty function (i.e., a function of the form $p(x)=f(x)+(1/2\mu)c(x)^Tc(x)$) is considered.
Most applications of penalty functions require $p(x)$ to be minimized for values of $\mu$ decreasing to zero. In general, as $\mu$ tends to zero the nature of finite precision arithmetic causes a considerable loss of information about the null space of the constraint gradients when $\nabla^2p(x)$ is formed. This loss of information renders ordinary trust region Newton's method unstable and degrades the accuracy of the solution to the trust region problem. The algorithm of \citebb{MoreSore83} is modified so as to be more stable and less sensitive to the nature of finite precision arithmetic in this situation. Numerical experiments clearly demonstrate the stability of the proposed algorithm.}, summary = {The minimization of a quadratic function subject to an ellipsoidal constraint is considered in the case when the matrix involved is the Hessian of a penalty function $p(x)=f(x)+(1/2\mu)c(x)^Tc(x)$. Most applications require $p(x)$ to be minimized for values of $\mu$ decreasing to zero. The algorithm of \citebb{MoreSore83} is modified so as to be less sensitive to the nature of finite precision arithmetic in this situation. Numerical experiments illustrate the stability of the modified algorithm.}} @article{ColeHulb89, author = {T. F. Coleman and L. A. Hulbert}, title = {A direct active set algorithm for large sparse quadratic programs with simple bounds}, journal = MPB, volume = 45, number = 3, pages = {373--406}, year = 1989} @article{ColeHulb93, author = {T. F. Coleman and L. A. Hulbert}, title = {A Globally and Superlinearly Convergent Algorithm for Convex Quadratic Programs with Simple Bounds}, journal = SIOPT, volume = 3, number = 2, pages = {298--321}, year = 1993, abstract = {We present a globally and superlinearly convergent algorithm for solving convex quadratic programs with simple bounds.
We develop our algorithm using a new formulation of the problem: the minimization of an unconstrained piecewise quadratic function that has the same optimality conditions as the original problem. The major work at each iteration is the Cholesky factorization of a positive definite matrix with the size and structure of the Hessian of the quadratic. Hence our algorithm is suitable for solving large-scale problems and for implementation on parallel computers. We implemented our algorithm and tested it on a sequential computer on a variety of dense problems, and we present numerical results which show that our algorithm solves many problems quickly.}, summary = {A globally and superlinearly convergent algorithm for solving convex quadratic programs with simple bounds is given. It uses the minimization of an unconstrained piecewise quadratic function that has the same optimality conditions as the original problem. The major work at each iteration is the Cholesky factorization of a positive definite matrix with the size and structure of the Hessian of the quadratic. Hence the algorithm is suitable for solving large-scale problems and for implementation on parallel computers. An implementation of the algorithm is tested on a sequential computer using a variety of dense problems. Numerical results are presented.}} @article{ColeLi94, author = {T. F. Coleman and Y. Li}, title = {On the convergence of interior-reflective {N}ewton methods for nonlinear minimization subject to bounds}, journal = MP, volume = 67, number = 2, pages = {189--224}, year = 1994} @article{ColeLi96, author = {T. F. Coleman and Y. 
Li}, title = {A Reflective {N}ewton Method for Minimizing a Quadratic Function Subject to Bounds on Some of the Variables}, journal = SIOPT, volume = 6, number = 4, pages = {1040--1058}, year = 1996, abstract = {We propose a new algorithm, a reflective Newton method, for the minimization of a quadratic function of many variables subject to upper and lower bounds on some of the variables. The method applies to a general (indefinite) quadratic function for which a local minimum subject to bounds is required and is particularly suitable for the large-scale problem. Our new method exhibits strong convergence properties and global and second-order convergence and appears to have significant practical potential. Strictly feasible points are generated. We provide experimental results on moderately large and sparse problems based on both sparse Cholesky and preconditioned conjugate gradient linear solvers.}, summary = {A reflective Newton method is proposed for the minimization of a quadratic function of many variables subject to upper and lower bounds on some of the variables. It applies to a general (indefinite) quadratic function for which a local minimum subject to bounds is required and is particularly suitable for the large-scale problem. The method is globally and asymptotically quadratically convergent and generates strictly feasible points. Experimental results are presented for moderately large and sparse problems based on both sparse Cholesky and preconditioned conjugate-gradient linear solvers.}} @article{ColeLi96b, author = {T. F. Coleman and Y. Li}, title = {An Interior Trust Region Approach for Nonlinear Minimization Subject to Bounds}, journal = SIOPT, volume = 6, number = 2, pages = {418--445}, year = 1996, abstract = {We propose a new trust region approach for minimizing a nonlinear function subject to simple bounds.
Unlike most existing methods, our proposed method does not require that a quadratic programming subproblem, with inequality constraints, be solved each iteration. Instead, a solution to a trust region subproblem is defined by minimizing a quadratic function subject only to an ellipsoidal constraint. The iterates generated are strictly feasible. Our proposed method reduces to a standard trust region approach for the unconstrained problem when there are no upper or lower bounds on the variables. Global and local quadratic convergence is established. Preliminary numerical experiments are reported indicating the practical viability of this approach.}, summary = {A trust-region approach for minimizing a nonlinear function subject to simple bounds is proposed, that does not require that a quadratic programming subproblem with inequality constraints be solved every iteration. Instead, a solution to a trust-region subproblem is sought. The iterates generated are strictly feasible. The proposed method reduces to a standard trust-region approach for the unconstrained problem. Global and locally quadratic convergence is established. Preliminary numerical experiments are reported.}} @techreport{ColeLi97b, author = {T. F. Coleman and Y. Li}, title = {A trust region and affine scaling interior point method for nonconvex minimization with linear inequality constraints}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR 97-1642}, year = 1997, abstract = {A trust region and affine scaling interior point method (TRAM) is proposed for a general nonlinear minimization with linear inequality constraints by \citebb{ColeLi98}. In the proposed approach, a Newton step is derived from the complementarity conditions. Based on this Newton step, a trust region subproblem is formed, and the original objective function is monotonically decreased. Explicit sufficient decrease conditions are proposed for satisfying complementarity, dual feasibility and second order optimality. 
The objective of this paper is to establish global and local convergence properties of the proposed trust region and affine scaling interior point method. It is shown that the proposed decrease conditions are sufficient for achieving complementarity, dual feasibility and second order optimality respectively. It is also established that a trust region solution is asymptotically in the interior of the proposed trust region subproblem and a damped trust region step can achieve quadratic convergence.}, summary = {Global and local convergence properties of the trust-region and affine-scaling interior-point method (TRAM) by \citebb{ColeLi98} are established. It is shown that a trust-region solution is asymptotically in the interior of the trust region subproblem and a damped trust-region step can achieve quadratic convergence.}} @inproceedings{ColeLi98, author = {T. F. Coleman and Y. Li}, title = {Combining trust region and affine scaling for linearly constrained nonconvex minimization}, crossref = {Yuan98}, pages = {219--250}, abstract = {An interior point method is proposed for a general nonlinear (non-convex) minimization with linear inequality constraints. This method is a combination of the trust region idea for nonlinearity and affine scaling technique for constraints. Using this method, the original objective function is monotonically decreased. In the proposed approach, a Newton step is derived directly from the complementarity conditions. A trust region subproblem is formed which yields an approximate Newton step as its solution asymptotically. The objective function of the trust region subproblem is the quadratic approximation to the original objective function plus an augmented quadratic convex term. Similar to an augmented Lagrangian function, this augmentation adds positive curvature in the range space of the constraint normals. The global convergence is achieved by possibly using trust regions with different shapes. 
A reflection technique, which accelerates convergence, is described. Explicit sufficient decrease conditions are proposed. Computational results of a two-dimensional trust region implementation are reported for large-scale problems. Preliminary experiments suggest that this method can be effective; a relatively small number of function evaluations are required for some medium and large test problems.}, summary = {An interior-point method is proposed for non-convex minimization with linear inequality constraints. It combines the trust-region idea for nonlinearity and affine-scaling technique for constraints, and ensures that the original objective function is monotonically decreased. A subproblem is formed which asymptotically yields an approximate Newton step, directly derived from the complementarity conditions. Global convergence is achieved by possibly using trust regions with different shapes. A reflection technique accelerates convergence. Explicit sufficient decrease conditions are proposed. Computational results of a two-dimensional implementation are reported for large-scale problems.}} @misc{ColeLi98b, author = {T. F. Coleman and Y. Li}, title = {A primal-dual Trust Region Algorithm for Nonconvex Programming using an {$\ell_1$} Penalty Function}, howpublished = {Presentation at the Optimization 98 Conference, Coimbra}, year = 1998, abstract = {A primal and dual algorithm is proposed for nonconvex programming. Primal and dual steps are derived directly from the complementarity conditions. A primal step is used to yield decrease for the $\ell_1$ penalty function. In addition, a dual step yields decrease for an appropriate function of dual variables. Reflection procedures are used to accelerate convergence and preliminary computational results are reported.}, summary = {A primal-dual algorithm is proposed for non-convex programming. Primal and dual steps are derived directly from the complementarity conditions.
A primal trust-region step is used to yield decrease for the $\ell_1$ penalty function, and a dual constrained least-squares step yields decrease for an appropriate function of dual variables. Reflection procedures are used to accelerate convergence and preliminary computational results are reported.}} @article{ColeLiao95, author = {T. F. Coleman and A. Liao}, title = {An efficient trust region method for unconstrained discrete-time optimal control problems}, journal = COAP, volume = 4, number = 1, pages = {47--66}, year = 1995, abstract = {Discrete-time optimal control (DTOC) problems are large-scale optimization problems with a dynamic structure. In previous work this structure has been exploited to provide very fast and efficient local procedures. Two examples are the differential dynamic programming algorithm (DDP) and the stagewise Newton procedure---both require only $O(N)$ operations, where $N$ is the number of timesteps. Both exhibit a quadratic convergence rate. However, most algorithms in this category do not have a satisfactory global convergence strategy. The most popular global strategy is shifting: this sometimes works poorly due to the lack of automatic adjustment to the shifting element. In this paper we propose a method that incorporates the trust region idea with the local stagewise Newton's method. This method possesses advantages of both the trust region idea and the stagewise Newton's method, i.e., our proposed method has strong global and local convergence properties yet remains economical. Preliminary numerical results are presented to illustrate the behavior of the proposed algorithm. We also collect in the Appendix some DTOC problems that have appeared in the literature.}, summary = {A method is proposed that incorporates the trust-region idea with the local stagewise Newton's method for discrete-time optimal control (DTOC) problems. This method has strong global and local convergence properties yet remains economical.
Preliminary numerical results illustrate the behaviour of the algorithm. Some DTOC problems that have appeared in the literature are collected in an appendix.}} @article{ColeLiu99, author = {T. F. Coleman and J. Liu}, title = {An interior {N}ewton method for quadratic programming}, journal = MPA, volume = 85, number = 3, pages = {491--524}, year = 1999, abstract = {We propose a new (interior) approach for the general quadratic programming problem. We establish that the new method has strong convergence properties: the generated sequence converges globally to a point satisfying the second-order necessary optimality conditions, and the rate of convergence is 2-step quadratic if the limit point is a strong minimizer. Published alternative interior approaches do not share such strong convergence properties for the nonconvex case. We also report on the results of preliminary numerical experiments: the results indicate that the proposed method has considerable practical potential.}, summary = {An interior point method is proposed for the general quadratic programming problem. The method converges globally to a point satisfying the second-order necessary optimality conditions, and the rate of convergence is 2-step quadratic if the limit point is a strong minimizer. Preliminary numerical experiments indicate that the method has practical potential.}} @article{ColeMore83, author = {T. F. Coleman and J. J. Mor\'{e}}, title = {Estimation of sparse {J}acobian matrices and graph coloring problems}, journal = SINUM, volume = 20, pages = {187--209}, year = 1983} @article{ColeMore84, author = {T. F. Coleman and J. J. Mor\'{e}}, title = {Estimation of sparse {H}essian matrices and graph coloring problems}, journal = MP, volume = 28, pages = {243--270}, year = 1984} @article{ColePlas92, author = {T. F. Coleman and P. E.
Plassmann}, title = {A Parallel Nonlinear Least-Squares Solver--Theoretical Analysis and Numerical Results}, journal = SISSC, volume = 13, number = 3, pages = {771--793}, year = 1992, abstract = {The authors (1989) proposed a parallel algorithm, based on the sequential Levenberg-Marquardt method for the nonlinear least-squares problem. The algorithm is suitable for message-passing multiprocessor computers. A parallel efficiency analysis is provided and computational results are reported. The experiments were performed on an Intel iPSC/2 multiprocessor with 32 nodes: the paper presents experimental results comparing the given parallel algorithm with sequential MINPACK code executed on a single processor. These experimental results show that essentially full efficiency is obtained for problems where the row size is sufficiently larger than the number of processors.}, summary = {The paper presents experimental results comparing a parallel version of the Levenberg-Morrison-Marquardt algorithm on an Intel iPSC/2 multiprocessor with 32 nodes with sequential MINPACK code executed on a single processor. These experimental results show that essentially full efficiency is obtained for problems where the row size is sufficiently larger than the number of processors.}} @article{ColePoth86a, author = {T. F. Coleman and A. Pothen}, title = {The Null Space Problem {I}: complexity}, journal = SIADM, volume = 7, number = 4, pages = {527--537}, year = 1986} @article{ColePoth87, author = {T. F. Coleman and A. Pothen}, title = {The Null Space Problem {II}: algorithms}, journal = SIADM, volume = 8, number = 4, pages = {544--563}, year = 1987} @article{ColeSore84, author = {T. F. Coleman and D. C. Sorensen}, title = {A note on the computation of an orthonormal basis for the null space of a matrix}, journal = MP, volume = 29, number = 2, pages = {234--242}, year = 1984} @techreport{ColeYuan95, author = {T. F. Coleman and W.
Yuan}, title = {A New Trust Region Algorithm for Equality Constrained Optimization}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR95-1477}, year = 1995, abstract = {We present a new trust region algorithm for solving nonlinear equality constrained optimization problems. At each iterate a change of variables is performed to improve the ability of the algorithm to follow the constraint level sets. The algorithm employs $L_2$ penalty functions for obtaining global convergence. Under certain assumptions we prove that this algorithm globally converges to a point satisfying the second order necessary conditions; the local convergence rate is quadratic. Results of preliminary numerical experiments are presented.}, summary = {A trust-region algorithm for solving nonlinear equality constrained problems is presented. At each iterate a change of variables improves the ability of the algorithm to follow the constraint level sets. The algorithm employs quadratic penalty functions to obtain global convergence. It converges globally and Q-quadratically to a point satisfying second-order necessary optimality conditions. Preliminary numerical experiments are presented.}} @book{Coll66, author = {L. Collatz}, title = {Functional Analysis and Numerical Mathematics}, publisher = AP, address = AP-ADDRESS, year = 1966} @mastersthesis{Cols99, author = {B. Colson}, title = {Mathematical Programs with Equilibrium Constraints and Nonlinear Bilevel Programming Problems}, school = FUNDP, address = FUNDP-ADDRESS, year = 1999} @inproceedings{ConcGoluOLea76, author = {P. Concus and G. H. Golub and D. P. O'Leary}, title = {Numerical Solution of Nonlinear Elliptic Partial Differential Equations by a Generalized Conjugate Gradient Method}, booktitle = {Sparse Matrix Computations}, editor = {J. Bunch and D. Rose}, publisher = AP, address = AP-ADDRESS, pages = {309--332}, year = 1976} @article{Conn73, author = {A. R. 
Conn}, title = {Constrained optimization via a nondifferentiable penalty function}, journal = SINUM, volume = 10, number = 4, pages = {760--779}, year = 1973} @inproceedings{ConnCoulHariMoriVisw96, author = {A. R. Conn and P. K. Coulman and R. A. Haring and G. L. Morrill and C. Visweswariah}, title = {Optimization of custom {MOS} circuits by transistor sizing}, booktitle = {IEEE/ACM International Conference on Computer-Aided Design. Digest of Technical Papers (Cat. No.96CB35991)}, publisher = {IEEE}, address = {IEEE Comput. Soc. Press, Los Alamitos, USA}, pages = {174--180}, year = 1996, abstract = {Optimization of a circuit by transistor sizing is often a slow, tedious and iterative manual process which relies on designer intuition. Circuit simulation is carried out in the inner loop of this tuning procedure. Automating the transistor sizing process is an important step towards being able to rapidly design high-performance, custom circuits. JiffyTune is a new circuit optimization tool that automates the tuning task. Delay, rise/fall time, area and power targets are accommodated. Each (weighted) target can be either a constraint or an objective function. Minimax optimization is supported. Transistors can be ratioed and similar structures grouped to ensure regular layouts. Bounds on transistor widths are supported. JiffyTune uses {\sf LANCELOT}, a large-scale nonlinear optimization package with an augmented Lagrangian formulation. Simple bounds are handled explicitly and trust region methods are applied to minimize a composite objective function. In the inner loop of the optimization, the fast circuit simulator SPECS is used to evaluate the circuit. SPECS is unique in its ability to efficiently provide time-domain sensitivities, thereby enabling gradient-based optimization. Both the adjoint and direct methods of sensitivity computation have been implemented in SPECS.
To assist the user, interfaces in the Cadence and SLED design systems have been constructed.}, summary = {JiffyTune is a circuit optimization tool that automates the tuning task. Delay, rise/fall time, area and power targets are accommodated. Each (weighted) target can be either a constraint or an objective function. Minimax optimization is supported. Transistors can be ratioed and similar structures grouped to ensure regular layouts. Bounds on transistor widths are supported. JiffyTune uses {\sf LANCELOT}. In the inner loop of the optimization, the fast circuit simulator SPECS is used to evaluate the circuit. SPECS is unique in its ability to provide time-domain sensitivities, thereby enabling gradient-based optimization. Both the adjoint and direct methods of sensitivity computation have been implemented in SPECS. Interfaces in the Cadence and SLED design systems have been constructed.}} @inproceedings{ConnGoulLescToin94, author = {A. R. Conn and N. I. M. Gould and M. Lescrenier and Ph. L. Toint}, title = {Performance of a multifrontal scheme for partially separable optimization}, crossref = {GomeHenn94}, pages = {79--96}} %abstract = {We consider the solution of partially separable % minimization problems subject to simple bounds % constraints. At each iteration, a quadratic model is used % to approximate the objective function within a trust % region. To minimize this model, the iterative method of % conjugate gradients has usually been used. The aim of this % paper is to compare the performance of a direct method, a % multifrontal scheme, with the conjugate gradient method % (with and without preconditioning). To assess our % conclusions, a set of numerical experiments, including % large-dimensional problems, is presented.} @article{ConnGoulToin88a, author = {A. R. Conn and N. I. M. Gould and Ph. L. 
Toint}, title = {Global convergence of a class of trust region algorithms for optimization with simple bounds}, journal = SINUM, volume = 25, number = 2, pages = {433--460}, year = 1988, note = {See also same journal 26:764--767, 1989.}, abstract = {This paper extends the known excellent global convergence properties of trust-region algorithms for unconstrained optimization to the case where bounds on the variables are present. Weak conditions on the accuracy of the Hessian approximations are considered. It is also shown that, when the strict complementarity condition holds, the proposed algorithms reduce to an unconstrained calculation after finitely many iterations, allowing a fast rate of convergence.}, summary = {The global convergence properties of trust-region algorithms for unconstrained optimization are extended to the case where bounds on the variables are present. Weak conditions on the accuracy of the Hessian approximations are considered. When the strict complementarity condition holds, the proposed algorithms reduce to an unconstrained calculation after finitely many iterations, allowing fast convergence.}} @article{ConnGoulToin88b, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {Testing a class of methods for solving minimization problems with simple bounds on the variables}, journal = MC, volume = 50, number = 182, pages = {399--430}, year = 1988, abstract = {We describe the results of a series of tests upon a class of new methods of trust region type for solving the simple bound constrained minimization problem. The results are encouraging and lead us to believe that the method will prove useful in solving large problems.}, summary = {The results of tests on the trust-region methods proposed by \citebb{ConnGoulToin88a} for solving the bound constrained minimization problem are discussed.}} @inproceedings{ConnGoulToin89b, author = {A. R. Conn and N. I. M. Gould and Ph. L.
Toint}, title = {An introduction to the structure of large scale nonlinear optimization problems and the {{\sf LANCELOT}} project}, booktitle = {Computing Methods in Applied Sciences and Engineering}, editor = {R. Glowinski and A. Lichnewsky}, publisher = SIAM, address = SIAM-ADDRESS, pages = {42--51}, year = 1990} %abstract = {This paper presents the authors' personal views on two % fundamental aspects amongst the recent developments in the % growing field of large-scale nonlinear mathematical % programming. Important concepts for the description of % problem structure are discussed in detail. A systematic % approach to software development for this class of % problems is also presented. The approach incorporates both % suitable numerical algorithms and user oriented standard % format for problem specification in a modular and coherent % system.} @article{ConnGoulToin91, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {A Globally Convergent Augmented {L}agrangian Algorithm for Optimization with General Constraints and Simple Bounds}, journal = SINUM, volume = 28, number = 2, pages = {545--572}, year = 1991} @article{ConnGoulToin91a, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {Convergence of quasi-{N}ewton matrices generated by the Symmetric Rank One update}, journal = MP, volume = 50, number = 2, pages = {177--196}, year = 1991} @inproceedings{ConnGoulToin91e, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {On the number of inner iterations per outer iteration of a globally convergent algorithm for optimization with general nonlinear equality constraints and simple bounds}, crossref = {GrifWats92}, pages = {49--68}} @book{ConnGoulToin92, author = {A. R. Conn and N. I. M. Gould and Ph. L. 
Toint}, title = {{\sf LANCELOT}: a {F}ortran package for Large-scale Nonlinear Optimization ({R}elease {A})}, publisher = SPRINGER, address = SPRINGER-ADDRESS, series = {Springer Series in Computational Mathematics}, year = 1992} @techreport{ConnGoulToin92g, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {Intensive numerical tests with {{\sf LANCELOT}} ({R}elease {A}): the complete results}, institution = FUNDP, address = FUNDP-ADDRESS, number = {92/15}, year = 1992, note = {Also issued as Research Report RC 18750, IBM T.J. Watson Center, Yorktown Heights, USA, and as Research Report 92-069, RAL, Chilton, Oxfordshire, England}, abstract = {This report contains the detailed results of the numerical experiments on the {\sf LANCELOT} package for nonlinear optimization (Release A). These results constitute the basis of the discussion and analysis presented by the authors in \citebb{ConnGoulToin96a}.}, summary = {The detailed results for the tests reported by \citebb{ConnGoulToin96a} are presented.}} @article{ConnGoulToin94a, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {A note on using alternative second-order models for the subproblems arising in barrier function methods for minimization}, journal = NUMMATH, volume = 68, pages = {17--33}, year = 1994} @article{ConnGoulToin96a, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {Numerical experiments with the {{\sf LANCELOT}} package ({R}elease {A}) for large-scale nonlinear optimization}, journal = MPA, volume = 73, number = 1, pages = {73--110}, year = 1996, abstract = {In this paper, we describe the algorithmic options of Release A of {\sf LANCELOT}, a Fortran package for large-scale nonlinear optimization. We then present the results of intensive numerical tests and discuss the relative merits of the options. The experiments described involve both academic and applied problems. 
Finally, we propose conclusions, both specific to {\sf LANCELOT} and of more general scope.}, summary = {The algorithmic options available within Release A of {\sf LANCELOT}, a Fortran package for large-scale nonlinear optimization, are presented. The results of intensive numerical tests are described, and the relative merits of the options discussed. The experiments described involve both academic and applied problems. Conclusions specific to {\sf LANCELOT} and of more general scope are made.}} @inproceedings{ConnGoulToin97, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {Methods for Nonlinear Constraints in Optimization Calculations}, crossref = {DuffWats97}, pages = {363--390}} %abstract = {Ten years ago, the broad consensus among researchers in % constrained optimization was that sequential quadratic % programming (SQP) methods were the methods of choice. % While, in the long term, this position may be justified, % the past ten years have exposed a number of difficulties % with the SQP approach. Moreover, alternative methods have % shown themselves capable of solving large-scale problems. % In this paper, we shall outline the defects with SQP % methods, and discuss the alternatives. In particular, we % shall indicate how our understanding of the subproblems % which inevitably arise in constrained optimization % calculations has improved. We shall also consider the % impact of interior-point methods for inequality % constrained problems, described elsewhere in this volume, % and argue that these methods likely provide a more useful % Newton model for such problems than do traditional SQP % methods. Finally, we shall consider trust-region methods % for constrained problems, and the impact of automatic % differentiation on algorithm design. } @article{ConnGoulToin97a, author = {A. R. Conn and N. I. M. Gould and Ph. L. 
Toint}, title = {A globally convergent {L}agrangian barrier algorithm for optimization with general inequality constraints and simple bounds}, journal = MC, volume = 66, pages = {261--288}, year = 1997} @article{ConnGoulToin97z, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {On the number of inner iterations per outer iteration of a globally convergent algorithm for optimization with general nonlinear inequality constraints and simple bounds}, journal = COAP, volume = 7, number = 1, pages = {41--70}, year = 1997} @inproceedings{ConnGoulToin99, author = {A. R. Conn and N. I. M. Gould and Ph. L. Toint}, title = {A Primal-Dual Algorithm for Minimizing a Nonconvex Function Subject to Bound and Linear Equality Constraints}, crossref = {DiPiGian99}, pages = {15--30}} @article{ConnGoulSartToin93, author = {A. R. Conn and N. I. M. Gould and A. Sartenaer and Ph. L. Toint}, title = {Global convergence of a class of trust region algorithms for optimization using inexact projections on convex constraints}, journal = SIOPT, volume = 3, number = 1, pages = {164--221}, year = 1993, abstract = {A class of trust region based algorithms is presented for the solution of nonlinear optimization problems with a convex feasible set. At variance with previously published analysis of this type, the theory presented allows for the use of general norms. Furthermore, the proposed algorithms do not require the explicit computation of the projected gradient, and can therefore be adapted to cases where the projection onto the feasible domain may be expensive to calculate. Strong global convergence results are derived for the class. It is also shown that the set of linear and nonlinear constraints that are binding at the solution are identified by the algorithms of the class in a finite number of iterations.}, summary = {Trust-region algorithms for the solution of nonlinear optimization problems with a convex feasible set are presented. 
The theory given allows for the use of general norms. Furthermore, the proposed algorithms do not require the explicit computation of the projected gradient, and can therefore be adapted to cases where the projection onto the feasible domain may be expensive to calculate. Strong global convergence results are derived. The set of linear and nonlinear constraints that are binding at the solution are identified by the algorithms in a finite number of iterations.}} @article{ConnGoulSartToin96, author = {A. R. Conn and N. I. M. Gould and A. Sartenaer and Ph. L. Toint}, title = {Convergence properties of an Augmented {L}agrangian Algorithm for Optimization with a Combination of General Equality and Linear Constraints}, journal = SIOPT, volume = 6, number = 3, pages = {674--703}, year = 1996} @article{ConnGoulSartToin96a, author = {A. R. Conn and N. I. M. Gould and A. Sartenaer and Ph. L. Toint}, title = {Convergence properties of minimization algorithms for convex constraints using a structured trust region}, journal = SIOPT, volume = 6, number = 4, pages = {1059--1086}, year = 1996, abstract = {In this paper, we present a class of trust region algorithms for minimization problems within convex feasible regions in which the structure of the problem is explicitly used in the definition of the trust region. This development is intended to reflect the possibility that some parts of the problem may be more accurately modelled than others, a common occurrence in large-scale nonlinear applications. After describing the structured trust region mechanism, we prove global convergence for all algorithms in our class.}, summary = {A class of structured trust-region algorithms is presented for minimization problems within convex feasible regions, in which the structure of the problem is explicitly used in the definition of the trust region. Global convergence is established.}} @techreport{ConnGoulOrbaToin99, author = {A. R. Conn and N. I. M. Gould and D. Orban and Ph. L. 
Toint}, title = {A primal-dual trust-region algorithm for minimizing a non-convex function subject to bound and linear equality constraints}, institution = FUNDP, address = FUNDP-ADDRESS, number = {TR99-04}, year = 1999, abstract = {A new primal-dual algorithm is proposed for the minimization of non-convex objective functions subject to simple bounds and linear equality constraints. The method uses a primal-dual trust-region model to ensure descent on a suitable merit function. Convergence of a well-defined subsequence of iterates is proved to a second-order critical point from arbitrary starting points. Algorithmic variants are discussed and preliminary numerical results presented.}, summary = {A primal-dual algorithm is proposed for the minimization of non-convex objective functions subject to simple bounds and linear equality constraints. The method uses a primal-dual trust-region model to ensure descent on a suitable merit function. Convergence of a well-defined subsequence of iterates is proved to a second-order critical point from arbitrary starting points. Algorithmic variants are discussed and preliminary numerical results presented.}} @article{ConnLi92, author = {A. R. Conn and Y. Li}, title = {A structure-exploiting algorithm for nonlinear minimax problems}, journal = SIOPT, volume = 2, number = 2, pages = {242-263}, year = 1992} @techreport{ConnSinc75, author = {A. R. Conn and J. W. Sinclair}, title = {Quadratic programming via a non-differentiable penalty function}, institution = {Faculty of Mathematics, University of Waterloo}, number = {CORR 75/15}, year = 1975} @inproceedings{ConnToin96, author = {A. R. Conn and Ph. L. 
Toint}, title = {An algorithm using quadratic interpolation for unconstrained derivative free optimization}, crossref = {DiPiGian96}, pages = {27--47}, abstract = {This paper explores the use of multivariate interpolation techniques in the context of methods for unconstrained optimization that do not require derivatives of the objective function. A new algorithm is proposed that uses quadratic models in a trust region framework. The algorithm is constructed to require few evaluations of the objective function and to be relatively insensitive to noise in the objective function values. Its performance is analyzed on a set of 20 examples, both with and without noise.}, summary = {The use of multivariate interpolation techniques is explored in the context of methods for unconstrained optimization that do not require derivatives. An algorithm is proposed that uses quadratic models in a trust-region framework. It requires few evaluations of the objective function and is relatively insensitive to noise in the objective function values. Its performance is analyzed on a set of 20 examples, both with and without noise.}} @article{ConnPiet77, author = {A. R. Conn and T. Pietrzykowski}, title = {A penalty function method converging directly to a constrained optimum}, journal = SINUM, volume = 14, number = 2, pages = {348--375}, year = 1977} @inproceedings{ConnScheToin97, author = {A. R. Conn and K. Scheinberg and Ph. L. Toint}, title = {On the convergence of derivative-free methods for unconstrained optimization}, booktitle = {Approximation Theory and Optimization: Tributes to M. J. D. Powell}, editor = {A. Iserles and M. Buhmann}, publisher = CUP, address = CUP-ADDRESS, pages = {83--108}, year = 1997, abstract = {The purpose of this paper is to examine a broad class of derivative-free trust-region methods for unconstrained optimization inspired by the proposals of \citebb{Powe94b} and to derive a general framework in which reasonable global convergence results can be obtained.
The developments make extensive use of an interpolation error bound derived by \citebb{SaueXu95} in the context of multivariate polynomial interpolation.}, summary = {Derivative-free trust-region methods for unconstrained optimization, inspired by \citebb{Powe94b}, are discussed and global convergence results obtained. The developments make extensive use of an interpolation error bound derived by \citebb{SaueXu95} in the context of multivariate polynomial interpolation.}} @article{ConnScheToin97b, author = {A. R. Conn and K. Scheinberg and Ph. L. Toint}, title = {Recent progress in unconstrained nonlinear optimization without derivatives}, journal = MPB, volume = 79, number = 3, pages = {397--414}, year = 1997, abstract = {We present an introduction to a new class of derivative free methods for unconstrained optimization. We start by discussing the motivation for such methods and why they are in high demand by practitioners. We then review the past developments in this field, before introducing the features that characterize the newer algorithms. In the context of a trust region framework, we focus on techniques that ensure a suitable ``geometric quality'' of the considered models. We then outline the class of algorithms based on these techniques, as well as their respective merits. We finally conclude the paper with a discussion of open questions and perspectives.}, summary = {Derivative-free trust-region methods for unconstrained optimization are introduced. Motivation is given, and past developments in the field reviewed. Techniques that ensure a suitable ``geometric quality'' of the models are considered. A discussion of open questions and perspectives is given.}} @techreport{ConnScheToin98, author = {A. R. Conn and K. Scheinberg and Ph. L. 
Toint}, title = {A Derivative Free Optimization Algorithm in Practice}, institution = FUNDP, address = FUNDP-ADDRESS, number = {TR98/11}, year = 1998, abstract = {We consider an algorithm for optimizing, at first without constraints, a nonlinear function whose first-order derivatives exist but are unavailable. The algorithm is based on approximating the objective function by a quadratic polynomial interpolation model and using this model within a trust-region framework. We study some practical properties of the algorithm and show how it can be extended to solve certain constrained optimization problems. We present computational results for analytical and for real-life problems from the aeronautical industry.}, summary = {An algorithm is presented for optimizing, at first without constraints, a nonlinear function whose first-order derivatives exist but are unavailable. It is based on approximating the objective function by a quadratic polynomial interpolation model and using this model within a trust-region framework. Some practical properties of the algorithm are studied, including how it can be extended to solve certain constrained optimization problems. Computational results are presented for analytical and for real-life problems from the aeronautical industry.}} @article{ConnViceVisw99, author = {A. R. Conn and L. N. Vicente and C. Visweswariah}, title = {Two-step algorithms for nonlinear optimization with structured applications}, journal = SIOPT, volume = 9, number = 4, pages = {924--947}, year = 1999, abstract = {In this paper we propose extensions to trust-region and line-search algorithms in which the classical step is augmented with a second step that is required to yield a decrease in the value of the objective function. The classical convergence theory for trust-region and line-search algorithms is adapted to this class of two-step algorithms. 
It is shown that the algorithms are globally convergent to a stationary point under the ``classical'' assumptions. The algorithms can be applied to any problem with variable(s) whose contribution to the objective function is a known functional form. In the nonlinear programming package {\sf LANCELOT}, they have been applied to update slack variables and variables introduced to solve minimax problems, leading to enhanced optimization efficiency. Numerical results are presented to show the effectiveness of these techniques.}, summary = {Extensions to trust-region and line-search algorithms are proposed in which the classical step is augmented with a second step that yields a decrease of the objective function. The convergence theory for trust-region and linesearch algorithms is adapted to this class of two-step algorithms. It is shown that the algorithms are globally convergent. The algorithms can be applied to any problem with variable(s) whose contribution to the objective function is a known functional form. In the nonlinear programming package {\sf LANCELOT}, they have been applied to update slack variables and variables introduced to solve minimax problems, leading to enhanced optimization efficiency. Numerical results are presented.}} @article{ContTapi93, author = {M. Contreras and R. A. Tapia}, title = {Sizing the {BFGS} and {DFP} updates: numerical study}, journal = JOTA, volume = 78, number = 1, pages = {93--108}, year = 1993, abstract = {The authors develop and test a strategy for selectively sizing (multiplying by an appropriate scalar) the approximate Hessian matrix before it is updated in the BFGS and DFP trust-region methods for unconstrained optimization. The numerical results imply that, for use with the DFP update, the Oren-Luenberger sizing factor is completely satisfactory and selective sizing is vastly superior to the alternatives of never sizing or first-iteration sizing and is slightly better than the alternative of always sizing. 
Numerical experimentation showed that the Oren-Luenberger sizing factor is not a satisfactory sizing factor for use with the BFGS update. Therefore, based on the newly acquired understanding of the situation, the authors propose a centered Oren-Luenberger sizing factor to be used with the BFGS update. The numerical experimentation implies that selectively sizing the BFGS update with the centered Oren-Luenberger sizing factor is superior to the alternatives. These results contradict the folk axiom that sizing should be done only at the first iteration. They also show that, without sufficient sizing, DFP is vastly inferior to BFGS; however, when selectively sized, DFP is competitive with BFGS.}, summary = {A strategy is developed for selectively sizing the approximate Hessian matrix before it is updated in the BFGS and DFP trust-region methods for unconstrained optimization. The numerical results suggest that sizing should be done not only at the first iteration. They also show that, without sufficient sizing, DFP is vastly inferior to BFGS; however, when selectively sized, DFP is competitive with BFGS.}} @article{Cont80, author = {L. B. Contesse}, title = {Une caract\'{e}risation compl\`{e}te des minima locaux en programmation quadratique}, journal = NUMMATH, volume = 34, pages = {315--332}, year = 1980} @article{Corr97, author = {G. Corradi}, title = {A trust region algorithm for unconstrained optimization}, journal = {International Journal of Computer Mathematics}, volume = 65, number = {1-2}, pages = {109--119}, year = 1997, abstract = {In this paper a trust region method, based on approximation of $f(\cdot)$ and $f'(\cdot)$ of higher order, is presented. A convergence analysis for the method is considered too. Numerical results are reported.}, summary = {A trust region method is proposed which is based on approximation of $f(\cdot)$ and $f'(\cdot)$ of higher order. A convergence analysis for the method is also presented.
Numerical results are reported.}} @inproceedings{CostStanSnym96, author = {J. E. Coster and N. Stander and J. A. Snyman}, title = {Trust region augmented {L}agrangian methods with secant {H}essian updating applied to structural optimization}, booktitle = {Proceedings of the ASME Design Engineering Technical Conference and Computers in Engineering Conference, August 18-22, 1996, Irvine, California}, year = 1996, abstract = {The problem of determining the optimal sizing design of truss structures is considered. An augmented Lagrangian optimization algorithm which uses a quadratic penalty term is formulated. The implementation uses a first-order Lagrange multiplier update and a strategy for progressively increasing the accuracy with which the bound constrained minimizations are performed. The allowed constraint violation is also progressively decreased but at a slower rate so as to prevent ill-conditioning due to large penalty values. Individual constraint penalties are used and only the penalties of the worst violated constraints are increased. The scheme is globally convergent. The bound constrained minimizations are performed using the {\em SBMIN} algorithm where a sophisticated trust-region strategy is employed. The Hessian of the augmented Lagrangian function is approximated using partitioned secant updating. Each function contributing to the Lagrangian is individually approximated by a secant update and the augmented Lagrangian Hessian is formed by appropriate accumulation. The performance of the algorithm is evaluated for a number of different secant updates on standard explicit and truss sizing optimization problems. The results show the formulation to be superior to other implementations of augmented Lagrangian methods reported in the literature and that, under certain conditions, the method approaches the performance of the state-of-the-art SQP and SAM methods. 
Of the secant updates, the symmetric rank one update is superior to the other updates including the BFGS scheme. It is suggested that the individual function, secant updating employed may be usefully applied in contexts where structural analysis and optimization are performed simultaneously, as in the simultaneous analysis and design method. In such cases the functions are partially separable and the associated Hessians are of low rank.}, summary = {An augmented Lagrangian algorithm is formulated for the optimal sizing design of truss structures. The scheme is globally convergent. The bound constrained minimizations are performed using the {\sf SBMIN} trust-region algorithm. The Hessian of the augmented Lagrangian is approximated using partitioned secant updating. The performance of the algorithm is evaluated for different secant updates on standard explicit and truss sizing optimization problems. The results show the formulation to be superior to other implementations of augmented Lagrangian methods and that the method may approach the performance of the state-of-the-art SQP and SAM methods. Of the secant updates, the SR1 update is superior to the other updates including the BFGS scheme. Secant updating may be usefully applied in contexts where structural analysis and optimization are performed simultaneously, as in the simultaneous analysis and design method. In such cases the functions are partially separable.}} @book{CottPangSton92, author = {R. W. Cottle and J.-S. Pang and R. E. Stone}, title = {The Linear Complementarity Problem}, publisher = AP, address = AP-ADDRESS, year = 1992} @article{Cour43, author = {R. 
Courant}, title = {Variational methods for the solution of problems of equilibrium and vibrations}, journal = {Bulletin of the American Mathematical Society}, volume = 49, pages = {1--23}, year = 1943} @manual{CPLEX98, author = {{CPLEX 6.0}}, title = {High-performance linear, integer and quadratic programming software}, organization = {ILOG SA}, address = {Gentilly, France}, year = 1998} @book{Crye82, author = {C. W. Cryer}, title = {Numerical Functional Analysis}, publisher = OUP, address = OUP-ADDRESS, year = 1982} @article{CullWill80, author = {J. Cullum and R. A. Willoughby}, title = {The {L}anczos phenomenon---an interpretation based upon conjugate gradient optimization}, journal = LAA, volume = 29, pages = {63--90}, year = 1980} % abstract = {The equivalence in exact arithmetic of the Lanczos % tridiagonalization procedure and the conjugate gradient % optimization procedure for solving Ax=b, where A is a real % symmetric, positive definite matrix, is well known. % We demonstrate that a relaxed equivalence is valid in the % presence of errors. Specifically we demonstrate that local % $\epsilon$-orthonormality of the Lanczos vectors guarantees % local $\epsilon$-A-conjugacy of the direction vectors in the % associated conjugate gradient procedure. Moreover we % demonstrate that all the conjugate gradient relationships % are satisfied approximately. Therefore, any statements valid % for the conjugate gradient optimization procedure, which we % show converges under very weak conditions, apply directly to % the Lanczos procedures. 
We then use this equivalence to % obtain an explanation of the Lanczos phenomenon: the % empirically observed "convergence" of Lanczos eigenvalue % procedures despite total loss of the global orthogonality of % the Lanczos vectors.}, % summary = {It is demonstrated that there is a one sided % $\epsilon$-equivalence relationship between the practical % Lanczos tridiagonalizarion procedure and the conjugate % gradient optimization procedure for solving Ax=b. This % equivalence allows the exploitation of properties of the % optimization procedure to provide a plausible mechanism for % explaining the observed Lanczos phenomenon. The arguments % required an assumption on the variation in the ratios of the % norms of the residuals that seems to occur in practice, % although no proof that it occurs in general was presented. % This equivalence was also used to provide, for positive % definite matrices, a convergence argument for the SYMMLQ % Lanczos procedure for solving systems of equations developed % in \citebb{PaigSaun75} and for the related procedure for % computing elements of A-1 used in \citebb{KaplGray76}} @article{CurtPoweReid74, author = {A. R. Curtis and M. J. D. Powell and J. K. Reid}, title = {On The Estimation of Sparse {J}acobian Matrices}, journal = JIMA, volume = 13, pages = {117--119}, year = 1974} %%% D %%% @article{Dafe80, author = {S. Dafermos}, title = {Traffic equilibrium and variational inequalities}, journal = {Transportation Science}, volume = 14, pages = {42--54}, year = 1980} @article{Dani67a, author = {J. W. Daniel}, title = {The conjugate gradient method for linear and nonlinear operator equations}, journal = SINUM, volume = 4, pages = {10--25}, year = 1967} @article{Dani67b, author = {J. W. Daniel}, title = {Convergence of the conjugate gradient method with computationally convenient modifications}, journal = NUMMATH, volume = 10, pages = {125--131}, year = 1967} @book{Dant63, author = {G. B. 
Dantzig}, title = {Linear Programming and Extensions}, publisher = {Princeton University Press}, address = {Princeton, USA}, year = 1963} @techreport{Das96, author = {I. Das}, title = {An interior point algorithm for the general nonlinear programming problem with trust region globalization}, institution = ICASE, address = ICASE-ADDRESS, number = {96--61}, year = 1996, abstract = {This paper presents an SQP-based interior point technique for solving the general nonlinear programming problem using trust region globalization and the \citebb{ColeLi96b} scaling. The SQP subproblem is decomposed into a normal and a reduced tangential subproblem in the tradition of numerous works on equality constrained optimization, and strict feasibility is maintained with respect to the bounds. This is intended to be an extension of previous work by \citebb{ColeLi96b} and \citebb{Vice96}. Though no theoretical proofs of convergence are provided, some computational results are presented which indicate that this algorithm holds promise. The computational experiments have been geared towards improving the semi-local convergence of the algorithm; in particular high sensitivity of the speed of convergence with respect to the fraction of the trust region radius allowed for the normal step and with respect to the initial trust region radius are observed. The chief advantages of this algorithm over primal-dual interior point algorithms are better handling of the `sticking problem' and a reduction in the number of variables by elimination of the multipliers of bound constraints.}, summary = {An SQP-based interior-point technique is presented for solving the general nonlinear programming problem using trust-region globalization and the \citebb{ColeLi96b} scaling. The SQP subproblem is decomposed into a normal and a reduced tangential subproblem, and strict feasibility is maintained with respect to the bounds.
Computational experiments have been geared towards improving the semi-local convergence of the algorithm; in particular high sensitivity of the speed of convergence with respect to the fraction of the trust region radius allowed for the normal step and with respect to the initial trust region radius are observed. The chief advantages of this algorithm over primal-dual interior-point algorithms are better handling of the `sticking problem' and a reduction in the number of variables by elimination of the multipliers of bound constraints.}} @article{Davi68, author = {W. C. Davidon}, title = {Variance algorithms for minimization}, journal = COMPJ, volume = 10, pages = {406--410}, year = 1968} @article{Davi75, author = {W. C. Davidon}, title = {Optimally Conditioned Optimization Algorithms Without Line Searches}, journal = MP, volume = 9, number = 1, pages = {1--30}, year = 1975} @article{Dean92, author = {E. J. Dean}, title = {A Model Trust-Region Modification of {N}ewton Method for Nonlinear 2-Point Boundary-Value-Problems}, journal = {Journal of Optimization Theory and Applications}, volume = 75, number = 2, pages = {297--312}, year = 1992, abstract = {The method of quasilinearization for nonlinear two-point boundary-value problems is Newton's method for a nonlinear differential operator equation. A model trust-region approach to globalizing the quasilinearization algorithm is presented. A double-dogleg implementation yields a globally convergent algorithm that is robust in solving difficult problems.}, summary = {The method of quasilinearization for nonlinear two-point boundary- value problems is Newton's method for a nonlinear differential operator equation. A model trust-region approach to globalizing the quasilinearization algorithm is presented. A double-dogleg implementation yields a globally convergent algorithm that is robust in solving difficult problems.}} @article{DeBoRon92, author = {De Boor, C. and A. 
Ron}, title = {Computational aspects of polynomial interpolation in several variables}, journal = MC, volume = 58, number = 198, pages = {705--727}, year = 1992} @techreport{DeLuFaccKanz95, author = {De Luca, T. and F. Facchinei and Ch. Kanzow}, title = {A semismooth approach to the solution of nonlinear complementarity problems}, institution = HAMBURG, address = HAMBURG-ADDRESS, number = 93, year = 1995} @article{DembEiseStei82, author = {R. S. Dembo and S. C. Eisenstat and T. Steihaug}, title = {Inexact-{N}ewton Methods}, journal = SINUM, volume = 19, number = 2, pages = {400--408}, year = 1982} @article{DembStei83, author = {R. S. Dembo and T. Steihaug}, title = {Truncated-{N}ewton Algorithms for Large-Scale Unconstrained Optimization}, journal = MP, volume = 26, pages = {190--212}, year = 1983} @techreport{DembTulo83, author = {R. S. Dembo and U. Tulowitzki}, title = {On the minimization of quadratic functions subject to box constraints}, institution = {Yale University}, address = {Yale, USA}, type = {School of Organization and Management Working paper }, number = {Series B no. 71}, year = 1983} @book{DemyMalo74, author = {V. F. Dem'yanov and V. N. Malozemov}, title = {Introduction to Minmax}, publisher = WILEY, address = WILEY-ADDRESS, year = 1974} @article{DengXiaoZhou93, author = {N. Deng and Y. Xiao and F. Zhou}, title = {Nonmonotonic Trust Region Algorithms}, journal = JOTA, volume = 76, number = 2, pages = {259--285}, year = 1993, abstract = {A non-monotonic trust region method for unconstrained optimization problems is presented. Although the method allows the sequence of values of the objective function to be non-monotonic, convergence properties similar to those for the usual trust region method are proved under certain conditions, including conditions on the approximate solutions to the subproblem. To make the solution satisfy these conditions, an algorithm to solve the subproblem is also established. 
Finally, some numerical results are reported which show that the non-monotonic trust region method is superior to the usual trust region method according to both the number of gradient evaluations and the number of function evaluations.}, summary = {A non-monotonic trust-region method for unconstrained optimization is presented, whose convergence properties are similar to those for the usual trust-region method under conditions including requirements on the approximate solutions to the subproblem. An algorithm to solve the subproblem is also presented and numerical results discussed.}} @inproceedings{Denn78, author = {J. E. Dennis}, title = {A brief introduction to quasi-{N}ewton methods}, booktitle = {Numerical Analysis}, editor = {G. H. Golub and J. Oliger}, publisher = AMS, address = AMS-ADDRESS, series = {Proceedings of Symposia in Applied Mathematics}, number = 22, pages = {19--52}, year = 1978} @article{DennElAlMaci97, author = {J. E. Dennis and M. El{-}Alem and M. C. Maciel}, title = {A global convergence theory for general Trust-Region based algorithms for equality constrained optimization}, journal = SIOPT, volume = 7, number = 1, pages = {177--207}, year = 1997, abstract = {A class of algorithms based on the successive quadratic programming method for solving the general nonlinear programming problem is presented. The objective function and the constraints of the problem are only required to be differentiable and their gradients to satisfy a Lipschitz condition. The strategy for obtaining global convergence is based on the trust-region approach. The merit function is an augmented Lagrangian. A new updating technique is introduced for the penalty parameter, by means of which monotone increase is not necessary. Global convergence results are proved and numerical experiments are presented.}, summary = {A class of algorithms based on the SQP method for general nonlinear programming is presented. 
The objective function and the constraints of the problem are only required to be differentiable and their gradients to satisfy a Lipschitz condition. Global convergence is obtained by using a trust-region approach. The merit function is an augmented Lagrangian. A possibly non-monotone updating technique is introduced for the penalty parameter. Global convergence results are proved and numerical experiments presented.}} @article{DennElAlWill99, author = {J. E. Dennis and M. El{-}Alem and K. A. Williamson}, title = {A trust-region approach to nonlinear systems of equalities and inequalities}, journal = SIOPT, volume = 9, number = 2, pages = {291--315}, year = 1999, abstract = {In this paper, two new trust-region algorithms for the numerical solution of systems of nonlinear equalities and inequalities are introduced. The formulation is free of arbitrary parameters and possesses sufficient smoothness to exploit the robustness of the trust-region approach. The proposed algorithms are one-sided least-squares trust-region algorithms. The first algorithm is a single model algorithm, and the second one is a multi-model algorithm where the Cauchy point computation is a model selection procedure. Global convergence analysis for the two algorithms are presented. Our analysis generalizes to nonlinear systems of equalities and inequalities the well-developed theory for nonlinear least-squares problems. Numerical experiments on the two algorithms are also presented. The performance of the two algorithm are reported. The numerical results validate the effectiveness of our approach.}, summary = {Two one-sided trust-region algorithms for the numerical solution of systems of nonlinear equalities and inequalities are introduced. The first is a single model algorithm, while the second uses multiple models with the Cauchy point computation being used as a model selection procedure.
Global convergence analysis is presented for both algorithms, and numerical experiments show their effectiveness.}} @article{DennEcheGuarMartScolVacc91, author = {J. E. Dennis and N. Echebest and M. T. Guardarucci and J. M. Mart\'{\i}nez and H. D. Scolnik and C. Vacchino}, title = {A Curvilinear Search using Tridiagonal Secant Updates for Unconstrained Optimization}, journal = SIOPT, volume = 1, number = 3, pages = {333--357}, year = 1991} @article{DennGayWels81, author = {J. E. Dennis and D. M. Gay and R. E. Welsch}, title = {An adaptive nonlinear least squares algorithm}, journal = TOMS, volume = 7, number = 3, pages = {348--368}, year = 1981, abstract = {NL2SOL is a modular program for solving nonlinear least-squares problems that incorporates a number of novel features. It maintains a secant approximation S to the second-order part of the least-squares Hessian and adaptively decides when to use this approximation. S is 'sized' before updating, something that is similar to Oren-Luenberger scaling. The step choice algorithm is based on minimizing a local quadratic model of the sum of squares function constrained to an elliptical trust region centered at the current approximate minimizer. This is accomplished using ideas discussed by More (1978) together with a special module for assessing the quality of the step thus computed. These and other ideas behind NL2SOL are discussed, and its evolution and current implementation are also described briefly.}, summary = {NL2SOL is a modular program for solving nonlinear least-squares problems. It maintains a sized secant approximation to the second-order part of the least-squares Hessian and adaptively decides when to use this approximation. The step choice algorithm is based on minimizing a local quadratic model of the sum of squares function constrained to an elliptical trust region.}} @article{DennHeinVice98, author = {J. E. Dennis and M. Heinkenschloss and L. N.
Vicente}, title = {Trust-region interior-point {SQP} algorithms for a class of nonlinear programming problems}, journal = SICON, volume = 36, number = 5, pages = {1750--1794}, year = 1998, abstract = {In this paper, a family of trust-region interior-point sequential quadratic programming (SQP) algorithms for the solution of a class of minimization problems with nonlinear equality constraints and simple bounds on some of the variables is described and analyzed. Such nonlinear programs arise, e.g., from the discretization of optimal control problems. The algorithms treat states and controls as independent variables. They are designed to take advantage of the structure of the problem. In particular they do not rely on matrix factorizations of the linearized constraints but use solutions of the linearized state equation and the adjoint equation. They are well suited for large scale problems arising from optimal control problems governed by partial differential equations. The algorithms keep strict feasibility with respect to the bound constraints by using an affine scaling method proposed, for a different class of problems, by \citebb{ColeLi96b} and they exploit trust-region techniques for equality-constrained optimization. Thus, they allow the computation of the steps using a variety of methods, including many iterative techniques. Global convergence of these algorithms to a first-order Karush- Kuhn-Tucker (KKT) limit point is proved under very mild conditions on the trial steps. Under reasonable, but more stringent, conditions on the quadratic model and on the trial steps, the sequence of iterates generated by the algorithms is shown to have a limit point satisfying the second-order necessary KKT conditions. The local rate of convergence to a nondegenerate strict local minimizer is q-quadratic. The results given here include, as special cases, current results for only equality constraints and for only simple bounds. 
Numerical results for the solution of an optimal control problem governed by a nonlinear heat equation are reported.}, summary = {Trust-region interior-point SQP algorithms are presented for solving minimization problems with nonlinear equality constraints and simple bounds. The algorithms treat states and controls as independent variables and take advantage of the structure of the problem. In particular they do not rely on matrix factorizations of the linearized constraints but use solutions of the linearized state equation and the adjoint equation. They are suited for large-scale problems arising from optimal control problems governed by partial differential equations. They keep strict feasibility with respect to the bound constraints by using an affine-scaling method inspired by \citebb{ColeLi96b} and they exploit trust-region techniques for equality-constrained optimization. They allow the computation of the steps using a variety of methods, including many iterative techniques. Global convergence is proved under very mild conditions on the trial steps. Under more stringent conditions on the quadratic model and on the trial steps, the iterates converge Q-quadratically to a limit point satisfying the second-order necessary conditions. Numerical results are reported for an optimal control problem governed by a nonlinear heat equation.}} @article{DennLiTapi95, author = {J. E. Dennis and S. B. B. Li and R. A. Tapia}, title = {A unified approach to global convergence of trust region methods for nonsmooth optimization}, journal = MP, volume = 68, number = 3, pages = {319--346}, year = 1995, abstract = {This paper investigates the global convergence of trust-region (TR) methods for solving nonsmooth minimization problems. For a class of nonsmooth objective functions called regular functions, conditions are found on the TR local models that imply three fundamental convergence properties.
These conditions are shown to be satisfied by appropriate forms of \citebb{Flet87}'s TR method for solving constrained optimization problems, \citebb{Powe83} and \citebb{Yuan83}'s TR method for solving nonlinear fitting problems, \citebb{DuffNoceReid87}'s TR method for solving systems of nonlinear equations, and \citebb{ElHaTapi93}'s TR method for solving systems of nonlinear equations. Thus our results can be viewed as a unified convergence theory for TR methods for nonsmooth problems.}, summary = {The global convergence of trust-region methods for non-smooth minimization is investigated. Conditions are found on the local models that imply three convergence properties for regular problems. These conditions are satisfied by appropriate forms of \citebb{Flet87}'s method for constrained optimization, \citebb{Powe83} and \citebb{Yuan83}'s method for solving nonlinear fitting problems, and \citebb{DuffNoceReid87}'s and \citebb{ElHaTapi93}'s methods for solving systems of nonlinear equations. The results may thus be viewed as a unified convergence theory for trust-region methods for non-smooth problems.}} @article{DennMei79, author = {J. E. Dennis and H. H. W. Mei}, title = {Two New Unconstrained Optimization Algorithms Which Use Function and Gradient Values}, journal = JOTA, volume = 28, number = 4, pages = {453--482}, year = 1979, abstract = {Two new methods for unconstrained optimization are presented. Both methods employ a hybrid direction strategy which is a modification of \citebb{Powe70c}'s dogleg strategy. They also employ a projection technique introduced by \citebb{Davi75} which uses projection images of $\Delta x$ and $\Delta g$ in updating the approximate Hessian. The first method uses Davidon's optimally conditioned update formula, while the second uses only the BFGS update. 
Both methods performed well without Powell's special iterations and singularity safeguards, and the numerical results are very promising.}, summary = {Two methods for unconstrained optimization are presented. They employ a hybrid direction strategy, which is a modification of \citebb{Powe70c}'s dogleg strategy, and a projection technique introduced by \citebb{Davi75} which uses projection images of $\Delta x$ and $\Delta g$ in updating the approximate Hessian. The first method uses Davidon's optimally conditioned update formula, while the second uses only the BFGS update. Both methods performed well without Powell's special iterations and singularity safeguards.}} @book{DennSchn83, author = {J. E. Dennis and R. B. Schnabel}, title = {Numerical Methods for Unconstrained Optimization and Nonlinear Equations}, publisher = PH, address = PH-ADDRESS, year = 1983, note = {Reprinted as \emph{Classics in Applied Mathematics 16}, SIAM, Philadelphia, USA, 1996}} @article{DennTorc91, author = {J. E. Dennis and V. Torczon}, title = {Direct Search Methods on Parallel Machines}, journal = SIOPT, volume = 1, number = 4, pages = {448--474}, year = 1991} @inproceedings{DennTorc97, author = {J. E. Dennis and V. Torczon}, title = {Managing Approximation Models in Optimization}, booktitle = {Multidisciplinary Design Optimization}, editor = {N. M. Alexandrov and M. Y. Hussaini}, publisher = SIAM, address = SIAM-ADDRESS, pages = {330--347}, year = 1997, abstract = {It is standard engineering practice to use approximation models in place of expensive simulations to drive an optimal design process based on nonlinear programming algorithms. This paper uses well-established notions from the literature on trust-region methods and a powerful global convergence theory for pattern search methods to manage the interplay between optimization and the fidelity of the approximation models to insure that the process converges to a reasonable solution of the original design problem. 
We present a specific example from the class of algorithms outlined here, but many other interesting options exist that we will explore in later work. The algorithm we present as an example of the management strategies we propose is based on a family of pattern search algorithms developed by the authors. Pattern search methods can be successfully applied when only ranking (ordinal) information is available and when derivatives are either unavailable or unreliable. Since we are interested here in using approximations to provide arguments for the objective function, our choice seems relevant. This work is in support of the Rice effort in collaboration with Boeing and IBM to look at the problem of designing helicopter rotor blades.}, summary = {It is standard engineering practice to use approximation models in place of expensive simulations to drive an optimal design process based on nonlinear programming algorithms. Known notions on trust-region methods and a global convergence theory for pattern search methods are used to manage the interplay between optimization and the fidelity of the approximation models to insure that the process converges to a reasonable solution of the original problem. The algorithm given as an example is based on the family of pattern search algorithms by \citebb{DennTorc91}, which can be successfully applied when only ranking (ordinal) information is available and when derivatives are either unavailable or unreliable.}} @inproceedings{DennVice96, author = {J. E. Dennis and L. N. Vicente}, title = {Trust Region Interior-Point Algorithms for minimization problems with simple bounds}, booktitle = {Applied Mathematics and Parallel Computing, Festschrift for Klaus Ritter}, editor = {H. Fischer and B. Riedm\"{u}ller and S.
Sch{\"a}ffler}, publisher = {Physica-Verlag, Springer-Verlag}, address = SPRINGER-ADDRESS, pages = {97--107}, year = 1996, abstract = {Two trust-region interior-point algorithms for the solution of minimization problems with simple bounds are presented. The algorithms scale the local model in a way proposed by \citebb{ColeLi96b}, but they are new otherwise. The first algorithm is more usual in that the trust region and the local quadratic model are consistently scaled. The second algorithm proposed here uses an unscaled trust region. A first-order convergence result for these algorithms is given and dogleg and conjugate-gradient algorithms to compute trial steps are introduced. Some numerical examples that show the advantages of the second algorithm are presented.}, summary = {Two trust-region interior-point algorithms for the solution of minimization problems with simple bounds are presented, that scale the local model as proposed by \citebb{ColeLi96b}. In the first, the trust region and the local quadratic model are consistently scaled. The second uses an unscaled trust region. A first-order convergence result is given and dogleg and conjugate-gradient algorithms to compute trial steps introduced. Numerical examples illustrate the advantages of the second algorithm.}} %also %institution = CAAM, address = RICE-ADDRESS, %number = {TR94-42}, year = 1994, @article{DennVice97, author = {J. E. Dennis and L. N. Vicente}, title = {On the Convergence Theory of Trust-Region Based Algorithms for Equality-Constrained Optimization}, journal = SIOPT, volume = 7, number = 4, pages = {927--950}, year = 1997, abstract = {In a recent paper, \citebb{DennElAlMaci97} developed a global first-order convergence theory for a general trust-region-based algorithm for equality-constrained optimization. This general algorithm is based on appropriate choices of trust-region subproblems and seems particularly suitable for large problems. 
This paper presents the global second-order convergence theory for the same general trust-region-based algorithm. The results given here can be seen as a generalization of the second-order convergence results for trust-region methods for unconstrained optimization obtained by \citebb{MoreSore83}. The behavior of the trust region radius and the local rate of convergence are analyzed. Some interesting facts concerning the trust-region subproblem for the linearized constraints, the quasi-normal component of the step and the hard case are presented. It is shown how these results can be applied to a class of discretized optimal control problems.}, summary = {A global second-order convergence theory is given for the algorithm of \citebb{DennElAlMaci97}, that generalizes second-order convergence results of \citebb{MoreSore83}. The behaviour of the trust-region radius and the local rate of convergence are analyzed. Some results concerning the trust-region subproblem for the linearized constraints, the quasi-normal component of the step and the hard case are presented. It is shown how these results can be applied to some discretized optimal control problems.}} %also %institution = CAAM, address = RICE-ADDRESS, %number = {TR94-36}, year = 1994, @inproceedings{DennWill88, author = {J. E. Dennis and K. A. Williamson}, title = {A new parallel optimization algorithm for parameter identification in ordinary differential equations}, booktitle = {Proceedings of the 27th IEEE Conference on Decision and Control}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 3, pages = {1836--1840}, year = 1988, abstract = {Standard approaches to the solution of the parameter identification problem in systems of ordinary differential equations are reviewed. An algorithm that is based on the \citebb{CeliDennTapi85} trust region algorithm for equality-constrained optimization problems is described. 
This algorithm should be both more efficient and more stable than standard solution techniques, and it also provides a flexible framework for introducing parallelism into the parameter identification problem.}, summary = {A variant of the \citebb{CeliDennTapi85} trust-region algorithm for equality-constrained optimization problems is described in the context of parameter identification in ordinary differential equations.}} @article{deSaYuanSun97, author = {de Sampaio, R. J. B. and J. Yuan and W. Sun}, title = {Trust region algorithm for nonsmooth optimization}, journal = {Applied Mathematics and Computation}, volume = 85, number = {2-3}, pages = {109--116}, year = 1997, abstract = {Minimization of a composite function $h(f(x))$ is considered here, where $f: \Re^n \rightarrow \Re^m$ is a locally Lipschitzian function, and $h: \Re^m \rightarrow \Re$ is a continuously differentiable convex function. Theory of trust region algorithm for nonsmooth optimization given by \citebb{Flet87}, and \citebb{PoweYuan90} is extended to this case. Trust region algorithm and its global convergence are studied. Finally, some applications on nonlinear and nonsmooth least squares problems are also given.}, summary = {Minimization of a composite function $h(f(x))$ is considered, where $f:\Re^n \rightarrow \Re^m$ is a locally Lipschitzian function, and $h: \Re^m \rightarrow \Re$ is a continuously differentiable convex function. The theory of trust region algorithms for non-smooth optimization given by \citebb{Flet87}, and \citebb{PoweYuan90} is extended to this case. A trust-region algorithm and its global convergence are studied. Some applications to nonlinear and non-smooth least-squares problems are given.}} @inproceedings{DeScDeMo97, author = {De Schutter, B. and De Moor, B.}, title = {The Extended Linear Complementarity Problem and Its Applications in the Max-Plus Algebra}, crossref = {FerrPang97}, pages = {22--39}} @article{DeufPotr92, author = {P. Deuflhard and F. A. 
Potra}, title = {Asymptotic Mesh Independence for {N}ewton--{G}alerkin Methods via a Refined {M}ysovskii Theorem}, journal = SINUM, volume = 29, number = 5, pages = {1395--1412}, year = 1992} @article{DiSun96, author = {S. Di and W. Sun}, title = {A trust region method for conic model to solve unconstrained optimization}, journal = OMS, volume = 6, number = 4, pages = {237--263}, year = 1996, abstract = {A trust region method for conic models to solve unconstrained optimization problems is proposed. We analyze the trust region approach for conic models and present necessary and sufficient conditions for the solution of the associated trust region subproblems. A corresponding numerical algorithm is developed and has been tested for 19 standard test functions in unconstrained optimization. The numerical results show that this method is superior to some advanced methods in the current software libraries. Finally, we prove that the proposed method has global convergence and Q-superlinear convergence properties.}, summary = {A trust-region method using conic models is proposed for solving unconstrained optimization problems. Necessary and sufficient conditions for the solution of the associated subproblems are given. The method is globally and Q-superlinearly convergent. Numerical experiments are reported.}} @techreport{DiniGomeSant98, author = {M. A. Diniz{-}Ehrhardt and M. A. Gomes{-}Ruggiero and S. A. Santos}, title = {Numerical analysis of leaving-face parameters in bound-constrained quadratic minimization}, institution = UNICAMP, address = UNICAMP-ADDRESS, number = {52/98}, year = 1998, abstract = {In this work, we focus our attention on the quadratic subproblem of trust-region algorithms for large-scale bound-constrained minimization. An approach that combines a mild active set strategy with gradient projection techniques is employed in the solution of large-scale bound-constrained quadratic problems. 
To fill in some gaps that have appeared in previous work, we propose, test and analyze heuristics which dynamically choose the parameters in charge of the decision of leaving or not the current face of the feasible set. The numerical analysis is based on problems from CUTE collection and randomly generated convex problems with controlled conditioning and degeneracy. The practical consequences of an appropriate decision of such parameters are shown to be crucial, particularly when dual degenerate and ill-conditioned problems are solved.}, summary = {The problem of leaving the current face of the feasible domain in a combined active-set and gradient-projection method for large-scale bound-constrained quadratic problems is considered. Heuristics which dynamically choose the parameters in charge of the decision are proposed, tested and analyzed. The practical consequences of an appropriate choice of such parameters are crucial for dual-degenerate and ill-conditioned problems.}} @article{DiniGomeSant98b, author = {M. A. Diniz{-}Ehrhardt and M. A. Gomes{-}Ruggiero and S. A. Santos}, title = {Comparing the numerical performance of two trust-region algorithms for large-scale bound-constrained minimization}, journal = {Investigaci\'{o}n Operativa}, note = {To appear.}, year = 1998, abstract = {In this work we compare the numerical performance of the software BOX-QUACAN with the package LANCELOT. We put BOX-QUACAN in a context by means of solving an extensive set of problems, so that specific features of both approaches are compared. Through the computational tests, conclusions are made about the classes of problems for which each algorithm suits better and ideas for future research are devised.}, summary = {The numerical performance of the BOX-QUACAN and {\sf LANCELOT} software packages are compared on an extensive set of problems. Conclusions are drawn about the classes of problems for which each package performs better.}} @article{DiPiFaccGrip92, author = {Di Pillo, G. 
and F. Facchinei and L. Grippo}, title = {An {RQP} algorithm using a differentiable exact penalty function for inequality constrained problems}, journal = MP, volume = 55, number = 1, pages = {49--68}, year = 1992} @article{DipiGrip85, author = {Di Pillo, G. and L. Grippo}, title = {A continuously differentiable exact penalty-function method for nonlinear programming with inequality constraints}, journal = SICON, volume = 23, number = 1, pages = {72--84}, year = 1985} @article{DipiGrip86, author = {Di Pillo, G. and L. Grippo}, title = {An exact penalty-function method with global convergence properties for nonlinear programming problems}, journal = MP, volume = 36, number = 1, pages = {1--18}, year = 1986} @article{Diki67, author = {I. I. Dikin}, title = {Iterative solution of problems of linear and quadratic programming}, journal = {Doklady Akademiia Nauk USSR}, volume = 174, pages = {747--748}, year = 1967} @book{DikiZork80, author = {I. I. Dikin and V. I. Zorkaltsev}, title = {Iterative Solutions of Mathematical Programming Problems}, publisher = {Nauka}, address = {Novosibirsk}, year = 1980} @article{DirkFerr95, author = {S. P. Dirkse and M. C. Ferris}, title = {The {PATH} Solver: a Non-Monotone Stabilization Scheme for Mixed Complementarity Problems}, journal = OMS, volume = 5, number = 2, pages = {123--156}, year = 1995} @phdthesis{Djan79, author = {A. Djang}, title = {Algorithmic equivalence in quadratic programming}, school = STANFORD, address = STANFORD-ADDRESS, year = 1979} @book{DongBuncMoleStew79, author = {J. J. Dongarra and J. R. Bunch and C. B. Moler and G. W. Stewart}, title = {{LINPACK} Users's guide}, publisher = SIAM, address = SIAM-ADDRESS, year = 1979} @book{DongDuffSoreVors98, author = {J. J. Dongarra and I. S. Duff and D. C. Sorensen and van der Vorst, H. A.}, title = {Numerical Linear Algebra for High-Performance Computers}, publisher = SIAM, address = SIAM-ADDRESS, year = 1998} @article{Dost97, author = {Z. 
Dost\'{a}l}, title = {Box Constrained Quadratic Programming with Proportioning and Projections }, journal = SIOPT, volume = 7, number = 3, pages = {871--887}, year = 1997} @article{DrusGreeKniz98, author = {V. Druskin and A. Greenbaum and L. Knizherman}, title = {Using nonorthogonal {L}anczos vectors in the computation of matrix functions}, journal = SISC, volume = 19, number = 1, pages = {38--54}, year = 1998} @book{DuffErisReid86, author = {I. S. Duff and A. M. Erisman and J. K. Reid}, title = {Direct Methods for Sparse Matrices}, publisher = OUP, address = OUP-ADDRESS, year = 1986} @article{DuffNoceReid87, author = {I. S. Duff and J. Nocedal and J. K. Reid}, title = {The use of linear programming for the solution of sparse sets of nonlinear equations}, journal = SISSC, volume = 8, number = 2, pages = {99--108}, year = 1987, abstract = {In this paper, we propose a trust region algorithm for solving sparse sets of nonlinear equations. It is based on minimizing the $\ell_1$-norm of the linearized residual within an $\ell_{\infty}$-norm trust region, thereby permitting linear programming techniques to be easily applied. The new algorithm has sparsity advantages over the \citebb{Leve44}--\citebb{Marq63} algorithm}, summary = {A trust-region algorithm for solving sparse sets of nonlinear equations is proposed. It is based on minimizing the $\ell_1$-norm of the linearized residual within an $\ell_{\infty}$-norm trust region, thereby permitting linear programming techniques to be applied. The algorithm has sparsity advantages over the Levenberg-Morrison-Marquardt algorithm}} @inproceedings{Duff97, author = {I. S. Duff}, title = {Sparse numerical linear algebra: direct methods and preconditioning}, crossref = {DuffWats97}, pages = {27--62}} @article{DuffReid83b, author = {I. S. Duff and J. K. 
Reid}, title = {The multifrontal solution of indefinite sparse symmetric linear equations}, journal = TOMS, volume = 9, number = 3, pages = {302--325}, year = 1983} @article{DuffReid96b, author = {I. S. Duff and J. K. Reid}, title = {Exploiting zeros on the diagonal in the direct solution of indefinite sparse symmetric linear systems}, journal = TOMS, volume = 22, number = 2, pages = {227--257}, year = 1996} @article{DuffReidMunkNeil79, author = {I. S. Duff and J. K. Reid and N. Munksgaard and H. B. Neilsen}, title = {Direct solution of sets of linear equations whose matrix is sparse, symmetric and indefinite}, journal = JIMA, volume = 23, pages = {235--250}, year = 1979} @article{Dunn80, author = {J. C. Dunn}, title = {{N}ewton's method and the {G}oldstein step-length rule for constrained minimization problems}, journal = SICON, volume = 6, pages = {659--674}, year = 1980} @article{Dunn87, author = {J. C. Dunn}, title = {On the convergence of projected gradient processes to singular critical points}, journal = JOTA, volume = 55, pages = {203--216}, year = 1987} @article{DussFerlLema86, author = {J. P. Dussault and J. A. Ferland and B. Lemaire}, title = {Convex quadratic programming with one constraint and bounded variables}, journal = MP, volume = 36, number = 1, pages = {90--104}, year = 1986} %%% E %%% @article{EckeNiem75, author = {J. G. Ecker and R. D. Niemi}, title = {A dual method for quadratic programs with quadratic constraints}, journal = SIAPM, volume = 28, number = 3, pages = {568--576}, year = 1975, abstract = {A dual method is developed for minimizing a convex quadratic function of several variables subject to inequality constraints on the same type of function. The dual program is a concave maximization problem with constraints that are essentially linear. However, the dual objective function is not differentiable over the dual constraint region. 
The numerical difficulties associated with this nondifferentiability are circumvented by considering a sequence of dual programs via a modified penalty function technique that does not eliminate the dual constraints but does insure that they will all be active at optimality. A numerical example is included.}, summary = {A dual method is developed for minimizing a convex quadratic function of several variables subject to inequality constraints on the same type of function. The method solves a sequence of dual programs via a modified penalty function technique that does not eliminate the dual constraints but ensures that they will be active at optimality. A numerical example is included.}} @article{Ecks93, author = {J. Eckstein}, title = {Nonlinear Proximal Point Algorithms Using {B}regman Functions with Applications to Convex Programming}, journal = MOR, volume = 18, number = 1, pages = {202--226}, year = 1993} @article{Edlu97, author = {O. Edlund}, title = {Linear {M}-estimation with bounded variables}, journal = {BIT}, volume = 37, number = 1, pages = {13--23}, year = 1997, abstract = {A subproblem in the trust region algorithm for non-linear M-estimation by \citebb{EkblMads89} is to find the restricted step. It is found by calculating the M-estimator of the linearized model, subject to an $\ell_2$-norm bound on the variables. In this paper it is shown that this subproblem can be solved by applying \citebb{Hebd73} iterations to the minimizer of the Lagrangian function. The new method is compared with an Augmented Lagrange implementation.}, summary = {A subproblem in the trust-region algorithm for nonlinear M-estimation by \citebb{EkblMads89} is to find the restricted step, by calculating the M-estimator of the linearized model, subject to an $\ell_2$-norm bound on the variables. It is shown that this subproblem can be solved by applying \citebb{Hebd73} iterations to the minimizer of the Lagrangian function. 
The method is compared with an Augmented Lagrange implementation.}} @article{EdluEkblMads97, author = {O. Edlund and H. Ekblom and K. Madsen}, title = {Algorithms for non-linear {M}-estimation}, journal = {Computational Statistics}, volume = 12, number = 3, pages = {373--383}, year = 1997, abstract = {In non-linear regression, the least squares method is most often used. Since this estimator is highly sensitive to outliers in the data, alternatives have become increasingly popular during the last decades. We present algorithms for non-linear M-estimation. A trust region approach is used, where a sequence of estimation problems for linearized models is solved. In the testing we apply four estimators to ten non-linear data fitting problems. The test problems are also solved by the Generalized \citebb{Leve44}--\citebb{Marq63} method and standard optimization BFGS method. It turns out that the new method is in general more reliable and efficient.}, summary = {Algorithms for nonlinear M-estimation are presented. A trust-region approach is used, where a sequence of estimation problems for linearized models is solved. Numerical tests involving four estimators and ten non-linear data fitting problems are performed.}} @article{EdsbWedi95, author = {L. Edsberg and P. A. Wedin}, title = {Numerical tools for parameter-estimation in {ODE} systems}, journal = {Optimization Methods and Software}, volume = 6, number = 3, pages = {193--217}, year = 1995, abstract = {The numerical problem of estimating unknown parameters in systems of ordinary differential equations from complete or incomplete data is treated. A new numerical method for the optimization part, based on the Gauss-Newton method with a trust region approach to subspace minimization for the weighted nonlinear least squares problem, is presented. 
The method is implemented in the framework of a toolbox (called diffpar) in Matlab and several test problems from applications, giving non-stiff and stiff ODE-systems, are treated.}, summary = {The numerical problem of estimating unknown parameters in systems of ordinary differential equations from complete or incomplete data is treated. A numerical method for the optimization part is presented, based on the Gauss-Newton method with a trust-region approach to subspace minimization for the weighted nonlinear least-squares problem. The method is implemented in Matlab and several test problems from applications, giving non-stiff and stiff ODE-systems, are treated.}} @misc{EinaMads98, author = {H. Einarsson and K. Madsen}, title = {Cutting planes and trust-regions for nondifferentiable optimization}, howpublished = {Presentation at the International Conference on Nonlinear Programming and Variational Inequalities, Hong Kong}, year = 1998, abstract = {We discuss the problem of minimizing a nonsmooth function $f:\Re^n \rightarrow \Re$. $f$ is assumed to be continuous and piecewise smooth, and the number of smooth pieces is assumed to be finite. The bundle trust region method of \citebb{SchrZowe92}, is discussed. It is shown that in the neighbourhood of a minimizer the general function $f$ may be considered as a minimax function, and the relation between the cutting plane methods of \citebb{SchrZowe92} and the minimax trust region method of \citebb{Mads75} is discussed. Based on these ideas an iterative method for minimizing $f$ is proposed. The basic principles are the following. At each iteration $f$ is approximated by a piecewise linear minimax function which is intended to model the set of generalized gradients in the neighbourhood of the current iterate. Two trust regions are used: an inner $R_i$ in which the piecewise linear approximation to $f$ is found, and an outer $R_o$ in which the next tentative step is calculated. 
Initially the two trust regions are equal, but if the iterate is close to a kink (i.e.\ the intersection between two or more smooth pieces) then the inner trust region radius may be smaller. The method has been tested on a number of convex as well as nonconvex test problems, and the results are compared with those of \citebb{SchrZowe92}. Some of the test problems are of the minimax or $L_1$ type, and in these cases the new algorithm is compared with the dedicated methods of \citebb{Mads75} and Hald and Madsen (1985).}, summary = {A bundle trust-region method is proposed for the minimization of piecewise smooth functions. At each iteration $f$ is approximated by a piecewise linear minimax function that models the set of generalized gradients in the neighbourhood of the current iterate. Two trust regions are used: an inner one in which the piecewise linear approximation to the objective function is found, and an outer one in which the trial step is calculated. Initially the two radii are equal, but the inner radius may be smaller if the iterate is close to the intersection between smooth pieces. The method is tested on convex and non-convex test problems.}} @techreport{EiseWalk94, author = {S. C. Eisenstat and H. F. Walker}, title = {Choosing the forcing terms in an inexact {N}ewton method}, institution = {Dept of Mathematics and Statistics, Utah State University}, address = {Logan, USA}, number = {6/94/75}, year = 1994} @book{Eispack76, author = {B. T. Smith and J. M. Boyle and J. J. Dongarra and B. S. Garbow and Y. Ikebe and V. C. Klema and C. B. Moler}, title = {Matrix Eigensystem Routines---EISPACK Guide}, publisher = SPRINGER, address = SPRINGER-ADDRESS, year = 1976} @article{EkblMads89, author = {H. Ekblom and K. 
Madsen}, title = {Algorithms for non-linear {H}uber estimation}, journal = BIT, volume = 29, number = 1, pages = {60--76}, year = 1989, abstract = {The Huber criterion for data fitting is a combination of the $\ell_1$ and the $\ell_2$ criteria which is robust in the sense that the influence of ``wild'' data points can be reduced. The authors present a trust region and a Marquardt algorithm for Huber estimation in the case where the functions used in the fit are nonlinear. It is demonstrated that the algorithms converge under the usual conditions.}, summary = {A convergent Levenberg-Morrison-Marquardt method for nonlinear Huber estimation is presented.}} @techreport{ElAl88, author = {M. El{-}Alem}, title = {A Global Convergence Theory for a Class of Trust Region Algorithms for Constrained Optimization}, institution = CAAM, address = RICE-ADDRESS, number = {TR88-5}, year = 1988, abstract = {In this research, we present a trust region algorithm for solving the equality constrained optimization problem. This algorithm is a variant of the \citebb{CeliDennTapi85} algorithm. The augmented Lagrangian function is used as a merit function. A scheme for updating the penalty parameter is presented. The behavior of the penalty parameter is discussed. We present a global and local convergence analysis for this algorithm. We also show that under mild assumptions, in a neighborhood of the minimizer, the algorithm will reduce to the standard SQP algorithm; hence the local rate of convergence of SQP is maintained. Our global convergence theory is sufficiently general that it holds for any algorithm that generates steps that give at least a fraction of Cauchy decrease in the quadratic model of constraints.}, summary = {A variant of the \citebb{CeliDennTapi85} trust-region algorithm for equality constrained optimization is given. An augmented Lagrangian merit function is used, and a scheme for updating the penalty parameter presented. 
A global and local convergence analysis is given, showing that the algorithm reduces to the standard SQP algorithm in a neighborhood of the minimizer. The global convergence theory is sufficiently general that it holds for any algorithm that generates steps giving at least a fraction of Cauchy decrease in the quadratic model of constraints.}} @article{ElAl91, author = {M. El{-}Alem}, title = {A global convergence theory for the {D}ennis-{C}elis-{T}apia trust-region algorithm for constrained optimization}, journal = SINUM, volume = 28, number = 1, pages = {266--290}, year = 1991, abstract = {A global convergence theory for a class of trust-region algorithms for solving the equality constrained optimization problem is presented. This theory is sufficiently general that it holds for any algorithm that generates steps giving at least a fraction of Cauchy decrease in the quadratic model of the constraints, and that uses the augmented Lagrangian as a merit function. This theory is used to establish global convergence of the \citebb{CeliDennTapi85} algorithm with a different scheme for updating the penalty parameter. The behaviour of the penalty parameter is also discussed.}, summary = {A global convergence theory for a class of trust-region algorithms for equality constrained optimization is presented, that holds for any algorithm that generates steps giving at least a fraction of Cauchy decrease in the quadratic model of the constraints, and that uses the augmented Lagrangian as a merit function. This theory is used to establish global convergence of the \citebb{CeliDennTapi85} algorithm with a different scheme for updating the penalty parameter. The behaviour of the penalty parameter is also discussed.}} @article{ElAl95, author = {M. 
El{-}Alem}, title = {A Robust Trust-Region Algorithm with a Nonmonotonic Penalty Parameter Scheme for Constrained Optimization}, journal = SIOPT, volume = 5, number = 2, pages = {348--378}, year = 1995, abstract = {An algorithm for solving the problem of minimizing a non-linear function subject to equality constraints is introduced. This algorithm is a trust-region algorithm. In computing the trial step, a projected-Hessian technique is used that converts the trust-region subproblem to one similar to that of the unconstrained case. To force global convergence, the augmented Lagrangian is employed as a merit function. One of the main advantages of this algorithm is the way that the penalty parameter is updated. We introduce an updating scheme that allows (for the first time to the best of our knowledge) the penalty parameter to be decreased whenever it is warranted. The behaviour of this penalty parameter is studied. A convergence theory for this algorithm is presented. It is shown that this algorithm is globally convergent and that the globalization strategy will not disrupt fast local convergence. The local rate of convergence is also discussed. This theory is sufficiently general that it holds for any algorithm that generates steps whose normal component give at least a fraction of the Cauchy decrease in the quadratic model of the constraints and uses \citebb{Flet70b}'s exact penalty function as a merit function.}, summary = {A trust-region algorithm for nonlinear optimization subject to equality constraints is introduced. In computing the trial step, a projected-Hessian technique converts the trust-region subproblem to one similar to that of the unconstrained case. To force global convergence, the augmented Lagrangian is employed as a merit function. An updating scheme that allows the penalty parameter to be decreased whenever it is warranted is proposed, and its behaviour is studied. 
It is shown that this algorithm is globally convergent and that the globalization strategy does not disrupt fast local convergence. The local rate of convergence is also discussed. This theory is sufficiently general that it holds for any algorithm that generates steps whose normal component give at least a fraction of the Cauchy decrease in the quadratic model of the constraints and uses \citebb{Flet70b}'s exact penalty function as a merit function.}} @article{ElAl95b, author = {M. El{-}Alem}, title = {Global convergence without the assumption of linear independence for a trust-region algorithm for constrained optimization}, journal = JOTA, volume = 87, number = 3, pages = {563--577}, year = 1995, abstract = {A trust-region algorithm for solving the equality constrained optimization problem is presented. This algorithm uses the Byrd and \citebb{Omoj89} way of computing the trial steps, but it differs from the Byrd and Omojokun algorithm in the way steps are evaluated. A global convergence theory for this new algorithm is presented. The main feature of this theory is that the linear independence assumption on the gradients of the constraints is not assumed.}, summary = {A trust-region algorithm for solving the equality constrained optimization problem is presented. This algorithm uses the Byrd and \citebb{Omoj89} mechanism for computing the trial steps, but it differs from this algorithm in the way steps are evaluated. Global convergence is proved without assuming linear independence of the constraints' gradients.}} @article{ElAl96, author = {M. El{-}Alem}, title = {Convergence to a 2nd order point of a trust-region algorithm with nonmonotonic penalty parameter for constrained optimization}, journal = JOTA, volume = 91, number = 1, pages = {61--79}, year = 1996, abstract = {In a recent paper, the author proposed a trust-region algorithm for solving the problem of minimizing a nonlinear function subject to a set of equality constraints. 
The main feature of the algorithm is that the penalty parameter in the merit function can be decreased whenever it is warranted. He studied the behavior of the penalty parameter and proved several global and local convergence results. One of these results is that there exists a subsequence of the iterates generated by the algorithm that converges to a point that satisfies the first-order necessary conditions. In the current paper, we show that, for this algorithm, there exists a subsequence of iterates that converges to a point that satisfies both the first-order and the second-order necessary conditions.}, summary = {It is shown that a subsequence of iterates produced by the trust-region algorithm of \citebb{ElAl95} converges to a point that satisfies both the first- and second-order necessary conditions.}} %also %institution = CRPC, address = RICE-ADDRESS, %number = {CRPC-TR96654}, year = 1996, @article{ElAl99, author = {M. El{-}Alem}, title = {A Global Convergence Theory for a General Class of Trust-Region-Based Algorithms for Constrained Optimization Without Assuming Regularity}, journal = SIOPT, volume = 9, number = 4, pages = {965--990}, year = 1999, abstract = {This work presents a convergence theory for a general class of trust-region-based algorithms for solving the smooth nonlinear programming problem with equality constraints. The results are proved under very mild conditions on the quasi-normal and tangential components of the trial steps. The Lagrange multiplier estimates and the Hessian estimates are assumed to be bounded. In addition, the regularity assumption is not made. In particular, the linear independence of the gradients of the constraints is not assumed. The theory proves global convergence to one of four different types of Mayer-Bliss stationary points. 
The theory holds for any algorithm that uses the augmented Lagrangian as a merit function, the \citebb{ElAl95} scheme for updating the penalty parameter, and bounded multiplier and Hessian estimates.}, summary = {A convergence theory is presented for a general class of trust-region algorithms for solving the smooth nonlinear programming problem with equality constraints. The results are proved under very mild conditions on the quasi-normal and tangential components of the trial steps. The Lagrange multiplier estimates and the Hessian estimates are assumed to be bounded. In addition, no regularity assumption, such as linear independence of the constraints' gradients, is made. The theory proves global convergence to one of four different types of Mayer-Bliss stationary points, and holds for any algorithm that uses the augmented Lagrangian as a merit function, the \citebb{ElAl95} scheme for updating the penalty parameter, and bounded multiplier and Hessian estimates.}} %also %institution = CRPC, address = RICE-ADDRESS, %number = {CRPC-TR96655}, year = 1996, @techreport{ElAl96c, author = {M. El{-}Alem}, title = {A Strong Global Convergence Result for {D}ennis, {E}l-{A}lem, and {M}aciel's Class of Trust Region Algorithms}, institution = CAAM, address = RICE-ADDRESS, number = {TR96-15}, year = 1996, abstract = {In a recent paper, \citebb{DennElAlMaci97} suggested a class of trust-region-based algorithms for solving the equality constrained optimization problem. They established a global convergence result that is analogous to \citebb{Powe75}'s result for the unconstrained optimization problem. In this paper, a global convergence theory for \citebbs{DennElAlMaci97} class of algorithms is presented. The theory is analogous to \citebb{Thom75}'s result for unconstrained optimization. In particular, it proves that every accumulation point of the sequence of iterates generated by any member of \citebbs{DennElAlMaci97} class of algorithms is a first-order point. 
In other words, the sequence of iterates converges to the set of first-order points of the problem. To the best of our knowledge, the global convergence result presented in this paper generalizes all existing global convergence theories for trust region algorithms that are suggested for solving the equality constrained optimization problem and use the augmented Lagrangian as a merit function.}, summary = {A global convergence theory for \citebb{DennElAlMaci97}'s class of algorithms is presented, which is analogous to \citebb{Thom75}'s result for unconstrained optimization. In particular, every accumulation point of the sequence of iterates is a first-order stationary point. This result generalizes all current global convergence theories for trust-region algorithms that have been suggested for solving the equality constrained optimization problem and use the augmented Lagrangian as a merit function.}} %also %institution = CRPC, address = RICE-ADDRESS, %number = {CRPC-TR96656}, year = 1996, @article{ElAlTapi95, author = {M. El{-}Alem and R. A. Tapia}, title = {Numerical Experience with a polyhedral-norm {CDT} trust-region algorithm}, journal = JOTA, volume = 85, number = 3, pages = {575--591}, year = 1995, abstract = {In this paper, we study a modification of the \citebb{CeliDennTapi85} trust-region subproblem, which is obtained by replacing the $l_2$-norm with a polyhedral norm. The polyhedral norm Celis-Dennis-Tapia (CDT) subproblem can be solved using a standard quadratic programming code. We include computational results which compare the performance of the polyhedral-norm CDT trust-region algorithm with the performance of existing codes. The numerical results validate the effectiveness of the approach. These results show that there is not much loss of robustness or speed and suggest that the polyhedral-norm CDT algorithm may be a viable alternative.
The topic merits further investigation.}, summary = {A modification of the \citebb{CeliDennTapi85} (CDT) trust-region subproblem, which is obtained by replacing the $l_2$-norm with a polyhedral norm, is studied. The polyhedral norm CDT subproblem can be solved using a standard quadratic programming code. Computational results which compare the performance of the polyhedral-norm CDT trust-region algorithm with the performance of existing codes are given.}} @article{ElBa98, author = {A. S. El{-}Bakry}, title = {Convergence rate of primal-dual reciprocal barrier {N}ewton interior-point methods}, journal = OMS, volume = 9, number = {1--3}, pages = {37--44}, year = 1998} @techreport{ElHa87, author = {M. El{-}Hallabi}, title = {A Global Convergence Theory for Arbitrary Norm Trust Region Methods for Nonlinear Equations}, institution = CAAM, address = RICE-ADDRESS, number = {TR87-5}, year = 1987, abstract = {In this research we extend the \citebb{Leve44}--\citebb{Marq63} algorithm for approximating zeros of the nonlinear system $F(x)=0$, where $F$ is continuously differentiable from $\Re^n$ to $\Re^n$. Instead of the $\ell_2$-norm, arbitrary norms can be used in the objective function and in the trust region constraint. The algorithm is shown to be globally convergent. This research was motivated by the recent work of \citebb{DuffNoceReid87}. A key point in our analysis is that the tools from nonsmooth analysis, namely locally Lipschitz analysis, allow us to establish essentially the same properties for our algorithm that have been established for the \citebb{Leve44}--\citebb{Marq63} algorithm using the tools from smooth optimization. In our analysis, the sequence generated by the algorithm is the couple $(x_k, \delta_k)$ where $x_k$ is the iterate and $\delta_k$ the trust region radius. Since the successor $(x_{k+1}, \delta_{k+1})$ of $(x_k,\delta_k)$ is not unique we model our algorithm by a point-to-set map and then apply Zangwill's theorem of convergence to our case. 
It is shown that our algorithm reduces locally to Newton's method.}, summary = {The Levenberg-Morrison-Marquardt algorithm for approximating zeros of the nonlinear system $F(x)=0$ is generalized to allow the use of arbitrary norms in the objective function and in the trust region constraint. The algorithm, which is motivated by that of \citebb{DuffNoceReid87}, is globally convergent. Essentially the same properties apply for the general and for the Levenberg-Morrison-Marquardt algorithm. In this analysis, the sequence generated is the couple $(x_k, \Delta_k)$ where $x_k$ is the iterate and $\Delta_k$ the trust region radius. Since the successor $(x_{k+1}, \Delta_{k+1})$ of $(x_k,\Delta_k)$ is not unique the algorithm is modelled by a point-to-set map and then Zangwill's convergence theorem is applied. The algorithm locally reduces to Newton's method.}} @techreport{ElHa90, author = {M. El{-}Hallabi}, title = {A Global Convergence Theory for A Class of Trust-Region Methods for Nonsmooth Optimization.}, institution = CAAM, address = RICE-ADDRESS, number = {TR90-10}, year = 1990, abstract = {In this work we define a class of trust-region algorithms for approximating a minimizer of the function $f=h(F)$ where $F: \Re^n \rightarrow \Re^m$ is continuously differentiable and $h:\Re^m \rightarrow \Re$ is regular locally Lipschitz. We show that algorithms from this class are globally convergent. Our analysis is a generalization of the recent work of \citebb{ElHaTapi93} and can be applied to most algorithms in the literature. Our algorithms are a natural generalization of those for smooth minimization to nonsmooth optimization.}, summary = {A class of trust-region algorithms is defined for approximating a minimizer of the function $f=h(F)$ where $F: \Re^n \rightarrow \Re^m$ is continuously differentiable and $h:\Re^m \rightarrow \Re$ is regular locally Lipschitz. Algorithms from this class are globally convergent.
The analysis is a generalization of that given by \citebb{ElHaTapi93} and can be applied to most algorithms in the literature. The algorithms are a natural generalization of those for smooth minimization to non-smooth optimization.}} @techreport{ElHa93, author = {M. El{-}Hallabi}, title = {An Inexact Minimization Trust-Region Algorithm: globalization of {N}ewton's Method}, institution = CAAM, address = RICE-ADDRESS, number = {TR93-43}, year = 1993, abstract = {In this work we define a trust region algorithm for approximating zeros of the nonlinear system $F(x)=0$, where $F: \Re^n \rightarrow \Re^n$ is continuously differentiable. We are concerned with the fact that $n$ may be large. So we replace the $\ell_2$ norm with arbitrary norms in the objective function and in the trust region constraint. In particular, if polyhedral norms are used, then the algorithm can be viewed as a sequential linear programming algorithm. At each iteration, the local trust-region model is only solved within some tolerance. This research is an extension of \citebb{ElHaTapi93} for nonlinear equations, where an exact solution of the local model was required. We demonstrate that the algorithm under consideration is globally convergent, and that, under mild assumptions, the iteration sequence generated by the algorithm converges to a solution of the nonlinear system. We also demonstrate that, under the standard assumptions for Newton's method theory, the rate of convergence is $q$-superlinear. Moreover, quadratic convergence can be obtained by requiring sufficient accuracy in the solution of the local model.}, summary = {A trust-region algorithm generalizing that of \citebb{ElHaTapi93} is given, in which the trust-region subproblem can be solved approximately. The algorithm considered is globally convergent and its rate of convergence is $Q$-superlinear.
Quadratic convergence can be obtained by requiring sufficient accuracy in the solution of the local model.}} @techreport{ElHa99, author = {M. El{-}Hallabi}, title = {Globally Convergent Multi-Level Inexact Hybrid Algorithm for Equality Constrained Optimization}, institution = {D\'{e}partement Informatique et Optimisation, Institut National des Postes et T\'{e}l\'{e}communications}, address = {Rabat, Morocco}, number = {RT11-98(revised)}, year = 1999, abstract = {Trust-region globalization strategies have proved to be powerful tools to design globally convergent algorithms, but at the cost of allowing the local model to be solved more than once at each iteration. On the other hand, beside their poor global convergence properties, linesearch strategies are quite popular for their low cost of obtaining an acceptable steplength whenever a search direction is provided. In this paper we aim to combine both strategies in a globally convergent multi-level inexact hybrid algorithm to minimize a continuously differentiable nonlinear function $f: \Re^n \rightarrow \Re$ subject to equality constraint $h_i(x)=0$, $i=1,\ldots,m$ where $h_i:\Re^n \rightarrow \Re$ are continuously differentiable. First, the trust-region approach is used to determine a trial step that is shown to be a descent direction of the merit function, and second, linesearch techniques are used to obtain an acceptable steplength in such a direction. We prove that the hybrid algorithm is globally convergent in the sense that any accumulation point of the iteration sequence is a Karush-Kuhn-Tucker point of the minimization problem. In our algorithm, the curvature of the local model is taken into account first, to obtain the trial step direction and second, to accept or reject the steplength. Both tests are less conservative than the usual ones.
Also, we prove that the penalty parameter is uniformly bounded away from zero at nonstationary points. Instead of forcing the trust-region radius to be initialized, at each iteration, as large as some given positive $\delta_{\min}$, we show that the internal trust-region increasing strategy, a technique quite popular for preventing large numbers of gradient and Hessian evaluations, yields the same important property. Furthermore, we show that the steplength is bounded away from zero at nonstationary points. We do not use the regularity assumption of linearly independent gradients. On the other hand, we assume that $h(x_k)$ does not belong to the nullspace of $\nabla h(x_k)$ for non feasible iterates. Moreover, we assume that if $\{x_k\mid k \in N\subset\calN\}$ is a subsequence converging to some accumulation point of the iteration sequence, say $x_*$, then the normalized constraint vector is uniformly bounded away from the nullspace of $\nabla h(x_k)$.}, summary = {The combination of linesearch and trust-region techniques is investigated in the context of problems with equality constraints. Beneficial effects of internal doubling in this context are also discussed, together with an alternative expression of constraint qualification.}} @techreport{ElHaTapi93, author = {M. El{-}Hallabi and R. A. Tapia}, title = {A Global Convergence Theory for Arbitrary Norm Trust-Region Methods for Nonlinear Equations}, institution = CAAM, address = RICE-ADDRESS, number = {TR93-41}, year = 1993, abstract = {In this work, we extend the Levenberg-Marquardt algorithm for approximating zeros of the nonlinear system $F(x)=0$, where $F: \Re^n \rightarrow \Re^n$ is continuously differentiable. Instead of the $\ell_2$ norm, arbitrary norms can be used in the trust-region constraint. The algorithm is shown to be globally convergent. This research is motivated by the recent work of \citebb{DuffNoceReid87}.
A key point in our analysis is that the tools from nonsmooth analysis and the Zangwill convergence theory allow us to establish essentially the same properties for an arbitrary trust-region algorithm that have been established for the Levenberg-Marquardt algorithm using the tools from smooth optimization. It is shown that all members of this class of algorithms locally reduce to Newton's method and that the iteration sequence actually converges to a solution.}, summary = {The Levenberg-Morrison-Marquardt algorithm for approximating zeros of the nonlinear system $F(x)=0$, where $F: \Re^n \rightarrow \Re^n$ is continuously differentiable, is extended. Arbitrary norms can be used in place of the $\ell_2$-norm for the trust-region constraint. The algorithm is globally convergent. This algorithm is motivated by the work of \citebb{DuffNoceReid87}. It locally reduces to Newton's method and the iteration sequence converges to a solution.}} @techreport{ElHaTapi95, author = {M. El{-}Hallabi and R. A. Tapia}, title = {An Inexact Trust-Region Feasible-Point Algorithm for Nonlinear Systems of Equalities and Inequalities}, institution = CAAM, address = RICE-ADDRESS, number = {TR95-09}, year = 1995, abstract = {In this work we define a trust-region feasible-point algorithm for approximating solutions of the nonlinear system of equalities and inequalities $F(x, y)=0, y \ge 0$, where $F: { \Re^n \times \Re^m } \rightarrow \Re^p$ is continuously differentiable. This formulation is quite general; the Karush-Kuhn-Tucker conditions of a general nonlinear programming problem are an obvious example, and a set of equalities and inequalities can be transformed, using slack variables, into such form. We will be concerned with the possibility that $n$, $m$ and $p$ may be large and that the Jacobian matrix may be sparse and rank deficient.
Exploiting the convex structure of the local model trust-region subproblem, we propose a globally convergent inexact trust-region feasible-point algorithm to minimize an arbitrary norm of the residual, say $\| F(x, y)\|_a$, subject to the nonnegativity constraints. This algorithm uses a trust-region globalization strategy to determine a descent direction as an inexact solution of the local model trust-region subproblem and then, it uses linesearch techniques to obtain an acceptable steplength. We demonstrate that, under rather weak hypotheses, any accumulation point of the iteration sequence is a constrained stationary point for $f=\|F\|_a$, and that the sequence of constrained residuals converges to zero.}, summary = {A feasible-point trust-region algorithm for approximating solutions of the nonlinear system of equalities and inequalities $F(x, y)=0, y \ge 0$, where $F: { \Re^n \times \Re^m } \rightarrow \Re^p$ is continuously differentiable, is considered. By exploiting the convex structure of the local trust-region subproblem, a globally convergent inexact trust-region feasible-point algorithm is suggested for minimizing an arbitrary norm of the residual, $\| F(x, y)\|_a$, subject to non-negativity constraints. This algorithm uses a descent direction which is an inexact solution of the trust-region subproblem and then uses linesearch techniques to obtain an acceptable steplength. It is shown that, under weak hypotheses, any accumulation point of the iteration sequence is a constrained stationary point for $f=\|F\|_a$, and that the sequence of constrained residuals converges to zero.}} @article{ElstNeum97, author = {C. Elster and A. 
Neumaier}, title = {A method of trust region type for minimizing noisy functions}, journal = {Computing}, volume = 58, number = 1, pages = {31--46}, year = 1997, abstract = {The optimization of noisy functions in a few variables only is a common problem occurring in various applications, for instance in finding the optimal choice of a few control parameters in chemical experiments. The traditional tool for the treatment of such problems is the method of \citebb{NeldMead65} (NM). In this paper, an alternative method based on a trust region approach (TR) is offered and compared to NM. On the standard collection of test functions for unconstrained optimization by \citebb{MoreGarbHill81}, TR performs substantially more robust than NM. If performance is measured by the number of function evaluations, TR is on the average twice as fast as NM.}, summary = {The optimization of noisy functions of a few variables is a commonly occurring problem in application areas such as finding the optimal choice of a few control parameters in chemical experiments. The traditional tool for the treatment of such problems is the method of \citebb{NeldMead65} (NM). An alternative method based on a trust-region approach (TR) is proposed and compared to NM. On a standard collection of test functions for unconstrained optimization by \citebb{MoreGarbHill81}, TR is substantially more robust than NM. If performance is measured by the number of function evaluations, TR is seen to be, on average, twice as fast as NM.}} @article{EskoSchn91, author = {E. Eskow and R. B. Schnabel}, title = {Algorithm 695: software for a new modified {C}holesky factorization}, journal = TOMS, volume = 17, number = 3, pages = {306--312}, year = 1991} @article{Evan68, author = {D. J. Evans}, title = {The use of pre-conditioning in iterative methods for solving linear equations with positive definite matrices}, journal = JIMA, volume = 4, pages = {295--314}, year = 1968} %%% F %%% @article{FaccLuci93, author = {F. 
Facchinei and S. Lucidi}, title = {Nonmonotone Bundle-Type Scheme for Convex Nonsmooth Minimization}, journal = JOTA, volume = 76, number = 2, pages = {241--257}, year = 1993} @inproceedings{FaccFiscKanz97, author = {F. Facchinei and A. Fischer and Ch. Kanzow}, title = {A Semismooth {N}ewton Method for Variational Inequalities: the Case of Box Constraints}, crossref = {FerrPang97}, pages = {76--90}} @article{FaccKanz97, author = {F. Facchinei and Ch. Kanzow}, title = {On unconstrained and constrained stationary points of the implicit {L}agrangian}, journal = JOTA, volume = 92, number = 1, pages = {99--115}, year = 1997} @article{FaccSoar97, author = {F. Facchinei and J. Soares}, title = {A new merit function for nonlinear complementarity problems and a related algorithm}, journal = SIOPT, volume = 7, number = 1, pages = {225--247}, year = 1997} @article{FaccJudiSoar98, author = {F. Facchinei and J. Judice and J. Soares}, title = {An active set {N}ewton algorithm for large-scale nonlinear programs with box constraints}, journal = SIOPT, volume = 8, number = 1, pages = { 158--186}, year = 1998, abstract = {A new algorithm for large-scale nonlinear programs with box constraints is introduced. The algorithm is based on an efficient identification technique of the active set at the solution and on a nonmonotone stabilization technique. It possesses global and superlinear convergence properties under standard assumptions. A new technique for generating test problems with known characteristics is also introduced. The implementation of the method is described along with computational results for large-scale problems.}, summary = {An algorithm for large-scale nonlinear programs with box constraints is introduced. The algorithm is based on an efficient identification technique of the active set at the solution and on a non-monotone stabilization technique. It possesses global and superlinear convergence properties. 
A technique for generating test problems with known characteristics is also introduced. The implementation of the method is described along with computational results for large-scale problems.}} @article{FanSarkLasd88, author = {Y. Fan and S. Sarkar and L. S. Lasdon}, title = {Experiments with successive quadratic programming algorithms}, journal = JOTA, volume = 56, number = 3, pages = {359--383}, year = 1988, abstract = {There are many variants of successive quadratic programming (SQP) algorithms. Important issues include: the choice of either line search or trust region strategies and the QP formulation to be used and how the QP is to be solved. The authors consider the QPs proposed by Fletcher and Powell and discuss a specialized reduced-gradient procedure for solving them. A computer implementation is described, and the various options are compared on some well-known test problems. Factors influencing robustness and speed are identified.}, summary = {Important issues in SQP methods include the choice of either linesearch or trust-region strategies and the QP formulation to be used and how the QP is to be solved. The QPs proposed by Fletcher and Powell are considered and a specialized reduced-gradient procedure discussed for solving them. The various options are compared on some well-known test problems.}} @article{FeiHuan98, author = {X. Fei and W. Huanchen}, title = {Integrated algorithm for bilevel nonsmooth optimization problems}, journal = {Journal of Shanghai Jiaotong University}, volume = 32, number = 12, pages = {115--119}, year = 1998, note = {(in Chinese)}, abstract = {This paper is concerned with a kind of 1 leader-$N$ followers bilevel nonsmooth optimization problems. An integrated algorithm is proposed which embeds adaptively DFP into the inner iteration of the trust region bundle method and makes the best use of the global convergence of the bundle method and the local fast convergence of the DFP. 
The Lipschitzian property of functions involved is researched. An approach of computing a subgradient of the objective functions of the problems is investigated. The basic idea and steps of the algorithm are discussed. Finally, the convergence analysis is given.}, summary = {A 1 leader-$N$ followers bilevel nonsmooth optimization problem is considered. A trust-region based bundle method in which appropriate generalized second derivatives are obtained using a DFP-like formula is given, which combines the global convergence properties of the bundle method with the fast local convergence properties resulting from the use of approximate second derivatives.}} @inproceedings{Felg97, author = {U. Felgenhauer}, title = {Algorithmic stability analysis for certain trust region methods}, booktitle = {Mathematical Programming with Data Perturbations}, editor = {A. V. Fiacco}, publisher = {Marcel Dekker, Inc.}, address = {New York and Basel}, series = {Lecture Notes in Pure and Applied Mathematics}, number = 195, pages = {109--131}, year = 1997, summary = {Quasi-Newton trust-region methods for unconstrained and bound-constrained optimization are proven to be robust with respect to errors in the gradient. Global convergence and active constraint identifications are proved under the assumption that this error is bounded by a multiple of the trust-region radius and that the model's Hessians are bounded and non-zero.}} @techreport{Feng98, author = {G. Feng}, title = {Trust-region method with simplicial decomposition for linearly constrained problems}, institution = {Department of Applied Mathematics, Tongji University}, address = {Shanghai, China}, number = {December, 17}, year = 1998, abstract = {For the nonlinear programming problems, in which the objective function is continuously differentiable, pseudo-convex and the feasible set is a nonempty polyhedron, we develop an algorithm of trust region method using simplicial decomposition.
The algorithm solves a linearly constrained problem in the subprogram and a master program iteratively. The subprogram is a linear programming similar to Frank-Wolfe linearization technique, but with a restricted stepsize, and produces feasible points defining simplices. The produced simplices are only subsets of the feasible region of the original programming. The master program is a trust region method on the produced simplex. According to the ratio of the actual and predicted reduction in the master program, we change the stepsize in the subproblem adaptively per iteration. The resulting algorithm is proved to be globally convergent. The advantage of the algorithm over the original trust region method is that the feasible region under consideration of the former is only a subset of the latter. But the algorithm must solve an additional linear programming problem which is relatively simpler.}, summary = {A trust-region method is presented for the solution of pseudoconvex optimization problems subject to linear constraints. The method uses restricted simplicial decomposition to produce successive simplices that are included in the feasible domain and a trust-region method is then employed to minimize the objective function on those simplices. }} @techreport{Feng99, author = {G. Feng}, title = {Combination of trust region method and simplicial decomposition for linearly constrained problems}, institution = {Department of Applied Mathematics, Tongji University}, address = {Shanghai, China}, number = {March}, year = 1999, abstract = {The algorithm given here incorporates the restricted decomposition algorithm (RSD) into the trust-region method (TR). The global convergence is proved. The advantage of the presented algorithm over RSD is that the former is exact and finite in every iteration. In comparison with TR the feasible set of the master problem in the presented algorithm is only the subset of that in TR and has no restriction on step.
Thus the former is much easier to solve than the latter.}, summary = {A variant of the method developed in \citebb{Feng98} is presented, where no restriction on the steplength is imposed on the master problem.}} @article{FerrPang97b, author = {M. C. Ferris and J. S. Pang}, title = {Engineering and Economic Applications of Complementarity Problems}, journal = SIREV, volume = 39, number = 4, pages = {669--713}, year = 1997} @techreport{FerrKanzMuns98, author = {M. C. Ferris and C. Kanzow and T. S. Munson}, title = {Feasible Descent Algorithms for Mixed Complementarity Problems}, institution = MADISON, address = MADISON-ADDRESS, type = {Mathematical Programming Technical Report}, number = {MP-TR-98-04}, year = 1998} @techreport{FerrZavr96, author = {M. C. Ferris and S. K. Zavriev}, title = {The linear convergence of a successive linear programming algorithm}, institution = MADISON, address = MADISON-ADDRESS, type = {Mathematical Programming Technical Report}, number = {MP-TR-96-12}, year = 1996, abstract = {We present a successive linear programming algorithm for solving constrained nonlinear optimization problems. The algorithm employs an Armijo procedure for updating a trust region radius. We prove the linear convergence of the method by relating the solutions of our subproblems to standard trust region and gradient projection subproblems and adapting an error bound analysis due to \citebb{LuoTsen93}. Computational results are provided for polyhedrally constrained nonlinear programs.}, summary = {A successive linear programming algorithm for solving constrained nonlinear optimization problems is presented, which uses an Armijo procedure for updating a trust region radius. Linear convergence of the method is proved by relating the solutions of the subproblems to standard trust-region and gradient projection subproblems and adapting an error bound analysis of \citebb{LuoTsen93}.
Computational results are provided for polyhedrally constrained nonlinear programs.}} @article{Fiac76, author = {A. V. Fiacco}, title = {Sensitivity analysis for nonlinear programming using penalty methods}, journal = MP, volume = 10, number = 3, pages = {287--311}, year = 1976} @book{Fiac83, author = {A. V. Fiacco}, title = {Introduction to Sensitivity and Stability Analysis in Nonlinear Programming}, publisher = AP, address = AP-ADDRESS, series = {Mathematics in Science and Engineering}, volume = 165, year = 1983} @techreport{FiacMcCo63, author = {A. V. Fiacco and G. P. McCormick}, title = {Programming under Nonlinear Constraints by Unconstrained Optimization: a Primal-Dual Method}, institution = {Research Analysis Corporation}, address = {McLean, Virginia, USA}, number = {RAC-TP-96}, year = 1963} @article{FiacMcCo64a, author = {A. V. Fiacco and G. P. McCormick}, title = {The Sequential Unconstrained Minimization Technique for Nonlinear Programming: a Primal-Dual Method}, journal = {Management Science}, volume = 10, number = 2, pages = {360--366}, year = 1964} @article{FiacMcCo64b, author = {A. V. Fiacco and G. P. McCormick}, title = {Computational Algorithm for the Sequential Unconstrained Minimization Technique for Nonlinear Programming}, journal = {Management Science}, volume = 10, number = 4, pages = {601--617}, year = 1964} @book{FiacMcCo68, author = {A. V. Fiacco and G. P. McCormick}, title = {Nonlinear Programming: Sequential Unconstrained Minimization Techniques}, publisher = WILEY, address = WILEY-ADDRESS, year = 1968, note = {Reprinted as \emph{Classics in Applied Mathematics 4}, SIAM, Philadelphia, USA, 1990}} @article{Fisc92, author = {A. Fischer}, title = {A special {N}ewton-type optimization method}, journal = {Optimization}, volume = 24, number = {3--4}, pages = {269--284}, year = 1992} @inproceedings{Fisc95, author = {A. 
Fischer}, title = {An {NCP}-function and its use for the solution of complementarity problems}, crossref = {DuQiWome95}, pages = {88--105}} @article{Flet70, author = {R. Fletcher}, title = {A New Approach to Variable Metric Algorithms}, journal = COMPJ, volume = 13, pages = {317--322}, year = 1970} @incollection{Flet70b, author = {R. Fletcher}, title = {A class of methods for nonlinear programming with termination and convergence properties}, booktitle = {Integer and nonlinear programming}, editor = {J. Abadie}, publisher = NH, address = NH-ADDRESS, pages = {157--175}, year = 1970} @techreport{Flet70c, author = {R. Fletcher}, title = {An efficient, globally convergent, algorithm for unconstrained and linearly constrained optimization problems}, institution = HARWELL, address = HARWELL-ADDRESS, number = {TP 431}, year = 1970, abstract = {An algorithm for minimization of functions of many variables, subject possibly to linear constraints on the variables, is described. In it a subproblem is solved in which a quadratic approximation is made to the object function and minimized over a region in which the approximation is valid. A strategy for deciding when this region should be expanded or contracted is given. The quadratic approximation involves estimating the hessian of the object function by a matrix which is updated at each iteration by a formula recently reported by \citebb{Powe70a}. This formula enables global convergence of the algorithm to be proved. Use of such an approximation, as against using exact second derivatives, also enables a reduction of about 60\%\ to be made in the number of operations to solve the subproblem. Numerical evidence is reported showing that the algorithm is efficient in the number of function evaluations required to solve well known test problems.}, summary = {An algorithm is described for minimization of nonlinear functions, subject possibly to linear constraints on the variables. 
At each iteration, a quasi-Newton (PSB) quadratic approximation of the objective function is minimized over a region in which the approximation is valid. A strategy for deciding when this region should be expanded or contracted is given. Global convergence is proved and numerical tests show that the algorithm is efficient in the number of function evaluations.}} @article{Flet71, author = {R. Fletcher}, title = {A general quadratic programming algorithm}, journal = JIMA, volume = 7, pages = {76--91}, year = 1971} @techreport{Flet71b, author = {R. Fletcher}, title = {A modified {M}arquardt subroutine for nonlinear least-squares}, institution = HARWELL, address = HARWELL-ADDRESS, number = {AERE-R 6799}, year = 1971, abstract = {A Fortran subroutine is described for minimizing a sum of squares of functions of many variables. Such problems arise in nonlinear data fitting, and in the solution of nonlinear algebraic equations. The subroutine is based on an algorithm due to \citebb{Marq63}, but with modifications which improve the performance of the method in certain circumstances, yet which require negligible extra computer time and storage.}, summary = {A Fortran subroutine is described for minimizing a sum of squares of functions of many variables. Such problems arise in nonlinear data fitting, and in the solution of nonlinear algebraic equations. The subroutine is based on an algorithm due to \citebb{Marq63}, but with modifications which improve the performance of the method, yet which require negligible extra computer time and storage.}} @article{Flet73, author = {R. Fletcher}, title = {An exact penalty function for nonlinear programming with inequalities}, journal = MP, volume = 5, number = 2, pages = {129--150}, year = 1973} @article{Flet76, author = {R. Fletcher}, title = {Factorizing symmetric indefinite matrices}, journal = LAA, volume = 14, pages = {257--272}, year = 1976} @book{Flet80, author = {R. 
Fletcher}, title = {Practical Methods of Optimization: Unconstrained Optimization}, publisher = WILEY, address = WILEY-ADDRESS, year = 1980} @book{Flet81, author = {R. Fletcher}, title = {Practical Methods of Optimization: Constrained Optimization}, publisher = WILEY, address = WILEY-ADDRESS, year = 1981} @inproceedings{Flet82, author = {R. Fletcher}, title = {Second-order corrections for non-differentiable optimization}, booktitle = {Numerical Analysis, Proceedings Dundee 1981}, editor = {G. A. Watson}, publisher = SPRINGER, address = SPRINGER-ADDRESS, pages = {85--114}, year = 1982, note = {Lecture Notes in Mathematics 912}} @article{Flet82b, author = {R. Fletcher}, title = {A model algorithm for composite nondifferentiable optimization problems}, journal = MPS, volume = 17, pages = {67--76}, year = 1982, abstract = {Composite functions $\phi(x)=f(x) + h(c(x))$, where $f$ and $c$ are smooth and $h$ is convex, encompass many nondifferentiable optimization problems of interest including exact penalty functions in nonlinear programming, nonlinear min-max problems, best nonlinear $L_1$, $L_2$ and $L_\infty$ approximation and finding feasible points of nonlinear inequalities. The idea is used of making a linear approximation to $c(x)$ whilst including second order terms in a quadratic approximation to $f(x)$. This is used to determine a composite function $\psi$ which approximates $\phi(x)$ and a basic algorithm is proposed in which $\psi$ is minimized on each iteration. If the technique of step restriction (or trust region) is incorporated into the algorithm, then it is shown that global convergence can be proved. It is also described briefly how the above approximations ensure that a second order rate of convergence is achieved by the basic algorithm. }, summary = {Composite functions $\phi(x)=f(x) + h(c(x))$, where $f$ and $c$ are smooth and $h$ is convex, encompass many non-differentiable optimization problems of interest. 
Making a linear approximation to $c(x)$ whilst including second-order terms in a quadratic approximation to $f(x)$ is used to determine a composite function $\psi$ which approximates $\phi(x)$ , and an algorithm is proposed in which $\psi$ is minimized on each iteration. If the trust region technique is incorporated into the algorithm, then global convergence can be proved. It is also described how the above approximations ensure that a second-order rate of convergence is achieved.}} @inproceedings{Flet85, author = {R. Fletcher}, title = {An $\ell_1$ penalty method for nonlinear constraints}, crossref = {BoggByrdSchn85}, pages = {26--40}} @book{Flet87, author = {R. Fletcher}, title = {Practical Methods of Optimization}, publisher = WILEY, address = WILEY-ADDRESS, edition = {second}, year = 1987} @inproceedings{Flet87b, author = {R. Fletcher}, title = {Recent developments in linear and quadratic programming}, crossref = {IserPowe87}, pages = {213--243}} @article{Flet95, author = {R. Fletcher}, title = {An Optimal Positive Definite Update for Sparse {H}essian Matrices}, journal = SIOPT, volume = 5, number = 1, pages = {192--217}, year = 1995} @article{FletJack74, author = {R. Fletcher and M. P. Jackson}, title = {Minimization of a quadratic function of many variables subject only to lower and upper bounds}, journal = JIMA, volume = 14, number = 2, pages = {159--174}, year = 1974} @article{FletSain89, author = {R. Fletcher and Sainz de la Maza, E.}, title = {Nonlinear programming and nonsmooth optimization by successive linear programming}, journal = MP, volume = 43, number = 3, pages = {235--256}, year = 1989, abstract = {Methods are considered for solving nonlinear programming problems using an exact $\ell_1$ penalty function. LP-like subproblems incorporating a trust region constraint are solved successively both to estimate the active set and to provide a foundation for proving global convergence. 
In one particular method, second-order information is represented by approximating the reduced Hessian matrix, and \citebb{ColeConn82b} steps are taken. A criterion for accepting these steps is given which enables the superlinear convergence properties of the Coleman-Conn method to be retained whilst preserving global convergence and avoiding the \citebb{Mara78} effect. The methods generalize to solve a wide range of composite nonsmooth optimization problems and the theory is presented in this general setting. A range of numerical experiments on small test problems is described.}, summary = {Methods are considered for solving nonlinear programming problems using an exact $\ell_1$ penalty function. LP-like subproblems incorporating a trust-region constraint are solved successively both to estimate the active set and to provide a foundation for proving global convergence. In one particular method, second-order information is represented by approximating the reduced Hessian matrix, and \citebb{ColeConn82b} steps are taken. A criterion for accepting these steps is given which enables the superlinear convergence properties of the Coleman-Conn method to be retained whilst preserving global convergence and avoiding the \citebb{Mara78} effect. The methods generalize to solve a wide range of composite non-smooth optimization problems and the theory is presented in this general setting. A range of numerical experiments on small test problems is described.}} @techreport{FletLeyf97, author = {R. Fletcher and S. Leyffer}, title = {Nonlinear Programming without a penalty function}, institution = DUNDEE, address = DUNDEE-ADDRESS, type = {Numerical Analysis Report}, number = {NA/171}, year = 1997, abstract = {In this paper the solution of nonlinear programming problems by a Sequential Quadratic Programming (SQP) trust--region algorithm is considered. The aim of the present work is to promote global convergence without the need to use a penalty function. 
Instead, a new concept of a ``filter'' is introduced which allows a step to be accepted if it reduces either the objective function or the constraint violation function. Numerical tests on a wide range of test problems are very encouraging and the new algorithm compares favourably with LANCELOT and an implementation of S$l_1$QP.}, summary = {A Sequential Quadratic Programming (SQP) trust-region algorithm for nonlinear programming is considered, which is globally convergent without the need to use a penalty function. Instead, the concept of a ``filter'' is introduced which allows a step to be accepted if it reduces either the objective function or the constraint violation function. Numerical tests on a wide range of test problems are very encouraging and the new algorithm compares favourably with {\sf LANCELOT} and an implementation of S$l_1$QP.}} @techreport{FletLeyf98, author = {R. Fletcher and S. Leyffer}, title = {User Manual for filter{SQP}}, institution = DUNDEE, address = DUNDEE-ADDRESS, type = {Numerical Analysis Report}, number = {NA/181}, year = 1998} @techreport{FletLeyfToin98, author = {R. Fletcher and S. Leyffer and Ph. L. Toint}, title = {On the Global Convergence of an {SLP}-Filter Algorithm}, institution = FUNDP, address = FUNDP-ADDRESS, number = {98/13}, year = 1998, abstract = {A mechanism for proving global convergence in filter-type methods for nonlinear programming is described. Such methods are characterized by their use of the dominance concept of multiobjective optimization, instead of a penalty parameter whose adjustment can be problematic. The main interest is to demonstrate how convergence for NLP can be induced without forcing sufficient descent in a penalty-type merit function. The proof technique is presented in a fairly basic context, but the ideas involved are likely to be more widely applicable. 
The technique allows a wide range of specific algorithmic choices associated with updating the trust-region radius and with feasibility restoration.}, summary = {A mechanism for proving global convergence in filter-type trust-region methods for nonlinear programming is described. The main interest is to demonstrate how global convergence can be induced without forcing sufficient descent in a penalty-type merit function. The technique of proof allows a wide range of specific algorithmic choices associated with updating the trust-region radius and with feasibility restoration.}} @techreport{FletLeyfToin00, author = {R. Fletcher and S. Leyffer and Ph. L. Toint}, title = {On the Global Convergence of an {SQP}-Filter Algorithm}, institution = FUNDP, address = FUNDP-ADDRESS, number = {???}, year = 2000, abstract = {A mechanism for proving global convergence in filter-type methods for nonlinear programming is described. Such methods are characterized by their use of the dominance concept of multiobjective optimization, instead of a penalty parameter whose adjustment can be problematic. The main interest is to demonstrate how convergence for NLP can be induced without forcing sufficient descent in a penalty-type merit function. The proof technique is presented in a fairly basic context, but the ideas involved are likely to be more widely applicable. The technique allows a wide range of specific algorithmic choices associated with updating the trust-region radius and with feasibility restoration.}, summary = {A mechanism for proving global convergence in filter-type trust-region methods for nonlinear programming is described. The main interest is to demonstrate how global convergence can be induced without forcing sufficient descent in a penalty-type merit function. The technique of proof allows a wide range of specific algorithmic choices associated with updating the trust-region radius and with feasibility restoration.}} @techreport{FletGoulLeyfToin99, author = {R. 
Fletcher and N. I. M. Gould and S. Leyffer and Ph. L. Toint}, title = {Global Convergence of Trust-Region {SQP}-Filter Algorithms for Nonlinear Programming}, institution = FUNDP, address = FUNDP-ADDRESS, number = {99/03}, year = 1999, abstract = {Global convergence to first-order critical points is proved for two trust-region SQP-filter algorithms of the type introduced by \citebb{FletLeyf97}. The algorithms allow for an approximate solution of the quadratic subproblem and incorporate the safeguarding tests described in \citebb{FletLeyfToin98}. The first algorithm decomposes the step into its normal and tangential components, while the second replaces this decomposition by a stronger condition on the associated model decrease.}, summary = {Global convergence to first-order critical points is proved for two trust-region SQP-filter algorithms of the type introduced by \citebb{FletLeyf97}. The algorithms allow for an approximate solution of the quadratic subproblem and incorporate the safeguarding tests described in \citebb{FletLeyfToin98}. The first algorithm decomposes the step into its normal and tangential components, while the second replaces this decomposition by a stronger condition on the associated model decrease.}} @article{FletWats80, author = {R. Fletcher and G. A. Watson}, title = {First and second order conditions for a class of nondifferentiable optimization problems}, journal = MP, volume = 18, number = 3, pages = {291--307}, year = 1980} @article{FlipJans96, author = {O. E. Flippo and B. Jansen}, title = {Duality and Sensitivity in nonconvex quadratic Optimization over an Ellipsoid}, journal = {European Journal of Operational Research}, volume = 94, number = 1, pages = {167--178}, year = 1996, abstract = {In this paper, a duality framework is discussed for the problem of optimizing a nonconvex quadratic function over an ellipsoid. 
Additional insight is obtained from the observation that this nonconvex problem is in a sense equivalent to a convex problem of the same type, from which known necessary and sufficient conditions for optimality readily follow. Based on the duality results, some existing solution procedures are interpreted as in fact solving the dual. The duality relations are also shown to provide a natural framework for sensitivity analysis.}, summary = {A duality framework for the problem of optimizing a non-convex quadratic function over an ellipsoid is described. Additional insight is obtained by observing that this non-convex problem is in a sense equivalent to a convex problem of the same type, from which known necessary and sufficient conditions for optimality readily follow. Based on the duality results, some existing solution procedures are interpreted as in fact solving the dual. The duality relations also provide a natural framework for sensitivity analysis.}} @article{Font90, author = {R. Fontecilla}, title = {Inexact Secant Methods for Nonlinear Constrained Optimization}, journal = SINUM, volume = 27, number = 1, pages = {154--165}, year = 1990} @article{FontSteiTapi87, author = {R. Fontecilla and T. Steihaug and R. A. Tapia}, title = {A convergence theory for a class of quasi-{N}ewton methods for constrained optimization}, journal = SINUM, volume = 24, number = 5, pages = {1133--1151}, year = 1987} @book{FortGlow82, author = {M. Fortin and R. Glowinski}, title = {M\'ethodes de {L}agrangien Augment\'e}, publisher = {Dunod}, address = {Paris, France}, number = 9, series = {M\'{e}thodes math\'{e}matiques de l'informatique}, year = 1982} @phdthesis{Fort00, author = {Ch. Fortin}, title = {A Survey of the Trust-Region Subproblem within a Semidefinite Framework}, school = {University of Waterloo}, address = {Waterloo, Ontario, Canada}, year = 2000, abstract ={Trust region subproblems arise within a class of unconstrained methods called trust region methods. 
The subproblems consist of minimizing a quadratic function subject to a norm constraint. This thesis is a survey of different methods developed to find an approximate solution to the subproblem. We study the well-known method of \citebb{MoreSore83} and two recent methods for large sparse problems: the so-called Lanczos method of \citebb{GoulLuciRomaToin99} and the \citebb{RendWolk97} algorithm. The common ground to explore these methods will be semi-definite programming. This approach has been used by \citebb{RendWolk97} to explain their method and the Mor\'{e}-Sorensen algorithm; we extend this work to the Lanczos method. The last chapter of this thesis is dedicated to some improvements done to the Rendl-Wolkowicz algorithm and the comparisons between the Lanczos method and the Rendl and Wolkowicz algorithm. In particular, we show some weakness of the Lanczos method and show that the Rendl-Wolkowicz algorithm is more robust.}, summary = {A survey of different methods developed to find an approximate solution to the trust-region subproblem is presented. The well-known method of \citebb{MoreSore83} and two recent methods for large sparse problems, namely the so-called Lanczos method of \citebb{GoulLuciRomaToin99} and the \citebb{RendWolk97} algorithm, are studied. The common ground to explore these methods is semi-definite programming. This approach has been used by \citebb{RendWolk97} to explain their method and the Mor\'{e}-Sorensen algorithm; this work is extended to the Lanczos method. Some improvements to the Rendl-Wolkowicz algorithm are also described and the Lanczos method and the Rendl and Wolkowicz algorithm compared. Some weakness of the Lanczos method is discussed and the Rendl-Wolkowicz algorithm is argued to be more robust.}} @article{FourMehr93, author = {R. Fourer and S. 
Mehrotra}, title = {Solving symmetrical indefinite systems for an interior-point method for linear programming}, journal = MPA, volume = 62, number = 1, pages = {15--39}, year = 1993} @article{FoxHallSchr78, author = {P. A. Fox and A. D. Hall and N. L. Schryer}, title = {The {PORT} mathematical subroutine library}, journal = TOMS, volume = 4, number = 2, pages = {104--126}, year = 1978} @techreport{Fral89, author = {C. Fraley}, title = {Software Performance on Nonlinear Least-Squares Problems}, institution = STANFORD, address = STANFORD-ADDRESS, number = {CS-TR-89-1244}, year = 1989, abstract = {This paper presents numerical results for a large and varied set of problems using software that is widely available and has undergone extensive testing. The algorithms implemented in this software include Newton-based linesearch and trust-region methods for unconstrained optimization, as well as Gauss-Newton, Levenberg-Marquardt, and special quasi-Newton methods for nonlinear least squares. Rather than give a critical assessment of the software itself, our original purpose was to use the best available software to compare the underlying algorithms, to identify classes of problems for each method on which the performance is either very good or very poor and to provide benchmarks for future work in nonlinear least squares and unconstrained optimization. The variability in the results made it impossible to meet either of the first two goals; however the results are significant as a step toward explaining why these aims are so difficult to accomplish.}, summary = {Numerical results are presented for a large set of problems using software that is widely available and has undergone extensive testing. The algorithms implemented include Newton-based linesearch and trust-region methods for unconstrained optimization, as well as Gauss-Newton, Levenberg-Morrison-Marquardt, and special quasi-Newton methods for nonlinear least-squares. 
Rather than give a critical assessment of the software itself, the original intention was to use the best available software to compare the underlying algorithms, to identify classes of problems for each method on which the performance is either very good or very poor and to provide benchmarks for future work in nonlinear least-squares and unconstrained optimization. The variability in the results makes it impossible to meet either of the first two goals; however the results are significant as a step toward explaining why these aims are so difficult to accomplish.}} @article{FranWolf56, author = {M. Frank and P. Wolfe}, title = {An algorithm for quadratic programming}, journal = {Naval Research Logistics Quarterly}, volume = 3, pages = {95--110}, year = 1956} @article{Freu91, author = {R. M. Freund}, title = {Theoretical efficiency of a shifted-barrier-function algorithm for linear programming}, journal = LAA, volume = 152, pages = {19--41}, year = 1991} @article{FrieMart94, author = {A. Friedlander and J. M. Mart\'{\i}nez}, title = {On the Maximization of a Concave Quadratic Function with Box Constraints}, journal = SIOPT, volume = 4, number = 1, pages = {177--192}, year = 1994} @article{FrieMartRayd95, author = {A. Friedlander and J. M. Mart\'{\i}nez and M. Raydan}, title = {A new method for large-scale box constrained convex quadratic minimization problems}, journal = OMS, volume = 5, number = 1, pages = {57--74}, year = 1995} @article{FrieMartSant94, author = {A. Friedlander and J. M. Mart\'{\i}nez and S. A. Santos}, title = {A new Trust Region Algorithm for Bound Constrained Minimization}, journal = {Applied Mathematics and Optimization}, volume = 30, number = 3, pages = {235--266}, year = 1994, abstract = {A new method for maximizing a concave quadratic function with bounds on the variables is described. The new algorithm combines conjugate gradients with gradient projection techniques, as the algorithm of \citebb{MoreTora91} and other well-known method do. 
A new strategy for the decision of leaving the current face is introduced that makes it possible to obtain finite convergence even for a singular Hessian and in the presence of dual degeneracy. Numerical experiments are presented.}, summary = {A method for maximizing a concave quadratic function with bounds on the variables is described, which combines conjugate gradients with gradient projection techniques, as in \citebb{MoreTora91}. A strategy for the decision of leaving the current face is introduced that ensures finite convergence even for a singular Hessian and in the presence of dual degeneracy. Numerical experiments are presented.}} @article{FrieMartSant94b, author = {A. Friedlander and J. M. Mart\'{\i}nez and S. A. Santos}, title = {On the resolution of linearly constrained convex minimization problems}, journal = SIOPT, volume = 4, number = 2, pages = {331--339}, year = 1994, abstract = {The problem of minimizing a twice continuously differentiable convex function $f$ is considered, subject to $Ax=b$, $x \geq 0$, where $A \in \Re^{m \times n}$, $m$, $n$ are large and the feasible region is bounded. It is proven that this problem is equivalent to a ``primal-dual'' box-constrained problem with $2n+m$ variables. This problem is solved using an algorithm for bound constrained minimization that can deal with many variables. Numerical experiments are presented.}, summary = {The problem of minimizing a twice continuously differentiable convex function $f$ is considered, subject to $Ax=b$, $x \geq 0$, where $A \in \Re^{m \times n}$, $m$, $n$ are large and the feasible region is bounded. It is proven that this problem is equivalent to a ``primal-dual'' box-constrained problem with $2n+m$ variables. This problem is solved using an algorithm for bound constrained minimization that can deal with many variables. Numerical experiments are presented.}} @techreport{Fris54, author = {K. R. 
Frisch}, title = {Principles of Linear Programming---With Particular Reference to the Double Gradient Form of the Logarithmic Potential Method}, institution = {University Institute for Economics}, address = {Oslo}, type = {Memorandum of October 18}, year = 1954} @techreport{Fris55, author = {K. R. Frisch}, title = {The Logarithmic Potential Function for Convex Programming}, institution = {University Institute for Economics}, address = {Oslo}, type = {Memorandum of May 13}, year = 1955} @mastersthesis{Fugg96, author = {P. Fugger}, title = {Trust region subproblems and comparison with quasi-{N}ewton methods}, school = {Technical University of Graz}, address = {Graz, Austria}, year = 1996, abstract = {In the field of unconstrained nonlinear optimization Quasi-Newton methods (e.g. the DFP and BFGS formulae) are well-known and extensively studied in the literature. Another approach to the minimization of nonlinear problems is done by the so-called restricted step or trust region methods. One possibility for solving the Trust Region subproblem (TRS) is to apply a parametric eigenvalue problem (Rendl's method). The choice of the parameter of Rendl's method is decisive for the number of iterations needed to obtain a minimizer. The computational amount of the method is compared with the costs of the DFP algorithm}, summary = {One possibility for solving the trust-region subproblem is to apply a parametric eigenvalue problem (Rendl's method). The choice of the parameter of Rendl's method is decisive for the number of iterations needed to obtain a minimizer. The computational efficiency of this method is compared with that of the DFP algorithm}} @article{FujiKojiNaka97, author = {K. Fujisawa and M. Kojima and K. Nakata}, title = {Exploiting sparsity in primal-dual interior-point methods for semidefinite programming}, journal = MPB, volume = 79, number = {1--3}, pages = {235--253}, year = 1997} @article{Fuku86b, author = {M. 
Fukushima}, title = {A successive quadratic-programming algorithm with global and superlinear convergence properties}, journal = MP, volume = 35, number = 3, pages = {253--264}, year = 1986} @article{Fuku92, author = {M. Fukushima}, title = {Equivalent differentiable optimization problems and descent methods for asymmetric variational inequality problems}, journal = MPA, volume = 53, number = 1, pages = {99--110}, year = 1992} @inproceedings{Fuku96, author = {M. Fukushima}, title = {Merit functions for variational inequality and complementarity problems}, crossref = {DiPiGian96}, pages = {155--170}} @article{FukuHaddvanStroSugiYama96, author = {M. Fukushima and M. Haddou and Nguyen, H. v. and J. J. Strodiot and T. Sugimoto and E. Yamakawa}, title = {A parallel descent algorithm for convex programming}, journal = COAP, volume = 5, number = 1, pages = {5--37}, year = 1996, abstract = {In this paper, we propose a parallel decomposition algorithm for solving a class of convex optimization problems, which is broad enough to contain ordinary convex programming problems with a strongly convex objective function. The algorithm is a variant of the trust region method applied to the Fenchel dual of the given problem. We prove global convergence of the algorithm and report some computational experience with the proposed algorithm on the Connection Machine Model CM-5.}, summary = {A parallel decomposition algorithm for solving a class of convex optimization problem is proposed that contains convex programming problems with a strongly convex objective function. The algorithm is a variant of the trust-region method applied to the Fenchel dual of the problem. We prove global convergence and report computational experience on the Connection Machine Model CM-5.}} @article{FukuYama86, author = {M. Fukushima and Y. 
Yamamoto}, title = {A second-order algorithm for continuous-time nonlinear optimal control problems}, journal = {IEEE Transactions on Automatic Control}, volume = {AC-31}, number = 7, pages = {673--676}, year = 1986, abstract = {A second-order algorithm is presented for the solution of continuous-time nonlinear optimal control problems. The algorithm is an adaptation of the trust region modifications of Newton's method and solves at each iteration a linear-quadratic control problem with an additional constraint. Under some assumptions, the proposed algorithm is shown to possess a global convergence property. A numerical example illustrates the method.}, summary = {A second-order algorithm is presented for the solution of continuous-time nonlinear optimal control problems. The algorithm is a trust-region variant of Newton's method and solves at each iteration a linear-quadratic control problem with an additional constraint. The algorithm is globally convergent. A numerical example illustrates the method.}} @techreport{FuLuoYe96, author = {M. Fu and Z. Q. Luo and Y. Ye}, title = {Approximation algorithms for quadratic programming}, institution = {Department of Management Science, University of Iowa}, year = 1996, abstract = {We consider the problem of approximating the global minimum of a general quadratic program (QP) with $n$ variables subject to $m$ ellipsoidal constraints. For $m=1$, we rigorously show that and $\epsilon$-minimizer, where error $\epsilon \in (0,1)$, can be obtained in polynomial time, meaning that the number of arithmetic operations is a polynomial in $n$, $m$ and $\log(1/\epsilon)$. For $m \geq 2$, we present a polynomial-time $(1- \frac{1}{m^2})$-approximation algorithm as well as a semidefinite programming relaxation for this problem. 
In addition, we present approximation algorithms for solving QP under the box constraints and the assignment polytope constraints.}, summary = {The problem of approximating the global minimum of a general quadratic program (QP) with $n$ variables subject to $m$ ellipsoidal constraints is considered. For $m=1$, it is shown that and $\epsilon$-minimizer, where error $\epsilon \in (0,1)$, can be obtained in polynomial time, meaning that the number of arithmetic operations is a polynomial in $n$, $m$ and $\log(1/\epsilon)$. For $m \geq 2$, a polynomial-time $(1- \frac{1}{m^2})$-approximation algorithm is presented, as well as a semidefinite programming relaxation for this problem. In addition, approximation algorithms for solving QP under the box constraints and the assignment polytope constraints are given.}} @article{Fure93, author = {B. P. Furey}, title = {A sequential quadratic programming-based algorithm for optimization of gas networks}, journal = {Automatica}, volume = 29, number = 6, pages = {1439-1450}, year = 1993, abstract = {British Gas uses a complex, heavily looped network of pipes and controllable units (compressors and regulators) to transmit gas from coastal supply terminals to regional demand points. Computer algorithms are required for efficient management of the system. This paper describes an algorithm for optimal control over periods of up to a day. The problem is large scale and highly nonlinear in both objective function and constraints. The method is based on sequential quadratic programming and takes account of the structure of the pipeflow equations by means of a reduced gradient technique which eliminates most of the variables from the quadratic subproblems. The latter involve only simple bound constraints, which are handled efficiently by a conjugate gradient-active set algorithm. Trust region techniques permit use of the exact Hessian, preserving sparsity. 
More general constraints are handled at an outer level by a truncated augmented Lagrangian method. Results are included for some realistic problems. The algorithm is generally applicable to problems with a control structure.}, summary = {An algorithm for optimal control of the British Gas network of pipes and controllable units over periods of up to a day is described. The problem is large-scale and highly nonlinear in both objective function and constraints. The method is based on sequential quadratic programming and takes account of the structure of the pipeflow equations by means of a reduced gradient technique. which eliminates most of the variables from the quadratic subproblems. The latter involve only simple bound constraints, which are handled efficiently by a conjugate gradient-active set algorithm. Trust-region techniques permit use of the exact Hessian, preserving sparsity. More general constraints are handled at an outer level by a truncated augmented Lagrangian method. Results are included for some realistic problems.}} %%% G %%% @techreport{GabrMore95, author = {S. A. Gabriel and J. J. Mor\'{e}}, title = {Smoothing of mixed complementarity problems}, institution = ANL, address = ANL-ADDRESS, number = {MCS-P541-00995}, year = 1995} @inproceedings{GabrPang94, author = {S. A. Gabriel and J. S. Pang}, title = {A trust region method for constrained nonsmooth equations}, crossref = {HageHearPard94}, pages = {155--181}, abstract = {In this paper, we develop and analyze the convergence of a fairly general trust region method for solving a system of nonsmooth equations subject to some linear constraints. The method is based on the existence of an iteration function for the nonsmooth equations and involves the solution of a sequence of subproblems defined by this function. A particular realization of the method leads to an arbitrary-norm trust region method. Applications of the latter method to the nonlinear complementarity and related problems are discussed. 
Sequential convergence of the method and its rate of convergence are established under certain regularity conditions similar to those used in the NE/SQP method and its generalization. Some computational results are reported.}, summary = {The convergence of a trust-region method for solving a system of non-smooth equations subject to linear constraints is considered. The method is based on the existence of an iteration function for the non-smooth equations and involves the solution of a sequence of subproblems defined by this function. A particular realization of the method leads to an arbitrary-norm trust-region method. Applications of the latter method to the nonlinear complementarity and related problems are discussed. Global convergence of the method and its rate of convergence are established under certain regularity conditions similar to those used in the NE/SQP method and its generalization. Computational results are reported.}} @phdthesis{Gand78, author = {W. Gander}, title = {On the linear least squares problem with a quadratic constraint}, school = {Computer Science Department, Stanford University}, address = {Stanford, California, USA}, number = {STAN-CS-78-697}, year = 1978} @article{Gand81, author = {W. Gander}, title = {Least squares with a quadratic constraint}, journal = NUMMATH, volume = 36, pages = {291--307}, year = 1981, abstract = {We present the theory of the linear least squares problem with a quadratic constraint. New theorems characterizing properties of the solutions are given. A numerical application is discussed.}, summary = {Properties of the solutions of the linear least-squares problem with a quadratic constraint are given and a numerical application discussed. The paper summarizes \citebb{Gand78}.}} @inproceedings{Gao98, author = {L. 
Gao}, title = {Using {H}uber Method to solve $L_1$-norm problem}, crossref = {Yuan98}, pages = {263--272}, abstract = {The nondifferentiable $L_1$ function is approximated by the Huber function, such that the original $L_1$ estimation problem is transformed to a sequence of unconstrained minimization problems. An algorithm is considered for the Huber problem. Numerical experiments are reported and comparisons with different methods are made.}, summary = {The non-differentiable $\ell_1$ estimation problem is replaced by a sequence of smooth minimization problems using the Huber function to approximate the absolute value. An algorithm is proposed that uses a trust-region method to solve each of the subproblems. Numerical comparisons are made with competing approaches.}} @book{GareJohn79, author = {M. R. Garey and D. S. Johnson}, title = {Computers and Intractability}, publisher = FREEMAN, address = FREEMAN-ADDRESS, year = 1979} @article{Garf90, author = {E. Garfield}, title = {The most cited papers of all time, {SCI} 1945--1988. {P}art 3. {A}nother 100 from the Citation Classics Hall of Fame}, journal = {Current Contents}, volume = 34, pages = {3--13}, year = 1990} @article{Gay81, author = {D. M. Gay}, title = {Computing optimal locally constrained steps}, journal = SISSC, volume = 2, pages = {186--197}, year = 1981, abstract = {In seeking to solve an unconstrained minimization problem, one often computes steps based on a quadratic approximation $q$ to the objective function. A reasonable way to choose such steps is by minimizing $q$ constrained to a neighbourhood of the current iterate. This paper considers ellipsoidal neighbourhoods and presents a new way to handle certain computational details when the Hessian of $q$ is indefinite, paying particular attention to a special case which may then arise. The proposed step computing algorithm provides an attractive way to deal with negative curvature. 
Implementations of this algorithm have proved very satisfactory in the nonlinear least-squares solver NL2SOL.}, summary = {The solution of the trust-region subproblem with ellipsoidal norms is considered and a way to handle certain computational details when the Hessian of $q$ is indefinite is presented, paying particular attention to the hard case. The proposed step computing algorithm provides an attractive way to deal with negative curvature. Implementations of the algorithm have proved very satisfactory in the nonlinear least-squares solver NL2SOL.}} @techreport{Gay82, author = {D. M. Gay}, title = {On the convergence in Model/Trust-Region Algorithms for Unconstrained Optimization}, institution = BELLLABS, address = BELLLABS-ADDRESS, type = {Computing Science Technical Report}, number = 104, year = 1982, abstract = {This paper discusses convergence tests in the context of model/trust-region algorithms for solving unconstrained optimization problems. It presents a general theorem that supports a diagnostic test for possible convergence to a minimizer at which the Hessian of the objective function is singular. (This is an event with which one must be prepared to deal in problems that arise from fitting a mathematical model to data.) Given just continuity of the objective function's gradient, the theorem assures that either the ``singular convergence'' test is satisfied infinitely often for any positive convergence tolerance, or else the lengths of the steps taken tend to zero; moreover, if the model Hessians are locally bounded, then any limit point of the iterates is a critical point. One can use this information in a suite of convergence tests that in my opinion involve easily understood tolerances and provide helpful diagnostics.}, summary = {Convergence tests in the context of model/trust-region algorithms for solving unconstrained optimization problems are discussed. 
A general theorem that supports a diagnostic test for possible convergence to a minimizer at which the Hessian of the objective function is singular is given. If the gradient of the objective function is continuous, the theorem assures that either the ``singular convergence'' test is satisfied infinitely often for any positive convergence tolerance, or else the lengths of the steps taken tend to zero; moreover, if the model Hessians are locally bounded, then any limit point of the iterates is a critical point. This information can be used in a suite of convergence tests that involve easily understood tolerances and which provide helpful diagnostics.}} @article{Gay83b, author = {D. M. Gay}, title = {Algorithm 611: subroutines for unconstrained minimization using a model/trust-region approach}, journal = TOMS, volume = 9, number = 4, pages = {503--524}, year = 1983, summary = {A set of subroutines for the minimization of a smooth function are provided. These codes work with exact or finite-difference gradients, and exact or secant approximations to Hessians, using the reverse-communication paradigm. An approximation to the trust-region subproblem is computed using the double-dogleg technique of \citebb{DennMei79}.}} @inproceedings{Gay84, author = {D. M. Gay}, title = {A trust region approach to linearly constrained optimization}, booktitle = {Numerical Analysis: Proceedings Dundee 1983}, editor = {D. F. Griffiths}, publisher = SPRINGER, address = SPRINGER-ADDRESS, pages = {72--105}, year = 1984, note = {Lecture Notes in Mathematics 1066}, abstract = {This paper suggests a class of trust-region algorithms for solving linearly constrained optimization problems. The algorithms use a ``local'' active-set strategy to select the steps they try. This strategy is such that degeneracy and zero Lagrange multipliers do not slow convergence (to a first-order stationary point) and that no anti-zigzagging precautions are necessary. 
(Unfortunately, when there are zero Lagrange multipliers, convergence to a point failing to satisfy second-order necessary conditions remains possible.) We discuss specialization of the algorithms to the case of simple bounds on the variables and report preliminary computational experience.}, summary = {A class of trust-region algorithms for solving linearly constrained optimization problems is suggested. The algorithms use a ``local'' active-set strategy to select their steps. This strategy is such that degeneracy and zero Lagrange multipliers do not slow convergence (to a first-order stationary point) and that no anti-zigzagging precautions are necessary---when there are zero Lagrange multipliers, convergence to a point failing to satisfy second-order necessary conditions remains possible. Specialization of the algorithms to the case of simple bounds on the variables are discussed, and preliminary computational experience reported.}} @inproceedings{GayOverWrig98, author = {D. M. Gay and M. L. Overton and M. H. Wright}, title = {A Primal-Dual Interior Method for Nonconvex Nonlinear Programming}, crossref = {Yuan98}, pages = {31--56}} @article{GermToin99, author = {M. Germain and Ph. L. Toint}, title = {An Iterative Process for International Negotiations on Acid Rain in Northern Europe Using a General Convex Formulations}, journal = {Environmental and Resource Economics}, note = {To appear.}, year = 1999, abstract = {This paper proposes a game theoretical approach of international negotiations on transboundary pollution. This approach is distinguished by a discrete time formulation and by a suitable formulation of the local information assumption on cost and damage functions: at each stage of the negotiation, the parties assign the best possible cooperative state, given the available information, as an objective for the next stage. 
It is shown that the resulting sequences of states converges from a non-cooperative situation to an international optimum in a finite number of stages. Furthermore, a financial transfer structure is also presented that makes the desired sequence of states individually rational and strategically stable. The concepts are applied in a numerical simulation of the $SO_2$ transboundary pollution problem related to acid rain in Northern Europe.}, summary = {A game theoretical approach of international negotiations on transboundary pollution is proposed, that uses a discrete time formulation. The resulting sequences of states is shown to converge from a non-cooperative situation to an international optimum in a finite number of stages. A financial transfer structure is also presented that makes the desired sequence of states individually rational and strategically stable. The concepts are applied in a numerical simulation of the $SO_2$ transboundary pollution problem related to acid rain in Northern Europe, using a trust-region method to calculate the optimum at each negotiation stage.}} @article{GeorLiu79, author = {A. George and J. W. H. Liu}, title = {The design of a user interface for a sparse matrix package}, journal = TOMS, volume = 5, number = 2, pages = {139--162}, year = 1979} @book{GeorLiu81, author = {A. George and J. W. H. Liu}, title = {Computer Solution of Large Sparse Positive Definite Systems}, publisher = PH, address = PH-ADDRESS, year = 1981} @phdthesis{Gert99, author = {E. M. Gertz}, title = {Combination Trust-Region Line-Search Methods for Unconstrained Optimization}, school = {Department of Mathematics, University of California}, address = {San Diego, California, USA}, year = 1999, abstract = {Many important problems may be expressed in terms of nonlinear multivariate unconstrained optimization. The basic unconstrained optimization problem is to minimize a real-valued function $f(x)$ over all vectors $x \in \Re^n$. 
Many techniques for solving these types of problems are available if $f$ is twice continuously differentiable. Two broad classes of algorithms for the unconstrained minimization problem are trust-region algorithms and line-search algorithms. These two classes may be combined by performing a line search in the direction proposed by the solution to the trust-region subproblem. We develop three combination methods which require that a sufficient decrease condition is met at each step. The first of the new algorithms uses a backtracking line search based on the Armijo condition. In all these algorithms the line search is used to control the trust-region radius. We present strong first and second order convergence theorems for these new methods, an analysis of their asymptotic convergence properties and the results of numerical experiments using the new algorithms. It is possible to use the Wolfe-condition based algorithms to define quasi-Newton methods which use the BFGS update. The quasi-Newton methods are robust and efficient.}, summary = {Three unconstrained minimization methods are presented that combine trust-region and linesearch techniques, in the sense that a linesearch is performed in the direction obtained from the solution of the trust-region subproblem. The linesearch is also used to control the trust-region radius. Strong global convergence to first- and second-order points is proved and the asymptotic convergence properties of the algorithms analyzed. Numerical results are presented, that include a quasi-Newton BFGS variant of the algorithms.}} @article{Gilb91, author = {J. Ch. Gilbert}, title = {Maintaining the positive definiteness of the matrices in reduced secant methods for equality constrained optimization}, journal = MP, volume = 50, number = 1, pages = {1--28}, year = 1991} @article{GillGoulMurrSaunWrig84, author = {P. E. Gill and N. I. M. Gould and W. Murray and M. A. Saunders and M. H. 
Wright}, title = {A weighted {G}ram-{S}chmidt method for convex quadratic programming}, journal = MP, volume = 30, number = 2, pages = {176--195}, year = 1984} @article{GillMurr74, author = {P. E. Gill and W. Murray}, title = {{N}ewton-type methods for unconstrained and linearly constrained optimization}, journal = MP, volume = 7, number = 3, pages = {311--350}, year = 1974} @techreport{GillMurr76, author = {P. E. Gill and W. Murray}, title = {Minimization subject to bounds on the variables}, institution = NPL, address = NPL-ADDRESS, type = {NPL Report}, number = {NAC 72}, year = 1976} @article{GillMurr78, author = {P. E. Gill and W. Murray}, title = {Numerically stable methods for quadratic programming}, journal = MP, volume = 14, number = 3, pages = {349--372}, year = 1978} @article{GillMurrPoncSaun92, author = {P. E. Gill and W. Murray and D. B. Poncele\'{o}n and M. A. Saunders}, title = {Preconditioners for indefinite systems arising in optimization}, journal = SIMAA, volume = 13, number = 1, pages = {292--311}, year = 1992} @article{GillMurrSaunWrig83, author = {P. E. Gill and W. Murray and M. A. Saunders and M. H. Wright}, title = {Computing forward-difference intervals for numerical optimization}, journal = SISSC, volume = 4, number = 2, pages = {310--321}, year = 1983} @article{GillMurrSaunWrig85, author = {P. E. Gill and W. Murray and M. A. Saunders and M. H. Wright}, title = {Some issues in implementing a sequential quadratic programming algorithm}, journal = {SIGNUM Newsletter}, volume = 20, number = 2, pages = {13--19}, year = 1985} @article{GillMurrSaunStewWrig85, author = {P. E. Gill and W. Murray and M. A. Saunders and G. W. Stewart and M. H. Wright}, title = {Properties of a representation of a basis for the null space}, journal = MP, volume = 33, number = 2, pages = {172--186}, year = 1985} @article{GillMurrSaunTomlWrig86, author = {P. E. Gill and W. Murray and M. A. Saunders and J. A. Tomlin and M. H. 
Wright}, title = {On projected {N}ewton barrier methods for linear programming and an equivalence to {K}armarkar's projective method}, journal = MP, volume = 36, number = 2, pages = {183--209}, year = 1986} @techreport{GillMurrSaunWrig88, author = {P. E. Gill and W. Murray and M. A. Saunders and M. H. Wright}, title = {Shifted barrier methods for linear programming}, institution = STANFORD, address = STANFORD-ADDRESS, type = {Technical Report}, number = {SOL88-9}, year = 1988} @inproceedings{GillMurrSaunWrig90, author = {P. E. Gill and W. Murray and M. A. Saunders and M. H. Wright}, title = {A {S}chur-complement method for sparse quadratic programming}, crossref = {CoxHamm90}, pages = {113--138}} @article{GillMurrSaunWrig91, author = {P. E. Gill and W. Murray and M. A. Saunders and M. H. Wright}, title = {Inertia-controlling methods for general quadratic programming}, journal = SIREV, volume = 33, number = 1, pages = {1--36}, year = 1991} @inproceedings{GillMurrSaunWrig92, author = {P. E. Gill and W. Murray and M. A. Saunders and M. H. Wright}, title = {Some theoretical properties of an augmented {L}agrangian merit function}, booktitle = {Advances in Optimizations and Parallel Computing}, editor = {P. M. Pardalos}, publisher = {Elsevier}, address = {Amsterdam}, pages = {127--143}, year = 1992} @book{GillMurrWrig81, author = {P. E. Gill and W. Murray and M. H. Wright}, title = {Practical Optimization}, publisher = AP, address = AP-ADDRESS, year = 1981} @article{GladPola79, author = {T. Glad and E. Polak}, title = {A multiplier method with automatic limitation of penalty growth}, journal = MP, volume = 17, number = 2, pages = {140--155}, year = 1979} @article{Goem97, author = {M. X. Goemans}, title = {Semidefinite programming in combinatorial optimization}, journal = MPB, volume = 79, number = {1--3}, pages = {143--161}, year = 1997} @article{Gold70, author = {D. 
Goldfarb}, title = {A Family of Variable Metric Methods Derived by Variational Means}, journal = MC, volume = 24, pages = {23--26}, year = 1970} @techreport{Gold80, author = {D. Goldfarb}, title = {The Use of Negative Curvature in Minimization Algorithms}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR80-412}, year = 1980, abstract = {In this paper we examine existing algorithms for minimizing a nonlinear function of many variables which make use of negative curvature. These algorithms can all be viewed as modified versions of Newton's method and their merits and drawbacks are discussed to help identify new and more promising methods. The algorithms considered include ones which compute and search along nonascent directions of negative curvature and ones which search along curvi-linear paths generated by these directions and descent directions. Versions of the \citebb{GoldQuanTrot66}, or equivalently, methods based upon a trust region strategy, and gradient path methods are also considered. When combined with the numerically stable \citebb{BuncParl71} factorization of a symmetric indefinite matrix the latter two approaches give rise to new, and what appears to be, efficient and robust minimization methods which can take advantage of negative curvature when it is encountered. Several suggestions are made for further research in this area.}, summary = {Algorithms for minimizing a nonlinear function of many variables which make use of negative curvature are examined. These algorithms can all be viewed as modified versions of Newton's method and their merits and drawbacks are discussed to help identify new and more promising methods. The algorithms considered include ones which compute and search along non-ascent directions of negative curvature and ones which search along curvilinear paths generated by these directions and descent directions. 
Versions of the \citebb{GoldQuanTrot66}, or equivalently, methods based upon a trust-region strategy, and gradient path methods are also considered. When combined with the numerically stable \citebb{BuncParl71} factorization of a symmetric indefinite matrix the latter two approaches give rise to efficient and robust minimization methods which can take advantage of negative curvature.}} @article{GoldLiuWang91, author = {D. Goldfarb and S. C. Liu and S. Wang}, title = {A Logarithmic Barrier Function Algorithm for Quadratically Constrained Convex Quadratic Programs}, journal = SIOPT, volume = 1, number = 2, pages = {252--267}, year = 1991} @article{GoldWang93, author = {D. Goldfarb and S. Wang}, title = {Partial-update {N}ewton methods for unary, factorable and partially separable optimization}, journal = SIOPT, volume = 3, number = 2, pages = {383--397}, year = 1993} @article{GoldIdna83, author = {D. Goldfarb and A. Idnani}, title = {A numerically stable dual method for solving strictly convex quadratic programs}, journal = MP, volume = 27, number = 1, pages = {1--33}, year = 1983} @article{GoldLiu93, author = {D. Goldfarb and S. C. Liu}, title = {An $O(n^3 {L})$ primal dual potential reduction algorithm for solving convex quadratic programs}, journal = MP, volume = 61, number = 2, pages = {161--170}, year = 1993} @article{GoldToin84, author = {D. Goldfarb and Ph. L. Toint}, title = {Optimal Estimation of {J}acobian and {H}essian Matrices That Arise in Finite Difference Calculations}, journal = MC, volume = 43, number = 167, pages = {69--88}, year = 1984} @article{GoldQuanTrot66, author = {S. M. Goldfeld and R. E. Quandt and H. F. Trotter}, title = {Maximization by quadratic hill-climbing}, journal = {Econometrica}, volume = 34, pages = {541--551}, year = 1966, abstract = {The purpose of this paper is to describe a new gradient method for maximizing general functions. 
After a brief discussion of various known gradient methods the mathematical foundation is laid for the new algorithm which rests on maximizing a quadratic approximation to the function on a suitably chosen spherical region. The method requires no assumptions about the concavity of the function to be maximized and automatically modifies the step size in the light of the success of the quadratic approximation to the function. The paper further discusses some practical problems of implementing the algorithm and presents recent computational experience with it.}, summary = {A gradient method for maximizing general functions is discussed. After a brief discussion of various known gradient methods the mathematical foundation is laid for the algorithm which rests on maximizing a quadratic approximation to the function on a suitably chosen spherical region. The method requires no assumptions about the concavity of the function to be maximized and automatically modifies the step size in the light of the success of the quadratic approximation to the function. Practical problems of implementing the algorithm are discussed, and computational experience presented.}} @article{Gold64, author = {A. A. Goldstein}, title = {Convex programming in {H}ilbert space}, journal = {Bull. Amer. Math. Soc.}, volume = 70, pages = {709--710}, year = 1964} @article{GoluOLea89, author = {G. H. Golub and D. P. O'Leary}, title = {Some history of the conjugate gradient and {L}anczos methods}, journal = SIREV, volume = 31, number = 1, pages = {50--102}, year = 1989} @book{GoluvanL89, author = {G. H. Golub and Van Loan, C. F.}, title = {Matrix Computations}, publisher = {Johns Hopkins University Press}, address = {Baltimore}, edition = {second}, year = 1989} @article{GoluvonM91, author = {G. H. Golub and U. von Matt}, title = {Quadratically constrained least squares and quadratic problems}, journal = NUMMATH, volume = 59, pages = {561--580}, year = 1991} @article{GomeMaciMart99, author = {F. A. M. 
Gomes and M. C. Maciel and J. M. Mart\'{\i}nez}, title = {Nonlinear Programming algorithms using trust regions and augmented {L}agrangians with nonmonotone penalty parameters}, journal = MP, volume = 84, number = 1, pages = {161--200}, year = 1999, abstract = {A model algorithm based on the successive quadratic programming method for solving the general nonlinear programming problem is presented. The objective function and the constraints of the problem are only required to be differentiable and their gradients to satisfy a Lipschitz condition. The strategy for obtaining global convergence is based on the trust region approach. The merit function is a type of augmented Lagrangian. A new updating scheme is introduced for the penalty parameter, by means of which monotone increase is not necessary. Global convergence results are proved and numerical experiments are presented.}, summary = {An algorithm based on the SQP method for solving the general nonlinear programming problem is presented. The objective function and the constraints of the problem are only required to be differentiable and their gradients to satisfy a Lipschitz condition. The strategy for obtaining global convergence is based on a trust-region. The merit function is a type of augmented Lagrangian. An updating scheme is introduced for the penalty parameter, by means of which monotone increase is not necessary. Global convergence results are proved and numerical experiments are presented.}} @article{Gonz91, author = {C. C. Gonzaga}, title = {An interior trust region method for linearly constrained optimization}, journal = {COAL Newsletter}, volume = 19, pages = {55--66}, year = 1991, summary = {The link between the ellipsoids associated with interior-point methods scaling and trust regions is exposed.}} @article{GonzTapiPotr98, author = {M. D. Gonzalez{-}Lima and R. A. Tapia and F. A. 
Potra}, title = {On Effectively Computing the Analytic Center of the Solution Set by Primal-Dual Interior-Point Methods}, journal = SIOPT, volume = 8, number = 1, pages = {1--25}, year = 1998} @article{GopaBieg97, author = {V. Gopal and L. T. Biegler}, title = {Nonsmooth dynamic simulation with linear programming based methods}, journal = {Computers and Chemical Engineering}, volume = 21, number = 7, pages = {675--689}, year = 1997, abstract = {Process simulation has emerged as a valuable tool for process design, analysis and operation. In this work, we extend the capabilities of iterated linear programming (LP) for dealing with problems encountered in dynamic nonsmooth process simulation. A previously developed LP method is refined with the addition of a new descent strategy which combines line search with a trust region approach. This adds more stability and efficiency to the method. The LP method has the advantage of naturally dealing with profile bounds as well. This is demonstrated to avoid the computational difficulties which arise from the iterates going into physically unrealistic regions. A new method for the treatment of discontinuities occurring in dynamic simulation problems is also presented in this paper. The method ensures that any event which has occurred within the time interval in consideration is detected and if more than one event occurs, the detected one is indeed the earliest one. A specific class of implicitly discontinuous process simulation problems, phase equilibrium calculations, is also examined. A new formulation is introduced to solve multiphase problems.}, summary = {A previously developed LP method is refined with the addition of a descent strategy which combines line search with a trust-region approach. The LP method has the advantage of naturally dealing with additional profile bounds. A method for the treatment of discontinuities occurring in dynamic simulation problems is also presented. 
The method ensures that any event which has occurred within the time interval in consideration is detected and if more than one event occurs, the detected one is indeed the earliest one. A specific class of implicitly discontinuous process simulation problems, phase equilibrium calculations, is also examined. A formulation is introduced to solve multiphase problems.}} @article{GopaBieg98, author = {V. Gopal and L. T. Biegler}, title = {A successive linear programming approach for initialization and reinitialization after discontinuities of differential-algebraic equations }, journal = SISC, volume = 20, number = 2, pages = {447--467}, year = 1998, abstract = {Determination of consistent initial conditions is an important aspect of the solution of differential-algebraic equations (DAEs). Specification of inconsistent initial conditions, even if they are only slightly inconsistent, often leads to a failure in the initialization problem. We present a successive linear programming (SLP) approach for the solution of the DAE derivative array equations for the initialization problem. The SLP formulation handles roundoff errors and inconsistent user specifications, among other things, and allows for reliable convergence strategies that incorporate variable bounds and trust region concepts. A new consistent set of initial conditions is obtained by minimizing the deviation of the variable values from the specified ones. For problems with discontinuities caused by a step change in the input functions, a new criterion is presented for identifying the subset of variables which are continuous across the discontinuity. The SLP formulation is then applied to determine a consistent set of initial conditions for further solution of the problem in the domain after the discontinuity. 
Numerous example problems are solved to illustrate these concepts.}, summary = {Determination of consistent initial conditions is an important aspect of the solution of differential-algebraic equations (DAEs). Specification of inconsistent initial conditions, even if they are only slightly inconsistent, often leads to a failure in the initialization problem. A successive linear programming (SLP) approach for the solution of the DAE derivative array equations for the initialization problem is proposed. The SLP formulation handles roundoff errors and inconsistent user specifications, among other things, and allows for reliable convergence strategies that incorporate variable bounds and trust region concepts. A new consistent set of initial conditions is obtained by minimizing the deviation of the variable values from the specified ones. For problems with discontinuities caused by a step change in the input functions, a new criterion is presented for identifying the subset of variables which are continuous across the discontinuity. The SLP formulation is then applied to determine a consistent set of initial conditions for further solution of the problem in the domain after the discontinuity. Numerous example problems are solved to illustrate these concepts.}} @article{Goul85, author = {N. I. M. Gould}, title = {On practical conditions for the existence and uniqueness of solutions to the general equality quadratic-programming problem}, journal = MP, volume = 32, number = 1, pages = {90--99}, year = 1985} @article{Goul86, author = {N. I. M. Gould}, title = {On the accurate determination of search directions for simple differentiable penalty functions}, journal = IMAJNA, volume = 6, pages = {357--372}, year = 1986} @article{Goul89, author = {N. I. M. Gould}, title = {On the convergence of a sequential penalty function method for constrained minimization}, journal = SINUM, volume = 26, number = 1, pages = {107--128}, year = 1989} @article{Goul91, author = {N. I. M. 
Gould}, title = {An algorithm for large-scale quadratic programming}, journal = IMAJNA, volume = 11, number = 3, pages = {299--324}, year = 1991} @article{Goul99, author = {N. I. M. Gould}, title = {On modified factorizations for large-scale linearly-constrained optimization}, journal = SIOPT, volume = 9, number = 4, pages = {1041--1063}, year = 1999} @inproceedings{Goul99b, author = {N. I. M. Gould}, title = {Iterative methods for ill-conditioned linear systems from optimization}, crossref = {DiPiGian99}, pages = {123--142}} @inproceedings{GoulLuciRomaToin98, author = {N. I. M. Gould and S. Lucidi and M. Roma and Ph. L. Toint}, title = {A linesearch algorithm with memory for unconstrained optimization}, crossref = {DeLeMurlPardTora98}, pages = {207--223}} @article{GoulLuciRomaToin99, author = {N. I. M. Gould and S. Lucidi and M. Roma and Ph. L. Toint}, title = {Solving the trust-region subproblem using the {L}anczos method}, journal = SIOPT, volume = 9, number = 2, pages = {504--525}, year = 1999, abstract = {The approximate minimization of a quadratic function within an ellipsoidal trust region is an important subproblem for many nonlinear programming methods. When the number of variables is large, the most widely-used strategy is to trace the path of conjugate gradient iterates either to convergence or until it reaches the trust-region boundary. In this paper, we investigate ways of continuing the process once the boundary has been encountered. The key is to observe that the trust-region problem within the currently generated Krylov subspace has very special structure which enables it to be solved very efficiently. We compare the new strategy with existing methods. 
The resulting software package is available as {\tt HSL\_VF05} within the \citebb{HSL00}.}, summary = {When the number of variables is large, the most widely-used strategy for solving the trust-region subproblem is to trace the path of conjugate gradient iterates either to convergence or until it reaches the trust-region boundary. Means of continuing the process once the boundary has been encountered are investigated. One observes that the trust-region problem within the currently generated Krylov subspace has very special structure which enables it to be solved efficiently. The proposed strategy is compared with existing methods.}} @inproceedings{GoulNoce98, author = {N. I. M. Gould and J. Nocedal}, title = {The modified absolute-value factorization norm for trust-region minimization}, crossref = {DeLeMurlPardTora98}, pages = {225--241}, abstract = {A trust-region method for unconstrained minimization, using a trust-region norm based upon a modified absolute-value factorization of the model Hessian, is proposed. It is shown that the resulting trust-region subproblem may be solved using a single factorization. In the convex case, the method reduces to a backtracking Newton linesearch procedure. The resulting software package is available as {\tt HSL\_VF06} within the \citebb{HSL00}. Numerical evidence shows that the approach is effective in the nonconvex case.}, summary = {A trust-region method for unconstrained minimization, using a trust-region norm based upon a modified absolute-value factorization of the model Hessian, is proposed. It is shown that the resulting trust-region subproblem may be solved using a single factorization. In the convex case, the method reduces to a backtracking Newton linesearch procedure. Numerical experience suggests that the approach is effective in the non-convex case.}} @techreport{GoulOrbaSartToin99, author = {N. I. M. Gould and D. Orban and A. Sartenaer and Ph. L. 
Toint}, title = {On the Practical Dependency of a Trust-Region Algorithm on its Parameters}, institution = FUNDP, address = FUNDP-ADDRESS, number = {(in preparation)}, year = 1999, abstract = {In this paper, it is shown through numerical tests that commonly accepted values for the parameters of a trust-region algorithm might not be the best ones. Better ranges of values for these parameters are exhibited on a statistical basis. It is also shown what improvements can be hoped for when using a band preconditioner.}, summary = {An extensive numerical study of the statistically best ranges for the trust-region algorithm parameters is described, whose conclusions differ from folklore knowledge. The impact of preconditioning on parameter choice and performance is also discussed.}} @techreport{GoulOrbaSartToin99b, author = {N. I. M. Gould and D. Orban and A. Sartenaer and Ph. L. Toint}, title = {On the Local Convergence of a Primal-Dual Trust-Region Interior-Point Algorithm for Constrained Nonlinear Programming}, institution = FUNDP, address = FUNDP-ADDRESS, number = {(in preparation)}, year = 1999, abstract = {The local convergence properties of the trust-region interior-point method described in \citebb{ConnGoulOrbaToin99} are analyzed. It is shown that the method asymptotically requires a single inner iteration per outer iteration and converges 2-step supercubically.}, summary = {The local convergence properties of the trust-region interior-point method described in \citebb{ConnGoulOrbaToin99} are analyzed. It is shown that the method asymptotically requires a single inner iteration per outer iteration and converges 2-step supercubically.}} @article{GoulToin99, author = {N. I. M. Gould and Ph. L. Toint}, title = {A Note on the Second-Order Convergence of Optimization Algorithms Using Barrier Functions}, journal = MP, volume = 85, number = 2, pages = {433--438}, year = 1999} @inproceedings{GoulToin99b, author = {N. I. M. Gould and Ph. L.
Toint}, title = {{SQP} methods for large-scale nonlinear programming}, booktitle = {System Modelling and Optimization, Methods, Theory and Applications}, editor = {M. J. D. Powell and S. Scholtes}, publisher = KLUWER, address = KLUWER-ADDRESS, pages = {149--178}, year = 2000, abstract = {We compare and contrast a number of recent sequential quadratic programming (SQP) methods that have been proposed for the solution of large-scale nonlinear programming problems. Both line-search and trust-region approaches are considered, as are the implications of interior-point and quadratic programming methods.}, summary = {A comparison is proposed of a number of recent SQP methods for the solution of large-scale nonlinear programming problems. Both linesearch and trust-region approaches are considered, as are the implications of interior-point and quadratic programming methods.}} @techreport{GoulHribNoce98, author = {N. I. M. Gould and M. E. Hribar and J. Nocedal}, title = {On the solution of equality constrained quadratic problems arising in optimization}, institution = RAL, address = RAL-ADDRESS, number = {RAL-TR-98-069}, year = 1998} @article{GoulToll72, author = {F. J. Gould and J. W. Tolle}, title = {Geometry of optimality conditions and constraint qualifications}, journal = MP, volume = 2, number = 1, pages = {1--18}, year = 1972} @article{GowGuoLiuLuci97, author = {A. S. Gow and X. Z. Guo and D. L. Liu and A. Lucia}, title = {Simulation of refrigerant phase equilibria}, journal = {Industrial and Engineering Chemistry Research}, volume = 36, number = 7, pages = {2841--2848}, year = 1997, abstract = {Vapor-liquid equilibria for refrigerant mixtures modeled by an equation of state are studied. Phase behavior calculated by the Soave-Redlich-Kwong (SRK) equation with a single adjustable binary interaction parameter is compared with experimental data for binary refrigerant mixtures, two with a supercritical component and one that exhibits azeotropic behavior. 
It is shown that the SRK equation gives an adequate description of the phase envelope for binary refrigerant systems. The complex domain trust region methods of Lucia and co-workers (\bciteb{LuciGuoWang93}, \bciteb{LuciXu94}) are applied to fixed vapor, isothermal flash model equations, with particular attention to root finding and root assignment at the equation of state (EOS) level of the calculations, and convergence in the retrograde and azeotropic regions of the phase diagram. Rules for assigning roots to the vapor and liquid phases in the case where all roots to the EOS are complex-valued are proposed and shown to yield correct results, even in retrograde regions. Convergence of the flash model equations is also studied. It is shown that the complex domain trust region algorithms outperform Newton's method in singular regions of the phase diagram (i.e., at near azeotropic conditions and in the retrograde loop), primarily due to the eigenvalue-eigenvector decomposition strategy given in Sridhar and Lucia (1995). A variety of geometric figures are used to illustrate salient points.}, summary = {Vapor-liquid equilibria for refrigerant mixtures modeled by an equation of state are studied. Phase behaviour calculated by the Soave-Redlich-Kwong (SRK) equation with a single adjustable binary interaction parameter is compared with experimental data for binary refrigerant mixtures, two with a supercritical component and one that exhibits azeotropic behaviour. It is shown that the SRK equation gives an adequate description of the phase envelope for binary refrigerant systems. The complex domain trust-region methods of \citebb{LuciGuoWang93} and \citebb{LuciXu94} are applied to fixed vapor, isothermal flash model equations, with particular attention to root finding and root assignment at the equation of state (EOS) level of the calculations, and convergence in the retrograde and azeotropic regions of the phase diagram.
Rules for assigning roots to the vapor and liquid phases in the case where all roots to the EOS are complex-valued yield correct results, even in retrograde regions. Convergence of the flash model equations is also studied. It is shown that the complex domain trust-region algorithms outperform Newton's method in singular regions of the phase diagram. A variety of geometric figures are used to illustrate salient points.}} @article{GreeStra92, author = {A. Greenbaum and Z. Strako\v{s}}, title = {Predicting the behaviour of finite precision {L}anczos and conjugate gradient computations}, journal = SIMAA, volume = 13, number = 1, pages = {121--137}, year = 1992} @article{Gree67, author = {J. Greenstadt}, title = {On the relative efficiencies of gradient methods}, journal = MC, volume = 21, pages = {360--367}, year = 1967} @book{Gree97, author = {A. Greenbaum}, title = {Iterative Methods for Solving Linear Systems}, publisher = SIAM, address = SIAM-ADDRESS, year = 1997} @inproceedings{Grie89, author = {A. Griewank}, title = {On automatic differentiation}, crossref = {IriTana89}, pages = {83--108}} @inproceedings{Grie94, author = {A. Griewank}, title = {Computational Differentiation and Optimization}, booktitle = {Mathematical Programming: State of the Art 1994}, editor = {J. R. Birge and K. G. Murty}, publisher = {The University of Michigan}, address = {Ann Arbor, USA}, pages = {102--131}, year = 1994} @book{GrieCorl91, author = {A. Griewank and G. Corliss}, title = {Automatic Differentiation of Algorithms: Theory, Implementation and Application}, publisher = SIAM, address = SIAM-ADDRESS, year = 1991} @inproceedings{GrieToin82a, author = {A. Griewank and Ph. L. Toint}, title = {On the unconstrained optimization of partially separable functions}, crossref = {Powe82}, pages = {301--312}} @article{GrieToin82b, author = {A. Griewank and Ph. L.
Toint}, title = {Partitioned variable metric updates for large structured optimization problems}, journal = NUMMATH, volume = 39, pages = {119--137}, year = 1982} @article{GrifStew61, author = {R. E. Griffith and R. A. Stewart}, title = {A nonlinear programming technique for the optimization of continuous processing systems}, journal = {Management Science}, volume = 7, pages = {379--392}, year = 1961, abstract = {A description is given of a method for solving some nonlinear programming problems. The mathematics of this method are quite simple and are easy to apply to electronic computation. A numerical example, a model construction example, and a description of a particular existing computer system are included in order to clarify the mode of operation of the method.}, summary = {A simple method for solving nonlinear programming problems is given. A numerical example, a model construction example, and a description of a particular existing computer system are included in order to clarify the mode of operation of the method.}} @article{GripLampLuci86, author = {L. Grippo and F. Lampariello and S. Lucidi}, title = {A nonmonotone line search technique for {N}ewton's method}, journal = SINUM, volume = 23, number = 4, pages = {707--716}, year = 1986} @article{GripLampLuci89, author = {L. Grippo and F. Lampariello and S. Lucidi}, title = {A Truncated {N}ewton Method with Nonmonotone Line Search for Unconstrained Optimization}, journal = JOTA, volume = 60, number = 3, pages = {401--419}, year = 1989} @article{GripLampLuci91, author = {L. Grippo and F. Lampariello and S. Lucidi}, title = {A class of nonmonotone stabilization methods in unconstrained optimization}, journal = NUMMATH, volume = 59, pages = {779--805}, year = 1991} @techreport{GrotMcKi98, author = {A. Grothey and K. 
McKinnon}, title = {A Superlinearly Convergent Trust Region Bundle Method}, institution = {Department of Mathematics and Statistics}, address = {University of Edinburgh, Scotland}, number = {MS 98-015}, year = 1998, abstract = {Bundle methods for the minimization of non-smooth functions have been around for almost 20 years. Numerous variations have been proposed. But until very recently they all suffered from the drawback of only linear convergence. The aim of this paper is to show how exploiting an analogy with SQP gives rise to a superlinearly convergent bundle method. Our algorithm features a trust region philosophy and is expected to converge superlinearly even for non-convex problems.}, summary = {Current bundle methods for the minimization of non-smooth functions converge at a linear rate. A superlinearly convergent bundle method, using a trust region, is proposed for nonconvex problems. Numerical experience on a power-generation problem is reported.}} @article{Gurw94, author = {C. Gurwitz}, title = {Local Convergence of a Two-Piece Update of a Projected {H}essian Matrix}, journal = SIOPT, volume = 4, number = 3, pages = {461--485}, year = 1994} @book{GruvSach80, author = {W. A. Gruver and E. W. Sachs}, title = {Algorithmic Methods in Optimal Control}, publisher = {Pitman}, address = {Boston, USA}, year = 1980} %%% H %%% @book{Hack94, author = {W. Hackbusch}, title = {Iterative Solution of Large Sparse Systems of Equations}, publisher = SPRINGER, address = SPRINGER-ADDRESS, series = {Springer Series in Applied Mathematical Sciences}, year = 1994} @article{Hage87, author = {W. W. Hager}, title = {Dual techniques for constrained optimization}, journal = JOTA, volume = 55, pages = {37--71}, year = 1987} @article{Hage99, author = {W. W. Hager}, title = {Stabilized sequential quadratic programming}, journal = COAP, volume = 12, number = {1--2}, pages = {253--273}, year = 1999} @techreport{Hage99b, author = {W. W.
Hager}, title = {Minimizing a Quadratic Over a Sphere}, institution = {Mathematics Department, University of Florida}, address = {Gainesville, Florida, USA}, month = {May}, year = 1999, abstract = {A new method, the sequential subspace method (SSM), is developed for minimizing a quadratic over a sphere. In each iteration of the scheme, the quadratic is minimized over a subspace that contains the prior iterate, a sequential quadratic programming iterate, and a projected gradient. A low dimensional subspace with these properties is obtained using the transformed minimal residual algorithm (TMRES). We prove that the SSM is locally quadratically convergent. Numerical experiments indicate that the SSM requires far fewer matrix-vector operations than other recently developed algorithms.}, summary = {A sequential subspace method (SSM), is developed for minimizing a quadratic over a sphere. In each iteration, the quadratic is minimized over a subspace that contains the prior iterate, a sequential quadratic programming iterate, and a projected gradient. A low dimensional subspace with these properties is obtained using the transformed minimal residual algorithm (TMRES). The SSM is proved to be locally quadratically convergent. Numerical experiments indicate that the SSM requires few matrix-vector operations.}} @article{Han77, author = {S. P. Han}, title = {A Globally Convergent Method for Nonlinear Programming}, journal = JOTA, volume = 22, number = 3, pages = {297--309}, year = 1977} @article{HanMang79, author = {S. P. Han and O. L. Mangasarian}, title = {Exact penalty functions in nonlinear programming}, journal = MP, volume = 17, number = 3, pages = {251--269}, year = 1979} @article{HanMang83, author = {S. P. Han and O. L. Mangasarian}, title = {A dual differentiable exact penalty-function}, journal = MP, volume = 25, number = 3, pages = {293--306}, year = 1983} @article{HanPardYe92, author = {C. Han and P. Pardalos and Y. 
Ye}, title = {On the solution of indefinite quadratic problems using an interior point method}, journal = {Informatica}, volume = 3, pages = {474--496}, year = 1992} @techreport{HanHan99, author = {Q. Han and J. Han}, title = {Modified Quasi-{N}ewton Method with Collinear Scaling for Unconstrained Optimization}, institution = {Institute of Computational Mathematics and Scientific/Engineering Computing, Chinese Academy of Sciences}, address = {Beijing}, number = {February}, year = 1999, abstract = {It is well known that among the current methods for unconstrained optimization problems, Newton or quasi-Newton method with global strategy may be the most efficient method, which have local quadratic or superlinear convergence. However, when the iterate point is far away from a solution of the problem, Newton or quasi-Newton method may proceed slowly for the general nonlinear objective function. In the paper, we present a modified quasi-Newton method with trust region using the collinear scaling for unconstrained optimization. Not only the gradient information but the values of the objective function are used to construct the local model at the current iteration point. Moreover, the information about the super steepest descent direction is embedded into the local model. The amount of computation in each iteration of the modified quasi-Newton method algorithm with trust region is the same as that of the standard quasi-Newton method with trust region. And some numerical results show that the modified method needs very fewer iterations to reach the solution of the optimization problem. Global and local convergence of the method is also analyzed.}, summary = {A trust-region method is proposed for unconstrained minimization, where the model is obtained by a conic quasi-Newton update. The trust region is defined not in the original space but in the space of collinearly scaled variables.
Limited numerical experience illustrates the practical potential of the method.}} @article{Hank97, author = {M. Hanke}, title = {A regularizing {L}evenberg-{M}arquardt scheme, with applications to inverse groundwater filtration problems}, journal = {Inverse Problems}, volume = 13, number = 1, pages = {79--95}, year = 1997, abstract = {The first part of this paper studies a Levenberg-Marquardt scheme for nonlinear inverse problems where the corresponding Lagrange (or regularization) parameter is chosen from an inexact Newton strategy. While the convergence analysis of standard implementations based on trust region strategies always requires the invertibility of the Frechet derivative of the nonlinear operator at the exact solution, the new Levenberg-Marquardt scheme is suitable for ill-posed problems as long as the Taylor remainder is of second order in the interpolating metric between the range and domain topologies. Estimates of this type are established in the second part of the paper for ill-posed parameter identification problems arising in inverse groundwater hydrology. Both transient and steady-state data are investigated. Finally, the numerical performance of the new Levenberg-Marquardt scheme is studied and compared to a usual implementation on a realistic but synthetic two-dimensional model problem from the engineering literature.}, summary = {A Levenberg-Morrison-Marquardt scheme for nonlinear inverse problems is considered, where the corresponding Lagrange parameter is chosen from an inexact Newton strategy. This scheme is suitable for ill-posed problems as long as the Taylor remainder is of second order in the interpolating metric between the range and domain topologies. Estimates of this type are established for ill-posed parameter identification problems arising in inverse groundwater hydrology. Both transient and steady-state data are investigated. 
The performance of the scheme is compared to a usual implementation on a two-dimensional engineering model problem.}} @article{HansKrog92, author = {R. J. Hanson and F. T. Krogh}, title = {A Quadratic-Tensor Model Algorithm for Nonlinear Least-Squares Problems with Linear Constraints}, journal = TOMS, volume = 18, number = 2, pages = {115--133}, year = 1992, abstract = {A new algorithm is presented for solving nonlinear least-squares and nonlinear equation problems. The algorithm is based on approximating the nonlinear functions using the quadratic-tensor model proposed by Schnabel and Frank (1984). The problem statement may include simple bounds or more general linear constraints on the unknowns. The algorithm uses a trust-region defined by a box containing the current values of the unknowns. The objective function (Euclidean length of the functions) is allowed to increase at intermediate steps. These increases are allowed as long as the predictor indicates that a new set of best values exists in the trust-region. There is logic provided to retreat to the current best values, should that be required. The computations for the model-problem require a constrained nonlinear least-squares solver. This is done using a simpler version of the algorithm. In its present form the algorithm is effective for problems with linear constraints and dense Jacobian matrices. Results on standard test problems are presented in the Appendix. The new algorithm appears to be efficient in terms of function and Jacobian evaluations.}, summary = {A new algorithm is presented for solving linearly constrained nonlinear least-squares and nonlinear equation problems, based on approximating the nonlinear functions using the quadratic-tensor model. The algorithm uses a box-shaped trust-region. The objective function is allowed to increase at intermediate steps, as long as the predictor indicates that a new set of best values exists in the trust-region. 
There is logic provided to retreat to the current best values, if necessary. The algorithm is effective for problems with linear constraints and dense Jacobian matrices and appears to be efficient in terms of function and Jacobian evaluations.}} @article{HarkPang90, author = {P. T. Harker and J. S. Pang}, title = {Finite-dimensional variational inequality and nonlinear complementarity problems: a survey of theory, algorithms and applications}, journal = MPB, volume = 48, number = 2, pages = {161--220}, year = 1990} @article{HarkXiao90, author = {P. T. Harker and B. Xiao}, title = {{N}ewton's method for the nonlinear complementarity problem: a {B}-differentiable equation approach}, journal = MPB, volume = 48, number = 3, pages = {339--358}, year = 1990} @article{HeDiaoGao97, author = {G. He and B. Diao and Z. Gao}, title = {An {SQP} algorithm with nonmonotone line search for general nonlinear constrained optimization problem}, journal = JCM, volume = 15, number = 2, pages = {179--192}, year = 1997} @techreport{Hebd73, author = {M. D. Hebden}, title = {An Algorithm for Minimization Using Exact Second Derivatives}, institution = HARWELL, address = HARWELL-ADDRESS, number = {T.P. 515}, year = 1973, abstract = {A review of the methods currently available for the minimization of a function whose first and second derivatives can be calculated shows either that the method requires the eigensolution of the Hessian, or with one exception that a simple example can be found which causes the method to fail. 
In this paper one of the successful methods that requires the eigensolution is modified so that at each iteration the solution of a number (approximately two) of systems of linear equations is required, instead of the eigenvalue calculation.}, summary = {A method for the solution of the $\ell_2$ trust-region subproblem is proposed so that at each iteration the solution of a number (roughly two) of systems of linear equations is required, instead of the eigenvalue calculation.}} @article{Hein93, author = {M. Heinkenschloss}, title = {Mesh Independence for Nonlinear Least Squares Problems with Norm Constraints}, journal = SIOPT, volume = 3, number = 1, pages = {81--117}, year = 1993} @article{Hein94, author = {M. Heinkenschloss}, title = {On the solution of a two ball trust region subproblem}, journal = MP, volume = 64, number = 3, pages = {249--276}, year = 1994, abstract = {In this paper we investigate the structure of a two ball trust region subproblem arising in nonlinear parameter identification problems and propose a method for its solution. The method decomposes the subproblem and allows the application of efficient, well studied methods for the solution of the trust region subproblems arising in unconstrained optimization. In the discussion of the structure we focus on the case where both constraints are active and on the treatment of the unconstrained problem.}, summary = {The structure of a two-ball trust-region subproblem arising in nonlinear parameter identification problems is investigated, and a method for its solution is proposed. The method decomposes the subproblem, and allows the application of efficient methods for the solution of the trust-region subproblems. The discussion of the structure focuses on the case where both constraints are active and on the treatment of the unconstrained problem.}} @article{Hein98, author = {M. 
Heinkenschloss}, title = {A Trust-Region Method for Norm Constrained Problems}, journal = SINUM, volume = 35, number = 4, pages = {1594--1620}, year = 1998, abstract = {In this paper a trust region method for the solution of nonlinear optimization problems with norm constraints is presented and analyzed. Such problems often arise in parameter identification or nonlinear eigenvalue problems. The algorithms studied here allow for inexact gradient information and the use of subspace methods for the approximate solution of subproblems. Characterizations and the descent properties of trust region steps are given, criteria for the existence of successful iterations under inexact gradient information and under the use of subspace methods are established, and global convergence of the method is proven.}, summary = {A trust-region method for the solution of nonlinear optimization problems with norm constraints is presented and analyzed. Such problems often arise in parameter identification or nonlinear eigenvalue problems. The algorithms studied allow for inexact gradient information and the use of subspace methods for the approximate solution of subproblems. Characterizations and the descent properties of trust-region steps are given, criteria for the existence of successful iterations under inexact gradient information and under the use of subspace methods are established, and global convergence of the method is proven.}} @techreport{HeinUlbrUlbr97, author = {M. Heinkenschloss and M. Ulbrich and S. Ulbrich}, title = {Superlinear and quadratic convergence of affine-scaling interior-point {N}ewton methods for problems with simple bounds without strict complementarity}, institution = CAAM, address = RICE-ADDRESS, number = {TR97-30}, year = 1997, abstract = {A class of affine-scaling interior-point methods for bound constrained optimization problems is introduced which are locally q-superlinearly or q-quadratically convergent. 
It is assumed that the strong second order sufficient optimality conditions at the solution are satisfied, but strict complementarity is not required. The methods are modifications of the affine-scaling interior-point Newton methods introduced by \citebb{ColeLi94}. There are two modifications. One is a modification of the scaling matrix, the other one is the use of a projection of the step to maintain strict feasibility rather than a simple scaling of this step. A comprehensive local convergence analysis is given. A few simple examples are presented to illustrate the pitfalls of the original approach of \citeauthor{ColeLi94} in the degenerate case and to demonstrate the performance of the fast converging modifications developed in this paper.}, summary = {A class of affine-scaling interior-point methods for bound-constrained optimization problems is introduced which are locally Q-superlinearly or Q-quadratically convergent, even without assuming strict complementarity. The methods are derived from \citebb{ColeLi94} but use a different scaling matrix and a projection of the step to maintain strict feasibility. Simple examples are presented to illustrate the pitfalls of \citeauthor{ColeLi94}'s approach in the degenerate case and to demonstrate the performance of the fast converging modifications.}} @techreport{HeinVice95, author = {M. Heinkenschloss and L. N. Vicente}, title = {Analysis of Inexact Trust Region Interior-Point {SQP} Algorithms}, institution = CRPC, address = RICE-ADDRESS, type = {Technical Report}, number = {CRPC-TR95546}, year = 1995, abstract = {In this paper we analyze inexact trust-region interior-point (TRIP) sequential quadratic programming (SQP) algorithms for the solution of optimization problems with nonlinear equality constraints and simple bound constraints on some of the variables. Such problems arise in many engineering applications, in particular in optimal control problems with bounds on the control. 
The nonlinear constraints often come from the discretization of partial differential equations. In such cases the calculation of derivative information and the solution of the linearized equations is expensive. Often, the solution of linear systems and directional derivatives are computed inexactly yielding nonzero residuals. This paper analyzes the effect of the inexactness onto the convergence of TRIP SQP and gives practical rules to control the size of the residuals of these inexact calculations. It is shown that if the size of the residuals is of the order of both the size of the constraints and the trust region radius, then the TRIP SQP algorithms are globally first-order convergent. Numerical experiments with two optimal control problems governed by nonlinear partial differential equations are reported.}, summary = {Inexact trust-region interior-point (TRIP) sequential quadratic programming (SQP) algorithms for the solution of optimization problems with nonlinear equality constraints and simple bound constraints are analysed. The effect of the inexactness in the computation of derivative information on the convergence of TRIP SQP is analysed, and practical rules to control the size of the associated residuals are given. It is shown that if the size of the residuals is of the order of both the size of the constraints and the trust region radius, then the TRIP SQP algorithms are globally first-order convergent. Numerical experiments with two optimal control problems governed by nonlinear partial differential equations are reported.}} %also %institution = CAAM, address = RICE-ADDRESS, %number = {TR95-18}, year = 1995, @techreport{HeinVice99, author = {M. Heinkenschloss and L. N. 
Vicente}, title = {Analysis of Inexact Trust Region {SQP} Algorithms}, institution = COIMBRA, address = COIMBRA-ADDRESS, number = {99-15}, year = 1999, abstract = {In this paper we study the global convergence behavior of a class of composite-step trust-region SQP methods that allow inexact problem information. The inexact problem information can result from iterative linear systems solves within the trust-region SQP method or from approximations of first-order derivatives. Accuracy requirements in our trust-region SQP methods are adjusted based on feasibility and optimality of the iterates. In the absence of inexactness, our global convergence theory is equal to that of \citebb{DennElAlMaci97}. If all iterates are feasible, i.e.\ if all iterates satisfy the equality constraints, then our results are related to the known convergence analyses for trust-region methods with inexact gradient information for unconstrained optimization.}, summary = {The global convergence behaviour of a class of composite-step trust-region SQP methods that allow inexact problem information is studied. This inexact information can result from iterative linear systems solves within the trust-region SQP method or from approximations of first-order derivatives. Accuracy requirements are based on feasibility and optimality of the iterates. In the absence of inexactness, the analysis reduces to that of \citebb{DennElAlMaci97}. If all iterates satisfy the equality constraints, then the results are related to the known convergence properties for trust-region methods with inexact gradient information in unconstrained optimization.}} @article{HeinSpel94, author = {J. Heinz and P. Spellucci}, title = {A successful implementation of the {P}antoja-{M}ayne {SQP} method}, journal = OMS, volume = 4, number = 1, pages = {1--28}, year = 1994} @article{HelfZwic95, author = {H.-P. Helfrich and D.
Zwick}, title = {Trust region algorithms for the nonlinear least distance problem}, journal = {Numerical Algorithms}, volume = 9, number = {1--2}, pages = {171--179}, year = 1995, abstract = {The nonlinear least distance problem is a special case of equality constrained optimization. Let a curve or surface be given in implicit form via the equation $f(x)=0$, $x \in \Re^d$, and let $z \in \Re^d$ be a fixed data point. We discuss two algorithms for solving the following problem: Find a point $x^*$ such that $f(x^*)=0$ and $\|z-x^*\|_2$ is minimal among all such $x$. The algorithms presented use the trust region approach in which, at each iteration, an approximation to the objective function or merit function is minimized in a given neighborhood (the trust region) of the current iterate. Among other things, this allows one to prove global convergence of the algorithm.}, summary = {Trust-region algorithms are presented for the nonlinearly constrained least-distance problem. Global convergence is proved.}} @article{HelfZwic96, author = {H. P. Helfrich and D. Zwick}, title = {A trust region algorithm for parametric curve and surface fitting}, journal = JCAM, volume = 73, number = {1--2}, pages = {119--134}, year = 1996, abstract = {Let a family of curves or surfaces be given in parametric form via the model equation $x=f(s,\beta)$ where $x \in \Re^n$, $\beta \in \Re^m$, and $s \in S \subset \Re^d$, $dor=3) image Jacobian (J) without any prior information, while carrying out a 3 DOF manipulation task. For 6 and higher DOF manipulation, a rough initial estimate of J is beneficial. We also verified that redundant visual information is valuable. Errors due to imprecise tracking and goal specification were reduced as the number of visual features, m, was increased.
Furthermore highly redundant systems allow us to detect outliers in the feature vector and deal with partial occlusion.}, summary = {An experimental evaluation of adaptive and non-adaptive visual servoing in 3, 6 and 12 degrees of freedom (DOF) is compared to traditional joint feedback control. The main results are: positioning of a 6 axis PUMA 762 arm is up to 5 times more precise under visual control than under joint control and positioning of a Utah/MIT dextrous hand is better under visual control than under joint control by a factor of 2, and a trust-region-based adaptive visual feedback controller is very robust. For $m$ tracked visual features, the algorithm can successfully estimate online the $3m$ ($m \geq 3$) image Jacobian ($J$) without any prior information, while carrying out a 3 DOF manipulation task. For 6 and higher DOF manipulation, a rough initial estimate of $J$ is beneficial. Redundant visual information is also shown to be valuable. Errors due to imprecise tracking and goal specification were reduced as the number of visual features, $m$, was increased. Furthermore highly redundant systems allow the detection of outliers in the feature vector and dealing with partial occlusion.}} @inproceedings{JainMcClSark86, author = {V. K. Jain and T. E. McClellan and T. K. Sarkar}, title = {Half-{F}ourier transform and application to radar signals}, booktitle = {ICASSP 86 Proceedings. IEEE-IECEJ-ASJ International Conference on Acoustics, Speech and Signal Processing}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 1, pages = {241--244}, year = {1986}, abstract = {The authors discuss the half Fourier transform (HFT) and explore its application to radar-return signals with specular components. It is shown that this transform enables the desired part to be separated from the specular impulsive components. The effectiveness of this technique and the computer program developed is demonstrated by simulation examples.
The program uses an optimization package which minimizes a nonlinear sum-of-squares functional with a model trust region strategy.}, summary = {The half Fourier transform is discussed and its application to radar-return signals with specular components examined. This transform enables the desired part to be separated from the specular impulsive components. The problem is numerically solved by applying a trust-region algorithm to a nonlinear least-squares formulation.}} @article{JaraMack87, author = {H. Jarausch and W. Mackens}, title = {Solving large nonlinear systems of equations by an adaptive condensation process}, journal = NUMMATH, volume = 50, number = 6, pages = {633--653}, year = 1987, abstract = {The authors present an algorithm which efficiently solves large nonlinear systems of the form $Au=F(u)$, $u \in \Re^n$ whenever an (iterative) solver '$A^{-1}$' for the symmetric positive definite matrix $A$ is available and $F'(u)$ is symmetric. Such problems arise from the discretization of nonlinear elliptic partial differential equations. By means of an adaptive decomposition process the authors split the original system into a low dimensional system-to be treated by any sophisticated solver-and a remaining high-dimensional system, which can easily be solved by fixed point iteration. Specifically the authors choose a Newton-type trust region algorithm for the treatment of the small system. They show global convergence under natural assumptions on the nonlinearity. The convergence results typical for trust-region algorithms carry over to the full iteration process. The only large systems to be solved are linear ones with the fixed matrix $A$. 
Thus existing software for positive definite sparse linear systems can be used.}, summary = {The authors present an algorithm which efficiently solves large nonlinear systems of the form $Au=F(u)$, $u \in \Re^n$ whenever an (iterative) solver '$A^{-1}$' for the symmetric positive definite matrix $A$ is available and $F'(u)$ is symmetric. Such problems arise from the discretization of nonlinear elliptic partial differential equations. By means of an adaptive decomposition process the original system is split into a low-dimensional system and a remaining high-dimensional system, which can easily be solved by fixed point iteration. A Newton-type trust-region algorithm is chosen for the treatment of the small system. Convergence results typical for trust-region algorithms carry over to the full iteration process.}} @article{Jarr91, author = {F. Jarre}, title = {On the convergence of the method of analytic centers when applied to convex quadratic programs}, journal = MP, volume = 49, number = 3, pages = {341--358}, year = 1991} @misc{Jarr98, author = {F. Jarre}, title = {An {QQP}-Minimization Method for Semidefinite and Smooth nonconvex programs}, howpublished = {Presentation at the Optimization 98 Conference, Coimbra}, year = 1998, abstract = {Recent international research has focussed on semidefinite programs where the entries of an unknown matrix $X$ need to be chosen in a certain optimal way under the constraint that $X$ be symmetric and positive semi-definite. The present frameworks for solving semidefinite programs allow further convex constraints on the matrix entries. In many real world applications, however, also smooth nonconvex constraints occur. For such problems the standard approach still often used to date is to impose an equality constraint $X=LL^T$ where $L$ is lower triangular.
It is not surprising that standard nonlinear optimization software fails for such a formulation; the Cholesky factor $L$ is not unique when $X$ is singular; it lies in a nonlinear nonconvex manifold, and methods based on linearizing the equation $X=LL^T$ perform very poorly in this situation. We present an interior-point approach for solving such problems, discuss local convergence properties, and present some preliminary numerical results.}, summary = {An interior-point approach is presented for problems where the entries of a positive semidefinite matrix $X$ have to be optimally determined in the presence of non-convex constraints on the entries of $X$. The method combines ideas of a predictor-corrector interior-point method, of the SQP method and of trust-region methods. Some convergence results are given and very preliminary numerical experiments discussed.}} @article{JarrSaun95, author = {F. Jarre and M. A. Saunders}, title = {A practical interior-point method for convex programming}, journal = SIOPT, volume = 5, number = 1, pages = {149--171}, year = 1995} @article{JensAgre86, author = {J. J. A. Jensen and H. Agren}, title = {A direct, restricted-step, second-order {MC} {SCF} program for large scale \emph{ab initio} calculations}, journal = {Chemical Physics}, volume = 104, number = 2, pages = {229--250}, year = 1986, abstract = {A general purpose MC SCF program with a direct, fully second-order and step-restricted algorithm is presented. The direct character refers to the solution of an MC SCF eigenvalue equation by means of successive linear transformations where the norm-extended hessian matrix is multiplicated onto a trial vector without explicitly constructing the hessian. This allows for applications to large wavefunctions. In the iterative solution of the eigenvalue equation a norm-extended optimization algorithm is utilized in which the number of negative eigenvalues of the Hessian is monitored.
The step control is based on the trust region concept and is accomplished by means of a simple modification of the Davidson-Liu simultaneous expansion method (1978) for iterative calculation of an eigenvector. Convergence to the lowest state of a symmetry is thereby guaranteed, and test calculations also show reliable convergence for excited states. The authors outline the theory and describe in detail an efficient implementation, illustrated with sample calculations.}, summary = {A general purpose MC SCF program with a direct, fully second-order and step-restricted algorithm is presented. The step control is based on the trust-region concept. Convergence to the lowest state of a symmetry is guaranteed, and test calculations also show reliable convergence for excited states. }} @article{JensPoly94, author = {D. Jensen and R. Polyak}, title = {On the convergence of a modified barrier method for convex programming}, journal = {IBM J. Res. Develop.}, volume = 38, number = 3, pages = {307--320}, year = 1994} @techreport{JensPolySchn92, author = {D. Jensen and R. Polyak and R. Schneur}, title = {Numerical experience with modified barrier functions method for linear programming}, institution = IBMWATSON, address = IBMWATSON-ADDRESS, type = {Research Report}, number = {RC 18415}, year = 1992} @article{JiKritAbouTont99, author = {X. S. Ji and W. Kritpiphat and A. Aboudheir and P. Tontiwachwuthikul}, title = {Mass transfer parameter estimation using optimization technique: Case study in {CO}2 absorption with chemical reaction}, journal = {Canadian Journal of Chemical Engineering}, volume = 77, number = 1, pages = {69--73}, year = 1999, abstract = {This paper proposes a new approach of applying an optimization technique to simultaneously determine a physical liquid-film mass transfer coefficient ($k(L)(o)$) and effective interfacial area ($a(v)$) from a pilot plant data. 
The mass transfer mechanism of the CO2-NaOH system was modeled using the two-film theory to represent the behaviors of packed absorbers. The model presents an overall absorption rate (R-v) as a function of $k(L)(o)$ and $a(v)$. The optimization algorithm used in this study follows a modified Levenberg-Marquardt method with a trust region approach. The R-v predictions from the model are in good agreement with the experimental data, with an average error of 6.5\%.}, summary = {A new approach of applying an optimization technique to simultaneously determine a physical liquid-film mass transfer coefficient ($k(L)(o)$) and effective interfacial area ($a(v)$) from a pilot plant data is considered. The mass transfer mechanism of the CO2-NaOH system is modeled using the two-film theory to represent the behaviors of packed absorbers. The model presents an overall absorption rate (R-v) as a function of $k(L)(o)$ and $a(v)$. The optimization algorithm used in this study follows a modified Levenberg-Morrison-Marquardt method with a trust region approach. The R-v predictions from the model are in good agreement with the experimental data, with an average error of 6.5\%.}} @article{JianQi96, author = {H. Jiang and L. Qi}, title = {Globally and superlinearly convergent trust-region algorithms for convex {SC$^1$}-minimization problems and its application to stochastic programs}, journal = JOTA, volume = 90, number = 3, pages = {649--669}, year = 1996, abstract = {A function mapping from R(n) to R is called an SC1-function if it is differentiable and its derivative is semismooth. A convex SC1-minimization problem is a convex minimization problem with an SC1 objective function and linear constraints. Applications of such minimization problems include stochastic quadratic programming and minimax problems. In this paper, we present a globally and superlinearly convergent trust-region algorithm for solving such a problem.
Numerical examples are given on the application of this algorithm to stochastic quadratic programs.}, summary = {A globally and superlinearly convergent trust-region algorithm for solving SC1 problems is presented. Numerical examples are given on the application of this algorithm to stochastic quadratic programs.}} @article{JianQi97, author = {H. Jiang and L. Qi}, title = {A new nonsmooth equations approach to nonlinear complementarity problems}, journal = SICON, volume = 35, number = 1, pages = {178--193}, year = 1997} @article{JianFukuQiSun98, author = {H. Jiang and M. Fukushima and L. Qi and D. Sun}, title = {A trust region method for solving generalized complementarity problems}, journal = SIOPT, volume = 8, number = 1, pages = {140--158}, year = 1998, abstract = {Based on a semismooth equation reformulation using Fischer's function, a trust region algorithm is proposed for solving the generalized complementarity problem (GCP). The algorithm uses a generalized Jacobian of the function involved in the semismooth equation and adopts the squared natural residual of the semismooth equation as a merit function. The proposed algorithm is applicable to the nonlinear complementarity problem, because the latter problem is a special case of the GCP. Global convergence and, under a nonsingularity assumption, local Q-superlinear (or quadratic) convergence of the algorithm are established. Moreover, calculation of a generalized Jacobian is discussed and numerical results are presented.}, summary = {Based on a semi-smooth equation reformulation using Fischer's function, a trust-region algorithm is proposed for solving the generalized complementarity problem (GCP). It uses a generalized Jacobian of the function involved in the semi-smooth equation and adopts the squared natural residual of the semi-smooth equation as a merit function. Global convergence and, under a nonsingularity assumption, a local Q-superlinear (or quadratic) rate of convergence are established. 
Calculation of a generalized Jacobian is discussed and numerical results presented.}} @article{JittOsbo80, author = {K. Jittorntrum and M. R. Osborne}, title = {A modified barrier function method with improved rate of convergence for degenerate problems}, journal = {Journal of the Australian Mathematical Society (Series B)}, volume = 21, pages = {305--329}, year = 1980} @article{JonsLars90, author = {O. Jonsson and T. Larsson}, title = {A note on step-size restrictions in approximation procedures for structural optimization}, journal = {Computers and Structures}, volume = 37, number = 3, pages = {259--263}, year = 1990, abstract = {Different possibilities are discussed for the restriction of the step size in the well known iterative approximation concept for solving structural optimization problems. Such restrictions might for some problem instances be necessary to stabilize the behaviour of the solution procedure and to ensure convergence. There are two basic means to achieve such a restriction: either by using trust region constraints or by inducting a penalty on getting remote from the approximation point. In structural optimization solution procedures, the first of these possibilities is commonly used. It is demonstrated how a penalty term can be used instead, still making it possible to use the efficient dual concept. Relations to other mathematical programming methods are outlined and a small numerical example is presented.}, summary = {Different possibilities are discussed for the restriction of the step size in iterative approximation methods for structural optimization problems: by using either trust-region constraints or a penalty on the distance from the approximation point. The first approach is commonly used. It is demonstrated how a penalty term can be used instead.}} @article{JudiPire89, author = {J. J. J\'{u}dice and F. M. 
Pires}, title = {Direct methods for convex quadratic programs subject to box constraints}, journal = {Investigaci\'{o}n Operacional}, volume = 9, pages = {23--56}, year = 1989} %%% K %%% @article{Kani66, author = {S. Kaniel}, title = {Estimates for some computational techniques in linear algebra}, journal = MC, volume = 20, number = 95, pages = {369--378}, year = 1966} @article{Kant48, author = {L. Kantorovich}, title = {Functional analysis and applied mathematics}, journal = {Uspehi Matematicheskih Nauk}, volume = 3, pages = {89--185}, year = 1948} @article{KanzYamaFuku97, author = {Ch. Kanzow and N. Yamashita and M. Fukushima}, title = {New {NCP}-Functions and Their Properties}, journal = JOTA, volume = 94, number = 1, pages = {115--135}, year = 1997} @inproceedings{KanzZupk98, author = {Ch. Kanzow and M. Zupke}, title = {Inexact Trust-Region Methods for Nonlinear Complementarity Problems}, crossref = {FukuQi98}, pages = {211--235}, abstract = {In order to solve the nonlinear complementarity problem, we first reformulate it as a nonsmooth system of equations by using a recently introduced NCP-function. We then apply a trust-region-type method to this system of equations. Our trust-region method allows an inexact solution of the trust-region-subproblem. We show that the algorithm is well-defined for a general nonlinear complementarity problem and that it has some nice global and local convergence properties. Numerical results indicate that the new method is quite promising.}, summary = {The nonlinear complementarity problem is reformulated as a non-smooth system of equations by using a recently introduced NCP-function. A trust-region-type method is then applied to the resulting system of equations, that allows an inexact solution of the trust-region subproblem. The algorithm is well-defined for a general nonlinear complementarity problem and has global and local convergence properties. Numerical results are discussed.}} @inproceedings{KariRendWolk94, author = {S. 
E. Karisch and F. Rendl and H. Wolkowicz}, title = {Trust regions and relaxations for the quadratic assignment problem}, editor = {P. M. Pardalos and H. Wolkowicz}, booktitle = {Quadratic Assignment and Related Problems. DIMACS Workshop.}, publisher = AMS, address = AMS-ADDRESS, pages = {199--219}, year = 1994, abstract = {General quadratic matrix minimization problems, with orthogonal constraints, arise in continuous relaxations for the (discrete) quadratic assignment problem (QAP). Currently, bounds for QAP are obtained by treating the quadratic and linear parts of the objective function, of the relaxations, separately. This paper handles general objectives as one function. The objectives can be both nonhomogeneous and nonconvex. The constraints are orthogonal or Loewner partial order (positive semidefinite) constraints. Comparisons are made to standard trust region subproblems. Numerical results are obtained using a parametric eigenvalue technique.}, summary = {General quadratic matrix minimization problems, with orthogonal constraints, arise in continuous relaxations for the (discrete) quadratic assignment problem (QAP). Currently, bounds for QAP are obtained by treating the quadratic and linear parts of the objective function, of the relaxations, separately. It is shown how to handle general objectives as one function. The objectives can be both non-homogeneous and non-convex. The constraints are orthogonal or Loewner partial order (positive semidefinite) constraints. Comparisons are made with standard trust-region subproblems. Numerical results are obtained using a parametric eigenvalue technique.}} @article{Karm84, author = {N. Karmarkar}, title = {A new polynomial-time algorithm for linear programming}, journal = {Combinatorica}, volume = 4, pages = {373--395}, year = 1984} @mastersthesis{Karu39, author = {W. 
Karush}, title = {Minima of functions of several variables with inequalities as side conditions}, school = {Department of Mathematics}, address = {University of Chicago, Illinois, USA}, year = 1939} @article{Kauf00, author = {L. Kaufman}, title = {A reduced storage, quasi-{N}ewton trust region approach to function optimization}, journal = SIOPT, volume = 10, number = 1, pages = {56--69}, year = 2000, abstract = {In this paper we consider several algorithms for reducing the storage when using a quasi-Newton method in a dogleg trust region setting for minimizing functions of many variables. Secant methods require $O(n^2)$ locations to store an approximate Hessian and $O(n^2)$ operations per iteration when minimizing a function of $n$ variables. This storage requirement becomes worrisome when $n$ becomes large. Our algorithms use a BFGS update and require $kn$ storage and $4kn + O(k^2)$ operations per iteration, but may require more iterations than the standard trust region techniques. Typically $k$ is between 10 and 100. Our dogleg trust region strategies involve expressions with matrix products with both the inverse of this Hessian and with the Hessian itself. Our techniques for updating expressions for the Hessian and its inverse can be used to improve the performance of line search, limited memory algorithms.}, summary = {A limited-memory quasi-Newton BFGS algorithm for unconstrained optimization is described that uses a dogleg trust-region scheme. This technique uses products both with the approximate Hessian and its inverse.}} @article{KeHan95a, author = {X. Ke and J. Han}, title = {A class of nonmonotone trust region algorithms for constrained optimizations}, journal = CSB, volume = 40, number = 16, pages = {1321--1324}, year = 1995, abstract = {In this note, we consider the following constrained optimization problem (CQP), where $f: R^n \to R$ is a continuously differentiable function on a closed convex set $\Omega$.
For the constrained optimization problem (CQP), a class of nonmonotone trust region algorithms is proposed in sec. 1. In sec. 2, the global convergence of this class of algorithms is proved. In sec. 3, some results about the Cauchy point are provided. The nonmonotone technique in this algorithm differs from those in existing nonmonotone algorithms, i.e. nonmonotone algorithms with linear search for unconstrained and constrained optimizations, and the nonmonotone trust region algorithm for unconstrained optimization.}, summary = {The constrained optimization problem of minimizing a continuously differentiable function over a closed convex set is considered. A class of globally convergent non-monotone trust-region algorithms is proposed for this problem.}} @article{KeHan95b, author = {X. Ke and J. Han}, title = {A nonmonotone trust region algorithm for equality constrained optimization}, journal = {Science in China Series A-Mathematics Physics Astronomy and Technological Sciences}, volume = 38, number = 6, pages = {683--695}, year = 1995, abstract = {A trust region algorithm for equality constrained optimization is proposed, which is a nonmonotone one in a certain sense. The augmented Lagrangian function is used as a merit function. Under certain conditions, the global convergence theorems of the algorithm are proved.}, summary = {A non-monotone trust region algorithm is proposed for the minimization of smooth functions subject to nonlinear equality constraints. It handles feasibility as \citebb{CeliDennTapi85} and allows non-monotonicity in the augmented Lagrangian which is used as a merit function.}} @article{KeHan96, author = {X. Ke and J. Han}, title = {A nonmonotone trust region algorithm for unconstrained nonsmooth optimization}, journal = CSB, volume = 41, number = 3, pages = {197--201}, year = 1996, summary = {A non-monotone trust-region method is presented for the solution of non-smooth unconstrained problems.
This algorithm uses the concept of ``iteration functions'' of \icitebb{QiSun94}. Global convergence to a Dini stationary point is proved.}} @inproceedings{KehtWinMull87, author = {N. Kehtarnavaz and M. Z. Win and N. Mullani}, title = {Estimation of diastole to systole changes from cardiac {PET} images}, booktitle = {Proceedings of the Ninth Annual Conference of the IEEE Engineering in Medicine and Biology Society}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 2, pages = {850--851}, year = {1987}, abstract = {The changes in the myocardium thickness, left ventricle diameter and tracer activity between diastole and systole are estimated from cardiac positron-emission-tomography (PET) images. A comparative study has been carried out between the widely used international mathematical subroutine library (IMSL) and the model-trust-region (MTR) parameter estimation algorithm. It has been shown that the MTR algorithm converges regardless of the initial parameter values (IPV) chosen. To reduce the number of iterations, a preprocessor has been developed to provide close IPV to the true parameter values (TPV). Myocardial, left ventricular and tracer activity changes are plotted as functions of bipolar angle. The graphs can be used as a diagnostic tool for abnormal heart conditions.}, summary = {The changes in the myocardium thickness, left ventricle diameter and tracer activity between diastole and systole are estimated from cardiac positron-emission-tomography (PET) images. A comparative study is carried out between the IMSL mathematical subroutine library and the model-trust-region (MTR) parameter estimation algorithm. It is shown that the MTR algorithm converges regardless of the initial parameter values chosen. To reduce the number of iterations, a preprocessor has been developed to provide better starting values. The method is used as a diagnostic tool for abnormal heart conditions.}} @article{Kell73, author = {E. L. 
Keller}, title = {The general quadratic programming problem}, journal = MP, volume = 5, number = 3, pages = {311--337}, year = 1973} @article{KellSach87, author = {C. T. Kelley and E. W. Sachs}, title = {Quasi-{N}ewton methods and unconstrained optimal control problems}, journal = SICON, volume = 25, number = 6, pages = {1503--1517}, year = 1987} @article{KellSach99, author = {C. T. Kelley and E. W. Sachs}, title = {A trust region method for parabolic boundary control problems}, journal = SIOPT, volume = 9, number = 4, pages = {1064--1081}, year = 1999, abstract = {In this paper, we develop a trust region algorithm for constrained parabolic boundary control problems. The method is a projected form of the Steihaug trust-region CG method with a smoothing step added at each iteration to improve performance in the global phase and provide mesh-independent sup-norm convergence in the terminal phase.}, summary = {A trust-region algorithm for constrained parabolic boundary control problems is developed. The method is a projected form of the Steihaug-Toint method with a smoothing step added at each iteration to improve performance in the global phase and provide mesh-independent sup-norm convergence in the terminal phase.}} @article{KellKeye98, author = {C. T. Kelley and D. E. Keyes}, title = {Convergence analysis of pseudo-transient continuation}, journal = SINUM, volume = 35, number = 2, pages = {508--523}, year = 1998, abstract = {Pseudo-transient continuation is a well-known and physically motivated technique for computation of steady-state solutions of time-dependent partial differential equations. Standard globalization strategies such as line search or trust-region methods often stagnate at local minima. Pseudo-transient continuation succeeds in many of these cases by taking advantage of the underlying PDE structure of the problem. Though widely employed, the convergence of this scheme is rarely discussed. 
In this paper we prove convergence for a generic form of pseudo-transient continuation and illustrate it with two practical strategies.}, summary = {Pseudo-transient continuation is a well-known and physically motivated technique for computation of steady-state solutions of time-dependent partial differential equations. Standard globalization strategies such as linesearch or trust-region methods often stagnate at local minima. Pseudo-transient continuation succeeds in many of these cases by taking advantage of the underlying PDE structure of the problem. Convergence for a generic form of pseudo-transient continuation is proved, and illustrated with two practical strategies.}} @article{KeLiuXu96, author = {X. Ke and G. Liu and D. Xu}, title = {A nonmonotone trust region algorithm for unconstrained nonsmooth optimization}, journal = CSB, volume = 41, number = 3, pages = {197--201}, year = 1996, summary = {A globally convergent trust-region algorithm is proposed for unconstrained minimization of locally Lipschitzian functions, that generalizes the approach of \citebb{QiSun94} by allowing a non-monotone sequence of objective function values.}} @techreport{KhalByrdSchn99, author = {H. F. Khalfan and R. H. Byrd and R. B. Schnabel}, title = {Retaining Convergence Properties of Trust Region Methods Without Extra Gradient Evaluations}, institution = {Department of Mathematics and Computer Science, University of Colorado}, address = {Boulder, Colorado, USA}, year = 1999, abstract = {Several recent computational studies have shown that trust-region quasi-Newton methods using the SR1, PSB, and BFGS updates are effective methods for solving unconstrained optimization problems. In addition, the analyses in \citebb{Powe75} and \citebb{ByrdKhalSchn96} demonstrate strong convergence properties for some trust-region quasi-Newton methods. 
A computational disadvantage of the methods analyzed in these papers, for which the strongest convergence properties among trust-region quasi-Newton methods have been shown, is that the update at rejected points requires a gradient evaluation that would not otherwise be made. In this paper, we propose a modification of the PSB method that uses only the function value at rejected points to make the update at those points. We then show how to modify Powell's analysis of the PSB method to prove the same $q$-superlinear convergence result for the new method. Finally, we discuss the issues and difficulties involved in extending this approach to trust region methods using updates in the Broyden class, such as the BFGS, SR1 and DFP.}, summary = {A modification of a trust-region method based on the PSB quasi-Newton update is proposed that uses only the function value at rejected points to make the update at those points. The same $q$-superlinear convergence result as for the original algorithm holds for the new method.}} @inproceedings{Kiwi89, author = {K. C. Kiwiel}, title = {A survey of bundle methods for non-differentiable optimization}, crossref = {IriTana89}, pages = {263--282}} @article{Kiwi89b, author = {K. C. Kiwiel}, title = {An ellipsoid trust region bundle method for nonsmooth convex minimization}, journal = SICON, volume = 27, number = 4, pages = {737--757}, year = 1989, abstract = {This paper presents a bundle method of descent for minimizing a convex (possibly nonsmooth) function f of several variables. At each iteration the algorithm finds a trial point by minimizing a polyhedral model of f subject to an ellipsoid trust region constraint. The quadratic matrix of the constraint, which is updated as in the ellipsoid method, is intended to serve as a generalized 'Hessian' to account for 'second-order' effects, thus enabling faster convergence.
The interpretation of generalized Hessians is largely heuristic, since so far this notion has been made precise by J.L. Goffin only in the solution of linear inequalities. Global convergence of the method is established and numerical results are given.}, summary = {A bundle method of descent for minimizing a convex (possibly non-smooth) function of several variables. At each iteration the algorithm finds a trial point by minimizing a polyhedral model subject to an ellipsoid trust-region constraint. The quadratic matrix of the constraint, which is updated as in the ellipsoid method, is interpreted as a generalized 'Hessian' to account for 'second-order' effects, thus enabling faster convergence. Global convergence of the method is established and numerical results are given.}} @article{Kiwi96, author = {K. C. Kiwiel}, title = {Restricted step and {L}evenberg-{M}arquardt techniques in proximal Bundle Methods for nonconvex nondifferentiable optimization}, journal = SIOPT, volume = 6, number = 1, pages = {227--249}, year = 1996, abstract = {Two methods are given for minimizing locally Lipschitzian upper semidifferentiable functions. They employ extensions of restricted step (trust region) and Levenberg-Marquardt techniques that are widely used in other contexts. Extensions to linearly constrained optimization are discussed. Preliminary numerical experience is reported.}, summary = {Two methods are given for minimizing locally Lipschitzian upper semidifferentiable functions. They employ extensions of restricted step (trust region) and Levenberg-Morrison-Marquardt techniques. Extensions to linearly constrained optimization are discussed. Preliminary numerical experience is reported.}} @phdthesis{Knot83, author = {O. 
Knoth}, title = {{M}arquardt-\"{a}hnliche {V}erfahren zur {M}inimierung nichtlinearer {F}unktionen}, school = {Martin-Luther University}, address = {Halle-Wittenberg, Germany}, year = 1983, note = {(in German)}, summary = {Accumulation points of the trust-region Newton method for unconstrained optimization are proved to satisfy second order optimality conditions. The use of negative curvature in a ``projected'' Marquardt algorithm is also introduced. Finally, the characterization of local minima for the $\ell_2$ trust-region subproblem is considered together with the use of an exact penalty function for this subproblem.}} @book{Knut73, author = {D. E. Knuth}, title = {The Art of Computer Programming, Volume 3, Sorting and Searching}, publisher = ADW, address = ADW-ADDRESS, year = 1973} @article{Knut76, author = {D. E. Knuth}, title = {Big {O}micron and {B}ig {O}mega and {B}ig {T}heta}, journal = {ACM SIGACT News}, volume = 8, number = 2, pages = {18--24}, year = 1976} @article{Koji93, author = {F. Kojima}, title = {Back-propagation learning using the trust region algorithm and application to nondestructive testing in applied electromagnetics}, journal = {International Journal of Applied Electromagnetics in Materials}, volume = 4, number = 7, pages = {27--33}, year = 1993, abstract = {An artificial neural network is applied by nondestructive inspections in aerospace materials. The use of an artificial neural network is presented for classifying testing data as corresponding to sample materials with defect and without defect. The back-propagation learning for a multi-layer feed-forward neural network is applied to this classification. The trust region method is adopted to the back-propagation learning problem. Results of numerical tests are summarized.}, summary = {An artificial neural network is applied by non-destructive inspections in aerospace materials. 
The back-propagation learning for a multi-layer feed-forward neural network is applied to the resulting classification problem. The trust-region method is adapted to the back-propagation learning problem. Numerical tests are discussed.}} @inproceedings{KojiKawa93, author = {F. Kojima and H. Kawaguchi}, title = {Backpropagation learning algorithm for nondestructive testing by thermal imager (aerospace materials)}, booktitle = {IJCNN '93-Nagoya. Proceedings of 1993 International Joint Conference on Neural Networks}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 1, pages = {955--958}, year = 1993, abstract = {An artificial neural network is applied to nondestructive inspections using thermal imager. The use of an artificial neural network is presented for classifying test data as corresponding to bonded and disbonded regions in sample materials. The backpropagation learning for a multi-layer feedforward neural network is applied to this classification. The trust region method is adopted to the backpropagation learning problem. Results of numerical tests are summarized.}, summary = {An artificial neural network is applied to non-destructive inspections using thermal imager. The use of an artificial neural network is presented for classifying test data as corresponding to bonded and disbonded regions in sample materials. The backpropagation learning for a multi-layer feedforward neural network is applied to this classification. The trust-region method is adopted to the backpropagation learning problem. Numerical results are summarized.}} @inproceedings{KrukWolk98, author = {S. Kruk and H. Wolkowicz}, title = {{SQ}${}^2${P}, Sequential Quadratic Constrained Quadratic Programming}, crossref = {Yuan98}, pages = {177--204}, abstract = {We follow the popular approach for unconstrained minimization, i.e. we develop a local quadratic model at a current approximate minimizer in conjunction with a trust region. 
We then minimize this local model in order to find the next approximate minimizer. Asymptotically, finding the local minimizer of the quadratic model is equivalent to applying Newton's method to the stationarity condition. For constrained problems, the local quadratic model corresponds to minimizing a quadratic expression of the objective subject to quadratic approximations of the constraints (Q${}^2$P), with an additional trust region. This quadratic model is intractable in general and is usually handled by using linear approximations of the constraints and modifying the Hessian of the objective function using the Hessian of the Lagrangian, i.e. a SQP approach. Instead, we solve the Lagrangian relaxation of Q${}^2$P using semi-definite programming. We develop this framework and present an example which illustrates the advantages over the standard SQP approach.}, summary = {An algorithm is proposed for constrained nonlinear programming in which a quadratic model of the objective function is minimized, at each iteration, subject to quadratic approximations of the constraints (Q${}^2$P) and an additional trust region. As this subproblem is in general intractable, the Lagrangian relaxation of Q${}^2$P is instead solved using semi-definite programming. An example illustrates the advantages over the standard SQP approach.}} @inproceedings{KuhnTuck51, author = {H. W. Kuhn and A. W. Tucker}, title = {Nonlinear Programming}, booktitle = {Proceedings of the second Berkeley symposium on mathematical statistics and probability}, editor = {J. Neyman}, publisher = {University of Berkeley Press}, address = {California, USA}, year = 1951} @article{KwokKamaWats85, author = {H. H. Kwok and M. P. Kamat and L. T. 
Watson}, title = {Location of stable and unstable equilibrium-configurations using a model trust region quasi-{N}ewton method and tunnelling}, journal = {Computers and Structures}, volume = 21, number = 5, pages = {909--916}, year = 1985, abstract = {A hybrid method for locating multipole equilibrium configurations has been proposed recently. The hybrid method combined the efficiency of a quasi-Newton method capable of locating stable and unstable equilibrium solutions with a robust homotopy method capable of tracking equilibrium paths, with turning points and exploiting sparsity of the Jacobian matrix at the same time. A quasi-Newton method in conjunction with a deflation technique is proposed here as an alternative to the hybrid method. The proposed method not only exploits sparsity and symmetry, but also represents an improvement in efficiency}, summary = {The conjunction of a quasi-Newton method with a deflation technique is proposed as an alternative to the hybrid method for locating multipole equilibrium configurations. The proposed method not only exploits sparsity and symmetry, but also represents an improvement in efficiency. It uses a double dogleg globalization strategy.}} %%% L %%% @article{LaleNocePlan98, author = {M. Lalee and J. Nocedal and T. D. Plantenga}, title = {On the implementation of an algorithm for large-scale equality constrained optimization}, journal = SIOPT, volume = 8, number = 3, pages = {682--706}, year = 1998, abstract = {This paper describes a software implementation of Byrd and \citebb{Omoj89}'s trust-region algorithm for solving nonlinear equality constrained optimization problems. The code is designed for the efficient solution of large problems and provides the user with a variety of linear algebra techniques for solving the subproblems occurring in the algorithm. Second derivative information can be used, but when it is not available, limited memory quasi-Newton approximations are made. 
The performance of the code is studied using a set of difficult test problems from the {\sf CUTE} collection.}, summary = {A software implementation of Byrd and \citebb{Omoj89}'s trust-region algorithm for nonlinear equality constrained optimization is described. The code is designed for the efficient solution of large problems and provides the user with a variety of linear algebra techniques for solving the subproblems occurring in the algorithm. Second derivative information can be used, as well as limited memory quasi-Newton approximations. The performance of the code is studied using a set of difficult test problems from the {\sf CUTE} collection.}} @book{LancTism85, author = {P. Lancaster and M. Tismenetsky}, title = {The Theory of Matrices}, publisher = AP, address = AP-ADDRESS, edition = {second}, year = 1985} @article{Lanc50, author = {C. Lanczos}, title = {An iteration method for the solution of the eigenvalue problem of linear differential and integral operators}, journal = {Journal of research of the National Bureau of Standards B}, volume = 45, pages = {225--280}, year = 1950} @article{Lann97, author = {A. Lannes}, title = {Phase-closure imaging in algebraic graph theory: a new class of phase-calibration algorithms}, journal = {Journal of the Optical Society of America A-Optics Image Science and Vision}, volume = 15, number = 2, pages = {419--429}, year = 1997, abstract = {A new class of phase-calibration algorithms is presented. The originality of these algorithms, as well as their efficiency, results from certain particular structures, the analysis of which calls on algebraic graph theory. The corresponding optimization process, which is based on the principle of the trust-region methods, proves to be well suited to these structures. The main message that emerges from the study is very clear: the traditional notions of phase closure imaging can be understood and refined in a wider framework. 
The implications of this research therefore concern all the fields in which the notion of phase closure plays a key role: weak-phase imaging in optical interferometry, radio imaging, remote sensing by aperture synthesis, etc.}, summary = {A class of phase-calibration algorithms is presented. Their originality, as well as their efficiency, results from certain particular structures, the analysis of which calls on algebraic graph theory. The corresponding optimization process, which is based on the principle of the trust-region methods, proves to be well suited to these structures. The main result is that the traditional notions of phase closure imaging can be understood and refined in a wider framework. This has implications in all the fields where the notion of phase closure plays a key role, such as weak-phase imaging in optical interferometry, radio imaging and remote sensing by aperture synthesis.}} @article{Lann98, author = {A. Lannes}, title = {Weak-phase imaging in optical interferometry}, journal = {Journal of the Optical Society of America A-Optics Image Science and Vision}, volume = 15, number = 4, pages = {811--824}, year = 1998, abstract = {The first imaging devices of optical interferometry are likely to be of weak phase, typically: a set of three-element arrays independently observing the same object. The study of their imaging capabilities refers to appropriate optimization methods, which essentially address the self-calibration process and its stability. A general survey of these techniques is given, and it is shown, in particular, how the related algorithms can be used for examining the imaging capabilities of weak-phase interferometric devices. The phase-calibration algorithm involved in the self-calibration cycles is based on the principle underlying the trust-region methods. It benefits from certain remarkable properties, the analysis of which appeals to algebraic graph theory. 
The Fourier synthesis operation, which is also involved in these cycles, is performed by means of WIPE, a methodology recently introduced in radio imaging and optical interferometry. (WIPE is reminiscent Of CLEAN, a widely used technique in astronomy). In the related theoretical framework the stability of the image-reconstruction process is controlled by considering certain elements of the singular-value decomposition of the derivative of the self- calibration operator. For example, the largest singular value of this derivative, which depends on the interferometric configuration and on the object thus imaged, provides a key indication of the observational limits of these experimental devices.}, summary = {The first imaging devices of optical interferometry are likely to be of weak phase, typically: a set of three-element arrays independently observing the same object. The study of their imaging capabilities refers to appropriate optimization methods, which essentially address the self-calibration process and its stability. A general survey of these techniques is given, and it is shown, in particular, how the related algorithms can be used for examining the imaging capabilities of weak-phase interferometric devices. The phase-calibration algorithm involved in the self-calibration cycles is based on the principle underlying the trust-region methods. Its remarkable properties are shown using the algebraic graph theory. The Fourier synthesis operation, which is also involved in these cycles, is performed by means of WIPE, a methodology introduced in radio imaging and optical interferometry. In the related theoretical framework the stability of the image-reconstruction process is controlled by considering certain elements of the singular-value decomposition of the derivative of the self- calibration operator. 
For example, the largest singular value of this derivative, which depends on the interferometric configuration and on the object thus imaged, provides a key indication of the observational limits of these experimental devices.}} @article{LasdPlumYu95, author = {L. S. Lasdon and J. Plummer and G. Yu}, title = {Primal-dual and primal interior point algorithms for general nonlinear programs}, journal = ORSAC, volume = 7, number = 3, pages = {321--332}, year = 1995, abstract = {An interior point algorithm for general nonlinear programs is presented. Inequality constraints are converted to equalities with slack variables. All bounds are handled with a barrier term in the objective. The Kuhn-Tucker system of the resulting equality constrained barrier problem is solved directly by Newton's method. Primal-dual, primal, and primal-dual with trust region variants are developed and evaluated. An implementation which utilizes the true Lagrangian Hessian and exploits Jacobian and Hessian sparsity is described. Computational results are presented and discussed.}, summary = {An interior point algorithm for general nonlinear programs is presented. Inequality constraints are converted to equalities with slack variables. All bounds are handled with a barrier term in the objective. The Kuhn-Tucker system of the resulting equality constrained barrier problem is solved directly by Newton's method. Primal-dual, primal, and primal-dual with trust-region variants are developed and evaluated. An implementation which utilizes the true Lagrangian Hessian and exploits Jacobian and Hessian sparsity is described. Computational results are presented and discussed.}} @book{LawsHans74, author = {C. L. Lawson and R. J. Hanson}, title = {Solving Least Squares Problems}, publisher = PH, address = PH-ADDRESS, year = 1974, note = {Reprinted as \emph{Classics in Applied Mathematics 15}, SIAM, Philadelphia, USA, 1995}} @misc{LeibSach99, author = {F. Leibfritz and E. W. 
Sachs}, title = {Optimal Static Output Feedback Design using a Trust Region Interior Point Method}, howpublished = {Presentation at the First Workshop on Nonlinear Optimization ``Interior-Point and Filter Methods'', Coimbra, Portugal}, year = 1999, abstract = {We consider the problem of designing feedback control laws when a complete set of state variables is not available. The resulting nonlinear and nonconvex matrix optimization problem including SDP-constraints for determining the optimal feedback gain will be solved by a trust region interior point approach. The algorithm will be discussed in some details. Finally, using test examples from optimal output feedback design we demonstrate the usefulness of this approach numerically.}, summary = {The problem of designing feedback control laws is considered when a complete set of state variables is not available. The resulting nonlinear and non-convex matrix optimization problem including semi-definiteness constraints for determining the optimal feedback gain is solved by a trust-region interior-point approach. Test examples from optimal output feedback design numerically demonstrate the usefulness of the approach.}} @inproceedings{LemaZowe94, author = {C. Lemar\'{e}chal and J. Zowe}, title = {A condensed introduction to bundle methods in nonsmooth optimizations}, crossref = {Sped94}, pages = {357--382}} @article{Lesc91, author = {M. Lescrenier}, title = {Convergence of trust region algorithms for optimization with bounds when strict complementarity does not hold}, journal = SINUM, volume = 28, number = 2, pages = {476--495}, year = 1991, abstract = {\citebb{ConnGoulToin88a} have proposed a class of trust region algorithms for minimizing nonlinear functions whose variables are subjected to simple bound constraints. 
In their convergence analysis, they show that if the strict complementarity condition holds, the considered algorithms reduce to an unconstrained calculation after finitely many iterations, allowing fast asymptotic rates of convergence. This paper analyzes the behaviour of these iterative processes in the case where the strict complementarity condition is violated. It is proved that inexact Newton methods lead to superlinear or quadratic rates of convergence, even if the set of active bounds at the solution is not entirely detected. Practical criteria for stopping the inner iterations of the algorithms are deduced, ensuring these rates of convergence.}, summary = {The behaviour of the trust-region algorithms of \citebb{ConnGoulToin88a} for optimization with simple bounds is analyzed in the case where the strict complementarity condition is violated. It is proved that inexact Newton methods lead to superlinear or quadratic rates of convergence, even if the set of active bounds at the solution is not entirely detected. Practical criteria for stopping the inner iterations of the algorithms are deduced.}} @book{LeTaGlow89, author = {Le Tallec, P. and Glowinski, R.}, title = {Augmented Lagrangian and Operator-Splitting Methods in Nonlinear Mechanics}, publisher = SIAM, address = SIAM-ADDRESS, year = 1989} @article{Leve44, author = {K. Levenberg}, title = {A Method For The Solution Of Certain Problems In Least Squares}, journal = {Quarterly Journal on Applied Mathematics}, volume = 2, pages = {164--168}, year = 1944, summary = {The standard method for solving least-squares problems which leads to nonlinear normal equations depends on a reduction of the residuals to linear form by first-order Taylor approximations. taken about a trial solution for the parameters. 
If the usual least-squares procedure with these linear approximations yields new values for the parameters which are not sufficiently close to the trial values, the neglect of second and higher order terms may invalidate the process. This difficulty may be alleviated by limiting the absolute values of the parameters and to simultaneously minimize the sum of squares of the approximating residuals under these ``damped'' conditions.}} @article{LeviPoly66, author = {E. S. Levitin and B. T. Polyak}, title = {Constrained minimization problems}, journal = {U.S.S.R. Comput. Math. Math. Phys.}, volume = 6, pages = {1--50}, year = 1966} @misc{Lewi96, author = {R. M. Lewis}, title = {A trust region framework for managing approximation models in engineering optimization}, howpublished = {AIAA paper 96-4101, presented at the Sixth AIAA/NASA/ISSMO Symposium on Multidisplinary Analysis and Design, Bellevue, Washington}, year = 1996, summary = {Non-quadratic models are proposed for the trust-region minimization of expensive functions from engineering applications.}} @techreport{Li93, author = {Y. Li}, title = {Centering, trust region, reflective techniques for nonlinear minimization subject to bounds}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR93-1385}, year = 1993, abstract = {Bound-constrained nonlinear minimization problems occur frequently in practice. Most existing methods belong to an active set type which can be slow for large scale problems. Recently, we proposed a new approach (\citebb{ColeLi94}, \citebb{ColeLi96} \citebb{ColeLi96b}) which generates iterates within the strictly feasible region. The method in \citebb{ColeLi96b} is a trust region type and, unlike the existing trust region method for bound-constrained problems, the conditions for its strong convergence properties are consistent with algorithm implementation. A reflective technique can be included in the method. 
In this paper, we motivate techniques which are important for our new approach. Numerical experience on some medium size problems is included.}, summary = {Motivation is provided for techniques which are important for the method of \citebb{ColeLi96b}. Numerical experience on some medium size problems is included.}} @techreport{Li94a, author = {Y. Li}, title = {A Trust-Region and Affine Scaling Method for Nonlinearly Constrained Minimization}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR94-1463}, year = 1994, abstract = {A nonlinearly constrained minimization problem can be solved by the exact penalty approach involving nondifferentiable functions $\sum_i|c_i(x)|$ and $\sum_i \max(0,c_i(x))$. In this paper, a trust region approach based on a 2-norm subproblem is proposed for solving a nonlinear $\ell_1$ problem. The (quadratic) approximation and the trust region subproblem are defined using affine scaling techniques. Explicit sufficient decrease conditions based on the approximations are suggested for obtaining a limit point satisfying complementarity, Kuhn-Tucker conditions, and the second order necessary conditions. The global convergence of the method is presented in \citebb{Li94b}.}, summary = {A trust-region approach based on a $\ell_2$ norm subproblem is proposed for solving a nonlinear $\ell_1$ problem. The (quadratic) approximation and the trust-region subproblem are defined using affine scaling techniques. Explicit sufficient decrease conditions based on the approximations are suggested for obtaining a limit point satisfying complementarity, Kuhn-Tucker conditions, and the second order necessary conditions.}} @techreport{Li94b, author = {Y. 
Li}, title = {On Global Convergence of a Trust-Region and Affine Scaling Method for Nonlinearly Constrained Minimization}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR94-1462}, year = 1994, abstract = {A nonlinearly constrained minimization problem can be solved by the exact penalty approach involving non-differentiable functions $\sum_i|c_i(x)|$ and $\sum_i \max(0,c_i(x))$. In \citebb{Li94a}, a trust region affine scaling approach based on a 2-norm subproblem is proposed for solving a nonlinear $\ell_1$ problem. The (quadratic) approximation and the trust region subproblem are defined using affine scaling techniques. Explicit sufficient decrease conditions based on the approximations are proposed to obtain a limit point satisfying complementarity, Kuhn-Tucker conditions, and the second order necessary conditions. In this paper, we present the global convergence properties of this new approach.}, summary = {Global convergence properties of the method by \citebb{Li94a} are presented.}} @article{Li96, author = {W. Li}, title = {Differentiable Piecewise Quadratic Exact Penalty Functions for Quadratic Programs With Simple Bound Constraints}, journal = SIOPT, volume = 6, number = 2, pages = {299--315}, year = 1996} @inproceedings{Li97, author = {W. Li}, title = {A Merit Function and a {N}ewton-Type Method for Symmetric Linear Complementarity Problems}, crossref = {FerrPang97}, pages = {181--203}} @article{LianXu97, author = {X. Liang and C. Xu}, title = {A trust region algorithm for bound constrained minimization}, journal = {Optimization}, volume = 41, number = 3, pages = {279--289}, year = 1997, abstract = {A trust region algorithm is proposed for box constrained nonlinear optimization. At each step of the algorithm a quadratic model problem in box is minimized. Global convergence and quadratic convergence rate to a strong local minimizer are given. 
Computational results are presented to show the efficiency of the algorithm.}, summary = {A trust region algorithm is proposed for box constrained nonlinear optimization. At each step of the algorithm a quadratic model problem is minimized in a box. Global convergence and quadratic convergence rate to a strong local minimizer are given. Computational results are presented to show the efficiency of the algorithm.}} @techreport{Liao95, author = {A. Liao}, title = {Solving unconstrained discrete-time optimal-control-problems using trust method}, institution = ACRI-CORNELL, address = CORNELL-ADDRESS, number = {CTC95TR230}, year = 1995, abstract = {trust-region method for a class of large-scale minimization problems, the unconstrained discrete-time optimal control (DTOC) problems, is considered. We show that the trust-region subproblem can be solved within an acceptable accuracy without forming the Hessian explicitly. The new approach is based on the inverse power method for eigenvalue problem and possesses the ability to handle the hard case. Our proposed approach leads to more efficient algorithms for DTOC problems.}, summary = {A trust-region method is considered for solving unconstrained discrete-time optimal control (DTOC) problems, in which the trust-region subproblem can be solved within an acceptable accuracy without forming the Hessian explicitly. The approach is based on the inverse power method for eigenvalue problem and can handle the hard case. It leads to more efficient algorithms for DTOC problems.}} @article{Liao97, author = {A. Liao}, title = {Some efficient algorithms for unconstrained discrete-time optimal control problems}, journal = {Applied Mathematics and Computation}, volume = 87, number = {2-3}, pages = {175--198}, year = 1997, abstract = {The differential dynamic programming algorithm (DDP) and the stagewise Newton procedure are two typical examples of efficient local procedures for discrete-time optimal control (DTOC) problems. 
It is desirable to generalize these local procedures to globally convergent methods. One successful globalization was recently proposed by \citebb{ColeLiao95} which combines the trust region idea with \citebb{Pant88}'s stagewise Newton procedure. In this paper we propose several algorithms for DTOC problems which combine a modified ''dogleg'' algorithm with DDP or Pantoja's Newton procedure. These algorithms possess advantages of both the dogleg algorithm and the DDP or the stagewise procedure, i.e., they have strong global and local convergence properties yet remain economical. Numerical results are presented to compare these algorithms and the Coleman-Liao algorithm.}, summary = {Several algorithms for discrete time optimal control problems are proposed, which combine a modified dogleg algorithm with the differential dynamic programming method or \citebb{Pant88}'s Newton procedure. These algorithms possess advantages of both the dogleg algorithm and the DDP or the stagewise procedure, i.e., they have strong global and local convergence properties yet remain economical. Numerical results are presented to compare these algorithms and the \citebb{ColeLiao95} algorithm.}} @article{LinMore99, author = {C. Lin and J. J. Mor\'{e}}, title = {{N}ewton's Method for Large Bound-Constrained Optimization Problems}, journal = SIOPT, volume = 9, number = 4, pages = {1100--1127}, year = 1999, abstract = {We analyze a trust region version of Newton's method for bound-constrained problems. Our approach relies on the geometry of the feasible set, not on the particular representation in terms of constraints. The convergence theory holds for linearly-constrained problems, and yields global and superlinear convergence theory without assuming neither strict complementarity nor linear independence of the active constraints. 
We also show that the convergence theory leads to an efficient implementation for large bound-constrained problems.}, summary = {A trust region version of Newton's method for bound-constrained problems is proposed, that relies on the geometry of the feasible set, not on the particular representation in terms of constraints. The convergence theory holds for linearly-constrained problems, and yields global and superlinear convergence theory without assuming neither strict complementarity nor linear independence of the active constraints. The theory also leads to an efficient implementation for large bound-constrained problems.}} %institution = ANL, address = ANL-ADDRESS, %number = {MCS-P724-0898}, year = 1998, @article{LiuHanWang98, author = {G. Liu and J. Han and S. Wang}, title = {A trust region algorithm for bilevel programming problems}, journal = {Chinese Science Bulletin}, volume = 43, number = 10, pages = {820--824}, year = 1998, abstract = {A trust region algorithm is proposed for solving bilevel programming problems where the lower level programming problem is a strongly convex programming problem with linear constraints. This algorithm is based on a trust region algorithm for nonsmooth unconstrained optimization problems, and its global convergence is also proved.}, summary = {A trust region algorithm is proposed for solving bilevel programming problems where the lower level programming problem is a strongly convex programming problem with linear constraints. This algorithm is based on a trust region algorithm for non-smooth unconstrained optimization problems, and its global convergence is also proved.}} @article{LiuNoce89, author = {D. Liu and J. Nocedal}, title = {On the limited memory {BFGS} method for large scale optimization}, journal = MPB, volume = 45, number = 3, pages = {503--528}, year = 1989} @misc{LiuYuan98, author = {X. Liu and Y. 
Yuan}, title = {A robust trust-region algorithm for solving general nonlinear programming problems}, howpublished = {Presentation at the International Conference on Nonlinear Programming and Variational Inequalities, Hong Kong}, year = 1998, abstract = {The trust-region approach has been extended to solving nonlinear constrained optimization. Most of these extensions consider only equality constraints and require strong global regularity assumptions. In this report, a trust-region algorithm for solving general nonlinear programming is presented, which solves a trust-region subproblem and a quadratic programming trust-region subproblem at each iteration. The algorithm is similar to the methods presented by \citebb{Burk92} and \citebb{Yuan95}. For the equality constrained case, our algorithm is similar to the methods of \citebb{DennElAlMaci97} and \citebb{DennVice97}. A new penalty parameter updating procedure is introduced. Under very milds conditions, the global convergence results are proved. Some local convergence results are also proved. The preliminary numerical results show that our algorithm is comparable to {\tt VF02AD}.}, summary = {A trust-region methods for general constrained optimization is discussed, that uses a composite step technique. The normal step is obtained by solving an ordinary trust-region subproblem, while the tangential step results from a trust-region constrained quadratic program. The method has similarities with those of \citebb{Burk92}, \citebb{Yuan95}, \citebb{DennElAlMaci97} and \citebb{DennVice97}, but features a new updating rule for the penalty parameter. It is globally convergent. Numerical results indicate that its efficiency is comparable to that of {\tt VF02AD}.}} @article{LiSwet93, author = {W. Li and J. 
Swetits}, title = {A {N}ewton Method for Convex Regression, Data Smoothing, and Quadratic Programming with Bounded Constraints}, journal = SIOPT, volume = 3, number = 3, pages = {466--488}, year = 1993} @article{Loot69, author = {F. A. Lootsma}, title = {Hessian matrices of penalty functions for solving constrained optimization problems}, journal = {Philips Research Reports}, volume = 24, pages = {322--331}, year = 1969} @article{Lots84, author = {P. L\"{o}tstedt}, title = {Solving the minimal least squares problem subject to bounds on the variables}, journal = BIT, volume = 24, pages = {206--224}, year = 1984} @article{LuciGuoWang93, author = {A. Lucia and X. Z. Guo and X. Wang}, title = {Process Simulation in the complex-domain}, journal = {AIChE Journal}, volume = 39, number = 3, pages = {461--470}, year = 1993, abstract = {The asymptotic behavior of fixed-point methods in the complex domain is studied. Both direct substitution and Newton's method exhibit stable periodic and aperiodic behavior from real- or complex-valued starting points. Moreover, multiple stable periodic orbits can exist for direct substitution. Traditional trust region (or dogleg) methods, on the other hand, often terminate at singular points, which correspond to nonzero-valued saddlepoints in the least-squares function that can be arbitrarily far from a solution. Furthermore, the basins of attraction of these singular points are usually dispersed throughout the basin boundaries in the complex domain, clearly illustrating that singular points (via the dogleg strategy) also attract either real- or complex-valued starting points. In light of this, an extension of the dogleg strategy to the complex domain, based on a simple norm-reducing, singular point perturbation, is proposed. This extended trust region method removes all forms of nonconvergent behavior and always terminates at a fixed point, even from critical point (worst-case) initial values.
Many numerical results and geometric illustrations using chemical process simulation examples are presented.}, summary = {The asymptotic behaviour of fixed-point methods in the complex domain is studied. Both direct substitution and Newton's method exhibit stable periodic and aperiodic behaviour from real- or complex-valued starting points. Moreover, multiple stable periodic orbits can exist for direct substitution. Traditional trust-region (or dogleg) methods, on the other hand, often terminate at singular points, which correspond to non-zero-valued saddlepoints in the least-squares function that can be arbitrarily far from a solution. Furthermore, the basins of attraction of these singular points are usually dispersed throughout the basin boundaries in the complex domain, clearly illustrating that singular points (via the dogleg strategy) also attract either real- or complex-valued starting points. In this light, an extension of the dogleg strategy to the complex domain, based on a simple norm-reducing, singular point perturbation, is proposed. This extended trust-region method always terminates at a fixed point, even from critical point (worst-case) initial values. Numerical results and geometric illustrations using chemical process simulation examples are presented.}} @article{LuciXu90, author = {A. Lucia and J. Xu}, title = {Chemical process optimization using {N}ewton-like methods}, journal = {Computers and Chemical Engineering}, volume = 14, number = 2, pages = {119--138}, year = 1990, abstract = {Various interrelated issues that effect the reliability and efficiency of Newton-like methods for chemical process optimization are studied. An algorithm for solving large, sparse quadratic programming (QP) problems that is based on an active set strategy and a symmetric, indefinite factorization is presented. The QP algorithm is fast and reliable. A simple asymmetric trust region method is proposed for improving the reliability of successive QP methods. 
Ill-defined QP subproblems are avoided by adjusting the size of the trust region in an automatic way. Finally, it is shown that reliable initial values of the unknown variables and multipliers can be generated automatically using generic problem information, short-cut techniques and simulation tools. Many relevant numerical results and illustrations are presented.}, summary = {An algorithm for solving large, sparse quadratic programming (QP) problems that is based on an active set strategy and a symmetric, indefinite factorization is presented. A simple asymmetric trust-region method is proposed for improving the reliability of successive QP methods. Ill-defined QP subproblems are avoided by adjusting the size of the trust region in an automatic way. Finally, it is shown that reliable initial values of the unknown variables and multipliers can be generated automatically using generic problem information, short-cut techniques and simulation tools. Relevant numerical results and illustrations are presented.}} @article{LuciXu94, author = {A. Lucia and J. Xu}, title = {Methods of successive quadratic programming}, journal = {Computers and Chemical Engineering}, volume = 18, pages = {S211--S215}, year = 1994, abstract = {The occurrence of nondescent directions in successive quadratic programming is studied. It is shown that simple chemical process examples can be constructed that exhibit nondescent as a consequence of the projected indefiniteness of the Hessian matrix of the Lagrangian function. Moreover, in situations where multiple Kuhn-Tucker points for the quadratic programming sub-problems exist, the global optimum need not necessarily provide a direction of descent. Thus search for a global solution is unjustified. To circumvent these difficulties, a linear programming-based trust region method is proposed to guarantee descent for any arbitrary merit function, provided such a direction exists.
Geometric illustrations are used to elucidate the main ideas.}, summary = {Simple chemical process examples are constructed that exhibit non-descent in successive quadratic programming as a consequence of the projected indefiniteness of the Hessian matrix of the Lagrangian function. Moreover, in situations where multiple Kuhn-Tucker points for the quadratic programming sub-problems exist, the global optimum need not necessarily provide a direction of descent. Thus search for a global solution is unjustified. To circumvent these difficulties, a linear programming-based trust region method is proposed to guarantee descent for any arbitrary merit function, provided such a direction exists. Geometric illustrations are used to elucidate the main ideas.}} @article{Luci92, author = {S. Lucidi}, title = {New Results on a Continuously Differentiable Exact Penalty Function}, journal = SIOPT, volume = 2, number = 4, pages = {558--574}, year = 1992} @techreport{LuciPalaRoma94, author = {S. Lucidi and L. Palagi and M. Roma}, title = {Quadratic programs with a quadratic constraint: characterisation of {KKT} points and equivalence with an unconstrained problem}, institution = {University of Rome ``La Sapienza''}, address = {Rome}, type = {Technical Report}, number = {24-94}, year = 1994, abstract = {In this paper, we consider the problem of minimizing a quadratic function with a quadratic constraint. We point out some new properties of the problem. In particular, in the first part of the paper, we show that (i) the number of values of the objective function at KKT points is bounded by $3n+1$ where $n$ is the dimension of the problem; (ii) given a KKT point that is not global minimizer, it is easy to find a ``better'' feasible point; (iii) strict complementarity holds at the local-nonglobal minimum point. In the second part, we show that the original constrained problem is equivalent to the unconstrained minimization of a piecewise quartic merit function.
Using the unconstrained formulation, we give, in the nonconvex case, a new second order necessary condition for global minimum points. A possible algorithmic application of the preceding results is briefly outlined.}, summary = {The technical report associated with \citebb{LuciPalaRoma98}, but containing more technical detail.}} @article{LuciPalaRoma98, author = {S. Lucidi and L. Palagi and M. Roma}, title = {On some properties of quadratic programs with a convex quadratic constraint}, journal = SIOPT, volume = 8, number = 1, pages = {105--123}, year = 1998, abstract = {In this paper, we consider the problem of minimizing a (possibly nonconvex) quadratic function with a quadratic constraint. We point out some new properties of the problem. In particular, in the first part of the paper, we show that (i) given a KKT point that is not global minimizer, it is easy to find a ``better'' feasible point; (ii) strict complementarity holds at the local-nonglobal minimum point. In the second part, we show that the original constrained problem is equivalent to the unconstrained minimization of a piecewise quartic merit function. Using the unconstrained formulation, we give, in the nonconvex case, a new second order necessary condition for global minimum points. In the third part, algorithmic applications of the preceding results are briefly outlined, and some preliminary numerical experiments are reported.}, summary = {The problem of minimizing a non-convex quadratic function with a quadratic constraint is considered, and properties of the problem identified. In particular, (i) given a KKT point that is not global minimizer, it is easy to find a ``better'' feasible point; and (ii) strict complementarity holds at the local-non-global minimum point. It is also shown that the original constrained problem is equivalent to the unconstrained minimization of a piecewise quartic merit function. 
Using this formulation, a second order necessary condition for global minimum points is given in the non-convex case. Algorithmic applications are outlined, and preliminary numerical experiments reported.}} @article{LuciXuLayn96, author = {A. Lucia and J. Xu and K. M. Layn}, title = {Nonconvex Process Optimization}, journal = {Computers and Chemical Engineering}, volume = 20, number = 12, pages = {1375--1398}, year = 1996, abstract = {Difficulties associated with nonconvexity in successive quadratic programming (SPQ) methods are studied. It is shown that projected indefiniteness of the Hessian matrix of the Lagrangian function can (i) place restrictions on the order in which inequalities can be added or deleted from the active set, (ii) generate redundant active sets whose resolution is nontrivial, (iii) give rise to quadratic programming (QP) subproblems that have multiple Kuhn-Tucker points, and (iv) produce nondescent directions in the SQP method that can lead to failure. Related issues concerned with the use of feasible or infeasible starting points for the iterative quadratic programs, forcing positive definiteness to ensure convexity and using iterative methods to solve the linear Kuhn-Tucker conditions associated with the QP subproblems are also studied. A new active set strategy that (i) monitors projected indefiniteness to guide the addition of constraints to the active set, (ii) permits line searching for negative values of the line search parameter, and (iii) does not necessarily delete active constraints with incorrect Kuhn-Tucker multipliers is proposed. Constraint redundancy is circumvented using an algorithm that identifies all nontrivial redundant subsets of smallest size and determines which, if any, exchanges are justified. Nondescent in the NLP's is resolved using a linear programming (LP)-based trust region method that guarantees descent regardless of merit function. 
It is also shown that there is no justification for using feasible starting points at the QP level of the calculations, that forcing positive definiteness to ensure convexity can cause termination at undesired solutions, and that the use of iterative methods to solve the linear Kuhn-Tucker equations for the QP's can cause a deterioration in numerical performance. Many small chemical process examples are used to highlight difficulties so that geometric illustrations can be used while heat exchange network design and distillation operations examples are used to show that these same difficulties carry over the larger problems.}, summary = {Difficulties associated with non-convexity in successive quadratic programming (SPQ) methods are studied. It is shown that projected indefiniteness of the Hessian matrix of the Lagrangian function can (i) place restrictions on the order in which inequalities can be added or deleted from the active set, (ii) generate redundant active sets whose resolution is non-trivial, (iii) give rise to quadratic programming (QP) subproblems that have multiple Kuhn-Tucker points, and (iv) produce non-descent directions in the SQP method that can lead to failure. Related issues concerned with the use of feasible or infeasible starting points for the iterative quadratic programs, forcing positive definiteness to ensure convexity and using iterative methods to solve the linear Kuhn-Tucker conditions associated with the QP subproblems are also studied. An active set strategy that (i) monitors projected indefiniteness to guide the addition of constraints to the active set, (ii) permits line searching for negative values of the linesearch parameter, and (iii) does not necessarily delete active constraints with incorrect Kuhn-Tucker multipliers is proposed. Constraint redundancy is circumvented using an algorithm that identifies all non-trivial redundant subsets of smallest size and determines which, if any, exchanges are justified. 
Non-descent in the NLP's is resolved using a linear programming (LP)-based trust-region method that guarantees descent regardless of merit function. It is also shown that there is no justification for using feasible starting points at the QP level of the calculations, that forcing positive definiteness to ensure convexity can cause termination at undesired solutions, and that the use of iterative methods to solve the linear Kuhn-Tucker equations for the QP's can cause a deterioration in numerical performance. Many small chemical process examples are used to highlight difficulties so that geometric illustrations can be used while heat exchange network design and distillation operations examples are used to show that these same difficulties carry over the larger problems.}} @book{Luen69, author = {D. G. Luenberger}, title = {Optimization by Vector Space Methods}, publisher = WILEY, address = WILEY-ADDRESS, year = 1969} @book{Luen84, author = {D. G. Luenberger}, title = {Linear and Nonlinear Programming}, publisher = ADW, address = ADW-ADDRESS, edition = {second}, year = 1984} @article{Luks93, author = {L. Luk\v{s}an}, title = {Inexact Trust Region Method for Large Sparse Nonlinear Least-Squares}, journal = {Kybernetica}, volume = 29, number = 4, pages = {305--324}, year = 1993, abstract = {The main purpose of this paper is to show that linear least squares methods based on bidiagonalization, namely the LSQR algorithm, can be used for generation of trust region path. This property is a basis for an inexact trust region method which uses the LSQR algorithm for direction determination. This method is very efficient for large sparse nonlinear least squares as it is supported by numerical experiments.}, summary = {It is shown that linear least-squares methods based on the LSQR algorithm can be used for generation of trust-region path. This property is a basis for an inexact trust-region method. 
Numerical experiments suggest that this method is efficient for large sparse nonlinear least-squares.}} @article{Luks94, author = {L. Luk\v{s}an}, title = {Inexact Trust Region Method for Large Sparse Systems of Nonlinear Equations}, journal = JOTA, volume = 81, number = 3, pages = {569--590}, year = 1994, abstract = {The main purpose of this paper is to prove the global convergence of the new trust region method based on the smoothed CGS algorithm. This method is surprisingly convenient for the numerical solution of large sparse systems of nonlinear equations, as is demonstrated by numerical experiments. A modification of the proposed trust region method does not use matrices, so it can be used for large dense systems of nonlinear equations.}, summary = {The global convergence of a trust-region method based on the smoothed CGS algorithm is proved. Numerical experiments indicate that the method is surprisingly convenient for the numerical solution of large sparse systems of nonlinear equations. A modification of the method does not use matrices, and can be used for large dense systems of nonlinear equations.}} @article{Luks96a, author = {L. Luk\v{s}an}, title = {Hybrid methods for large sparse nonlinear least-squares}, journal = JOTA, volume = 89, number = 3, pages = {575--595}, year = 1996, abstract = {Hybrid methods are developed for improving the Gauss-Newton method in the case of large residual or ill-conditioned nonlinear least-square problems. These methods are used usually in a form suitable for dense problems. But some standard approaches are unsuitable, and some new possibilities appear in the sparse case. We propose efficient hybrid methods for various representations of the sparse problems. After describing the basic ideas that help deriving new hybrid methods, we are concerned with designing hybrid methods for sparse Jacobian and sparse Hessian representations of the least-square problems.
The efficiency of hybrid methods is demonstrated by extensive numerical experiments.}, summary = {Hybrid methods are developed for improving the Gauss-Newton method in the case of large residual or ill-conditioned nonlinear least-square problems. Hybrid methods for sparse Jacobian and sparse Hessian representations of the least-square problems are considered. The efficiency of these methods is illustrated by extensive numerical experiments. }} @article{Luks96b, author = {L. Luk\v{s}an}, title = {Combined Trust Region Methods for Nonlinear Least-Squares}, journal = {Kybernetica}, volume = 32, number = 2, pages = {121--138}, year = 1996, abstract = {Trust region realizations of the Gauss-Newton method are commonly used for obtaining solution of nonlinear least squares problems. We propose three efficient algorithms which improve standard trust region techniques : multiple dog-leg strategy for dense problems and two combined conjugate gradient Lanczos strategies for sparse problem. Efficiency of these methods is demonstrated by extensive numerical experiments.}, summary = {Trust-region realizations of the Gauss-Newton method are commonly used for obtaining solution of nonlinear least-squares problems. Three algorithms which improve standard trust-region techniques are proposed, comprising a multiple dog-leg strategy for dense problems and two combined conjugate-gradient Lanczos strategies for sparse problem. Efficiency of these methods is illustrated by extensive numerical experiments.}} @article{Luks96c, author = {L. Luk\v{s}an}, title = {Efficient Trust Region Method for nonlinear least-squares}, journal = {Kybernetica}, volume = 32, number = 2, pages = {105--120}, year = 1996, abstract = {The main purpose of this paper is to show that suitable transformations and decompositions lead to an efficient trust region method that uses one decomposition in each iteration only. 
Convergence properties of the trust region method with optimal locally constrained step (OLCS) that uses more than one decomposition in each iteration and, therefore, that needs a longer time for obtaining results. This fact is demonstrated by numerical experiments.}, summary = {Suitable transformations and decompositions lead to an efficient trust-region method that uses a single factorization at each iteration. This is compared to the optimal locally constrained step that uses more than one decomposition per iteration. Numerical experiments suggest that the former approach is more efficient.}} @article{LuksVlce96, author = {L. Luk\v{s}an and J. Vl\v{c}ek}, title = {Optimization of dynamical-systems}, journal = {Kybernetica}, volume = 32, number = 5, pages = {465--482}, year = 1996, abstract = {Consider an optimization problem where the objective function is an integral containing the solution of a system of ordinary differential equations. Suppose we have efficient optimization methods available as well as efficient methods for initial value problems for ordinary differential equations. The main purpose of this paper is to show how these methods can be efficiently applied to a considered problem. First, the general procedures for the evaluation of gradients and Hessian matrices are described. Furthermore, the new efficient Gauss-Newton-like approximation of the Hessian matrix is derived for the special case when the objective function is an integral of squares. This approximation is used for deriving the Gauss-Newton-like trust region method, with which global and superlinear convergence properties are proved. Finally several optimization methods are proposed and computational experiments illustrating their efficiency are shown.}, summary = {Optimization problems where the objective function is an integral containing the solution of a system of ordinary differential equations are considered. 
It is shown that optimization methods and methods for initial value problems for ordinary differential equations can be efficiently combined. General procedures for the evaluation of gradients and Hessian matrices are described. An efficient Gauss-Newton-like approximation of the Hessian matrix is derived for the special case when the objective function is an integral of squares. This approximation is used to derive a Gauss-Newton-like trust-region method, for which global and superlinear convergence properties are proved. Finally several methods are proposed and illustrated by computational experiments.}} @article{LuksVlce97, author = {L. Luk\v{s}an and J. Vl\v{c}ek}, title = {Truncated trust region methods based on preconditioned iterative subalgorithms for large sparse systems of nonlinear equations}, journal = JOTA, volume = 95, number = 3, pages = {637--658}, year = 1997, abstract = {This paper is devoted to globally convergent methods for solving large sparse systems of nonlinear equations with an inexact approximation of the Jacobian matrix. These methods include difference versions of the Newton method and various quasi-Newton methods. We propose a class of trust region methods together with a proof of their global convergence and describe an implementable globally convergent algorithm which can be used as a realization of these methods. Considerable attention is concentrated on the application of conjugate gradient-type iterative methods to the solution of linear subproblems. We prove that both the GMRES and the smoothed CGS well-preconditioned methods can be used for the construction of globally convergent trust region methods. The efficiency of our algorithm is demonstrated computationally by using a large collection of sparse test problems.}, summary = {Globally convergent methods for solving large sparse systems of nonlinear equations with an inexact approximation of the Jacobian matrix are studied. 
These methods include difference versions of the Newton method and various quasi-Newton methods. A class of trust region methods is proposed together with a proof of their global convergence and an implementable globally convergent algorithm described which can be used as a realization of these methods. Emphasis is put on the application of conjugate gradient-type iterative methods to the solution of linear subproblems. We prove that both the GMRES and the smoothed CGS well-preconditioned methods can be used for the construction of globally convergent trust region methods. The efficiency of our algorithm is demonstrated computationally by using a large collection of sparse test problems.}} @article{LuoTsen93, author = {Z. Q. Luo and P. Tseng}, title = {Error bounds and convergence analysis of feasible direction methods: a general approach}, journal = AOR, volume = 46, pages = {157--178}, year = 1993} @inproceedings{LuoTsen97, author = {Z. Q. Luo and P. Tseng}, title = {A New Class of Merit Functions for the Nonlinear Complementarity Problem}, crossref = {FerrPang97}, pages = {204--225}} @article{LyleSzul94, author = {S. Lyle and M. Szularz}, title = {Local Minima of the Trust Region Problem}, journal = JOTA, volume = 80, number = 1, pages = {117--134}, year = 1994, abstract = {We consider the minimization of a quadratic form $z^TVz + 2z^Tq$ subject to the two-norm constraint $\|z\| = \alpha$. The problem received considerable attention in the literature, notably due to its applications to a class of trust region methods in nonlinear optimization. While the previous studies were concerned with just the global minimum of the problem, we investigate the existence of all local minima. The problem is approached via the dual Lagrangian, and the necessary and sufficient conditions for the existence of all local minima are derived.
We also examine the suitability of the conventional numerical techniques used to solve the problem to a class of single-instruction multiple-data computers known as processor arrays (in our case, AMT DAP 610). Simultaneously, we introduce certain hardware-oriented multisection algorithms, showing their efficiency in the case of small to medium size problems.}, summary = {The minimization of a quadratic form subject to the two-norm constraint is considered. The existence of local minima is investigated. The problem is approached via the dual Lagrangian, and necessary and sufficient conditions for the existence of all local minima are derived. The suitability of the conventional numerical techniques used to solve the problem on processor arrays is examined. Hardware-oriented multisection algorithms are considered, and their efficiency demonstrated on small to medium size problems.}} %%% M %%% @article{Maan87, author = {Z. A. Maany}, title = {A new algorithm for highly curved constrained optimization}, journal = MPS, volume = 31, pages = {139--154}, year = 1987, abstract = {This paper describes a new algorithm for highly curved constrained optimisation. The algorithm under discussion makes use of the second derivatives of both the objective function and constraints. At every iteration a subproblem based on the second order approximation of the objective and constraints functions is solved. Three strategies to solve the subproblem are used. Some computational results are given. Although the performance of the subroutine is very promising a number of areas are still under development and further improvement is expected.}, summary = {An algorithm for highly curved constrained optimisation is considered, which makes use of the second derivatives of both the objective function and constraints. At every iteration a subproblem based on the second order approximation of the objective and constraints functions is solved. Three strategies to solve the subproblem are used. 
Some computational results are given.}} @article{Mads75, author = {K. Madsen}, title = {An algorithm for the minimax solution of overdetermined systems of nonlinear equations}, journal = JIMA, volume = 16, number = 3, pages = {321--328}, year = 1975, abstract = {The problem of minimising the maximum residual of a system of non-linear equations is studied in the case where the number of equations is larger than the number of unknowns. It is supposed that the functions defining the problem have continuous first derivatives and the algorithm is based on successive linear approximations to these functions. The resulting linear systems are solved in the minimax sense, subject to bounds on the solutions, the bounds being adjusted automatically, depending on the goodness of the linear approximations. It is proved that the method always has sure convergence properties. Some numerical examples are given.}, summary = {A method for nonlinear minimax in which linear models are considered subject to an $\ell_{\infty}$ trust region.}} @article{MadyAazh94, author = {R. K. Madyastha and B. Aazhang}, title = {An algorithm for training multilayer perceptrons for data classification and function interpolation}, journal = {IEEE Transactions on Circuits and Systems I: Fundamental Theory and Applications}, volume = 41, number = 12, pages = {866--875}, year = 1994, abstract = {This paper addresses the issue of employing a parametric class of nonlinear models to describe nonlinear systems. This model class consists of a subclass of artificial neural networks, multilayer perceptrons. Specifically, we discuss the application of a ``globally'' convergent optimization scheme to the training of the multilayer perceptron. The algorithm discussed is termed the conjugate gradients-trust regions algorithm (CGTR) and combines the merits of two well known ``global'' algorithms---the conjugate gradients and the trust region algorithms.
In this paper we investigate the potential of the multilayer perceptron, trained using the CGTR algorithm, towards function approximation in two diverse scenarios: i) signal classification in a multiuser communication system, and ii) approximating the inverse kinematics of a robotic manipulator. Until recently, the most widely used training algorithm has been the backpropagation algorithm, which is based on the linearly convergent steepest descent algorithm. It is seen that the multilayer perceptron trained with the CGTR algorithm is able to approximate the desired functions to a greater accuracy than when trained using backpropagation. Specifically, in the case of the multiuser communication problem, we obtain lower probabilities of error in demodulating a given user's signal and in the robotics problem, we observe lower root mean square errors in approximating the inverse kinematics function.}, summary = {The application of a globally convergent optimization scheme to the training of the multilayer perceptron is discussed. The algorithm combines the conjugate-gradients and the trust-region algorithms. The potential of the multilayer perceptron, trained using the algorithm, is considered in signal classification in a multiuser communication system, and in approximating the inverse kinematics of a robotic manipulator. It is seen that the multilayer perceptron trained with the trust-region algorithm is able to approximate the desired functions to a greater accuracy than when trained using backpropagation. Specifically, in the case of the multiuser communication problem, lower probabilities of error in demodulating a given user's signal, and, in the robotics problem, lower root mean square errors in approximating the inverse kinematics function, are obtained.}} @inproceedings{Mall97, author = {M. K. Mallick}, title = {Applications of Nonlinear Orthogonal Distance Regression in 3D Motion Estimation}, crossref = {VanH97}, pages = {273--282}} @book{Mang69, author = {O. 
L. Mangasarian}, title = {Nonlinear Programming}, publisher = MACGH, address = MACGH-ADDRESS, year = 1969, note = {Reprinted as \emph{Classics in Applied Mathematics 10}, SIAM, Philadelphia, USA, 1994}} @article{Mang80, author = {O. L. Mangasarian}, title = {Locally unique solutions of quadratic programs, linear and non-linear complementarity problems}, journal = MP, volume = 19, number = 2, pages = {200--212}, year = 1980} @article{MangFrom67, author = {O. L. Mangasarian and S. Fromovitz}, title = {The {F}ritz {J}ohn necessary optimality conditions in the presence of equality and inequality constraints}, journal = {Journal of Mathematical Analysis and Applications}, volume = 17, pages = {37--47}, year = 1967} @article{MangSolo93, author = {O. L. Mangasarian and M. V. Solodov}, title = {Nonlinear complementarity as unconstrained and constrained minimization}, journal = MPB, volume = 62, number = 2, pages = {277--297}, year = 1993} @phdthesis{Mara78, author = {N. Maratos}, title = {Exact penalty function algorithms for finite-dimensional and control optimization problems}, school = {University of London}, address = {London, England}, year = 1978} @article{Marq63, author = {D. Marquardt}, title = {An Algorithm For Least-Squares Estimation Of Nonlinear Parameters}, journal = {SIAM Journal on Applied Mathematics}, volume = 11, pages = {431--441}, year = 1963, summary = {Taylor-series and steepest-descent methods are sometimes ineffective as algorithms for the least-squares estimation of nonlinear parameters. A maximum neighbourhood method is developed which, in effect, performs an optimum interpolation between Taylor-series and steepest-descent methods. The interpolation is based upon the maximum neighbourhood in which the truncated Taylor series gives an adequate representation of the nonlinear model.}} @article{Mart70, author = {B. 
Martinet}, title = {R\'{e}gularisation d'in\'{e}quations variationnelles par approximations successives}, journal = {Revue Fran\c{c}aise d'Informatique et de Recherche Op\'{e}rationnelle}, volume = 4, pages = {154--159}, year = 1970} @article{Mart87, author = {J. M. Mart\'{\i}nez}, title = {An algorithm for solving sparse nonlinear least squares problems}, journal = {Computing}, volume = 39, number = 4, pages = {307--325}, year = 1987, abstract = {We introduce a new method for solving Nonlinear Least Squares problems when the Jacobian matrix of the system is large and sparse. The main features of the new method are the following: 1) the Gauss-Newton equation is ``partially'' solved at each iteration using a preconditioned Conjugate Gradient algorithm, 2) the new point is obtained using a two-dimensional trust region scheme, similar to the one introduced by \citebb{BultVial87}. We prove global convergence results and we present some numerical results.}, summary = {A method is given for solving nonlinear least-squares problems, when the Jacobian matrix of the system is large and sparse. The main features of the method are that the Gauss-Newton equation is ``partially'' solved at each iteration using a preconditioned conjugate-gradient algorithm, and that the new point is obtained using a two-dimensional trust region scheme, similar to the one introduced by \citebb{BultVial87}. Global convergence results and numerical results are presented.}} @article{Mart94, author = {J. M. Mart\'{\i}nez}, title = {Local minimizers of quadratic functions on {E}uclidean balls and spheres}, journal = SIOPT, volume = 4, number = 1, pages = {159--176}, year = 1994, abstract = {In this paper a characterization of the local-nonglobal minimizer of a quadratic function defined on a Euclidean ball or sphere is given.
It is proven that there exists at most one local-nonglobal minimizer and that the Lagrange multiplier that corresponds to this minimizer is the largest solution of a nonlinear scalar equation. An algorithm is proposed for computing the local-nonglobal minimizer.}, summary = {A characterization of the local-non-global minimizers of a quadratic function defined on a Euclidean ball or sphere is given. It is proven that there exists at most one local-non-global minimizer and that the Lagrange multiplier that corresponds to this minimizer is the largest solution of a nonlinear scalar equation. An algorithm is proposed for computing the local-non-global minimizer.}} @article{Mart95, author = {J. M. Mart\'{\i}nez}, title = {Discrimination by Means of a Trust Region Method}, journal = {International Journal of Computer Mathematics}, volume = 55, number = {1--2}, pages = {91--103}, year = 1995, abstract = {Suppose that the individuals of a population are divided into two groups according to some unknown merit criterion. It is required to determine weights for a set of variables which should be positively correlated with merit, in such a way that scores of the individuals in the superior group are above some level, and vice-versa. This may be modelled as an easily-solvable convex optimization problem. Examples are given.}, summary = {The individuals of a population are divided in two groups according to some unknown merit criterion and the problem is considered to determine weights for a set of variables which should be positively correlated with merit, in such a way that scores of the individuals in the superior group are above some level, and vice-versa. This situation is modeled as an easy convex optimization problem.}} @article{MartSant97, author = {J. M. Mart\'{\i}nez and S. A. 
Santos}, title = {New convergence results on an algorithm for norm constrained regularization and related problems}, journal = RAIRO-OR, volume = 31, number = 3, pages = {269--294}, year = 1997, abstract = {The constrained least-squares regularization of nonlinear ill-posed problems is a nonlinear programming problem for which trust-region methods have been developed. In this paper we complement the convergence theory of one of those methods showing that, under suitable hypotheses, local (superlinear or quadratic) convergence holds and every accumulation point is second-order stationary.}, summary = {The constrained least-squares regularization of nonlinear ill-posed problems is a nonlinear programming problem for which trust-region methods have been developed. It is shown that for one such method, under suitable hypotheses, local (superlinear or quadratic) convergence occurs and every accumulation point is second-order stationary.}} @article{MartMore97, author = {J. M. Mart\'{\i}nez and A. C. Moretti}, title = {A trust region method for minimization of nonsmooth functions with linear constraints}, journal = MP, volume = 76, number = 3, pages = {431--449}, year = 1997, abstract = {We introduce a trust region algorithm for minimization of nonsmooth functions with linear constraints. At each iteration, the objective function is approximated by a model function that satisfies a set of assumptions stated recently by \citebb{QiSun94} in the context of unconstrained nonsmooth optimization. The trust region iteration begins with the resolution of an ``easy problem'', as in the recent works of \citebb{MartSant95} and \citebb{FrieMartSant94}, for smooth constrained optimization. In practical implementations we use the infinity norm for defining the trust-region, which fits well with the domain of the problem. 
We prove global convergence and report numerical experiments related to the parameter estimation problem.}, summary = {A trust-region algorithm for minimization of non-smooth functions with linear constraints is introduced. At each iteration, the objective function is approximated by a model that satisfies assumptions stated by \citebb{QiSun94} for unconstrained non-smooth optimization. The trust-region iteration begins with the solution of an ``easy problem'', as in \citebb{MartSant95} and \citebb{FrieMartSant94}. In practical implementations, the infinity norm is used to define the trust region. Global convergence is established, and numerical experiments for the parameter estimation problem reported.}} @article{MartSant95, author = {J. M. Mart\'{\i}nez and S. A. Santos}, title = {A trust-region strategy for minimization on arbitrary domains}, journal = MP, volume = 68, number = 3, pages = {267--301}, year = 1995, abstract = {We present a trust-region method for minimizing a general differentiable function restricted to an arbitrary closed set. We prove a global convergence theorem. The trust-region method defines difficult subproblems that are solvable in some particular cases. We analyze in detail the case where the domain is a Euclidean ball. For this case we present numerical experiments where we consider different Hessian approximations.}, summary = {A trust-region method for minimizing a general differentiable function restricted to an arbitrary closed set is presented, and global convergence is proved. The case where the domain is a Euclidean ball is analysed in detail. For this case, numerical experiments which consider a variety of Hessian approximations are presented.}} @article{MaurZowe79, author = {H. Maurer and J. Zowe}, title = {First and second-order necessary and sufficient optimality conditions for infinite-dimensional programming problems}, journal = MP, volume = 16, pages = {98--110}, year = 1979} @article{MaurMacu97, author = {D. 
Mauricio and N. Maculan}, title = {A trust region method for zero-one nonlinear programming}, journal = RAIRO-OR, volume = 31, number = 4, pages = {331--341}, year = 1997, abstract = {An $O(n \log n)$ trust region approximation method to solve $0-1$ non-linear programming is presented. Optimality conditions and numerical results are reported.}, summary = {An $O(n \log n)$ trust region approximation method to solve $0-1$ nonlinear programming is presented. Optimality conditions and numerical results are reported.}} @article{MaynMara79, author = {D. Q. Mayne and N. Maratos}, title = {A first-order, exact penalty function algorithm for equality constrained optimization problems}, journal = MP, volume = 16, number = 3, pages = {303--324}, year = 1979} @article{MaynPola82, author = {D. Q. Mayne and E. Polak}, title = {A superlinearly convergent algorithm for constrained optimization problems}, journal = MPS, volume = 16, pages = {45--61}, year = 1982} @article{McAfGayHozaLaudSchwSund86, author = {K. B. McAfee and D. M. Gay and R. S. Hozack and R. A. Laudise and G. Schwartz and W. A. Sunder}, title = {Thermodynamic considerations in the synthesis and crystal growth of GaSb}, journal = {Journal of Crystal Growth}, volume = 76, number = 2, pages = {263--271}, year = 1986, abstract = {A newly developed optimization algorithm, the 'sticky trust region technique', for Gibbs energy minimization is used to determine the gaseous species and liquid and solid phases present during the synthesis and crystal growth of GaSb. The growth system involves almost thirty species comprising a gaseous phase and nine condensed species. The system is modelled as a function of temperature, oxygen and hydrogen pressure in the presence of an SiO/sub 2/ crucible. Ga/sub 2/O/sub 3/ is identified as the most stable contaminant compound and is seen as a phase that floats on the liquid melt during growth. 
This oxide often prevents the growth of high quality crystals.}, summary = {The ``sticky trust region technique'' for Gibbs energy minimization is used to determine the gaseous species and liquid and solid phases present during the synthesis and crystal growth of GaSb. The growth system involves almost thirty species comprising a gaseous phase and nine condensed species. }} @article{McAfGayHozaLaudSund88, author = {K. B. McAfee and D. M. Gay and R. S. Hozack and R. A. Laudise and W. A. Sunder}, title = {Thermodynamic stability and reactivity of {A}l{S}b and their relationship to crystal growth}, journal = {Journal of Crystal Growth}, volume = 88, number = 4, pages = {488--498}, year = 1988, abstract = {The vapor pressure, species concentration and reactivity of AlSb is modeled thermodynamically using the newly-described sticky trust region technique, STRT, for free energy minimization. The conditions chosen include oxygen and hydrogen concentrations appropriate to Czochralski crystal growth in SiO/sub 2/, C, BN, Al/sub 2/O/sub 3/ and BeO crucibles and growth in the absence of a crucible. Results are compared with crystal growth experiments where appropriate. At the melting point the principal vapor species is Sb/sub 2/ with a total vapor pressure of approximately 10/sup -3/ atm, which is about 10/sup 2/ larger than for GaSb.}, summary = {The vapor pressure, species concentration and reactivity of AlSb is modeled thermodynamically using the newly-described sticky trust-region technique, STRT, for free energy minimization. The conditions chosen include oxygen and hydrogen concentrations appropriate to Czochralski crystal growth.}} @article{McCa98, author = {McCartin, B. 
J.}, title = {A model-trust region algorithm utilizing a quadratic interpolant}, journal = JCAM, volume = 91, number = 2, pages = {249--259}, year = 1998, abstract = {A new model-trust region algorithm for problems in unconstrained optimization and nonlinear equations utilizing a quadratic interpolant for step selection is presented and analyzed. This is offered as an alternative to the piecewise-linear interpolant employed in the widely used ''double dogleg'' step selection strategy. After the new step selection algorithm has been presented, we offer a summary, with proofs, of its desirable mathematical properties. Numerical results illustrating the efficacy of this new approach are presented.}, summary = {A model-trust region algorithm for problems in unconstrained optimization and nonlinear equations utilizing a quadratic interpolant is presented and analyzed. This is offered as an alternative to the piecewise-linear interpolant employed in the double dogleg strategy. A step selection algorithm is presented, along with a summary, with proofs, of its desirable mathematical properties. Numerical results are presented.}} @article{McCo69, author = {G. P. McCormick}, title = {Anti-zig-zagging by bending}, journal = {Management Science}, volume = 15, pages = {315--319}, year = 1969} @techreport{McCo91, author = {G. P. McCormick}, title = {The superlinear convergence of a nonlinear primal-dual algorithm}, institution = {Department of Operations Research, George Washington University, Washington}, type = {Technical Report}, number = {OR T-550/91}, year = 1991} @article{McKeMesiZeni95, author = {M. P. McKenna and J. P. Mesirov and S. A. Zenios}, title = {Data Parallel Quadratic Programming on Box-Constrained Problems}, journal = SIOPT, volume = 5, number = 3, pages = {570--589}, year = 1995} @article{MehrSun91, author = {S. Mehrotra and J. 
Sun}, title = {A method of analytic centers for quadratically constrained convex quadratic programs}, journal = SINUM, volume = 28, pages = {529--544}, year = 1991} @article{MentAnde91, author = {J. Mentel and H. Anderson}, title = {A new Kind of Parameter-Estimation of Reactions Under Dynamic Temperature Program}, journal = {Thermochimica Acta}, volume = 187, number = {SEP}, pages = {121--132}, year = 1991, abstract = {For kinetic evaluation of TG, DTA and DSC of simple and complex reactions we tried to apply a parameter adjustment by means of non- linear optimization (trust-region method with Marquardt-routine). Including all experimental data and using the differential equation systems, we found many advantages over the usual linearizing methods. The problems of consecutive and competing reactions as well as those with steady states (enzyme kinetics) were solved satisfactorily if two or more data sets with different heating rates are known. Supplementary use of other analytical methods are recommendable.}, summary = {A trust-region method is applied to a parameter estimation related to kinetic evaluation of TG, DTA and DSC of simple and complex reactions. When all experimental data is included and the differential equation systems used, this proves advantageous compared to usual linearizing methods. The problems of consecutive and competing reactions as well as those with steady states (enzyme kinetics) are solved satisfactorily if two or more data sets with different heating rates are known. Supplementary use of other analytical methods is recommended.}} @article{MentTillMollHabe92, author = {J. Mentel and V. Tiller and E. Moller and D.
Haberland}, title = {Estimation of Parameters in Systems of Ordinary Differential-Equations to the Determination of Kinetic-Parameters}, journal = {Chemische Technik}, volume = 44, number = 9, pages = {300--303}, year = 1992, abstract = {The integral determination of kinetic constants in complex systems was handled as a special case of parameter estimation in systems of differential equations. The program for parameter estimation is written in Turbo-Pascal. A Trust-Region method with Levenberg- Marquardt routine was used. Especially with the use of the stable BDF-integration routine it is possible that the start values differ some magnitudes from the solution. Simulations of different models prove the efficiency of the evaluation also with non-smooth data. In these cases it is not allowed to calculate concentrations needed for the evaluation with the help of stoichiometric relations.}, summary = {The integral determination of kinetic constants in complex systems is handled as a special case of parameter estimation in systems of differential equations. A trust-region method is used. A stable BDF-integration routine allows the use of bad initial values. Simulations of different models prove the efficiency of the method even with non-smooth data. In these cases it is not possible to calculate concentrations needed for the evaluation with the help of stoichiometric relations.}} @article{Miff75, author = {R. Mifflin}, title = {A superlinearly convergent algorithm for minimization without evaluating derivatives}, journal = MP, volume = 9, number = 1, pages = {100--117}, year = 1975} @article{Miff75b, author = {R. Mifflin}, title = {Convergence bounds for nonlinear programming algorithms}, journal = MP, volume = 8, number = 3, pages = {251--271}, year = 1975} @article{MineFukuTana84, author = {H. Mine and M. Fukushima and Y.
Tanaka}, title = {On the use of epsilon-most-active constraints in an exact penalty function method for nonlinear optimization}, journal = {IEEE Transactions on Automatic Control}, volume = {AC-29}, number = 11, pages = {1040--1042}, year = {1984}, abstract = {An algorithm for nonlinear programming problems is presented which utilizes the epsilon-most-active constraint strategy in an exact penalty function method with trust region. The algorithm is particularly suitable for problems containing a large number of constraints. The global convergence of the proposed algorithm is proved. The results of limited computational experiments on discretized semi-infinite programming problems are reported to demonstrate the effectiveness of the present approach.}, summary = {A globally convergent algorithm for nonlinear programming problems is presented which utilizes the epsilon-most-active constraint strategy in an exact penalty function method with trust region. The algorithm is particularly suitable for problems containing a large number of constraints. Some computational experiments are reported.}} @article{MongSart95, author = {M. Mongeau and A. Sartenaer}, title = {Automatic decrease of the penalty parameter in exact penalty function methods}, journal = {European Journal of Operational Research}, volume = 83, number = 3, pages = {686--699}, year = 1995} @article{MontAdle89, author = {R. D. C. Monteiro and I. Adler}, title = {Interior path following primal-dual algorithms. 2. {C}onvex quadratic programming}, journal = MP, volume = 44, number = 1, pages = {43--66}, year = 1989} @article{MontWang98, author = {R. D. C. Monteiro and Y. Wang}, title = {Trust region affine scaling algorithms for linearly constrained convex and concave programs}, journal = MP, volume = 80, number = 3, pages = {283--310}, year = 1998, abstract = {We study a trust region affine scaling algorithm for solving the linearly constrained convex or concave programming problem.
Under primal nondegeneracy assumption, we prove that every accumulation point of the sequence generated by the algorithm satisfies the first order necessary condition for optimality of the problem. For a special class of convex or concave functions satisfying a certain invariance condition on their Hessians, it is shown that the sequence of iterates and objective function values generated by the algorithm converge R-linearly and Q-linearly, respectively. Moreover, under primal nondegeneracy and for this class of objective functions, it is shown that the limit point of the sequence of iterates satisfies the first and second order necessary conditions for optimality of the problem.}, summary = {A trust region affine scaling algorithm for solving the linearly constrained convex or concave programming problem is presented. Under primal non-degeneracy assumption, every accumulation point of the sequence generated by the algorithm satisfies the first order necessary condition. For a special class of convex or concave functions satisfying a certain invariance condition on their Hessians, the sequence of iterates and objective function values converge R-linearly and Q-linearly, respectively. Moreover, under primal non-degeneracy and for this class of objective functions, the limit point of the sequence of iterates satisfies the first and second-order necessary conditions.}} @article{MontTsuc98, author = {R. D. C. Monteiro and T. 
Tsuchiya}, title = {Global convergence of the affine scaling algorithm for convex quadratic programming}, journal = SIOPT, volume = 8, number = 1, pages = {26--58}, year = 1998, summary = {A global convergence proof of the second-order affine scaling algorithm for convex quadratic programming problems is given, where the new iterate is the point that minimizes the objective function over the intersection of the feasible region with the ellipsoid centered at the current point and whose radius is a fixed fraction $\beta \in (0,1)$ of the radius of the largest ``scaled'' ellipsoid inscribed in the nonnegative orthant. The analysis is based on the local Karmarkar potential function introduced by Tsuchiya. For any $\beta \in (0,1)$ and without making any nondegeneracy assumption on the problem, the sequences of primal iterates and dual estimates converge to optimal solutions of the quadratic program and its dual.}} @inproceedings{More78, author = {J. J. Mor\'{e}}, title = {The {L}evenberg-{M}arquardt algorithm: implementation and theory}, crossref = {Wats78}, pages = {105--116}, summary = {A robust implementation of the Levenberg-Morrison-Marquardt algorithm for nonlinear least squares is discussed. The proposed method is shown to have strong convergence properties. In addition to robustness, the main features are the proper use of implicitly scaled variables and the choice of the Levenberg-Morrison-Marquardt parameter via a scheme due to \citebb{Hebd73}. Numerical results illustrating the behaviour of this implementation are presented.}} @inproceedings{More83, author = {J. J. Mor\'{e}}, title = {Recent developments in algorithms and software for trust region methods}, crossref = {BachGrotKort83}, pages = {258--287}, abstract = {Trust region methods are an important class of iterative methods for the solution of systems of nonlinear equations, nonlinear estimation problems and large-scale optimization.
Interest in trust region methods derives, in part, from the availability of strong convergence results and from the development of software for these methods which is reliable, efficient, and amazingly free of ad-hoc decisions. In this paper we survey the theoretical and practical results available for trust region methods and discuss the relevance of these results to the implementation of trust region methods}, summary = {The theoretical and practical results available for trust-region methods in systems of nonlinear equations, nonlinear estimation problems and large-scale optimization are surveyed, and their relevance to the implementation of trust-region methods discussed.}} @inproceedings{More88, author = {J. J. Mor\'{e}}, title = {Trust regions and projected gradients}, booktitle = {System Modelling and Optimization}, editor = {M. Iri and K. Yajima}, publisher = SPRINGER, address = SPRINGER-ADDRESS, volume = 113, pages = {1--13}, year = 1988, note = {Lecture Notes in Control and Information Sciences}, abstract = {The numerical solution of large scale linearly constrained problems by algorithms which use the gradient projection method is a promising research area. Algorithms based on the gradient projection method are able to drop and add many constraints at each iteration, and this ability gives them an important advantage in large scale problems. In this paper we show how the ideas from the gradient projection method combine with trust region methods, and we give an indication of the powerful convergence results that are available for algorithms of this type.}, summary = {It is shown how the ideas from the gradient projection method combine with trust-region methods, and an indication of the powerful convergence results that are available for gradient-projection algorithms is given.}} @article{More93, author = {J. J. 
Mor\'{e}}, title = {Generalizations of the trust region problem}, journal = OMS, volume = 2, number = 3, pages = {189--209}, year = 1993, abstract = {The trust region problem requires the global minimum of a general quadratic function subject to an ellipsoidal constraint. The development of algorithms for the solution of this problem has found applications in nonlinear and combinatorial optimization. In this paper we generalize the trust region problem by allowing a general quadratic constraint. The main results are a characterization of the global minimizer of the generalized trust region problem, and the development of an algorithm that finds an approximate global minimizer in a finite number of iterations.}, summary = {The trust region subproblem is generalized by allowing a general quadratic constraint. The main results are a characterization of the global minimizer of the generalized trust-region problem, and the development of an algorithm that finds an approximate global minimizer in a finite number of iterations.}} @techreport{MoreGarbHill80, author = {J. J. Mor\'{e} and B. S. Garbow and K. E. Hillstrom}, title = {User guide for {MINPACK-1}}, institution = ANL, address = ANL-ADDRESS, number = {80--74}, year = 1980} @article{MoreGarbHill81, author = {J. J. Mor\'{e} and B. S. Garbow and K. E. Hillstrom}, title = {Testing Unconstrained Optimization Software}, journal = TOMS, volume = 7, number = 1, pages = {17--41}, year = 1981} @article{MoreSore83, author = {J. J. Mor\'{e} and D. C. Sorensen}, title = {Computing A Trust Region Step}, journal = SISSC, volume = 4, number = 3, pages = {553--572}, year = 1983, abstract = {We propose an algorithm for the problem of minimizing a quadratic function subject to an ellipsoidal constraint and show that this algorithm is guaranteed to produce a nearly optimal solution in a finite number of iterations. We also consider the use of this algorithm in a trust region Newton's method. 
In particular, we prove that under reasonable assumptions the sequence generated by Newton's method has a limit point which satisfies the first and second order necessary conditions for a minimizer of the objective function. Numerical results for GQTPAR, which is a Fortran implementation of our algorithm, show that GQTPAR is quite successful in a trust region method. In our tests a call to GQTPAR only required $1.6$ iterations on the average.}, summary = {An algorithm for minimizing a quadratic function subject to an ellipsoidal constraint is proposed. This algorithm is guaranteed to produce a nearly optimal solution in a finite number of iterations. The use of this algorithm in a trust-region Newton's method is also considered. In particular, it is shown that, under reasonable assumptions, the sequence generated by Newton's method has a limit point which satisfies first and second order necessary conditions for a minimizer. Numerical results for GQTPAR, a Fortran implementation of the algorithm, show that it is quite successful in a trust-region method. In these tests, a call to GQTPAR only required $1.6$ iterations on the average.}} @inproceedings{MoreSore84, author = {J. J. Mor\'{e} and D. C. Sorensen}, title = {{N}ewton's method}, booktitle = {Studies in Numerical Analysis}, editor = {G. H. Golub}, publisher = AMS, address = AMS-ADDRESS, series = {MAA Studies in Mathematics}, number = 24, pages = {29--82}, year = 1984, abstract = {Newton's method plays a central role in the development of numerical techniques for optimization. In fact, most of the current practical methods for optimization can be viewed as variations on {N}ewton's method. It is therefore important to understand {N}ewton's method as an algorithm in its own right and as a key introduction to the most recent ideas in this area.
One of the aims of this expository paper is to present and analyze two main approaches to {N}ewton's method for unconstrained optimization: the line search approach and the trust region approach. The other aim is to present some of the recent developments in the optimization field which are related to {N}ewton's method. In particular, we explore several variations on {N}ewton's method which are appropriate for large scale problems, and we also show how quasi-{N}ewton methods can be derived quite naturally from {N}ewton's method.}, summary = {The linesearch and trust region approaches for unconstrained optimization are discussed, and some of the recent developments related to Newton's method are presented. In particular, several variations on {N}ewton's method which are appropriate for large-scale problems are explored, and it is shown how quasi-Newton methods can be derived quite naturally from Newton's method.}} @article{MoreTora91, author = {J. J. Mor\'{e} and G. Toraldo}, title = {On the Solution of Large Quadratic Programming Problems with Bound Constraints}, journal = SIOPT, volume = 1, number = 1, pages = {93--113}, year = 1991} @book{MoreWrig93, author = {J. J. Mor\'{e} and S. J. Wright}, title = {Optimization Software Guide}, publisher = SIAM, address = SIAM-ADDRESS, number = 14, series = {Frontiers in Applied Mathematics}, year = 1993} @article{More62, author = {J. J. Moreau}, title = {D\'{e}composition orthogonale d'un espace {H}ilbertien selon deux c\^{o}nes mutuellement polaires}, journal = {Comptes-Rendus de l'Acad\'{e}mie des Sciences (Paris)}, volume = 255, pages = {238--240}, year = 1962} @inproceedings{Morr60, author = {D. D. Morrison}, title = {Methods for nonlinear least squares problems and convergence proofs}, booktitle = {Proceedings of the Seminar on Tracking Programs and Orbit Determination}, editor = {J. Lorell and F. 
Yagi}, publisher = {Jet Propulsion Laboratory}, address = {Pasadena, USA}, pages = {1--9}, year = 1960, abstract = {The STL tracking programs are designed to compute by least squares the most probable trajectory of a missile, from observed radar or optical data. These data can be any combination of range, azimuth, elevation, range rate, hour angle and declination, or direction cosines, with respect to a given location. A differential correction method is used, starting from an initial estimate, and the iteration continues until the residuals (observed minus computed values) are either all within specified limits, or until there is no further improvement. Errors above a certain absolute value are automatically eliminated. One program has been prepared primarily for lunar and interplanetary flights. In this, Cowell's method of trajectory computation is used. The partial derivatives used in the least squares solution are found by solving the related variational equations. Here, the trajectory elements, to which the corrections are applied, are the components of position and velocity of the missile at a particular point in the trajectory (expressed in spherical coordinates). For earth-satellite tracking, the elliptic elements of the osculating ellipse are used to specify the trajectory. Herrick's variation of elements method is used to compute the trajectory, and the partial derivatives are computed analytically. For these two programs a special least square subroutine has been prepared in which convergence can be assumed by limiting the amount any variable can change in one iteration. The standard deviation of each variable is printed out.
After a trajectory has been fitted to a certain set of data, additional data can be added without the necessity of reprocessing the original set, a feature which is especially valuable in regard to computing time.}, summary = {Least-squares estimation of missile trajectory is considered in the context of lunar and interplanetary flights as well as earthbound satellite tracking. A least-square subroutine is described in which convergence can be assumed by limiting the amount any variable can change in one iteration. A method is given that allows the computation of a quadratic model of the objective function within a sphere using a single linear system depending on a parameter. Monotonicity of the optimal model value as a function of this parameter is proved.}} @article{MukaPola75, author = {H. Mukai and E. Polak}, title = {A quadratically convergent primal-dual algorithm with global convergence properties for solving optimization problems with inequality constraints}, journal = MP, volume = 9, number = 3, pages = {336--349}, year = 1975} @article{MukaTatsFuku98, author = {K. Mukai and K. Tatsumi and M. Fukushima}, title = {An approximation algorithm for quadratic cost 0-1 mixed integer programming problems}, journal = {Transactions of the Institute of Electronics, Information and Communication Engineers A}, volume = {J81-A}, number = 4, pages = {649--657}, year = 1998, abstract = {In this paper, we focus on the quadratic cost 0-1 mixed integer programming problem. First, we formulate the problem as a two-level programming problem that consists of the lower level continuous quadratic programming problem with 0-1 variables being fixed and the upper level nonlinear 0-1 programming problem. We propose an approximation algorithm for solving the upper level 0-1 programming problem. This algorithm approximately solves a subproblem obtained by linearizing the objective function at a current point.
To guarantee the descent property of the generated sequence, we use a trust region technique that adaptively controls a penalty constant in the objective function of the subproblem. To solve subproblems, we apply a Hopfield network with a new transition rule that allows a temporary state transition based on the variable depth method. Some numerical experiments for a location-transportation problem with quadratic costs indicate that the proposed algorithm is practically effective.}, summary = {The quadratic cost 0-1 mixed integer programming problem is formulated as a two-level programming problem that consists of the lower level continuous quadratic programming problem with 0-1 variables being fixed and the upper level nonlinear 0-1 programming problem. An approximation algorithm for solving the upper level 0-1 programming problem is proposed that approximately solves a subproblem obtained by linearizing the objective function at a current point. To guarantee the descent property of the generated sequence, a trust-region technique adaptively controls a penalty constant in the objective function of the subproblem. To solve subproblems, a Hopfield network is applied with a new transition rule that allows a temporary state transition based on the variable depth method. Some numerical experiments for a location-transportation problem with quadratic costs indicate that the proposed algorithm is practically effective.}} @inproceedings{Murr69, author = {W. Murray}, title = {An algorithm for constrained minimization}, crossref = {Flet69}, pages = {189--196}} @article{Murr71, author = {W. Murray}, title = {Analytical expressions for eigenvalues and eigenvectors of the {H}essian matrices of barrier and penalty functions}, journal = JOTA, volume = 7, pages = {189--196}, year = 1971} @techreport{Murr71b, author = {W. 
Murray}, title = {An algorithm for finding a local minimum of an indefinite quadratic program}, institution = NPL, address = NPL-ADDRESS, number = {NAC 1}, year = 1971} @inproceedings{Murr92, author = {W. Murray}, title = {Ill-conditioning in Barrier Methods}, booktitle = {Advances in numerical partial differential equations and optimization, Proceedings of the Sixth Mexico-United States Workshop}, publisher = SIAM, address = SIAM-ADDRESS, year = 1992} @techreport{MurrWrig78, author = {W. Murray and M. H. Wright}, title = {Projected {L}agrangian methods based on the trajectories of penalty and barrier functions}, institution = STANFORD, address = STANFORD-ADDRESS, type = {Technical Report}, number = {SOL78-23}, year = 1978} @article{MurrPrie95, author = {W. Murray and F. J. Prieto}, title = {A Sequential Quadratic Programming Algorithm Using an Incomplete Solution of the Subproblem}, journal = SIOPT, volume = 5, number = 3, pages = {590--640}, year = 1995} @article{MurrWrig82, author = {W. Murray and M. H. Wright}, title = {Computation of the search direction in constrained optimization algorithms}, journal = MPS, volume = 16, number = {MAR}, pages = {62--83}, year = 1982} @book{Murt81, author = {B. A. Murtagh}, title = {Advanced Linear Programming}, publisher = MACGH, address = MACGH-ADDRESS, year = 1981} @article{MurtKaba87, author = {K. G. Murty and S. N. Kabadi}, title = {Some {NP}-complete problems in quadratic and nonlinear programming}, journal = MP, volume = 39, number = 2, pages = {117--129}, year = 1987} %%% N %%% @inproceedings{Nabo87, author = {N. Nabona}, title = {Computational results of {N}ewton's method with search along an approximate hook step curve for unconstrained minimization}, booktitle = {Actas I Seminario Internacional de Investigacion Operativa del Pais Vasco}, editor = {J. P. Vilaplana and L. F.
Escudero}, publisher = {Argitaparen Zerbitzua Euskal Herriko Unibersitatea}, address = {Bilbao, Spain}, pages = {21--54}, year = 1987, summary = {Numerical experience is presented for an unconstrained optimization method in which a trust-region step is computed by minimizing the model along a Bezier curve that approximates the trajectory of exact minimizers as a function of the trust-region radius.}} @manual{NAG98, author = {{NAG}}, title = {Fortran Library Mark 18}, organization = {NAG Ltd.}, address = {Oxford, England}, year = 1998} @book{Nagu93, author = {A. Nagurney}, title = {Network Economics: a Variational Inequality Approach}, publisher = KLUWER, address = KLUWER-ADDRESS, series = {Advances in Computational Economics}, year = 1993} @article{Nash84, author = {S. G. Nash}, title = {{N}ewton-type Minimization via the {L}anczos Method}, journal = SINUM, volume = 21, number = 4, year = 1984} @article{Nash85a, author = {S. G. Nash}, title = {Preconditioning of truncated {N}ewton methods}, journal = SISSC, volume = 6, number = 3, pages = {599--618}, year = 1985} @article{NashNoce91, author = {S. G. Nash and J. Nocedal}, title = {A Numerical Study of the Limited Memory {BFGS} Method and the Truncated-{N}ewton Method for Large-Scale Optimization}, journal = SIOPT, volume = 1, number = 3, pages = {358--372}, year = 1991} @article{NashSofe90, author = {S. G. Nash and A. Sofer}, title = {Assessing a search direction within a truncated-{N}ewton method}, journal = {Operations Research Letters}, volume = 9, number = 4, pages = {219--221}, year = 1990} @article{NashSofe93, author = {S. G. Nash and A. Sofer}, title = {A barrier method for large-scale constrained optimization}, journal = ORSAC, volume = 5, number = 1, pages = {40--53}, year = 1993} @techreport{NashSofe98, author = {S. G. Nash and A. 
Sofer}, title = {Why Extrapolation Helps in Barrier Methods}, institution = {Operations Research and Engineering Department, George Mason University}, address = {Fairfax, USA}, month = {September}, year = 1998} @article{NeldMead65, author = {J. A. Nelder and R. Mead}, title = {A simplex method for function minimization}, journal = COMPJ, volume = 7, pages = {308--313}, year = 1965} @article{NelsPapa98, author = {S. A. Nelson and P. Y. Papalambros}, title = {A modified trust region algorithm for hierarchical NLP}, journal = {Structural Optimization}, volume = 16, number = 1, pages = {19--28}, year = 1998, abstract = {Large-scale optimization problems frequently require the exploitation of structure in order to obtain efficient and reliable solutions. Successful algorithms for general nonlinear programming problems with theoretical underpinnings do not usually accommodate any additional properties of the original algorithm.}, summary = {Modifications are made to a trust-region algorithm to take advantage of the hierarchical structure in large-scale optimization problems without compromising the theoretical properties of the original algorithm.}} @article{NemiSche96, author = {A. Nemirovskii and K. Scheinberg}, title = {Extension of {K}armarkar's algorithm onto convex quadratically constrained quadratic problems}, journal = MPA, volume = 72, number = 3, pages = {273--289}, year = 1996} @book{NestNemi94, author = {Y. Nesterov and A. Nemirovskii}, title = {Interior-Point Polynomial Algorithms in Convex Programming}, publisher = SIAM, address = SIAM-ADDRESS, year = 1994} @book{NobeDani77, author = {B. Noble and J. W. Daniel}, title = {Applied Linear Algebra}, publisher = PH, address = PH-ADDRESS, edition = {second}, year = 1977} @article{NestTodd98, author = {Y. Nesterov and M. J. Todd}, title = {Primal-Dual Interior-Point Methods for Self-Scaled Cones}, journal = SIOPT, volume = 8, number = 2, year = 1998} @article{Noce80, author = {J. 
Nocedal}, title = {Updating quasi-{N}ewton matrices with limited storage}, journal = MC, volume = 35, pages = {773--782}, year = 1980} @inproceedings{Noce84, author = {J. Nocedal}, title = {Trust Region Algorithms for Solving Large Systems of Nonlinear Equations}, booktitle = {Innovative Methods for Nonlinear Problems}, editor = {W. Liu and T. Belytschko and K. C. Park}, publisher = {Pineridge Press}, pages = {93--102}, year = 1984, abstract = {We review various algorithms for solving large sparse systems of nonlinear equations, with emphasis in those methods based on trust regions. We consider only those cases when derivatives are available, either analytically or by finite differences. We describe a new algorithm, which is based on minimizing the $\ell_1$-norm of the linearized equations within an $\ell_{\infty}$-norm trust region. The resulting problems are solved by means of sparse linear programming techniques}, summary = {Algorithms for solving large sparse systems of nonlinear equations are reviewed, with emphasis on trust-region methods. An algorithm is described in which the $\ell_1$ norm of the linearized equations is minimized within an $\ell_{\infty}$ norm trust region. The resulting subproblems are solved by means of sparse linear programming techniques}} @inproceedings{Noce86, author = {J. Nocedal}, title = {Viewing the Conjugate-Gradient Algorithm as a Trust Region Method}, booktitle = {Numerical Analysis}, editor = {J. P. Hennart}, publisher = SPRINGER, address = SPRINGER-ADDRESS, series = {Lecture Notes in Mathematics}, volume = 1230, pages = {118--126}, year = 1986, summary = {It is shown how a trust-region subproblem, based upon a memoryless secant-updating formula, may be solved very efficiently. A connection is made between such subproblems and the conjugate-gradient method.}} @article{NoceOver85, author = {J. Nocedal and M. L. 
Overton}, title = {Projected {H}essian updating algorithms for nonlinearly constrained optimization}, journal = SINUM, volume = 22, pages = {821--850}, year = 1985} @inproceedings{NoceYuan98, author = {J. Nocedal and Y. Yuan}, title = {Combining trust region and line search techniques}, crossref = {Yuan98}, pages = {153--176}, abstract = {We propose an algorithm for nonlinear optimization that employs both trust region techniques and line searches. Unlike traditional trust region methods, our algorithm does not resolve the subproblem if the trial step results in an increase in the objective function, but instead performs a backtracking line search from the failed point. Backtracking can be done along a straight line or along a curved path. We show that the new algorithm preserves the strong convergence properties of trust region methods. Numerical results are also presented.}, summary = {An algorithm for nonlinear optimization that employs both trust-region techniques and linesearches is proposed. This algorithm does not resolve the subproblem if the trial step results in an increase in the objective function, but instead performs a backtracking linesearch from the failed point. Backtracking can be done along a straight line or along a curved path. It is shown that the algorithm preserves the strong convergence properties of trust-region methods. Numerical results are presented.}} @article{Nota93, author = {Y. Notay}, title = {On the convergence rate of the conjugate gradients in presence of rounding errors}, journal = NUMMATH, volume = 65, number = 3, pages = {301--317}, year = 1993} %%% O %%% @article{OLea80, author = {D. P. O'Leary}, title = {A generalized conjugate gradient algorithm for solving a class of quadratic programming problems}, journal = LAA, volume = 34, pages = {371--399}, year = 1980} @phdthesis{Omoj89, author = {E. O. 
Omojokun}, title = {Trust region algorithms for optimization with nonlinear equality and inequality constraints}, school = {University of Colorado}, address = {Boulder, Colorado, USA}, year = 1989, abstract = {We consider the general nonlinear optimization problem defined as, minimize a nonlinear real-valued function of several variables, subject to a set of nonlinear equality and inequality constraints. This class of problems arise in many real life applications, for example in engineering design, chemical equilibrium, simulation and data fitting. In this research, we present algorithms that use the trust region technique to solve these problems. First, we develop an algorithm for solving the nonlinear equality constrained optimization, then we generalize the algorithm to handle the inclusion of nonlinear inequality constraints in the problem. The algorithms use the successive quadratic programming (SQP) approach and trust region technique. We define a model subproblem which minimizes a quadratic approximation of the Lagrangian subject to modified relaxed linearizations of the problem nonlinear constraints and a trust region constraint. Inequality constraints are handled by a compromise between an active set strategy and IQP subproblem solution technique. An analysis which describes the local convergence properties of our algorithms is presented. The algorithms are implemented and the model minimization is done approximately by using the dogleg approach. Numerical results are presented and compared with the results of a popular linesearch method. Some examples are presented in which the ability of our method to use directions of negative curvature results in greater reliability. Results of the numerical experiments indicate that our method is very robust and reasonably efficient.}, summary = {Trust-region algorithms for the general nonlinear optimization problem are presented. These handle both the equality and inequality constrained cases. 
The algorithms use the SQP approach combined with the trust-region technique. A model subproblem is defined in which a quadratic approximation of the Lagrangian is minimized subject to modified relaxed linearizations of the problem nonlinear constraints and a trust-region constraint. Inequality constraints are handled by a compromise between an active set strategy and IQP subproblem solution techniques. A local convergence analysis is presented. Numerical tests indicate that the proposed methods are very robust and reasonably efficient.}} @book{OrteRhei70, author = {J. M. Ortega and W. C. Rheinboldt}, title = {Iterative Solution of Nonlinear Equations in Several Variables}, publisher = AP, address = AP-ADDRESS, year = 1970} @article{Osbo76, author = {M. R. Osborne}, title = {Nonlinear least squares---the {L}evenberg-{M}arquardt algorithm revisited}, journal = {Journal of the Australian Mathematical Society, Series B}, volume = 19, pages = {343--357}, year = 1976, summary = {A rule for choosing the Levenberg-Morrison-Marquardt parameter is described, which permits a satisfactory convergence theorem to be proved, and is capable of satisfactory computer implementation.}} @book{Osbo85, author = {M. R. Osborne}, title = {Finite Algorithms for Optimization and Data Analysis}, publisher = WILEY, address = WILEY-ADDRESS, year = 1985} @article{Osbo87, author = {M. R. Osborne}, title = {Estimating nonlinear models by maximum likelihood for the exponential family}, journal = SISSC, volume = 8, number = 3, pages = {446--456}, year = 1987, abstract = {Many but not all attractive properties of generalized linear models associated with the exponential family of distributions are destroyed by nonlinearity. A consequence is that ensuring the stability of a computational process for maximizing the likelihood becomes relatively more important. 
Here it is shown that trust region methods for solving nonlinear least squares problems are readily adapted to maximize likelihoods based on the exponential family, and that the nice theoretical results available for the nonlinear least squares problem also generalize.}, summary = {Trust-region methods for solving nonlinear least-squares problems are adapted to maximize likelihoods based on the exponential family, while preserving their theoretical properties.}} @techreport{Osbo98, author = {M. R. Osborne}, title = {Variable selection and control in least squares problems}, institution = {Centre for Mathematics and its Applications, School of Mathematical Sciences, Australian National University}, address = {Canberra, Australia}, number = {MRR 047-98}, year = 1998, abstract = {The classical technique of stepwise regression provides a paradigm for variable selection in the linear least squares problem. Trust region method which control the size of the correction to the current solution estimate prove attractive for nonlinear least squares problems because of their good global convergence behaviour. Recently there has been a convergence of these techniques with the realisation that the $\ell_1$ trust region method also provides a form of variable selection. These results are reviewed here, and computational methods discussed.}, summary = {Results are reviewed showing that the $\ell_1$ trust-region method provides a form of variable selection for least-squares problems and computational methods are discussed.}} @techreport{OutrZoweSchr91, author = {J. Outrata and J. Zowe and H. Schramm}, title = {Bundle trust methods: {F}ortran codes for nondifferentiable optimization}, institution = {DFG}, address = {Germany}, number = 269, year = 1991} %%% P %%% @phdthesis{Paig71, author = {C. C. Paige}, title = {The computation of eigenvalues and eigenvectors of very large sparse matrices}, school = {University of London}, year = 1971} @article{PaigSaun75, author = {C. C. 
Paige and M. A. Saunders}, title = {Solution of sparse indefinite systems of linear equations}, journal = SINUM, volume = 12, number = 4, pages = {617--629}, year = 1975} @article{PaigSaun82a, author = {C. C. Paige and M. A. Saunders}, title = {{LSQR}: an algorithm for sparse linear equations and sparse least squares}, journal = TOMS, volume = 8, pages = {43--71}, year = 1982} @article{Pang81, author = {J. S. Pang}, title = {An equivalence between two algorithms for quadratic programming}, journal = MP, volume = 20, number = 2, pages = {152--165}, year = 1981} @inproceedings{Pang95, author = {J. S. Pang}, title = {Complementarity problems}, booktitle = {Handbook of Global Optimization}, editor = {R. Horst and P. Pardalos}, publisher = KLUWER, address = KLUWER-ADDRESS, pages = {271--338}, year = 1995} @article{PaniTits91, author = {E. Panier and A. L. Tits}, title = {Avoiding the {M}aratos effect by means of a nonmonotone linesearch {I}. General constrained problems}, journal = SINUM, volume = 28, pages = {1183--1195}, year = 1991} @article{Pant88, author = {J. F. A. Pantoja}, title = {Differential dynamic-programming and {N}ewton method}, journal = {International Journal of Control}, volume = 47, number = 5, pages = {1539--1553}, year = 1988} @article{PantMayn91, author = {J. F. A. Pantoja and D. Q. Mayne}, title = {Exact penalty functions with simple updating of the penalty parameter}, journal = JOTA, volume = 69, pages = {441--467}, year = 1991} @book{PapaStei82, author = {C. H. Papadimitriou and K. Steiglitz}, title = {Combinatorial Optimization}, publisher = PH, address = PH-ADDRESS, year = 1982} @book{Parl80, author = {B. N. Parlett}, title = {The Symmetric Eigenvalue Problem}, publisher = PH, address = PH-ADDRESS, year = 1980, note = {Reprinted as \emph{Classics in Applied Mathematics 20}, SIAM, Philadelphia, USA, 1998}} @article{ParlReid81, author = {B. N. Parlett and J. K.
Reid}, title = {Tracking the progress of the {L}anczos algorithm for large symmetric eigenproblems}, journal = JIMA, volume = 1, pages = {135--155}, year = 1981} @book{Patr94, author = {M. Patriksson}, title = {The Traffic Assignment Problem: Models and Methods}, publisher = {VSP}, address = {Utrecht, The Netherlands}, year = 1994} @book{Patr98, author = {M. Patriksson}, title = {Nonlinear Programming and Variational Inequality Problems, a Unified Approach}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1998} @article{Peng96, author = {J. Peng}, title = {Unconstrained Methods for generalized nonlinear complementarity and variational inequality problems}, journal = JCM, volume = 14, number = 2, pages = {99--107}, year = 1996, abstract = {In this paper, we construct unconstrained methods for the generalized nonlinear complementarity problem and variational inequalities. Properties of the correspondent unconstrained optimization problem are studied. We apply these methods to the subproblems in trust region method, and study their interrelationships. Numerical results are also presented.}, summary = {Unconstrained methods for the generalized nonlinear complementarity problem and variational inequalities are constructed. Properties of the corresponding unconstrained optimization problem are studied. These methods are applied to the subproblems in trust-region methods, and their interrelationships studied. Numerical results are presented.}} @inproceedings{Peng98, author = {J. Peng}, title = {A Smoothing Function and Its Applications}, crossref = {FukuQi98}, pages = {293--316}} @article{PengYuan97, author = {J. Peng and Y. Yuan}, title = {Optimality conditions for the minimization of a quadratic with two quadratic constraints}, journal = SIOPT, volume = 7, number = 3, pages = {579--594}, year = 1997, abstract = {The trust region method has been proven to be very successful in both unconstrained and constrained optimization. 
It requires the global minimum of a general quadratic function subject to ellipsoid constraints. In this paper, we generalize the trust region subproblem by allowing two general quadratic constraints. Conditions and properties of its solution are discussed.}, summary = {The trust region subproblem is generalized by allowing two general quadratic constraints. Conditions and properties of its solution are discussed.}} @techreport{Perr76, author = {A. Perry}, title = {A modified conjugate gradient algorithm}, institution = {Center for Mathematical Studies in Economics and Management Science, Northwestern University}, address = NWU-ADDRESS, number = 229, year = 1976} @article{PetzRenMaly97, author = {L. R. Petzold and Y. H. Ren and T. Maly}, title = {Regularization of higher-index differential-algebraic equations with rank-deficient constraints}, journal = SISC, volume = 18, number = 3, pages = {753--774}, year = 1997, abstract = {In this paper we present several regularizations for higher-index differential-algebraic equations with rank-deficient or singular constraints. These types of problems arise, for example, in the solution of constrained mechanical systems, when a mechanism's trajectory passes through or near a kinematic singularity. We derive a class of regularizations for these problems which is based on minimization of the norm of the constraints. The new regularizations are analogous to trust-region methods of numerical optimization. We give convergence results for the regularizations and present some numerical experiments which illustrate their effectiveness.}, summary = {Several regularizations for higher-index differential-algebraic equations with rank-deficient or singular constraints are presented. These types of problems arise in the solution of constrained mechanical systems, when a mechanism's trajectory passes through or near a kinematic singularity. 
Regularizations for these problems are derived which are based on minimization of the norm of the constraints. They are analogous to trust-region methods. Convergence results are given and numerical experiments are presented.}} @article{PhamWangYass90, author = {Pham Dinh, T. and S. Wang and A. Yassine}, title = {Training multi-layered neural network with a trust-region based algorithm}, journal = RAIRO-MM, volume = 24, number = 4, pages = {523--553}, year = 1990, abstract = {In this paper, we first show how the problem of training a neural network is modelized as an optimization problem; and the generally used training algorithm. Then we propose a new algorithm based on a trust-region technique which is very efficient for non-convex optimization problems. Experimental results show that the new algorithm is much faster and robust compared with GBP. It makes the design of neural net architecture much less problem-dependent.}, summary = {The problem of training a neural network is modelled as an optimization problem and gradient backpropagation, the most commonly used training method, is described. A trust-region algorithm is then proposed. Experimental results show that the algorithm is much faster and robust compared with gradient backpropagation. It makes the design of neural net architecture much less problem-dependent.}} @article{PhamHoai95, author = {Pham Dinh, T. and Le Thi, H. A.}, title = {{L}agrangian stability and global optimality on nonconvex quadratic minimization over {E}uclidean balls and spheres}, journal = {Journal of Convex Analysis}, volume = 2, number = {1--2}, pages = {263--276}, year = 1995, abstract = {We prove in this paper the stability of the Lagrangian duality in nonconvex quadratic minimization over Euclidean balls and spheres. As direct consequences we state both global optimality conditions in these problems and detailed descriptions of the structure of their solution sets.
These results are essential for devising solution algorithms.}, summary = {Stability of the Lagrangian duality in non-convex quadratic minimization over Euclidean balls and spheres is proved. Global optimality conditions for these problems are deduced together with the detailed descriptions of the structure of their solution sets.}} @article{PhamHoai98, author = {Pham Dinh, T. and Le Thi, H. A.}, title = {{D.C.} optimization algorithm for solving the trust-region subproblem}, journal = SIOPT, volume = 8, number = 2, pages = {476--505}, year = 1998, abstract = {This paper is devoted to the framework of d.c. (difference of convex functions) optimization: d.c. duality, local and global optimalities in d.c. programming, the d.c. algorithm (DCA) and its application to solving the trust-region problem. The DCA is an iterative method which is quite different from well-known related algorithms. Thanks to the particular structure of the problem, the DCA becomes very simple (it requires only matrix-vector products) and, in practice, converges to a global solution. For checking the global optimality of solutions provided by the DCA the quite inexpensive Implicitly Restarted Lanczos method of Sorensen has been used. A simple numerical procedure has been introduced (in the case of nonglobal solutions) in order to find a feasible point having a smaller objective value and to restart the DCA with this point. It has been stated that in the nonconvex case (problem ($Q_1$) with $A$ being nonpositive semidefinite) the DCA with at most $2m+2$ restartings ($m$ is the number of distinct negative eigenvalues of $A$) requires only matrix-vector products too and converges to a global solution. Numerical simulations proved the robustness and the efficiency of the DCA with respect to related standard methods, especially in large scale problems.}, summary = {The framework of d.c. (difference of convex functions) optimization is presented, including d.c. 
duality, local and global optimality, the d.c. algorithm (DCA) and its application to solving the trust-region problem, which only requires matrix-vector products. In practice, it converges to a global solution, which is checked by using the Implicitly Restarted Lanczos method of Sorensen. If a nonglobal solution is found, a procedure is proposed, that finds a feasible point having a smaller objective value at which the DCA may then be restarted. It is proved that, in the non-convex case, the DCA needs at most $2m+2$ restarts to converge to a global solution, where $m$ is the number of distinct negative eigenvalues of the Hessian. The robustness and efficiency of the DCA is illustrated by numerical experiments.}} @article{PhamPhonHoraQuan97, author = {Pham Dinh, T. and T. Q. Phong and R. Horaud and L. Quan}, title = {Stability of {L}agrangian duality for nonconvex quadratic programming. Solution methods and applications in computer vision}, journal = RAIRO-MM, volume = 31, number = 1, pages = {57--90}, year = 1997, abstract = {The problem of minimizing a quadratic form over a ball centered at the origin is considered. The stability of Lagrangian duality is established and complete characterizations of a global optimal solution are given. On the basis of this theoretical study, two principal solution methods are presented. An important application of nonconvex quadratic programming is the computation of rite step to a new iterate in the Trust Region (TR) approach methods which are known to be efficient for nonlinear optimization problems. Also, we discuss the mathematical models of some important problems encountered in Computer Vision. Most of them can be formulated as a minimization of a sum of squares of nonlinear functions. A practical TR-based algorithm is proposed for nonlinear least squares problem which seems to be well suited for our applications.}, summary = {The problem of minimizing a quadratic form over a ball is considered. 
The stability of Lagrangian duality is established and complete characterizations of a global optimal solution are given. Two solution methods are deduced, with application to the trust-region subproblem. Mathematical models of some important problems encountered in computer vision are discussed, which can be formulated as a minimization of a sum of squares of nonlinear functions. A practical trust-region based algorithm is proposed for the nonlinear least-squares problem which seems to be well suited to the computer vision applications.}} @article{Phan82, author = {Phan huy Hao, E.}, title = {Quadratically constrained quadratic programming: some applications and a method for solution}, journal = {Zeitschrift f\"{u}r Operations Research}, volume = 26, number = 3, pages = {105--119}, year = 1982, abstract = {(no abstract)}, summary = {A method is proposed for the solution of quadratically constrained quadratic programming.}} @inproceedings{PhonHoraYassPham93, author = {T. Q. Phong and R. Horaud and A. Yassine and Pham Dinh, T.}, title = {Optimal estimation of object pose from a single perspective view}, booktitle = {1993 Proceedings Fourth International Conference on Computer Vision}, publisher = {IEEE Computer Society Press}, address = {Los Alamitos, CA, USA}, pages = {534--539}, year = 1993, abstract = {The authors present a method for robustly and accurately estimating the rotation and translation between a camera and a 3-D object from point and line correspondences. First they devise an error function and then show how to minimize this error function. The quadratic nature of this function is made possible by representing rotation and translation with a dual number quaternion. A detailed account is provided of the computational aspects of a trust-region optimization method. This method compares favourably with Newton's method, which has extensively been used to solve the problem, and with Faugeras-Toscani's linear method (1986) for calibrating a camera. 
Some experimental results are presented which demonstrate the robustness of the method with respect to image noise and matching errors.}, summary = {A method for robustly and accurately estimating the rotation and translation between a camera and a 3-D object from point and line correspondences. An error function is defined and then minimized. The quadratic nature of this function is made possible by representing rotation and translation with a dual number quaternion. A detailed account is provided of the computational aspects of a trust-region optimization method. This method compares favourably with Newton's method, which has extensively been used to solve the problem, and with Faugeras-Toscani's linear method (1986) for calibrating a camera. Experimental results are presented which demonstrate the robustness of the method with respect to image noise and matching errors.}} @article{PhonHoraYassPham95, author = {T. Q. Phong and R. Horaud and A. Yassine and Pham Dinh, T.}, title = {Object pose from 2-D to 3-D Point and Line Correspondences}, journal = {International Journal of Computer Vision}, volume = 15, number = 3, pages = {225--243}, year = 1995, abstract = {In this paper we present a method for optimally estimating the rotation and translation between a camera and a 3-D object from point and/or line correspondences. First we devise an error function and second we show how to minimize this error function. The quadratic nature of this function is made possible by representing rotation and translation with a dual number quaternion. We provide a detailed account of the computational aspects of a trust-region optimization method. This method compares favourably with Newton's method which has extensively been used to solve the problem at hand, with the Faugeras-Toscani linear method for calibrating a camera, and with the Levenberg-Marquardt non-linear optimization method. 
Finally we present some experimental results which demonstrate the robustness of our method with respect to image noise and matching errors.}, summary = {A method for optimally estimating the rotation and translation between a camera and a 3-D object from point and/or line correspondences is presented. An error function is devised. The quadratic nature of the error function is obtained by representing rotation and translation with a dual number quaternion. We provide a detailed account of the computational aspects of a trust-region optimization method to minimize the error. This method compares favourably with Newton's method, with the Faugeras-Toscani linear method for calibrating a camera, and with the Levenberg-Morrison-Marquardt nonlinear optimization method. Experimental results demonstrate the robustness of the method with respect to image noise and matching errors.}} @inproceedings{PiepMcMuLipk98, author = {J. A. Piepmeier and G. V. McMurray and H. Lipkin}, title = {Tracking a moving target with model independent visual servoing: a predictive estimation approach}, booktitle = {Proceedings 1998 IEEE International Conference on Robotics and Automation}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 3, pages = {2652--2657}, year = 1998, abstract = {Target tracking by model independent visual servo control is achieved by augmenting quasi-Newton trust region control with target prediction. Model independent visual servo control is defined using visual feedback to control the robot without precise kinematic and camera models. While a majority of the research assumes a known robot and camera model, there is a paucity of literature addressing model independent control. In addition, most researches have focused primarily on static targets. The work presented here demonstrates the use of predictive filters to improve the performance of the control algorithm for linear and circular target motions.
The results show a performance of the same order of magnitude as compared to some model based visual servo control research. Certain limitations to the algorithm are also discussed.}, summary = {Target tracking by model independent visual servo control is achieved by augmenting quasi-Newton trust-region control with target prediction. Model independent visual servo control is defined using visual feedback to control the robot without precise kinematic and camera models. The use of predictive filters to improve the performance of the control algorithm for linear and circular target motions is demonstrated. The results show a performance of the same order of magnitude as compared to some model based visual servo control research. Certain limitations to the algorithm are also discussed.}} @article{Piet69, author = {T. Pietrzykowski}, title = {An Exact Potential Method for Constrained Maxima}, journal = SINUM, volume = 6, number = 2, pages = {299--304}, year = 1969} %abstract = {The main result of the paper consists of the theorem that % under certain, natural assumptions the local conditional % maximum $x_0$ of the function $f$ on the set \[A = \{x\in % \Re^n | \phi_i(x)\geq 0, \psi_j(x) = 0, i=1,\ldots, % k,j=1,\ldots,l\}\] is identical with the unconditional % maximum of the potential function \[p(x,\mu) = \mu f(x) + % \sum_{i=1}^k \neg(\phi_i(x))- \sum_{j=1}^l | \psi(x)|, % x\in \Re^n, \mu \geq 0,\] for $\mu$ sufficiently small. 
% There is also provided a draft of a modified gradient % procedure for maximizing the potential $p(x,\mu)$ since % it is generally nonsmooth even for differentiable % $f, \phi_i$ and $\psi_j$.}, %summary = {This paper presents a theorem that under certain, natural % assumptions the local conditional maximum $x_0$ of the % function $f$ on the set $A = \{x\in \Re^n | \phi_i(x)\geq 0, % \psi_j(x) = 0, i=1,\ldots, k,j=1,\ldots,l\}$ is identical % with the unconditional maximum of the potential function % $p(x,\mu) = \mu f(x) + \sum_{i=1}^k \neg(\phi_i(x))- % \sum_{j=1}^l | \psi(x)|, x\in \Re^n, \mu \geq 0,$ for $\mu$ % sufficiently small. In addition an outline of a modified % gradient procedure for maximizing the potential $p(x,\mu)$ % is given. The modification is necessary since the % potential function is generally non-smooth even for % differentiable $f, \phi_i$ and $\psi_j$.}} @article{Plan99, author = {T. D. Plantenga}, title = {A trust-region method for nonlinear programming based on primal interior point techniques}, journal = SISC, volume = 20, number = 1, pages = {282--305}, year = 1999, abstract = {This paper describes a new trust region method for solving large-scale optimization problems with nonlinear equality and inequality constraints. The new algorithm employs interior-point techniques from linear programming, adapting them for more general nonlinear problems. A software implementation based entirely on sparse matrix methods is described. The software handles infeasible start points, identifies the active set of constraints at a solution, and can use second derivative information to solve problems. Numerical results are reported for large and small problems, and a comparison made with other large-scale codes.}, summary = {A trust region method for large-scale optimization problems with nonlinear equality and inequality constraints is described. 
The algorithm employs interior-point techniques from linear programming, adapting them for more general nonlinear problems. A software implementation based entirely on sparse matrix methods is described. The software handles infeasible start points, identifies the active set of constraints at a solution, and can use second derivative information. Numerical results are reported for large and small problems, and a comparison made with other large-scale codes.}} @book{Pola97, author = {E. Polak}, title = {Optimization. Algorithms and Consistent Approximations}, publisher = SPRINGER, address = SPRINGER-ADDRESS, series = {Applied Mathematical Sciences, Volume 124}, year = 1997} @article{PolaTits80, author = {E. Polak and A. L. Tits}, title = {A globally convergent implementable multiplier method with automatic penalty limitation}, journal = {Applied Mathematics and Optimization}, volume = 6, pages = {335--360}, year = 1980} @article{PoliQi95, author = {R. Poliquin and L. Qi}, title = {Iteration Functions in some nonsmooth optimization algorithms}, journal = MOR, volume = 20, number = 2, pages = {479--496}, year = 1995, abstract = {Recently, several globally convergent model algorithms based on iteration functions have been proposed for solving nonsmooth optimization problems. In particular, \citebb{PangHanRang91} proposed such an algorithm for minimizing a locally Lipschitzian function. We determine properties of iteration functions (calculus, existence); we also identify characteristics of functions that possess iteration functions. We show that a locally Lipschitzian function has a Pang-Han-Rangaraj iteration function only when the function is pseudo-regular (in the sense of Borwein), and that a subsmooth (lower-C-1)function always has a Pang-Han-Rangaraj iteration function.}, summary = {Properties of iteration functions (calculus, existence) arising in model trust-region algorithms for non-smooth problems are analyzed. 
It is shown that a locally Lipschitzian function has a Pang-Han-Rangaraj iteration function only when the function is pseudo-regular (in the sense of Borwein), and that a sub-smooth (lower-C-1) function always has a Pang-Han-Rangaraj iteration function.}} @article{PoljWolk95, author = {S. Poljack and H. Wolkowicz}, title = {Convex relaxations of (0, 1)--Quadratic Programming}, journal = MOR, volume = 20, number = 3, pages = {550--561}, year = 1995} @article{PoljRendWolk95, author = {S. Poljack and F. Rendl and H. Wolkowicz}, title = {A recipe for semidefinite relaxation for (0,1)--quadratic programming}, journal = {Journal of Global Optimization}, volume = 7, number = 1, pages = {51--73}, year = 1995, abstract = {We review various relaxations of (0,1)-quadratic programming problems. These include semidefinite programs, parametric trust region problems and concave quadratic maximization. All relaxations that we consider lead to efficiently solvable problems. The main contributions of the paper are the following. Using Lagrangian duality, we prove equivalence of the relaxations in a unified and simple way. Some of these equivalences have been known previously, but our approach leads to short and transparent proofs. Moreover we extend the approach to the case of equality constraints into the objective function. We show how this technique can be applied to the Quadratic Assignment Problem, the Graph Partition Problem and the Max-Clique Problem. Finally, we show our relaxation to be best possible among all quadratic majorants with zero trace.}, summary = {Various relaxations of (0,1)-quadratic programming problems are reviewed, including semidefinite programs, parametric trust-region problems and concave quadratic maximization. All lead to efficiently solvable problems. Using Lagrangian duality, equivalence of the relaxations is proved in a unified way. The approach is extended to the case where equality constraints are present. 
It is shown how this technique can be applied to the Quadratic Assignment Problem, the Graph Partition Problem and the Max-Clique Problem. The relaxation is the best possible among all quadratic majorants with zero trace.}} @article{Poly69, author = {B. T. Polyak}, title = {The conjugate gradient method in extremal problems}, journal = {U.S.S.R. Computational Mathematics and Mathematical Physics}, volume = 9, pages = {94--112}, year = 1969} @misc{Poly82, author = {R. Polyak}, title = {Smooth optimization methods for solving nonlinear extremal and equilibrium problems with constraints}, howpublished = {Presentation at the IXth International Symposium on Mathematical Programming, Bonn}, month = {August}, year = 1982} @article{Poly90, author = {R. Polyak}, title = {Modified barrier functions (theory and methods)}, journal = MP, volume = 54, number = 2, pages = {177--222}, year = 1992} @phdthesis{Ponc90, author = {D. B. Poncele\'{o}n}, title = {Barrier methods for large-scale quadratic programming}, school = STANFORD, address = STANFORD-ADDRESS, year = 1990} @inproceedings{PornFichMullZapf90, author = {F. Pornbacher and U. Fichter and G. Muller-Liebler and H. Zapf}, title = {A new method for an efficient optimization of MOS transistor models}, booktitle = {1990 IEEE International Symposium on Circuits and Systems}, publisher = {IEEE}, address = {New York, USA}, volume = 1, pages = {81--84}, year = 1990, abstract = {Two methods that are especially useful for an accurate optimization of complex transistor models are presented. The first method focuses on sample reduction before the optimization process. An algorithm is described which allows a reduction of the number of samples by a factor of 10 to 20 in an efficient way. The second method is a trust-region-type optimization algorithm which is especially designed for this application. A substantial part of it is a new algorithm for the calculation of the step length. 
Industrial examples demonstrating the quality of the algorithms are given.}, summary = {Two methods are described for accurate optimization of complex transistor models. The first focuses on sample reduction before the optimization process. An algorithm is described which allows a reduction of the number of samples by a factor of 10 to 20. The second method is a trust-region-type optimization algorithm which is especially designed for this application. Industrial examples demonstrate the quality of the algorithms.}} @inproceedings{Powe69, author = {M. J. D. Powell}, title = {A method for nonlinear constraints in minimization problems}, crossref = {Flet69}, pages = {283--298}} @inproceedings{Powe70a, author = {M. J. D. Powell}, title = {A New Algorithm for Unconstrained Optimization}, crossref = {RoseMangRitt70}, pages = {31--65}, abstract = {A new algorithm is described for calculating the least value of a given differentiable function of several variables. The user must program the evaluation of the function and its first derivatives. Some convergence theorems are given that impose very mild conditions on the objective function. These theorems, together with some numerical results, indicate that the new method may be preferable to current algorithms for solving many unconstrained minimization problems.}, summary = {A trust-region algorithm is described for unconstrained smooth minimization. Convergence theorems are given that impose very mild conditions on the objective function. These theorems, together with some numerical results, indicate that the method may be preferable to then current algorithms for solving unconstrained minimization problems.}} @techreport{Powe70b, author = {M. J. D. 
Powell}, title = {A {F}ortran Subroutine for Unconstrained Minimization Requiring First Derivatives of The Objective Function}, institution = HARWELL, address = HARWELL-ADDRESS, number = {R-6469}, year = 1970, summary = {Details of the implementation of the algorithm described in \citebb{Powe70a} are provided and numerical experiments are discussed. The Fortran code is given in appendix.}} @inproceedings{Powe70c, author = {M. J. D. Powell}, title = {A hybrid method for nonlinear equations}, crossref = {Rabi70}, pages = {87--114}, summary = {An algorithm for solving systems of nonlinear equations is described that does not require the evaluation of the Jacobian of the system. Instead, the derivatives are approximated by Broyden's quasi-Newton formula. The algorithm uses a Levenberg-Morrison-Marquardt procedure for computing a new iterate, together with a safeguarding technique that prevents any tendencies towards linear independence of steps. Convergence of the algorithm is proved to either a solution of the system or a local minimum of the norm of its residual. }} @inproceedings{Powe70d, author = {M. J. D. Powell}, title = {A {F}ortran subroutine for solving systems of nonlinear algebraic equations}, crossref = {Rabi70}, pages = {115--161}, summary = {Details of the implementation of the algorithm described in \citebb{Powe70c} are provided and numerical experiments are discussed. The Fortran code is given in appendix.}} @inproceedings{Powe75, author = {M. J. D. Powell}, title = {Convergence Properties of a Class of Minimization Algorithms}, crossref = {MangMeyeRobi75}, pages = {1--27}, abstract = {Many iterative algorithms for minimizing a function $F(x)=F(x_1,x_2\ldots,x_n)$ require first derivatives of $F(x)$ to be calculated, but they maintain an approximation to the second derivative matrix automatically. In order that the approximation is useful, the change in $x$ made by each iteration is subject to a bound that is also revised automatically.
Some convergence theorems for a class of minimization algorithms of this type are presented, which apply to methods proposed by \citebb{Powe70a} and \citebb{Flet72}. This theory has the following three valuable features which are rather uncommon. There is no need for the starting vector $x_1$ to be close to the solution. The function $F(x)$ need not be convex. Superlinear convergence is proved even though the second derivative approximations may not converge to the true second derivatives at the solution.}, summary = {Some convergence theorems are presented for a class of minimization algorithms including methods proposed by \citebb{Powe70a} and \citebb{Flet72}. There is no need for the starting vector $x_1$ to be close to the solution. The function $f$ need not be convex. Superlinear convergence is proved even though the second derivative approximations may not converge to the true second derivatives at the solution.}} @inproceedings{Powe78, author = {M. J. D. Powell}, title = {A Fast Algorithm For Nonlinearly Constrained Optimization Calculations}, crossref = {Wats78}, pages = {144--157}} @inproceedings{Powe81, author = {M. J. D. Powell}, title = {An upper triangular matrix method for quadratic programming}, editor = {O. L. Mangasarian and R. R. Meyer and S. M. Robinson}, booktitle = {Nonlinear Programming, 2}, publisher = AP, address = AP-ADDRESS, year = 1981} @book{Powe81b, author = {M. J. D. Powell}, title = {Approximation Theory and Methods}, publisher = CUP, address = CUP-ADDRESS, year = 1981} @techreport{Powe83, author = {M. J. D. Powell}, title = {General algorithms for discrete nonlinear approximation calculations}, institution = DAMTP, address = DAMTP-ADDRESS, number = {DAMTP/NA2}, year = 1983} @article{Powe84, author = {M. J. D. 
Powell}, title = {On the global convergence of trust region algorithms for unconstrained minimization}, journal = MP, volume = 29, number = 3, pages = {297--303}, year = 1984, abstract = {Many trust region algorithms for unconstrained optimization have excellent global convergence properties if their second derivative approximations are not too large (\bciteb{Powe75}). We consider how large these approximations have to be, if they prevent convergence when the objective function is bounded below and continuously differentiable. Thus we obtain a useful convergence result in the case when there is a bound on the second derivative approximations that depends linearly on the iteration number.}, summary = {Global convergence for trust-region methods in unconstrained optimization is obtained in the case when there is a bound on the Hessian approximations that depends linearly on the iteration number.}} @article{Powe85, author = {M. J. D. Powell}, title = {On the quadratic-programming algorithm of {G}oldfarb and {I}dnani}, journal = MPS, volume = 25, pages = {46--61}, year = 1985} @inproceedings{Powe87, author = {M. J. D. Powell}, title = {Methods for Nonlinear Constraints in Optimization Calculations}, crossref = {IserPowe87}, pages = {325--358}} @inproceedings{Powe93, author = {M. J. D. Powell}, title = {Log barrier methods for semi-infinite programming calculations}, booktitle = {Advances on Computer Mathematics and its Applications}, editor = {E. A. Lipitakis}, publisher = WSP, address = WSP-ADDRESS, pages = {1--21}, year = 1993} @inproceedings{Powe94a, author = {M. J. D. Powell}, title = {A direct search optimization method that models the objective and constraint functions by linear interpolation}, crossref = {GomeHenn94}, pages = {51--67}} @misc{Powe94b, author = {M. J. D. 
Powell}, title = {A direct search optimization method that models the objective by quadratic interpolation}, howpublished = {Presentation at the 5th Stockholm Optimization Days, Stockholm}, year = 1994} @misc{Powe96, author = {M. J. D. Powell}, title = {Trust region methods that employ quadratic interpolation to the objective function}, howpublished = {Presentation at the 5th SIAM Conference on Optimization, Victoria}, year = 1996} @inproceedings{Powe98, author = {M. J. D. Powell}, title = {The use of band matrices for second derivative approximations in trust region algorithms}, crossref = {Yuan98}, pages = {3--28}, abstract = {In many trust region algorithms for optimization calculations, each iteration seeks a vector $d \in \Re^n$ that solves the linear system of equations $(B+\lambda I) d=- g$, where $B$ is a symmetric estimate of a second derivative matrix, $I$ is the unit matrix, $g$ is a known gradient vector, and $\lambda$ is a parameter that controls the length of $d$. Several values of $\lambda$ may be tried on each iteration, and, when there is no helpful sparsity in $B$, it is usual for each solution to require $O(n^3)$ operations. However, if an orthogonal matrix $\Omega$ is available such that $M=\Omega^T B \Omega$ is an $n \times n$ matrix of bandwidth $2s+1$, then $\Omega^Td$ can be calculated in only $O(ns^2)$ operations for each new $\lambda$ by writing the system in the form $(M + \lambda I)(\Omega^Td)=- \Omega^Tg$. We find unfortunately, that the construction of $M$ and $\Omega$ from $B$ is usually more expensive than the solution of the original system, but in variable metric and quasi-Newton algorithms for unconstrained optimization, each iteration changes $B$ by a matrix whose rank is at most two, and then updating techniques can be applied to $\Omega$. Thus it is possible to reduce the average work per iteration from $O(n^3)$ to $O(n^{7/3})$ operations.
Here the elements of each orthogonal matrix are calculated explicitly, but instead one can express the orthogonal matrix updates as products of Givens rotations, which allows the average work per iteration to be only $O(n^{11/5})$ operations. Details of procedures that achieve these savings are described, and the $O(n^{7/3})$ complexity is confirmed by numerical results.}, summary = {In many trust-region algorithms, each iteration seeks a vector $d \in \Re^n$ that solves the linear system of equations $(B + \lambda I) d=- g$, where $B$ is a symmetric estimate of a second derivative matrix, $g$ is a known gradient vector, and $\lambda$ is a parameter that controls the length of $d$. Several values of $\lambda$ may be tried on each iteration, and, when there is no helpful sparsity in $B$, it is usual for each solution to require $O(n^3)$ operations. However, if an orthogonal matrix $\Omega$ is available such that $M=\Omega^T B \Omega$ is an $n \times n$ matrix of bandwidth $2s+1$, then $\Omega^Td$ can be calculated in only $O(ns^2)$ operations for each new $\lambda$ by writing the system in the form $(M + \lambda I)(\Omega^Td)=- \Omega^Tg$. Unfortunately, it is found that the construction of $M$ and $\Omega$ from $B$ is usually more expensive than the solution of the original system, but in variable metric and quasi-Newton algorithms for unconstrained optimization, each iteration changes $B$ by a matrix whose rank is at most two, and then updating techniques can be applied to $\Omega$. Thus it is possible to reduce the average work per iteration from $O(n^3)$ to $O(n^{7/3})$ operations. Here the elements of each orthogonal matrix are calculated explicitly, but instead one can express the orthogonal matrix updates as products of Givens rotations, which allows the average work per iteration to be only $O(n^{11/5})$ operations.
Details of procedures that achieve these savings are described, and the $O(n^{7/3})$ complexity is confirmed by numerical results.}} @article{Powe98b, author = {M. J. D. Powell}, title = {Direct search algorithms for optimization calculations}, journal = {Acta Numerica}, volume = 7, pages = {287--336}, year = 1998} @misc{Powe98c, author = {M. J. D. Powell}, title = {A quadratic model trust region method for unconstrained minimization without derivatives}, howpublished = {Presentation at the International Conference on Nonlinear Programming and Variational Inequalities, Hong Kong}, year = 1998, abstract = {A trust region method for unconstrained minimization calculates a trial change in the variables by minimizing an approximation to the objective function subject to a bound on the length of the trial step. The author has developed an easy-to-use algorithm of this kind by letting each approximation be a linear polynomial, but the rate of convergence is usually very slow because second derivatives are ignored. Therefore we will consider the use of quadratic approximations that are quadratic polynomials. They are constructed by interpolation to values of the objective function, so one has to ensure that the positions of the interpolation points are suitable. recent work on this important question will be reviewed. Attention will be given to the adjustment of the trust region radius, including the idea of having a separate radius for controlling the distance between interpolation points.}, summary = {A derivative-free trust-region method for unconstrained optimization is described that uses quadratic interpolation models. These are chosen to be linear combination of the Lagrange fundamental polynomials associated with the interpolation problem. It is shown that the coefficients of these polynomials can be updated from iteration to iteration in a numerically stable way. 
The method also uses a separate radius for controlling the distance between interpolation points.}} @article{PoweToin79, author = {M. J. D. Powell and Ph. L. Toint}, title = {On The Estimation of Sparse {H}essian Matrices}, journal = SINUM, volume = 16, number = 6, pages = {1060--1074}, year = 1979} @article{PoweYuan86, author = {M. J. D. Powell and Y. Yuan}, title = {A recursive quadratic-programming algorithm that uses differentiable exact penalty-functions}, journal = MP, volume = 35, number = 3, pages = {265--278}, year = 1986} @article{PoweYuan90, author = {M. J. D. Powell and Y. Yuan}, title = {A trust region algorithm for equality constrained optimization}, journal = MP, volume = 49, number = 2, pages = {189--213}, year = 1990, abstract = {A trust region algorithm for equality constrained optimization is proposed that employs a differentiable exact penalty function. Under certain conditions global convergence and local superlinear convergence results are proved.}, summary = {A trust-region algorithm for equality constrained optimization is proposed that employs a differentiable exact penalty function. Under certain conditions global convergence and local superlinear convergence results are proved.}} @misc{PrieMogu99, author = {F. Prieto and J. M. Moguerza}, title = {Interior-Point Methods using Negative Curvature}, howpublished = {Presentation at the First Workshop on Nonlinear Optimization ``Interior-Point and Filter Methods'', Coimbra, Portugal}, year = 1999, abstract = {Interior-point methods are very promising for the solution of large-scale nonlinear problems. In the nonconvex case, the use of negative curvature information may allow additional improvements in the behaviour of these methods without any significant increase in their computational costs. 
However, the efficient implementation of such a method requires addressing several issues; for example, how to guarantee the convergence of the method, and how to compute and use the negative-curvature information without incurring excessive costs. We describe the implementation of two variants of an interior-point method using negative curvature. Particular attention is paid to the conditions under which the negative curvature information is used, the way in which the descent and negative curvature directions are combined, and to the updating of the barrier parameter. The convergence properties of the method are analyzed, and the conditions under which the methods are globally convergent and may attain quadratic convergence are discussed. Finally, computational results on a set of small and medium-sized problems are presented and compared with those obtained using other interior-point implementations, and with the results obtained by the proposed method when negative curvature information is not used.}, summary = {The implementation of two variants of an interior-point method using negative curvature is discussed. Particular attention is paid to the conditions under which the negative curvature information is used, the way in which the descent and negative curvature directions are combined, and to the updating of the barrier parameter. The conditions under which the methods are globally convergent and may attain quadratic convergence are discussed. Computational results on a set of small and medium-sized problems are compared with those obtained using other interior-point implementations, and with those obtained by the proposed method when negative curvature information is not used.}} @article{PropPukh93, author = {A. I. Propoi and A. V.
Pukhlikov}, title = {{N}ewton Stochastic Method in nonlinear extremal problems}, journal = {Automation and Remote Control}, volume = 54, number = {4--Part 1}, pages = {605--613}, year = 1993, abstract = {This paper is concerned with the construction of consistent model-trust region pairs for optimization procedures of the Newtonian type. A problem of random extremal search in the class of normal distributions is studied. Two "training" and "improvement" components of optimization motion are singled out and analyzed.}, summary = {A random search procedure is explored in the context of trust-region methods.}} @article{Psch70, author = {B. N. Pschenichny}, title = {Algorithms for general problems of mathematical programming}, journal = {Kibernetica}, volume = 6, pages = {120--125}, year = 1970} @article{PsiaPark95, author = {M. Psiaki and K. Park}, title = {Augmented {L}agrangian Nonlinear-Programming algorithm that uses {SQP} and trust region techniques}, journal = JOTA, volume = 86, number = 2, pages = {311--325}, year = 1995, abstract = {An augmented Lagrangian nonlinear programming algorithm has been developed. Its goals are to achieve robust global convergence and fast local convergence. Several unique strategies help the algorithm achieve these dual goals. The algorithm consists of three nested loops. The outer loop estimates the Kuhn-Tucker multipliers at a rapid linear rate of convergence. The middle loop minimizes the augmented Lagrangian function for fixed multipliers. This loop uses the sequential quadratic programming technique with a box trust region stepsize restriction. The inner loop solves a single quadratic program. Slack variables and a constrained form of the fixed-multiplier middle-loop problem work together with curved line searches in the inner-loop problem to allow large penalty weights for rapid outer-loop convergence.
The inner-loop quadratic programs include quadratic constraint terms, which complicate the inner loop, but speed the middle-loop progress when the constraint curvature is large. The new algorithm compares favorably with a commercial sequential quadratic programming algorithm on five low-order test problems. Its convergence is more robust, and its speed is not much slower.}, summary = {An augmented Lagrangian nonlinear programming algorithm is developed. The algorithm consists of three nested loops. The outer loop estimates the Kuhn-Tucker multipliers at a rapid linear rate of convergence. The middle loop minimizes the augmented Lagrangian function for fixed multipliers. This loop uses the sequential quadratic programming technique with a box trust-region stepsize restriction. The inner loop solves a single quadratic program. Slack variables and a constrained form of the fixed-multiplier middle-loop problem work together with curved linesearches in the inner-loop problem to allow large penalty weights for rapid outer-loop convergence. The inner-loop quadratic programs include quadratic constraint terms, which complicate the inner loop, but speed the middle-loop progress when the constraint curvature is large.}} %%% Q %%% @article{QiSun94, author = {L. Qi and J. Sun}, title = {A trust region algorithm for minimization of locally {L}ipschitzian functions}, journal = MP, volume = 66, number = 1, pages = {25--43}, year = 1994, abstract = {The classical trust region algorithm for smooth nonlinear programs is extended to the nonsmooth case where the objective function is only locally Lipschitzian. At each iteration, an objective function that carries both first and second order information is minimized over a trust region. The term that carries the first order information is an iteration function that may not explicitly depend on subgradients or directional derivatives. We prove that the algorithm is globally convergent. 
This convergence result extends the result of \citebb{Powe84} for minimization of smooth functions, the result of \citebb{Yuan85b} for minimization of composite convex functions, and the result of \citebb{DennLiTapi95} for minimization of regular functions. In addition, compared with the recent model of \citebb{PangHanRang91} for minimization of locally Lipschitzian functions using a linesearch, this algorithm has the same convergence property without assuming positive definiteness and uniform boundedness of the second order term. Applications of the algorithm to various nonsmooth optimization problems are discussed.}, summary = {The classical trust-region algorithm for smooth nonlinear programs is extended to the non-smooth case where the objective function is only locally Lipschitzian. At each iteration, an objective function that uses both first and second order information is minimized over a trust region. The term that carries the first order information is an iteration function that may not explicitly depend on subgradients or directional derivatives. It is proved that the algorithm is globally convergent. Applications of the algorithm to various non-smooth optimization problems are discussed.}} @article{Qi95, author = {L. Qi}, title = {Trust Region Algorithms for Solving Nonsmooth Equations}, journal = SIOPT, volume = 5, number = 1, pages = {219--230}, year = 1995, abstract = {Two globally convergent trust region algorithms are presented for solving nonsmooth equations, where the functions are only locally Lipschitzian. The first algorithm is an extension of the classic Levenberg-Marquardt method by approximating the locally Lipschitzian function with a smooth function and using the derivative of the smooth function in the algorithm wherever a derivative is needed. Global convergence for this algorithm is established under a regular condition. In the second algorithm, successive smooth approximation functions and their derivatives are used. 
Global convergence for the second algorithm is established under mild assumptions. Both objective functions of subproblems of these two algorithms are quadratic functions.}, summary = {Two globally convergent trust-region algorithms are presented for solving non-smooth equations, for the case where the functions are only locally Lipschitzian. The first algorithm is an extension of the classic Levenberg-Morrison-Marquardt method, obtained by approximating the locally Lipschitzian function with a smooth function and using the derivative of the smooth function in the algorithm wherever a derivative is needed. Global convergence is established under a regularity condition. In the second algorithm, successive smooth approximation functions and their derivatives are used. Global convergence for the second algorithm is established under mild assumptions. Both objective functions of subproblems of these two algorithms are quadratic functions.}} @techreport{QiQiSun99, author = {H. Qi and L. Qi and D. Sun}, title = {Solving {KKT} Systems via the Trust Region and the Conjugate Gradient Methods}, institution = {School of Mathematics, The University of New South Wales}, address = {Sydney, Australia}, number = {15-09-99}, year = 1999, abstract = {In this paper, we propose a trust region method for solving KKT systems arising from the variational inequality problem and the constrained optimization problem. The trust region subproblem is derived from the reformulation of the KKT system as a constrained optimization problem and is solved by the truncated conjugate gradient method; meanwhile the variables remain feasible with respect to the constrained optimization problem. Global and superlinear convergence are established. Some preliminary numerical experiments show that the method is quite promising.}, summary = {A trust-region method is proposed for solving KKT systems arising from the variational inequality and the constrained optimization problems.
The trust-region subproblem is derived from the reformulation of the KKT system as a constrained optimization problem and is solved by the truncated conjugate gradient method while maintaining feasibility. Global and superlinear convergence is established. Preliminary numerical experiments illustrate the method.}} %%% R %%% @inproceedings{Reid71, author = {J. K. Reid}, title = {On the method of conjugate gradients for the solution of large sparse linear equations}, booktitle = {Large sparse sets of linear equations}, editor = {J. K. Reid}, publisher = AP, address = AP-ADDRESS, pages = {231--254}, year = 1971} @inproceedings{Reid73, author = {J. K. Reid}, title = {Least squares solution of sparse systems of non-linear equations by a modified {M}arquardt algorithm}, booktitle = {Decomposition of Large-Scale Problems}, editor = {D. M. Himmelblau}, publisher = NH, address = NH-ADDRESS, pages = {437--445}, year = 1973, abstract = {Marquardt's algorithm is adapted to exploit sparsity in non-linear least squares problem that may or may not be overdetermined. Consideration is given to cases when derivatives are available analytically and where they have to be estimated. The results of numerical experiments are presented.}, summary = {The Levenberg-Morrison-Marquardt algorithm is compared to the dogleg method for sparse least-squares problems. Approximation schemes for the Jacobian are also considered.}} @article{Rein71, author = {C. Reinsch}, title = {Smoothing by spline functions {II}}, journal = NUMMATH, volume = 16, pages = {451--454}, year = 1971} @article{RendWolk97, author = {F. Rendl and H. Wolkowicz}, title = {A Semidefinite Framework for Trust Region Subproblems with Applications to Large Scale Minimization}, journal = MP, volume = 77, number = 2, pages = {273--299}, year = 1997, abstract = {A primal-dual pair of semidefinite programs provides a general framework for the theory and algorithms for the trust region subproblem (TRS).
This problem consists in minimizing a general quadratic function subject to a convex quadratic constraint and, therefore, it is a generalization of the minimum eigenvalue problem. The importance of TRS is due to the fact that it provides the step in trust region minimization algorithms. The semidefinite framework is studied as an interesting instance of semidefinite programming as well as a tool for viewing known algorithms and deriving new algorithms for TRS. In particular, a dual simplex type method is studied that solves TRS as a parametric eigenvalue problem. This method uses the Lanczos algorithm for the smallest eigenvalue as a black box. Therefore, the essential cost of the algorithm is the matrix-vector multiplication and, thus, sparsity can be exploited. A primal simplex type method provides steps for the so-called hard case. Extensive numerical tests for large sparse problems are discussed. These tests show that the cost of the algorithm is $1+\alpha(n)$ times the cost of finding a minimum eigenvalue using the Lanczos algorithm, where $0<\alpha(n)<1$ is a fraction which decreases as the dimension increases.}, summary = {A primal-dual pair of semidefinite programs provides a general framework for the theory and algorithms for the trust region subproblem (TRS). This problem is a generalization of the minimum eigenvalue problem. The semidefinite framework is studied as an instance of semidefinite programming as well as a tool for viewing known algorithms and deriving new algorithms for TRS. In particular, a dual simplex type method is studied that solves TRS as a parametric eigenvalue problem. This method uses the Lanczos algorithm for the smallest eigenvalue as a black box. Therefore, the essential cost of the algorithm is the matrix-vector multiplication and, thus, sparsity can be exploited. A primal simplex type method provides steps for the so-called hard case. 
Numerical tests for large sparse problems show that the cost of the algorithm is $1+\alpha(n)$ times the cost of finding a minimum eigenvalue using the Lanczos algorithm, where $0<\alpha(n)<1$ is a fraction which decreases as the dimension increases.}} @article{RendVandWolk95, author = {F. Rendl and R. J. Vanderbei and H. Wolkowicz}, title = {Max-min eigenvalue problems, primal-dual interior point algorithms, and trust region subproblems}, journal = OMS, volume = 5, number = 1, pages = {1--16}, year = 1995, abstract = {Two Primal-dual interior point algorithms are presented for the problem of maximizing the smallest eigenvalue of a symmetric matrix over diagonal perturbations. These algorithms prove to be simple, robust, and efficient. Both algorithms are based on transforming the problem to one with constraints over the cone of positive semidefinite matrices, i.e. L\"{o}wner order constraints. One of the algorithms does this transformation through an intermediate transformation to a trust region subproblem. This allows the removal of a dense row.}, summary = {Two primal-dual interior-point algorithms are presented for maximizing the smallest eigenvalue of a symmetric matrix over diagonal perturbations. These algorithms prove to be simple, robust, and efficient. Both algorithms are based on transforming the problem to one with constraints over the cone of positive semidefinite matrices. One of the algorithms does this transformation through an intermediate transformation to a trust-region subproblem. This allows the removal of a dense row.}} @article{Robi74, author = {S. M. Robinson}, title = {Perturbed {K}uhn-{T}ucker points and rates of convergence for a class of nonlinear programming algorithms}, journal = MP, volume = 7, number = 1, pages = {1--16}, year = 1974} @inproceedings{Robi83, author = {S. M. Robinson}, title = {Generalized equations}, crossref = {BachGrotKort83}, pages = {346--367}} @book{Rock70, author = {R. T. 
Rockafellar}, title = {Convex Analysis}, publisher = {Princeton University Press}, address = {Princeton, USA}, year = 1970} @article{Rock74, author = {R. T. Rockafellar}, title = {Augmented {L}agrangian multiplier functions and duality in nonconvex programming}, journal = SICON, volume = 12, number = 2, pages = {268--285}, year = 1974} @article{Rock76, author = {R. T. Rockafellar}, title = {Augmented {L}agrangians and applications of the proximal point algorithm in convex programming}, journal = MOR, volume = 1, pages = {97--116}, year = 1976} @article{Rock76b, author = {R. T. Rockafellar}, title = {Monotone Operators and the Proximal Point Algorithm}, journal = SICON, volume = 14, pages = {877--898}, year = 1976} @inproceedings{Rock83, author = {R. T. Rockafellar}, title = {Generalized subgradients in mathematical programming}, crossref = {BachGrotKort83}, pages = {368--390}} @article{RodrRenaWats99, author = {J. F. Rodr\'{\i}guez and J. E. Renaud and L. T. Watson}, title = {Convergence of Trust Region Augmented {L}agrangian Methods Using Variable Fidelity Approximation Data}, journal = {Structural Optimization}, volume = 15, number = {3--4}, pages = {141--156}, year = 1999, abstract = {To date the primary focus of most constrained approximate optimization strategies is that application of the method should lead to improved designs. Few researchers have focused on the development of constrained approximate optimization strategies that are assured of converging to a Karush-Kuhn-Tucker (KKT) point for the problem. Recent work by the authors based on a trust region model management strategy has shown promise in managing the convergence of constrained approximate optimization in application to a suite of single level optimization test problems.
Using a trust-region model management strategy, coupled with an augmented Lagrangian approach for constrained approximate optimization, the authors have shown in application studies that the approximate optimization process converges to a KKT point for the problem. The approximate optimization strategy sequentially builds a cumulative response surface approximation of the augmented Lagrangian which is then optimized subject to a trust region constraint. In this research the authors develop a formal proof of convergence for the response surface approximation based optimization algorithm. Previous application studies were conducted on single level optimization problems for which response surface approximations were developed using conventional statistical response sampling techniques such as central composite design to query a high fidelity model over the design space. In this research the authors extend the scope of application studies to include the class of multidisciplinary design optimization (MDO) test problems. More importantly the authors show that response surface approximations constructed from variable fidelity data generated during concurrent subspace optimizations (CSSOs) can be effectively managed by the trust region model management strategy.}, summary = {An augmented Lagrangian trust-region method is given, that converges to a Karush-Kuhn-Tucker point for constrained optimization. The method behaves well on single level optimization test problems. Applications include multidisciplinary design optimization test problems. It is shown that response surface approximations constructed from variable fidelity data generated during concurrent subspace optimizations can be effectively managed by the trust-region model management strategy.}} @article{RodrRenaWats98b, author = {J. F. Rodr\`{\i}guez and J. E. Renaud and L. T. 
Watson}, title = {Trust-region augmented {L}agrangian methods for sequential response surface approximation and optimization}, journal = {Journal of Mechanical Design}, volume = 120, number = 1, pages = {58--66}, year = 1998, abstract = {A common engineering practice is the use of approximation models in place of expensive computer simulations to drive a multidisciplinary design process based on nonlinear programming techniques. The use of approximation strategies is designed to reduce the number of detailed, costly computer simulations required during optimization while maintaining the pertinent features of the design problem. To date the primary focus of most approximate optimization strategies is that application of the method should lead to improved designs. This is a laudable attribute and certainly relevant for practicing designers. However to date few researchers have focused on the development of approximate optimization strategies that are assured of converging to a solution of the original problem. Recent works based on trust region model management strategies have shown promise in managing convergence in unconstrained approximate minimization. In this research we extend these well established notions from the literature on trust-region methods to manage the convergence of the more general approximate optimization problem where equality, inequality and variable bound constraints are present. The primary concern addressed in this study is how to manage the interaction between the optimization and the fidelity of the approximation models to ensure that the process converges to a solution of the original constrained design problem. Using a trust-region model management strategy coupled with an augmented Lagrangian approach for constrained approximate optimization, one can show that the optimization process converges to a solution of the original problem. 
In this research an approximate optimization strategy is developed in which a cumulative response surface approximation of the augmented Lagrangian is sequentially optimized subject to a trust region constraint. Results for several test problems are presented in which convergence to a Karush-Kuhn-Tucker (KKT) point is observed.}, summary = {A common engineering practice is the use of approximation models in place of expensive computer simulations to drive a multidisciplinary design process based on nonlinear programming techniques. Well established notions on trust-region methods are extended to manage the convergence of the general approximate problem where equality, inequality and variable bound constraints are present. The primary concern is to manage the interaction between the optimization and the fidelity of the approximation models to ensure that the process converges to a solution of the original constrained design problem. This is achieved by using a trust-region model management strategy coupled with an augmented Lagrangian approach for constrained approximate optimization. An approximate optimization strategy is developed in which a cumulative response surface approximation of the augmented Lagrangian is sequentially optimized subject to a trust region constraint. Results for several test problems are presented in which convergence to a Karush-Kuhn-Tucker (KKT) point is observed.}} @inproceedings{RogeTerpGoss88, author = {A. Roger and P. Terpolilli and O. Gosselin}, title = {Trust region methods for seismic inverse problems}, booktitle = {Proceedings of the XVIth Workshop on Interdisciplinary Study of Inverse Problems: Some Topics on Inverse Problems}, editor = {P. C. Sabatier}, publisher = {World Scientific}, address = {Singapore}, pages = {93--103}, year = 1988, abstract = {The authors present a class of optimization algorithms, called trust region algorithms which exhibit attractive theoretical convergence properties. 
They outline the main features of trust region algorithms, give a few classical properties without demonstration, and present numerical results which show that this class of algorithm actually fits the required features.}, summary = {Trust-region methods are presented in the context of seismic inversion problems.}} @phdthesis{Roja98, author = {M. Rojas}, title = {A large-scale trust-region approach to the regularization of discrete ill-posed problems}, school = {Rice University}, address = RICE-ADDRESS, year = 1998, abstract = {We consider the problem of computing the solution of large-scale discrete ill-posed problems when there is noise in the data. These problems arise in important areas such as seismic inversion, medical imaging and signal processing. We pose the problem as a quadratically constrained least squares problem and develop a method for the solution of such problem. Our method does not require factorization of the coefficient matrix, it has very low storage requirements and handles the high degree of singularities arising in discrete ill-posed problems. We present numerical results on test problems and an application of the method to a practical problem with real data.}, summary = {An algorithm based on the proposals by \citebb{Sore97} and \citebb{SantSore95} is described for the solution of large-scale linear least-squares problems subject to a quadratic constraint. It is applied to regularize large discrete ill-posed problems. Numerical experiments on test cases and a real inverse interpolation problem illustrate the method.}} @techreport{RojaSantSore99, author = {M. Rojas and S. A. Santos and D. C. Sorensen}, title = {A new matrix-free algorithm for the large-scale trust-region subproblem}, institution = CAAM, address = RICE-ADDRESS, number = {TR99-19}, year = 1999, abstract = {We present a matrix-free algorithm for the large-scale trust-region subproblem.
Our algorithm relies on matrix-vector products only and does not require matrix factorizations. We recast the trust-region subproblem as a parametrized eigenvalue problem and compute an optimal value for the parameter. We then find the optimal solution of the trust-region subproblem from the eigenvectors associated with two of the smallest eigenvalues of the parameterized eigenvalue problem corresponding to the optimal parameter. The new algorithm uses a different interpolating scheme than existent methods and introduces a unified iteration that naturally includes the so-called hard case. We show that the new iteration is well defined and convergent at a superlinear rate. We present computational results to illustrate convergence properties and robustness of the method.}, summary = {A matrix-free algorithm for the large-scale trust-region subproblem is presented, that relies on matrix-vector products only and does not require matrix factorizations. The trust-region subproblem is recast as a parametrized eigenvalue problem and an optimal value for the parameter computed. The optimal solution of the trust-region subproblem is then found from the eigenvectors associated with two of the smallest eigenvalues of the parameterized eigenvalue problem corresponding to the optimal parameter. The new algorithm uses a unified iteration that naturally includes the hard case, and is superlinearly convergent. Computational results illustrate convergence properties and robustness of the method.}} @article{Rose60, author = {H. H. Rosenbrock}, title = {An automatic method for finding the greatest or least value of a function}, journal = COMPJ, volume = 3, pages = {175--184}, year = 1960} @article{RoyServ99, author = {R. Roy and Sevick Muraca, E.
M.}, title = {Truncated {N}ewton's optimization scheme for absorption and fluorescence optical tomography: {P}art {I} theory and formulation }, journal = {Optics Express}, volume = 4, number = 10, pages = {353--371}, year = 1999, abstract = {The development of non-invasive, biomedical optical imaging from time-dependent measurements of near-infrared (NIR) light propagation in tissues depends upon two crucial advances: (i) the instrumental tools to enable photon ''time-of-flight'' measurement within rapid and clinically realistic times, and (ii) the computational tools enabling the reconstruction of interior tissue optical property maps from exterior measurements of photon ''time-of-flight'' or photon migration. In this contribution, the image reconstruction algorithm is formulated as an optimization problem in which an interior map of tissue optical properties of absorption and fluorescence lifetime is reconstructed from synthetically generated exterior measurements of frequency-domain photon migration (FDPM). The inverse solution is accomplished using a truncated Newton's method with trust region to match synthetic fluorescence FDPM measurements with that predicted by the finite element prediction. The computational overhead and error associated with computing the gradient numerically is minimized upon using modified techniques of reverse automatic differentiation.}, summary = {The development of non-invasive, biomedical optical imaging from time-dependent measurements of near-infrared (NIR) light propagation in tissues depends upon two crucial advances: (i) the instrumental tools to enable photon ''time-of-flight'' measurement within rapid and clinically realistic times, and (ii) the computational tools enabling the reconstruction of interior tissue optical property maps from exterior measurements of photon ''time-of-flight'' or photon migration. 
The image reconstruction algorithm is formulated as an optimization problem in which an interior map of tissue optical properties of absorption and fluorescence lifetime is reconstructed from synthetically generated exterior measurements of frequency-domain photon migration (FDPM). The inverse solution is accomplished using a truncated Newton's method with trust region to match synthetic fluorescence FDPM measurements with that predicted by the finite element prediction. The computational overhead and error associated with computing the gradient numerically is minimized upon using modified techniques of reverse automatic differentiation.}} @article{Rudn94, author = {M. Rudnicki}, title = {Smoothing strategies in solving inverse electromagnetic problems}, journal = {International Journal of Applied Electromagnetics in Materials}, volume = 4, number = 3, pages = {239--264}, year = 1994, abstract = {Many inverse and optimal design problems of electrical engineering are formulated in terms of least squares methodology (linear and nonlinear least squares). The objective function used makes a square-root deviation between prescribed and actual field distribution in a controlled subregion. In the paper some numerical methods that proved to be particularly useful in solving such problems of electrical engineering are presented. The common feature of these methods is the need of choosing a smoothing parameter controlling the solution quality. It is shown how this parameter may be chosen when using the linear least squares (regularization) approach and the trust-region approach in the nonlinear least squares method. For completeness the author presents the zeroth-order stochastic methods of optimization as well. As opposed to the least squares approach, the latter do not require restrictive a priori assumptions about convexity and smoothness.
Some electromagnetic optimal design problems solved by means of the above techniques are referred.}, summary = {It is shown how the regularization parameter may be chosen when using the linear least-squares approach and the trust-region approach in the nonlinear least squares method for solving inverse and optimal design problems of electrical engineering. The zeroth-order stochastic methods of optimization are also discussed, because they do not require restrictive a priori assumptions about convexity and smoothness. Some electromagnetic optimal design problems solved by means of these techniques are discussed.}} %%% S %%% @book{Saad91, author = {Y. Saad}, title = {Numerical Methods for Large Eigenvalue Problems}, publisher = {Manchester University Press}, year = 1991} @book{Saad96, author = {Y. Saad}, title = {Iterative Methods for Sparse Linear Systems}, publisher = {PWS Publishing Company}, address = {Boston, USA}, year = 1996} @techreport{SachSart99, author = {E. W. Sachs and A. Sartenaer}, title = {A Class of Augmented {L}agrangian Algorithms for Infinite Dimensional Optimization with Equality Constraints}, institution = FUNDP, address = FUNDP-ADDRESS, number = {(in preparation)}, year = 1999} @article{SadjPonn99, author = {S. J. Sadjadi and K. Ponnambalam}, title = {Advances in trust region algorithms for constrained optimization}, journal = {Applied Numerical Mathematics}, volume = 29, number = 3, pages = {423--443}, year = 1999, abstract = {Constrained optimization problems occur in many applications of engineering, science and medicine. Much attention has recently been devoted to solving this class of problems using trust region algorithms with strong convergence properties, in part because of the availability of reliable software. This paper presents a survey of recent advances in trust region algorithms.
We then explain the different choices of penalty function, Lagrange function and expanded Lagrangian function used for modeling constrained optimization problems and solving these equations using trust region algorithms. Finally, some numerical results for the implementation of our proposed method on different test problems with various sizes are presented.}, summary = {A survey of recent advances in trust-region methods for constrained minimization is given. Different choices for the penalty function, Lagrange function and expanded Lagrangian functions which are used are compared. Some numerical results for an implementation of a recommended method on different test problems with various sizes are presented.}} @article{Sahb87, author = {M. Sahba}, title = {Globally convergent algorithm for nonlinearly constrained optimization problems}, journal = JOTA, volume = 52, number = 2, pages = {291--309}, year = 1987} @misc{SachSchuFrom98, author = {E. W. Sachs and M. Schulze and S. Fromme}, title = {Neural Networks---An application of numerical optimization in the financial markets}, howpublished = {Presentation at the Optimization 98 Conference, Coimbra}, year = 1998, abstract = {We present results from a joint research project between a research group of a bank and an optimization group of a university. The goal was to develop an efficient code to train neural networks and to use this software too to forecast some economic indicators. We show how fully interactive methods exploit the structure of the neural networks and give theoretical and numerical results. The software is applied to forecasting the German stock index DAX using various input parameters. The success of the forecast is measured in the development of the value of a portfolio.}, summary = {The development of an efficient code to train neural networks and the use of this software to forecast some economic indicators is discussed. 
The proposed method exploits the underdetermined nature of the application and uses a Steihaug-Toint truncated conjugate gradient trust-region method. The software is applied to forecasting the German stock DAX index. The success of the forecast is measured in the development of the value of a portfolio.}} @article{SagaFuku91, author = {N. Sagara and M. Fukushima}, title = {A Hybrid Method for the nonlinear least-squares problem with simple bounds}, journal = JCAM, volume = 36, number = 2, pages = {149--157}, year = 1991, abstract = {This paper presents a new method with trust region technique for solving the nonlinear least-squares problem with lower and upper bounds on the variables. The proposed method constructs trust region constraints that are ellipses centered at the iterative points in such a way that they lie in the interior of the feasible region. Thus the method belongs to the class of interior point methods, and hence we may expect that the generated sequence approaches a solution smoothly without the combinatorial complications inherent to traditional active set methods. We establish a convergence theorem for the proposed method and show its practical efficiency by numerical experiments.}, summary = {An interior trust-region method is presented for solving the nonlinear least-squares problem with lower and upper bounds on the variables. Convergence is established and the practical efficiency of the method is illustrated by numerical experiments.}} @article{SagaFuku95, author = {N. Sagara and M. Fukushima}, title = {A Hybrid Method for solving the nonlinear least-squares problem with linear inequality constraints}, journal = {Journal of the Operations Research Society of Japan}, volume = 38, number = 1, pages = {55--69}, year = 1995, abstract = {This paper presents a new method with trust region technique for solving the nonlinear least squares problem with linear inequality constraints. 
The method proposed in this paper stems from the one presented in a recent paper by the authors. The method successively constructs trust region constraints, which are ellipsoids centered at the iterative points, in such a way that they lie in the relative interior of the feasible region. Thus the method belongs to the class of interior point methods, and hence we may expect that the generated sequences approaches a solution smoothly without the combinatorial complications inherent to traditional active set methods. We establish a convergence theorem for the proposed method and show its practical efficiency by numerical experiments.}, summary = {A trust-region method is presented for solving the nonlinear least-squares problem with linear inequality constraints. It is an adaptation of the method by \citebb{SagaFuku91} to this more general case, and enjoys similar properties.}} @article{Sala87, author = {D. E. Salane}, title = {A continuation approach for solving large-residual nonlinear least squares problems}, journal = SISSC, volume = 8, number = 4, pages = {655--671}, year = {1987}, abstract = {This paper is concerned with the solution of the nonlinear least squares problem. A continuation method is used to develop a new framework for the model trust region approach for solving nonlinear least squares problem. This framework gives a motivation for the direct selection of the trust region parameter. It also provides a natural safeguard for trust region methods and leads to a very robust algorithm. A class of algorithms based on the continuation method is presented. In addition, the implementation details for one member of the new class are examined. The convergence and descent properties of this algorithm are also discussed. Numerical evidence is given showing that the new algorithms are competitive with existing model trust region algorithms.
For large-residual problems or problems in which a good initial starting guess is not available, the performance of the new algorithm is very promising.}, summary = {A continuation method is used to develop a new framework for the model trust-region approach for solving nonlinear least squares problem, which motivates the direct selection of the trust-region parameter. It also provides a safeguard for trust-region methods and leads to a very robust algorithm. A class of algorithms based on the continuation method is presented. In addition, the implementation details for one member of the new class are examined. Convergence and descent properties of this algorithm are also discussed. Numerical evidence is given showing that the new algorithms are competitive with existing trust-region algorithms. }} @article{Salz60, author = {H. E. Salzer}, title = {A note on the solution of quartic equations}, journal = MC, volume = 14, number = 71, pages = {279--281}, year = 1960} @techreport{SantSore95, author = {S. A. Santos and D. C. Sorensen}, title = {A new matrix-free algorithm for the large-scale trust-region subproblem}, institution = CAAM, address = RICE-ADDRESS, number = {TR95-20}, year = 1995, abstract = {The trust-region subproblem arises frequently in linear algebra and optimization applications. Recently, matrix-free methods have been introduced to solve large-scale trust-region subproblems. These methods only require a matrix-vector product and do not rely on matrix factorizations (\bciteb{RendWolk97}, \bciteb{Sore97}). These approaches recast the trust-region subproblem in terms of a parameterized eigenvalue problem and then adjust the parameter to find the optimal solution from the eigenvector corresponding to the smallest eigenvalue of the parameterized eigenvalue problem. This paper presents a new matrix-free algorithm for the large-scale trust-region subproblem. 
The new algorithm improves upon the previous algorithms by introducing a unified iteration that naturally includes the so called hard case. The new iteration is shown to be superlinearly convergent in all cases. Computational results are presented to illustrate convergence properties and robustness of the method.}, summary = {A matrix-free algorithm for the large-scale trust-region subproblem is presented, which improves upon the previous algorithms by introducing a unified iteration that naturally includes the hard case. The iteration is superlinearly convergent in all cases. Computational results are presented to illustrate its convergence properties and robustness.}} @inproceedings{Sarg74, author = {R. W. H. Sargent}, title = {Reduced-gradient and projection methods for nonlinear programming}, crossref = {GillMurr74a}, pages = {149--174}} @misc{SargZhan98, author = {R. W. H. Sargent and X. Zhang}, title = {An Interior-point Algorithm for Solving General Variational Inequalities and Nonlinear Programs}, howpublished = {Presentation at the Optimization 98 Conference, Coimbra}, year = 1998} % abstract = {The paper describes a new algorithm for finding local % solutions of general variational inequalities or % nonlinear programs. It applies the interior-point % approach directly to the necessary conditions for the % nonlinear problem, which can then be degenerate. It % is proved that the algorithm always has finite % termination, and that the rate of convergence is % globally linear, and under a certain regularity condition % ultimately Q-subquadratic. The regularity condition % does not necessarily imply an isolated solution point. % Numerical results are given for a selection of nonlinear % programming problems from the CUTE package.}, % abstract = {An algorithm for finding local solutions of general % variational inequalities or nonlinear programs is described. 
% It applies the interior-point approach directly to the % necessary conditions for the nonlinear problem, which can % then be degenerate. It is proved that the algorithm always % has finite termination, and that the rate of convergence % is globally linear, and under a certain regularity % condition ultimately Q-subquadratic. The regularity % condition does not necessarily imply an isolated solution % point. Numerical results are given for a selection of % nonlinear programming problems from the {\sf CUTE} package.}} @phdthesis{Sart91, author = {A. Sartenaer}, title = {On some strategies for handling constraints in nonlinear optimization}, school = FUNDP, address = FUNDP-ADDRESS, year = 1991} @article{Sart93b, author = {A. Sartenaer}, title = {Armijo-type condition for the determination of a generalized {C}auchy point in trust region algorithms using exact or inexact projections on convex constraints}, journal = {Belgian Journal of Operations Research, Statistics and Computer Science}, volume = 33, number = 4, pages = {61--75}, year = 1993, abstract = {This paper considers some aspects of two classes of trust region methods for solving constrained optimization problems. The first class proposed by \citebb{Toin88} uses techniques based on the explicitly calculated projected gradient, while the second class proposed by \citebb{ConnGoulSartToin96a} allows for inexact projections on the constraints. We propose and analyze for each class a step-size rule in the spirit of the Armijo rule for the determination of a Generalized Cauchy Point. We then prove under mild assumptions that, in both cases, the classes preserve their theoretical properties of global convergence and identification of the correct active set in a finite number of iterations. 
Numerical issues are also discussed for both classes.}, summary = {An Armijo step-size rule is proposed for the determination of a Generalized Cauchy Point and analyzed for the trust-region methods proposed by \citebb{Toin88} and \citebb{ConnGoulSartToin96a}. It is proved under mild assumptions that both classes preserve their theoretical properties of global convergence and identification of the correct active set in a finite number of iterations. Numerical issues are also discussed for both classes.}} @article{Sart95, author = {A. Sartenaer}, title = {A class of trust region methods for nonlinear network optimization problems}, journal = SIOPT, volume = 5, number = 2, pages = {379--407}, year = 1995, abstract = {We describe the results of a series of tests upon a class of new methods of trust region type for solving the nonlinear network optimization problem. The trust region technique considered is characterized by the use of the infinity norm and of inexact projections on the network constraints. The results are encouraging and show that this approach is particularly useful in solving large-scale nonlinear network optimization problems, especially when many bound constraints are expected to be active at the solution.}, summary = {The results of a series of tests upon a class of methods of trust-region type for solving the nonlinear network optimization problem are described. The trust-region technique considered is characterized by the use of the infinity norm and of inexact projections on the network constraints. The results are encouraging and show that this approach is particularly useful in solving large-scale nonlinear network optimization problems, especially when many bound constraints are expected to be active at the solution.}} @article{Sart97, author = {A. 
Sartenaer}, title = {Automatic determination of an initial trust region in nonlinear programming}, journal = SISC, volume = 18, number = 6, pages = {1788--1803}, year = 1997, abstract = {This paper presents a simple but efficient way to find a good initial trust region radius in trust region methods for nonlinear optimization. The method consists of monitoring the agreement between the model and the objective function along the steepest descent direction, computed at the starting point. Further improvements for the starting point are also derived from the information gleaned during the initializing phase. Numerical results on a large set of problems show the impact the initial trust region radius may have on trust region methods behaviour and the usefulness of the proposed strategy.}, summary = {A simple but efficient way to find a good initial trust region radius in trust-region methods for nonlinear optimization consists of monitoring the agreement between the model and the objective function along the steepest-descent direction at the starting point. Further improvements for the starting point are also derived from the information gleaned during the initializing phase. Numerical results on a large set of problems show the impact the initial trust region radius may have on trust-region methods behaviour.}} @article{Saue95, author = {Th. Sauer}, title = {Computational aspects of multivariate polynomial interpolation}, journal = {Advances in Computational Mathematics}, volume = 3, pages = {219--238}, year = 1995} @misc{Saue96, author = {Th. Sauer}, title = {Notes on polynomial interpolation}, howpublished = {Private communication}, month = {February}, year = 1996} @article{SaueXu95, author = {Th. Sauer and Y. Xu}, title = {On multivariate {L}agrange interpolation}, journal = MC, volume = 64, pages = {1147--1170}, year = 1995} @inproceedings{SchaLude94, author = {J. Schaepperle and E. 
Luder}, title = {Optimization of distributed parameter systems with a combined statistical-deterministic method}, booktitle = {1994 IEEE International Symposium on Circuits and Systems}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 6, pages = {141--144}, year = 1994, abstract = {This paper describes an optimization method for nonlinear systems with properties that depend on functions instead of discrete parameters. The method is applied to the design of systems with spatially distributed parameters. The underlying mathematical problem of the calculus of variations is approximated by a finite dimensional constrained nonlinear minimax-problem. This is solved with a method that combines a deterministic algorithm for local optimisation with a statistical method for global optimization. The former is based on linearization and linear programming with adaptive trust region, while the latter uses elements from genetic methods and pattern recognition. An example with a nonlinearly loaded nonuniform transmission line shows the capability of the algorithm to determine the unknown optimum function with high precision.}, summary = {An optimization method is described for nonlinear systems with properties that depend on functions instead of discrete parameters. The method is applied to the design of systems with spatially distributed parameters. The underlying mathematical problem of the calculus of variations is approximated by a finite-dimensional constrained nonlinear minimax-problem. This is solved with a method that combines a deterministic algorithm for local optimization with a statistical method for global optimization. The former is based on linearization and linear programming with adaptive trust region, while the latter uses elements from genetic methods and pattern recognition. 
An example with a nonlinearly loaded non-uniform transmission line shows the capability of the algorithm to determine the unknown optimum function with high precision.}} @article{Shii99, author = {T. Shiina}, title = {Numerical solution technique for joint chance-constrained programming problem---an application to electric power capacity expansion }, journal = {Journal of the Operations Research Society of Japan}, volume = 42, number = 2, pages = {128--140}, year = 1999, abstract = {We consider a joint chance-constrained linear programming problem with random right hand side vector. The deterministic equivalent of the joint chance-constraint is already known in the case that the right hand side vector is statistically independent. But if the right hand side vector is correlative, it is difficult to derive the deterministic equivalent of the joint chance-constraint. We discuss two methods for calculating the joint chance-constraint. For the case of uncorrelated right hand side, we try a direct method different from the usual deterministic equivalent, for the correlative right hand side case, we apply numerical integration. In this paper a chance-constrained programming problem is developed for electric power capacity expansion, where the error of forecasted electricity demand is defined by a random variable. Finally we show that this problem can be solved numerically using the trust region method and numerical integration, and we present the results of our computational experiments.}, summary = {A joint chance-constrained linear programming problem with random right hand side vector is considered. The deterministic equivalent of the joint chance-constraint is already known in the case that the right hand side vector is statistically independent. But if the right hand side vector is correlated, it is difficult to derive the deterministic equivalent of the joint chance-constraint. Two methods for calculating the joint chance-constraint are discussed. 
For the case of uncorrelated right hand side, a direct method different from the usual deterministic equivalent is tried, while for the correlated right hand side case, numerical integration is applied. A chance-constrained programming problem is developed for electric power capacity expansion, where the error of forecasted electricity demand is defined by a random variable. This problem is solved numerically using a trust region method, and numerical integration, and results of computational experiments are presented.}} @article{Schi81, author = {K. Schittkowski}, title = {The nonlinear programming method of {W}ilson, {H}an and {P}owell with an augmented {L}agrangian type line search function}, journal = NUMMATH, volume = 38, pages = {83--114}, year = 1981} @phdthesis{Schl95, author = {S. Schleiff}, title = {Parametersch\"{a}tzung in nichtlinearen {M}odellen unter besonderer {B}er\-\"{u}cks\-ichtigung kritischer {P}unkte}, school = {Martin-Luther-Universit\"{a}t}, address = {Halle-Wittenberg, Germany}, year = 1995} @article{Schl93, author = {T. Schlick}, title = {Modified {C}holesky factorizations for sparse preconditioners}, journal = SISC, volume = 14, number = 2, pages = {424--445}, year = 1993} @article{SchnEsko91, author = {R. B. Schnabel and E. Eskow}, title = {A new modified {C}holesky factorization}, journal = SISSC, volume = 11, number = 6, pages = {1136--1158}, year = 1991} @article{SchnEsko99, author = {R. B. Schnabel and E. Eskow}, title = {A revised modified {C}holesky factorization}, journal = SIOPT, volume = 9, number = 4, pages = {1064--1081}, year = 1999} @article{SchnKoonWeis85, author = {R. B. Schnabel and J. E. Koontz and B. E. Weiss}, title = {A modular system of algorithms for unconstrained minimization}, journal = {ACM Transactions on Mathematical Software}, volume = 11, number = 4, pages = {419--440}, year = 1985, abstract = {We describe a new package, UNCMIN, for finding a local minimizer of a real valued function of more than one variable. 
The novel feature of UNCMIN is that it is a modular system of algorithms, containing three different step selection strategies (line search, dogleg, and optimal step) that may be combined with either analytic or finite difference gradient evaluation and with either analytic, finite difference, or BFGS Hessian approximation. We present the results of a comparison of the three step selection strategies on the problems in \citebb{MoreGarbHill81} in two separate cases: using finite difference gradients and Hessians, and using finite difference gradients with BFGS Hessian approximations. We also describe a second package, REVMIN, that uses optimization algorithms identical to UNCMIN but obtains values of user-supplied functions by reverse communication.}, summary = {UNCMIN is a modular system of algorithms for optimization containing three different step selection strategies (linesearch, dogleg, and optimal step) that may be combined with either analytic or finite difference gradient evaluation and with either analytic, finite difference, or BFGS Hessian approximation. The results of a comparison of the three step selection strategies on the problems in \citebb{MoreGarbHill81} are compared when using finite difference gradients and Hessians, or finite difference gradients with BFGS Hessian approximations. A second package, REVMIN, that uses optimization algorithms identical to UNCMIN but obtains values of user-supplied functions by reverse communication is also described.}} @article{SchoStoh99, author = {S. Scholtes and M. St\"{o}hr}, title = {Exact penalization of mathematical programs with equilibrium constraints}, journal = SICON, volume = 37, number = 2, pages = {617--652}, year = 1999, abstract = {We study theoretical and computational aspects of an exact penalization approach to mathematical programs with equilibrium constraints (MPEC). 
In the first part, we prove that a Mangasarian-Fromowitz type condition ensures the existence of a stable local error bound at the root of a real-valued nonnegative piecewise smooth function. A specification to nonsmooth formulations of equilibrium constraints, e.g. complementarity conditions or normal equations, provides conditions which guarantee the existence of a nonsmooth exact penalty function for MPECs. In the second part, we study a trust region minimization method for a class of composite nonsmooth functions which comprises exact penalty functions arising from MPECs. We prove a global convergence result for the general method and incorporate a penalty update rule. A further specification results in an SQP trust region method for MPECs based on an $\ell_1$ penalty function.}, summary = {Theoretical and computational aspects of an exact penalization approach to mathematical programs with equilibrium constraints (MPEC) are studied. It is shown that a Mangasarian-Fromowitz type condition ensures the existence of a stable local error bound at the root of a real-valued non-negative piecewise smooth function. A specification to non-smooth formulations of equilibrium constraints, e.g. complementarity conditions or normal equations, then provides conditions which guarantee the existence of a non-smooth exact penalty function for MPECs. A trust-region minimization method for a class of composite non-smooth functions is then presented, which comprises exact penalty functions arising from MPECs. Global convergence is proved and a penalty update rule is described. A further specification results in an SQP trust-region method for MPECs based on an $\ell_1$ penalty function.}} @article{SchrZowe92, author = {H. Schramm and J. 
Zowe}, title = {A version of the bundle idea for minimizing a nonsmooth function: conceptual idea, convergence analysis, numerical results}, journal = SIOPT, volume = 2, number = 1, pages = {121--152}, year = 1992, abstract = {During recent years various proposals for the minimization of a nonsmooth functional have been made. Amongst these, the bundle concept turned out to be an especially fruitful idea. Based on this concept, a number of authors have developed codes that can successfully deal with nonsmooth problems. The aim of the paper is to show that, by adding some features of the trust-region philosophy to the bundle concept, the end result is a distinguished member of the bundle family with a more stable behaviour than some other versions. The reliability and efficiency of this code is demonstrated on the standard academic examples and on some real-life problems}, summary = {A stable algorithm of the bundle family is obtained for solving non-smooth minimization problems by adding some features of the trust-region philosophy to the bundle concept. The reliability and efficiency of the corresponding code is demonstrated on the standard academic examples and on some real-life problems.}} @article{SchwTill89, author = {H. Schwetlick and V. Tiller}, title = {Nonstandard Scaling Matrices for Trust Region {G}auss-{N}ewton Methods}, journal = SISSC, volume = 10, number = 4, pages = {654--670}, year = 1989, abstract = {For solving large nonlinear problems via the trust-region Gauss-Newton methods, nonstandard scaling matrices are proposed for scaling the norm of the step. The scaling matrices are rectangular, of full rank, and contain a block of the Jacobian matrix of the residual function. Three types of such matrices are investigated. The corresponding trust region methods are shown to have qualitatively the same convergence properties as the standard method. 
Nonstandard scaling matrices are especially intended for solving large and structured problems such as orthogonal distance regression or surface fitting. Initial computational experience suggests that for such problems the proposed scaling implies sometimes a modest increase in the number of iterations but reduces overall computational cost.}, summary = {Non-standard scaling matrices are proposed for scaling the norm of the step in the solution of large nonlinear problems via the trust-region Gauss-Newton methods. The scaling matrices are rectangular, of full rank, and contain a block of the Jacobian matrix of the residual function. Three types of such matrices are investigated. The corresponding trust-region methods have qualitatively the same convergence properties as the standard method. Non-standard scaling matrices are especially intended for solving large and structured problems such as orthogonal distance regression or surface fitting. Initial computational experience suggests that for such problems the proposed scaling sometimes implies a modest increase in the number of iterations but reduces overall computational cost.}} @phdthesis{Sebu92b, author = {Ch. Sebudandi}, title = {Algorithmic developments in seismic tomography}, school = FUNDP, address = FUNDP-ADDRESS, year = 1992} @article{SebuToin92, author = {Ch. Sebudandi and Ph. L. Toint}, title = {Nonlinear optimization for seismic travel time tomography}, journal = {Geophysical Journal International}, volume = 115, pages = {929--940}, year = 1993, abstract = {This paper presents a non-linear algorithmic approach for seismic traveltime. It is based on large-scale optimization using non-linear least-squares and trust-region methods. These methods provide a natural way to stabilize algorithms based on Newton's iteration for non-linear minimization. They also correspond to an alternative (and often more efficient) view of the Levenberg-Marquardt method. 
Numerical experience on synthetic data and on real borehole-to-borehole problems are presented. In particular, results produced by the algorithms are compared with those of Ivansson (1985) for the Kr\aa m\aa la experiment.}, summary = {A nonlinear algorithm for seismic traveltime analysis is presented, based on large-scale nonlinear least-squares and trust-region methods. Numerical experience on synthetic data and on real borehole-to-borehole problems is presented. Results produced by the algorithms are compared with those of Ivansson (1985) for the Kr\aa m\aa la experiment.}} @article{Semp97, author = {J. Semple}, title = {Optimality conditions and solution procedures for nondegenerate dual-response systems}, journal = {IIE Transactions}, volume = 29, number = 9, pages = {743--752}, year = 1997, abstract = {This paper investigates the dual-response problem in the case where the response functions are nonconvex (nonconcave) quadratics and the independent variables satisfy a radial bound. Sufficient conditions for a global optimum are established and shown to generalize to the multi-response case. It is then demonstrated that the sufficient conditions may not hold if the problem is 'degenerate'. However, if the problem is nondegenerate, it is shown that the sufficient conditions are necessarily satisfied by some stationary point. In this case, a specialized algorithm (DRSALG) is shown to locate the global optimum in a finite number of steps. DRSALG will also identify the degenerate case and pinpoint the location where degeneracy occurs. The algorithm is easy to implement from the standpoint of code development, and we illustrate our elementary version on a well-studied dual-response example from quality control.}, summary = {The dual-response problem in the case where the response functions are non-convex (non-concave) quadratics and the independent variables satisfy a radial bound is investigated. 
Sufficient conditions for a global optimum are established and generalized to the multi-response case. It is demonstrated that the sufficient conditions may not hold if the problem is ``degenerate''. However, if the problem is non-degenerate, the sufficient conditions are necessarily satisfied by some stationary point. In this case, a specialized algorithm (DRSALG) locates the global optimum in a finite number of steps. DRSALG also identifies the degenerate case and pinpoints the location where degeneracy occurs. The algorithm is easy to implement, and is illustrated on a well-studied dual-response example from quality control.}} @phdthesis{Shah96, author = {J. S. Shahabuddin}, title = {Structured trust-region algorithms for the minimization of nonlinear functions}, school = CS-CORNELL, address = CORNELL-ADDRESS, year = 1996, summary = {Trust-region algorithms are a popular and successful class of tools for the solution of nonlinear, non-convex optimization problems. The basic trust-region algorithm is extended so that it takes advantage of partial separability to solve such large-scale problems in an efficient way. It aims at simplifying the proposal of \citebb{ConnGoulSartToin96a} in the case where no constraints are imposed in the problem. Three related approaches of ``multiple'' or ``structured'' trust regions are proposed. Convergence results are presented for them in the unconstrained case. Some computational results are also discussed, in which the three approaches are compared.}} @article{Shan70, author = {D. F. Shanno}, title = {Conditioning of Quasi-{N}ewton Methods For Function Minimization}, journal = MC, volume = 24, pages = {647--657}, year = 1970} @article{Shan78b, author = {D. F. Shanno}, title = {Conjugate gradient methods with inexact searches}, journal = MOR, volume = 3, pages = {244--256}, year = 1978} @misc{Shan99, author = {D. F. 
Shanno}, title = {Topics in Implementing an Interior Point Method for Nonconvex Nonlinear Programming}, howpublished = {Presentation at the First Workshop on Nonlinear Optimization ``Interior-Point and Filter Methods'', Coimbra, Portugal}, year = 1999} @book{Shef85, author = {Y. Sheffi}, title = {Urban Transportation Networks}, publisher = PH, address = PH-ADDRESS, year = 1985} @article{ShulSchnByrd85, author = {G. A. Shultz and R. B. Schnabel and R. H. Byrd}, title = {A family of trust-region-based algorithms for unconstrained minimization with strong global convergence properties}, journal = SINUM, volume = 22, number = 1, pages = {47--67}, year = 1985, abstract = {This paper has two aims: to exhibit very general conditions under which members of a broad class of unconstrained minimization algorithms are globally convergent in a strong sense, and to propose several new algorithms that use second derivative information and achieve such convergence. In the first part of the paper, we present a general trust-region-based algorithm schema that includes an undefined step selection strategy. We give general conditions on the step selection strategy under which limit points of the algorithm will satisfy first and second order necessary conditions for unconstrained minimization. Our algorithm schema is sufficiently broad to include line search algorithms as well. Next, we show that a wide range of step selection strategies satisfy the requirements of our convergence theory. This leads us to propose several new algorithms that use second derivative information and achieve strong global convergence, including an indefinite line search algorithm, several indefinite dogleg algorithms and a modified ``optimal-step'' algorithm. Finally, we propose an implementation of one such indefinite dogleg algorithm.}, summary = {A general trust-region-based algorithm schema that includes an undefined step selection strategy is presented. 
General conditions on the step selection strategy under which limit points will satisfy first and second order necessary conditions are given. The algorithm schema is sufficiently broad to include linesearch methods as well. It is shown that a wide range of step selection strategies satisfy the requirements of the convergence theory, and several algorithms that use second derivative information and achieve strong global convergence are proposed. These include an indefinite linesearch algorithm, several indefinite dogleg algorithms and a modified ``optimal-step'' algorithm. An implementation of one such indefinite dogleg algorithm is proposed.}} @inproceedings{SimaShan97, author = {E. M. Simantiraki and D. F. Shanno}, title = {An Infeasible-Interior-Point Method for Linear Complementarity Problems}, crossref = {DuffWats97}, pages = {339--362}} @article{Smal83, author = {S. Smale}, title = {On the average number of steps of the simplex method of linear programming}, journal = MP, volume = 27, number = 3, pages = {241--262}, year = 1983} @article{SmitBowe93, author = {R. C. Smith and K. L. Bowers}, title = {{S}inc-{G}alerkin Estimation of Diffusivity in Parabolic Problems}, journal = {Inverse Problems}, volume = 9, number = 1, pages = {113--135}, year = 1993, abstract = {A fully Sinc-Galerkin method for the numerical recovery of spatially varying diffusion coefficients in linear parabolic partial differential equations is presented. Because the parameter recovery problems are inherently ill-posed, an output error criterion in conjunction with Tikhonov regularization is used to formulate them as infinite-dimensional minimization problems. The forward problems are discretized with a sinc basis in both the spatial and temporal domains thus yielding an approximate solution which displays an exponential convergence rate and is valid on the infinite time interval. The minimization problems are then solved via a quasi-Newton/trust region algorithm. 
The L-curve technique for determining an appropriate value of the regularization parameter is briefly discussed, and numerical examples are given which demonstrate the applicability of the method both for problems with noise-free data as well as for those whose data contain white noise.}, summary = {A fully Sinc-Galerkin method for the numerical recovery of spatially varying diffusion coefficients in linear parabolic partial differential equations is presented. Because the parameter recovery problems are inherently ill-posed, an output error criterion in conjunction with Tikhonov regularization is used to formulate them as infinite-dimensional minimization problems. The forward problems are discretized with a sinc basis in both the spatial and temporal domains thus yielding an approximate solution which displays an exponential convergence rate and is valid on the infinite time interval. The minimization problems are then solved via a quasi-Newton/trust-region algorithm. The L-curve technique for determining an appropriate value of the regularization parameter is briefly discussed, and numerical examples illustrate the applicability of the method both for problems with noise-free data as well as for those whose data contain white noise.}} @article{Sore82, author = {D. C. Sorensen}, title = {{N}ewton's Method with a Model Trust-Region Modification}, journal = SINUM, volume = 19, number = 2, pages = {409--426}, year = 1982, abstract = {A modified Newton method for unconstrained minimization is presented and analyzed. The modification is based upon the model trust region approach. This report contains a thorough analysis of the locally constrained quadratic minimizations that arise as subproblems in the modified Newton iteration. Several promising alternatives are presented for solving these subproblems in ways that overcome certain theoretical difficulties exposed by the analysis. Very strong convergence results are presented concerning the minimization algorithm. 
In particular, the explicit use of second order information is justified by demonstrating that the iterates converge to a point which satisfies the second order necessary conditions for minimization. With the exception of very pathological cases this occurs whenever the algorithm is applied to problems with continuous second partial derivatives.}, summary = {A modified Newton method for unconstrained minimization is presented and analyzed. The modification is based upon the model trust-region approach. A thorough analysis of the locally constrained quadratic minimizations that arise as subproblems in the modified Newton iteration is given. Several promising alternatives are presented for solving these subproblems in ways that overcome certain theoretical difficulties exposed by the analysis. Very strong convergence results are presented concerning the minimization algorithm. In particular, the explicit use of second order information is justified by demonstrating that the iterates converge to a point which satisfies the second order necessary conditions for minimization. With the exception of very pathological cases this occurs whenever the algorithm is applied to problems with continuous second partial derivatives.}} @inproceedings{Sore82b, author = {D. C. Sorensen}, title = {Trust Region Methods for Unconstrained Optimization}, crossref = {Powe82}, pages = {29--39}, summary = {The basic trust-region approach to safeguarding Newton-like methods for unconstrained optimization is discussed.}} @article{Sore97, author = {D. C. Sorensen}, title = {Minimization of a Large-Scale Quadratic Function subject to a Spherical Constraint}, journal = SIOPT, volume = 7, number = 1, pages = {141--161}, year = 1997, abstract = {An important problem in linear algebra and optimization is the Trust-Region Subproblem: Minimize a quadratic function subject to an ellipsoidal or spherical constraint. 
This basic problem has several important large scale applications including seismic inversion and forcing convergence in optimization methods. Existing methods to solve the trust-region subproblem require matrix factorizations, which are not feasible in large scale setting. This paper presents an algorithm for solving the large scale trust-region subproblem that requires a fixed size limited storage proportional to order of the quadratic and that relies only on matrix-vector products. The algorithm recasts the trust-region subproblem in terms of a parametrized eigenvalue problem and adjusts the parameter with a superlinearly convergent iteration to find the optimal solution from the eigenvector of the parametrized problem. Only the smallest eigenvalue and corresponding eigenvector of the parametrized problem needs to be computed. The Implicitly Restarted Lanczos Method is well-suited to this subproblem.}, summary = {An algorithm is presented for solving the large scale trust-region subproblem that requires a fixed size limited storage proportional to order of the quadratic and that relies only on matrix-vector products. The algorithm recasts the trust-region subproblem in terms of a parametrized eigenvalue problem and adjusts the parameter with a superlinearly convergent iteration to find the optimal solution from the eigenvector of the parametrized problem. Only the smallest eigenvalue and corresponding eigenvector of the parametrized problem needs to be computed. The Implicitly Restarted Lanczos Method is well-suited to this subproblem.}} @article{StanSnym93, author = {N. Stander and J. A. Snyman}, title = {A new first-order interior feasible direction method for structural optimization}, journal = {International Journal for Numerical Methods in Engineering}, volume = 36, number = 23, pages = {4009--4025}, year = 1993} @article{Stei83a, author = {T. 
Steihaug}, title = {The conjugate gradient method and trust regions in large scale optimization}, journal = SINUM, volume = 20, number = 3, pages = {626--637}, year = 1983, abstract = {Algorithms based on trust regions have been shown to be robust methods for unconstrained optimization problems. All existing methods, either based on the dogleg strategy or \citebb{Hebd73}--\citebb{More78} iterations, require the solution of system of linear equations. In large scale optimization this may be prohibitively expensive. It is shown in this paper that an approximate solution of the trust region problem may be found by the preconditioned conjugate gradient method. This may be regarded as a generalized dogleg technique where we asymptotically take the inexact quasi-Newton step. We also show that we have the same properties as existing methods based on the dogleg strategy using an approximate Hessian.}, summary = {It is shown that an approximate solution of the trust-region problem may be found by the preconditioned conjugate gradient method. This may be regarded as a generalized dogleg technique where asymptotically the inexact quasi-Newton step is taken. The resulting algorithm has the same properties as existing methods based on the dogleg strategy using an approximate Hessian.}} @article{SterWolk94, author = {R. J. Stern and H. Wolkowicz}, title = {Trust region problems and nonsymmetric eigenvalue perturbations}, journal = SIMAA, volume = 15, number = 3, pages = {775--778}, year = 1994, abstract = {A characterization is given for the spectrum of a symmetric matrix to remain real after a nonsymmetric sign-restricted border perturbation, including the case where the perturbation is skew-symmetric. The characterization is in terms of the stationary points of a quadratic function on the unit sphere. This yields interlacing relationships between the eigenvalues of the original matrix and those of the perturbed matrix. 
As a result of the linkage between the perturbation and stationarity problems, new theoretical insights are gained for each. Applications of the main results include a characterization of those matrices that are exponentially nonnegative with respect to the n-dimensional ice-cream cone, which in turn leads to a decomposition theorem for such matrices. In addition, results are obtained for nonsymmetric matrices regarding interlacing and majorization.}, summary = {A characterization is given for the spectrum of a symmetric matrix to remain real after a non-symmetric sign-restricted border perturbation, including the case where the perturbation is skew-symmetric. The characterization is in terms of the stationary points of a quadratic function on the unit sphere. This yields interlacing relationships between the eigenvalues of the original matrix and those of the perturbed matrix. Applications include a characterization of matrices that are exponentially non-negative with respect to the $n$-dimensional ice-cream cone, which leads to a decomposition theorem for such matrices. Results are obtained for non-symmetric matrices regarding interlacing and majorization.}} @article{SterWolk95, author = {R. J. Stern and H. Wolkowicz}, title = {Indefinite Trust Region Subproblems and Nonsymmetric Eigenvalue Perturbations}, journal = SIOPT, volume = 5, number = 2, pages = {286--313}, year = 1995, abstract = {This paper extends the theory of trust region subproblems in two ways: (i) it allows indefinite inner products in the quadratic constraint, and (ii) it uses a two-sided (upper and lower bound) quadratic constraint. Characterizations of optimality are presented that have no gap between necessity and sufficiency. Conditions for the existence of solutions are given in terms of the definiteness of a matrix pencil. A simple dual program is introduced that involves the maximization of a strictly concave function on an interval. 
The dual program simplifies the theory and algorithms for trust region subproblems. It also illustrates that the trust region subproblems are implicit convex programming problems, and thus explains why they are so tractable. The duality theory also provides connections to eigenvalue perturbation theory. Trust region subproblems with zero linear term in the objective function correspond to eigenvalue problems, and adding a linear term in the objective function is seen to correspond to a perturbed eigenvalue problem. Some eigenvalue interlacing results are presented.}, summary = {The theory of trust-region subproblems is extended in two ways: (i) indefinite inner products in the quadratic constraint are allowed, and (ii) a two-sided (upper and lower bound) quadratic constraint is used. Characterizations of optimality are presented that have no gap between necessity and sufficiency. Conditions for the existence of solutions are given in terms of the definiteness of a matrix pencil. A simple dual program is introduced that involves the maximization of a strictly concave function on an interval. The dual program simplifies the theory and algorithms for trust-region subproblems. It also illustrates that they are implicit convex programming problems, and thus explains why they are so tractable. The duality theory provides connections to eigenvalue perturbation theory. Trust-region subproblems with zero linear term in the objective function correspond to eigenvalue problems, and adding a linear term in the objective function is seen to correspond to a perturbed eigenvalue problem. Some eigenvalue interlacing results are presented.}} @article{Stew67, author = {G. W. Stewart}, title = {A Modification of {D}avidon's Minimization Method to Accept Difference Approximations of Derivatives}, journal = {Journal of the ACM}, volume = 14, year = 1967} @inproceedings{Stoe83, author = {J. 
Stoer}, title = {Solution of Large Linear Systems of Equations by Conjugate Gradient Type Methods}, crossref = {BachGrotKort83}, pages = {540--565}} @phdthesis{Stoh99, author = {M. St\"{o}hr}, title = {Nonsmooth trust-region methods and their applications to mathematical programs with equilibrium constraints}, school = {Faculty of Mathematics, University of Karlsruhe}, address = {Karlsruhe, Germany}, year = 1999, abstract = {(none)}, summary = {A trust-region method for the solution of mathematical programs with equilibrium constraints (MPEC) is proposed and analyzed. It makes use of exact penalty functions arising from the MPECs formulation. A variant of the algorithm by \citebb{SchoStoh99} is discussed that uses the concept of Cauchy point rather than requiring a model decrease proportional to that obtained at the global solution of the trust-region subproblem. Global convergence is proved and some numerical tests illustrate the method.}} @inproceedings{StudLuth97, author = {G. Studer and H.-J. L\"{u}thi}, title = {Maximum loss for risk measurement of portfolios}, booktitle = {Operations Research Proceedings 1996: Selected Papers of the Symposium on Operations Research (SOR 96)}, editor = {U. Zimmermann and U. Derigs and W. Gaul and R. H. Mohring and K. P. Schuster}, publisher = SPRINGER, address = SPRINGER-ADDRESS, pages = {386--391}, year = 1997, abstract = {Effective risk management requires adequate risk measurement. A basic problem herein is the quantification of market risks: what is the overall effect on a portfolio if market rates change? The first chapter gives a brief review of the standard risk measure "Value-At-Risk" (VAR) and introduces the concept of "Maximum Loss" (ML) as a method for identifying the worst case in a given scenario space, called "Trust Region". 
Next, a technique for calculating efficiently ML for quadratic functions is described; the algorithm is based on the Levenberg-Marquardt theorem, which reduces the high dimensional optimization problem to a one dimensional root finding. Following this, the idea of the "Maximum Loss Path" is presented: repetitive calculation of ML for a growing trust region leads to a sequence of worst cases, which form a complete path. Similarly, the paths of "Maximum Profit" (MP) and "Expected Value" (EV) can be determined; the comparison of them permits judgments on the quality of portfolios. These concepts are also applicable to non-quadratic portfolios by using "Dynamic Approximations", which replace arbitrary profit and loss functions with a sequence of quadratic functions. Finally, the idea of "Maximum Loss Distribution" is explained. The distributions of ML and MP can be obtained directly from the ML and MP paths. They lead to lower and upper bounds of VAR and allow statements about the spread of ML and MP.}, summary = {A brief review of the standard risk measure "Value-At-Risk" (VAR) is given and the concept of "Maximum Loss" (ML) for identifying the worst case in a given scenario space, a trust region, introduced. A technique for calculating efficiently ML for quadratic functions is described; the algorithm is based on the Levenberg-Morrison-Marquardt theorem. The idea of the "Maximum Loss Path" is presented. Repetitive calculation of ML for a growing trust region leads to a sequence of worst cases, which form a complete path. Similarly, the paths of "Maximum Profit" (MP) and "Expected Value" (EV) can be determined; the comparison of them permits judgments on the quality of portfolios. These concepts are applicable to non-quadratic portfolios by using "Dynamic Approximations", which replace arbitrary profit and loss functions with a sequence of quadratic functions. The idea of "Maximum Loss Distribution" is explained. 
The distributions of ML and MP can be obtained directly from the ML and MP paths, lead to lower and upper bounds of VAR and allow statements about the spread of ML and MP.}} @article{Stra91, author = {Z. Strako\v{s}}, title = {On the real convergence rate of the conjugate gradient method}, journal = LAA, volume = {154-156}, pages = {535--549}, year = 1991} @article{Sun96, author = {L. P. Sun}, title = {A restricted trust region method with supermemory for unconstrained optimization}, journal = JCM, volume = 14, number = 3, pages = {195--202}, year = 1996, abstract = {A new method for unconstrained optimization problems is presented. It belongs to the class of trust-region method, in which the descent direction is sought by using the trust region steps within the restricted subspace. Because this subspace can be specified to include information about previous steps, the method is also related to a supermemory descent method without performing multiple dimensional searches. Trust region methods have attractive global convergence property. Since the method possesses the characteristics of both the trust region methods and the supermemory descent methods, it is endowed with rapidly convergence. Numerical tests illustrate this point.}, summary = {A trust-region method for unconstrained optimization problems is presented, in which the descent direction is sought by using the trust-region steps within a restricted subspace. Because this subspace can be specified to include information about previous steps, the method is also related to a supermemory descent method. It is endowed with rapid convergence, as illustrated by numerical tests.}} @article{Sun96b, author = {W. Sun}, title = {Optimization methods for nonquadratic model}, journal = {Asia-Pacific Journal of Operational Research}, volume = 13, number = 1, pages = {43--63}, year = 1996} % abstract = {In this paper we review the state of the art and future % development on non-quadratic model optimization (NQMO). 
% Optimization methods based on the non-quadratic model are % more interesting than ones based on the quadratic model % because they have more interpolation information and better % approximation effect, especially for ill-conditioned % functions or functions with strong non-quadratic behaviour. % The main non-quadratic model methods include conic model % methods, homogeneous model methods, nonlinear scaling model % methods, tensor model methods and trust region model % methods. We think that further study of NQMO is necessary, % the area shows great potential and it is interesting.}, % summary = {The state of the art and future development on % non-quadratic model optimization (NQMO) is reviewed. % Optimization methods based on the non-quadratic model are % more interesting than ones based on the quadratic model % because they have more interpolation information and better % approximation effect, especially for ill-conditioned % functions or functions with strong non-quadratic behaviour. % The main non-quadratic models include conic models, % homogeneous models, nonlinear scaling models, % tensor models and trust-region models.} @article{Sun93, author = {J. Sun}, title = {A convergence proof for an affine-scaling algorithm for convex quadratic programming without nondegeneracy assumptions}, journal = MP, volume = 60, number = 1, pages = {69--79}, year = 1993} @article{Sun97, author = {J. Sun}, title = {On piecewise quadratic {N}ewton and trust-region problems}, journal = MP, volume = 76, number = 3, pages = {451--468}, year = 1997, abstract = {Some recent algorithms for nonsmooth optimization require solutions to certain piecewise quadratic programming subproblems. Two types of subproblems are considered in this paper. The first type seeks the minimization of a continuously differentiable and strictly convex piecewise quadratic function subject to linear equality constraints. 
We prove that a nonsmooth version of Newton's method is globally and finitely convergent in this case. The second type involves the minimization of a possibly nonconvex and nondifferentiable piecewise quadratic function over a Euclidean ball. Characterizations of the global minimizer are studied under various conditions. The results extend a classical result on the trust region problem.}, summary = {Some algorithms for non-smooth optimization require the solutions to certain piecewise quadratic programming subproblems. Two types of subproblems are considered. The first uses the minimization of a continuously differentiable and strictly convex piecewise quadratic function subject to linear equality constraints. A non-smooth version of Newton's method is globally and finitely convergent in this case. The second type involves the minimization of a possibly non-convex and non-differentiable piecewise quadratic function over a Euclidean ball. Characterizations of the global minimizer are studied under various conditions.}} @article{SunRued93, author = {J.-Q. Sun and K. Ruedenberg}, title = {Quadratic steepest descent on potential energy surfaces. {I}. {B}asic formalism and quantitative assessment}, journal = {Journal of Chemical Physics}, volume = 99, number = 7, pages = {5257--5268}, year = 1993, abstract = {A novel second-order algorithm is formulated for determining steepest-descent lines on potential energy surfaces. The reaction path is deduced from successive exact steepest-descent lines of local quadratic approximations to the surface. At each step, a distinction is made between three points: the center for the local quadratic Taylor expansion of the surface, the junction of the two adjacent local steepest-descent line approximations, and the predicted approximation to the true steepest-descent line. 
This flexibility returns a more efficient yield from the calculated information and increases the accuracy of the local quadratic approximations by almost an order of magnitude. In addition, the step size is varied with the curvature and, if desired, can be readjusted by a trust region assessment. Applications to the Gonzalez-Schlegel and the Muller-Brown surfaces show the method to compare favorably with existing methods. Several measures are given for assessing the accuracy achieved without knowledge of the exact steepest-descent line. The optimal evaluation of the predicted gradient and curvature for dynamical applications is discussed.}, summary = {A second-order algorithm is formulated for determining steepest-descent lines on potential energy surfaces, in which the step size is varied with the curvature and, if desired, readjusted by a trust region assessment. Applications to the Gonzalez-Schlegel and the Muller-Brown surfaces show the method to behave well. Several measures are given for assessing the accuracy achieved without knowledge of the exact steepest-descent line. The optimal evaluation of the predicted gradient and curvature for dynamical applications is discussed.}} @misc{SunYuan98, author = {W. Sun and Y. Yuan}, title = {A Conic Model Trust Region Method for Nonlinearly Constrained Optimization}, howpublished = {Presentation at the International Conference on Nonlinear Programming and Variational Inequalities, Hong Kong}, year = 1998, abstract = {In this paper we present conic trust region methods for constrained optimization problems. We give necessary and sufficient conditions for the solution of the associated trust region subproblems. Several equivalent variations and their properties are discussed. Some conic trust region algorithms are constructed. 
Finally, we establish the global and local convergence of our algorithms}, summary = {Trust-region methods for constrained optimization using conic models are considered, and necessary and sufficient conditions given for the solution of several equivalent formulations of the associated subproblems. Convergence properties of the resulting algorithm are established.}} @article{SunaBele91, author = {M. Sunar and A. D. Belegundu}, title = {Trust Region Methods for Structural Optimization Using Exact 2nd-Order Sensitivity}, journal = {International Journal for Numerical Methods in Engineering}, volume = 32, number = 2, pages = {275--293}, year = 1991, abstract = {The performance of multiplier algorithms for structural optimization has been significantly improved by using trust regions. The trust regions are constructed using analytical second order sensitivity, and within this region, the augmented Lagrangian $\phi$ is minimized subject to bounds. Evaluation of first and second derivatives of $\phi$ by the adjoint method does not require derivations of individual (implicit) constraint functions, which makes the method economical. Eight test problems are considered and a vast improvement over previously used multiplier algorithms has been noted. Also, the algorithm is robust with respect to scaling, input parameters and starting designs.}, summary = {A trust-region method for structural optimization is constructed using analytical second order sensitivity. The augmented Lagrangian $\phi$ is minimized in this region subject to bounds. Evaluation of first and second derivatives of $\phi$ by the adjoint method does not require derivations of individual (implicit) constraint functions, which makes the method economical. The algorithm is robust with respect to scaling, input parameters and starting designs.}} @book{Suth75, author = {W. A. 
Sutherland}, title = {Introduction to Metric and Topological Spaces}, publisher = OUP, address = OUP-ADDRESS, year = 1975} %%% T %%% @article{Tapi77, author = {R. A. Tapia}, title = {Diagonalized multiplier methods and quasi-{N}ewton methods for constrained optimization}, journal = JOTA, volume = 22, pages = {135--194}, year = 1977} @inproceedings{TappJala90, author = {C. Tappayuthpijarn and J. Jalali}, title = {Loadflow solution by applying hybrid algorithm to the {N}ewton-{R}aphson method}, booktitle = {Proceedings of the American Power Conference, Illinois Institute of Technology, Chicago, IL, USA}, pages = {234--238}, year = 1990, abstract = {The purpose of the hybrid method in solving power flow problems is to improve the efficiency in convergence of the existing Newton-Raphson method (NR) when its close initial estimates are not available. The method is based on interpolating between the fast convergence standard Newton-Raphson iteration and the method of steepest descent applied to the sum of the square of mismatch $f_i(x)$. The balance between these two methods is governed by introducing the concept of the trust region to restrict the step predicted by the classical method to be in the quadratic region and to switch to the steepest decent method that is better when the initial starts are far from the solution. The concept of trust radius and switching policies are given by the author. Digital computer results and their comparison of the 10 bus system, with different initial starts, by the proposed method and by the classical method are also given.}, summary = {A trust-region based method is described for enforcing global convergence in solving power flow problems.}} @article{Tana99, author = {Y. 
Tanaka}, title = {A trust region method for semi-infinite programming problems }, journal = {International Journal of Systems Science}, volume = 30, number = 2, pages = {199--204}, year = 1999, abstract = {We present a new successive quadratic programming (SQP) approach for semi-infinite programming problems with a trust region technique. Numerical methods for solving semi-infinite programming problems can be divided into continuous methods and discretization methods. We begin with a trust region method for nonlinear programming problems which possesses a fast and global convergence property and obviates the Maratos effect which is an unfavourable phenomenon that sometimes occurs for general SQP-type approaches. Then we apply the method to discretized semi-infinite programming problems by utilizing an $L_{\infty}$-exact penalty function and $\epsilon$-most-active constraints. The $L_{\infty}$-exact penalty function is, in fact, essential for continuity methods for semi-infinite programming problems so as to maintain continuity, of the exact penalty function, and enables the use of epsilon-most-active constraints in discretized semi-infinite programming problems. The results of preliminary computational experiments demonstrate the effectiveness of our approach for discretized semi-infinite programming problems.}, summary = {A trust region SQP method, using second-order corrections, is applied to discretized semi-infinite programming problems, using an $L_{\infty}$-exact penalty function and $\epsilon$-most-active constraints. Preliminary computational experiments demonstrate the viability of this approach.}} @article{TanaFukuHase87, author = {Y. Tanaka and M. Fukushima and T. 
Hasegawa}, title = {Implementable $L_{\infty}$ penalty-function method for semi-infinite optimization}, journal = {International Journal of Systems Science}, volume = 18, number = 8, pages = {1563--1568}, year = 1987, abstract = {This paper considers general nonlinear semi-infinite programming problems and presents an implementable method which employs an exact $L_{\infty}$ penalty function. Since the $L_{\infty}$ penalty function is continuous even if the number of representative constraints changes, trust-region techniques may effectively be adopted to obtain global convergence. Numerical results are given to show the efficiency of the proposed algorithm.}, summary = {An implementable method for general nonlinear semi-infinite programming problems is described, which employs an exact $L_{\infty}$ penalty function. Since this function is continuous even if the number of representative constraints changes, trust-region techniques may effectively be adopted to obtain global convergence. Numerical results are given to show the efficiency of the proposed algorithm.}} @article{TanaFukuIbar88, author = {Y. Tanaka and M. Fukushima and T. Ibaraki}, title = {A globally convergent {SQP} method for semi-infinite nonlinear optimization}, journal = JCAM, volume = 23, number = 2, pages = {141--153}, year = 1988, abstract = {A new approach for semi-infinite programming problems is presented, which belongs to the class of successive quadratic programming (SQP) methods with trust region technique. The proposed algorithm employs the exact $L_{\infty}$ penalty function as a criterion function and incorporates an appropriate scheme for estimating active constraints. It is proved that the algorithm is globally convergent under some assumptions. Numerical experiments show that the algorithm is very promising in practice.}, summary = {A trust-region SQP method for semi-infinite programming is presented. 
The proposed algorithm employs the exact $L_{\infty}$ penalty function and incorporates a scheme for estimating active constraints. It is proved to be globally convergent. Numerical experiments show that the algorithm is promising.}} @article{Tebo97, author = {M. Teboulle}, title = {Convergence of proximal-like algorithms}, journal = SIOPT, volume = 6, number = 3, pages = {617--625}, year = 1997} @article{Terp95, author = {P. Terpolilli}, title = {Trust region method in nonsmooth optimization}, journal = {Comptes rendus de l'{A}cad\'{e}mie des {S}ciences, s\'{e}rie {M}ath\'{e}matique}, volume = 321, number = 7, pages = {945--948}, year = 1995, abstract = {In this note, a new framework for nonsmooth optimization is introduced. We consider then an algorithm using trust region strategy and prove some global convergence results. We pay particular attention to the use of inexact local models : actually, we give a convergence result in a situation where local models are computed by a numerical procedure.}, summary = {A framework for non-smooth optimization is introduced. An algorithm using trust-region strategy is considered and global convergence results are established. Particular attention is paid to the use of inexact local models. A convergence result is given in the situation where local models are computed by a numerical procedure.}} @phdthesis{Tern94, author = {D. J. Ternet}, title = {A trust region algorithm for reduced {H}essian successive quadratic programming}, school = {Department of Chemical Engineering, Carnegie Mellon University}, address = {Pittsburgh, USA}, year = 1994, abstract = {Successive Quadratic Programming (SQP) has become a powerful tool for solving large-scale process optimization problems. An SQP code has recently been developed at CMU to solve nonlinear programming problems. 
Some areas of improvement in the current algorithm include the introduction of a barrier function to reduce the constraints, the development of a sparse matrix version of the code for larger problems, and the solution of a trust region constraint to the solution of the quadratic programming (QP) subproblem. Of these possible improvements, the trust region implementation was chosen as the focus of this paper. The Successive Quadratic Programming method of solving nonlinear programming problems is reviewed, as well as the role of line search and trust region methods within this framework. A trust region was added to the current algorithm to increase the algorithms robustness while maintaining its superlinear convergence properties. The benefits of combining a line search method with a trust region method are also explained. A set of test problems for the new algorithm are explained which motivates the need for the addition of the trust region.}, summary = {An SQP algorithm for solving nonlinear programming problems is reviewed, as well as the role of linesearch and trust-region methods within this framework. A trust region is added to the current algorithm to increase the algorithms robustness while maintaining its superlinear convergence properties. The benefits of combining a linesearch method with a trust-region method are also explained. A set of test problems are described which motivate the need for the trust region.}} @phdthesis{Thom75, author = {S. Thomas}, title = {Sequential estimation techniques for quasi-{N}ewton algorithms}, school = {Cornell University}, address = CORNELL-ADDRESS, year = 1975} @article{Tibs96, author = {R. Tibshirani}, title = {Regression shrinkage and selection via the lasso}, journal = {Journal of the Royal Statistical Society B}, volume = 58, number = 1, pages = {267--288}, year = 1996} @article{TinnWalk67, author = {W. F. Tinney and J. W. 
Walker}, title = {Direct solution of sparse network equations by optimally ordered triangular factorization}, journal = {Proceedings of the IEEE}, volume = 55, pages = {1801--1809}, year = 1967} @article{Toin77a, author = {Ph. L. Toint}, title = {On sparse and symmetric matrix updating subject to a linear equation}, journal = MC, volume = 31, number = 140, pages = {954--961}, year = 1977} @article{Toin78, author = {Ph. L. Toint}, title = {Some Numerical Results Using a Sparse Matrix Updating Formula in Unconstrained Optimization}, journal = MC, volume = 32, number = 143, pages = {839--851}, year = 1978} %abstract = {This paper presents a numerical comparison between % algorithms for unconstrained optimization that take account % of sparsity in the second derivative matrix of the % objective function. Some of the methods % included in the comparison use difference approximation % schemes to evaluate the second derivative matrix and % other use an approximation to it which is updated regularly % using the changes in the gradient. These results show what % method to use in what circumstances and also suggest % interesting future developments.}} @article{Toin79, author = {Ph. L. Toint}, title = {On the Superlinear Convergence of an Algorithm for Solving a Sparse Minimization Problem}, journal = SINUM, volume = 16, pages = {1036--1045}, year = 1979} @inproceedings{Toin80a, author = {Ph. L. Toint}, title = {Sparsity Exploiting Quasi-{N}ewton Methods for Unconstrained Optimization}, crossref = {DixoSpedSzeg80}, pages = {65--90}} @inproceedings{Toin81b, author = {Ph. L. Toint}, title = {Towards an Efficient Sparsity Exploiting {N}ewton Method for Minimization}, booktitle = {Sparse Matrices and Their Uses}, editor = {I. S. Duff}, publisher = AP, address = AP-ADDRESS, pages = {57--88}, year = 1981, abstract = {The paper surveys some recently proposed algorithms for unconstrained minimization when second derivative of the objective function is sparse. 
Updating and estimation procedures are considered from the efficiency point of view. Special attention is given to the case where the Hessian has a band structure. A new strategy for the choice of the step is also discussed and some numerical results on a specially designed test function are presented.}, summary = {Recently proposed algorithms for unconstrained minimization when second derivative of the objective function is sparse are surveyed. Updating and estimation procedures are considered from the efficiency point of view. Special attention is given to the case where the Hessian has a band structure. A strategy for the choice of the step using truncated conjugate-gradients is also discussed and some numerical results on a specially designed test function are presented.}} @techreport{Toin81f, author = {Ph. L. Toint}, title = {Convergence properties of a class of minimization algorithms that use a possibly unbounded sequence of quadratic approximations}, institution = FUNDP, address = FUNDP-ADDRESS, number = {81/1}, year = 1981, abstract = {Global convergence results are established for a trust-region like algorithm. At variance with previous contributions, this theory does not make assumptions on the norm of the Hessian approximations, but rather on the Rayleigh quotients of this approximation in certain specific directions. This allows for the case where Hessian approximations may become arbitrarily large provided they remain reasonable in these directions.}, summary = {Global convergence results are established for a trust-region algorithm without assumptions on the norm of the Hessian approximations, but rather on the Rayleigh quotients of this approximation in certain directions. This allows for the case where Hessian approximations may become arbitrarily large provided they remain reasonable in these directions.}} @techreport{Toin83b, author = {Ph. L. 
Toint}, title = {User's Guide to the Routine {PSPMIN} for Solving Partially Separable Bounded Optimization Problems}, institution = FUNDP, address = FUNDP-ADDRESS, number = {83/1}, year = 1983} @article{Toin83e, author = {Ph. L. Toint}, title = {{VE08AD}, a routine for partially separable optimization with bounded variables}, journal = {Harwell Subroutine Library}, volume = 2, year = 1983} @article{Toin86b, author = {Ph. L. Toint}, title = {On large scale nonlinear least squares calculations}, journal = SISSC, volume = 8, number = 3, pages = {416--435}, year = 1987, abstract = {The nonlinear model fitting problem is analyzed in this paper, with special emphasis on the practical solution techniques when the number of parameters in the model is large. Classical approaches to small dimensional least squares are reviewed and an extension of them to problems involving many variables is proposed. This extension uses the concept of partially separable structures, which has already proved its applicability for large scale optimization. An adaptable algorithm is discussed, which chooses between various possible models of the objective function. Preliminary numerical experience is also presented, which shows that actual solution of a large class of fitting problems involving several hundreds of nonlinear parameters is possible at a reasonable cost.}, summary = {The nonlinear model fitting problem is analyzed, with special emphasis on the practical solution techniques when the number of parameters in the model is large. An extension of classical approaches to problems involving many variables is proposed, that uses the concept of partially separable structures. An adaptable algorithm is discussed, which chooses between various possible models of the objective function. 
Preliminary numerical experience shows that the solution of a large class of fitting problems involving several hundreds of nonlinear parameters is possible at a reasonable cost.}} @article{Toin87d, author = {Ph. L. Toint}, title = {{VE10AD}, a routine for large scale nonlinear least squares}, journal = {Harwell Subroutine Library}, volume = 2, year = 1987} @article{Toin88, author = {Ph. L. Toint}, title = {Global convergence of a class of trust region methods for nonconvex minimization in {H}ilbert space}, journal = IMAJNA, volume = 8, number = 2, pages = {231--252}, year = 1988, abstract = {A class of trust-region methods for solving constrained optimization problems in Hilbert space is described. The algorithms of the class use, at every iteration, a local model of the objective, on which very weak conditions are imposed. Global convergence results are then derived for the class without assuming convexity of the objective functional. It is also shown that convergence of the classical projected-gradient method can be viewed as a special case of this theory. An example is finally given that points out some difficulties appearing when using active-set strategies in infinite-dimensional spaces.}, summary = {A trust-region method for solving constrained optimization problems in Hilbert space is described. Global convergence results are derived without assuming convexity of the objective functional. It is also shown that convergence of the classical projected-gradient method can be viewed as a special case of this theory. An example is given that points out some difficulties appearing when using active-set strategies in infinite-dimensional spaces.}} @techreport{Toin94d, author = {Ph. L. 
Toint}, title = {A non-monotone trust-region algorithm for nonlinear optimization subject to convex constraints: the complete numerical results}, institution = FUNDP, address = FUNDP-ADDRESS, number = {94/26}, year = 1994} %abstract = {The purpose of this paper is to detail the complete % results of all test runs reported on in the companion % paper \citebb{Toin96b}.}, @article{Toin96a, author = {Ph. L. Toint}, title = {An assessment of non-monotone linesearch techniques for unconstrained optimization}, journal = SISSC, volume = 17, number = 3, pages = {725--739}, year = 1996} %abstract = {The purpose of this paper is to discuss the potential of % nonmonotone techniques for enforcing convergence of % unconstrained minimization algorithms from starting points % distant from the solution. Linesearch-based algorithms are % considered for both small and large problems, and % extensive numerical experiments show that this potential % is sometimes considerable. A new variant is introduced in % order to limit some of the identified drawbacks of the % existing techniques. This variant is again numerically % tested and appears to be competitive. Finally, the impact % of preconditioning on the considered methods is examined.}, @article{Toin96b, author = {Ph. L. Toint}, title = {A non-monotone trust-region algorithm for nonlinear optimization subject to convex constraints}, journal = MP, volume = 77, number = 1, pages = {69--94}, year = 1997, abstract = {This paper presents two new trust-region methods for solving nonlinear optimization problems over convex feasible domains. These methods are distinguished by the fact that they do not enforce strict monotonicity of the objective function values at successive iterates. The algorithms are proved to be convergent to critical points of the problem from any starting point. 
Extensive numerical experiments show that this approach is competitive with the {\sf LANCELOT} package.}, summary = {Two trust-region methods for nonlinear optimization over convex feasible domains are presented. These methods are distinguished by the fact that they do not enforce strict monotonicity of the objective function values at successive iterates. The algorithms are proved to be convergent to critical points of the problem from any starting point. Extensive numerical experiments show that this approach is competitive with {\sf LANCELOT}.}} @techreport{TongZhou99, author = {X. Tong and S. Zhou}, title = {A trust-region algorithm for nonlinear inequality constrained optimization}, institution = {Department of Mathematics, Hunan University}, address = {Changsha, China}, number = {July}, year = 1999, abstract = {This paper presents a new trust-region algorithm for nonlinear optimization subject to nonlinear inequality constraints. An equivalent KKT condition is derived, which is the base of constructing the new algorithm. A global convergence of the algorithm to a first-order KKT point is established under mild conditions on the trial step, and a local quadratic convergence theorem is proved for nondegenerate minimizer point.}, summary = {A trust-region algorithm for nonlinear optimization subject to nonlinear inequality constraints, based on an equivalent reformulations of the KKT conditions, is presented. Global convergence of the algorithm to a first-order KKT point is established under mild conditions on the trial step, and a local Q-quadratic convergence rate is attainable at a nondegenerate minimizer.}} @misc{Tsen98, author = {P. Tseng}, howpublished = {(private communication)}, month = {September}, year = 1998} @techreport{Tsen99, author = {P. 
Tseng}, title = {A Convergent Infeasible Interior-Point Trust-Region Method for Constrained Optimization}, institution = {Department of Mathematics, University of Washington}, address = {Seattle, USA}, month = {May}, year = 1999, abstract = {We study an infeasible interior-point trust-region method for constrained optimization. This method uses a logarithmic-barrier function for the slack variables and updates the slack variables using a second-order correction. We show that if a certain set containing the iterates is bounded and the origin is not in the convex hull of the nearly active constraint gradients everywhere on this set, then any cluster point of the iterates is a 1st-order stationary point. If the cluster point satisfies an additional assumption (which holds when the constraints are linear or when the cluster point satisfies strict complementarity and a local error bound holds), then it is a 2nd-order stationary point.}, summary = {A primal interior-point method is presented for the inequality constrained nonlinear programming problem, that allows for infeasible points. The method uses a logarithmic barrier term for the slack variables and uses a trust-region to find a step. The associated subproblem is solved exactly. Convergence to first-order critical points is proved, as well as convergence to second-order ones under additional assumptions.}} @article{TsenYamaFuku96, author = {P. Tseng and N. Yamashita and M. Fukushima}, title = {Equivalence of complemetarity problems to differentiable minimization: a unified approach}, journal = SIOPT, volume = 6, number = 2, pages = {446--460}, year = 1996} @inproceedings{TsioMjol96, author = {D. I. Tsioutsias and E. Mjolsness}, title = {A multiscale attentional framework for relaxation neural networks}, booktitle = {Advances in Neural Information Processing Systems. Proceedings of the 1995 Conference}, editor = {D. S. Touretzky and M. C. Mozer and M. E. 
Hasselmo}, publisher = {MIT Press}, address = {Cambridge, MA, USA}, volume = 8, pages = {631--639}, year = 1996, abstract = {We investigate the optimization of neural networks governed by general objective functions. Practical formulations of such objectives are notoriously difficult to solve and a common problem is the poor local extrema that result by any of the applied methods. In this paper, a novel framework is introduced for the solution of large-scale optimization problems. It assumes little about the objective function and can be applied to general nonlinear, non-convex functions and objectives in thousand of variables are thus efficiently minimized by a combination of techniques-deterministic annealing, multiscale optimization, attention mechanisms and trust region optimization methods.}, summary = {The optimization of neural networks governed by general objective functions is investigated. A novel framework is introduced for the solution of large-scale such problems. It assumes little about the objective function and can be applied to general nonlinear, non-convex functions and objectives in thousand of variables are thus efficiently minimized by a combination of techniques-deterministic annealing, multiscale optimization, attention mechanisms and trust-region optimization methods.}} @article{Tsuc93, author = {T. Tsuchiya}, title = {Global convergence of the affine scaling algorithm for primal degenerate strictly convex quadratic programming problems}, journal = AOR, volume = 47, pages = {509--539}, year = 1993} @book{Turn39, author = {H. W. Turnbull}, title = {Theory of Equations}, publisher = {Oliver and Boyd}, address = {Edinburgh and London}, year = 1939} %%% U %%% @techreport{Ulbr99, author = {M. 
Ulbrich}, title = {Non-monotone Trust-Region Methods for Bound-Constrained Semismooth Equations with Applications to Nonlinear Mixed Complementarity Problems}, institution = {Faculty of Mathematics, Technische Universit\"{a}t M\"{u}nchen}, number = {TUM-M9906}, year = 1999, abstract = {We develop and analyze a class of trust-region methods for bound-constrained semismooth systems of equations. The algorithm is based on a simply constrained differentiable minimization reformulation. Our global convergence results are developed in a very general setting that allows for non-monotonicity of the function values at subsequent iterates. we propose a way of computing trial steps by a semismooth Newton-like method that is augmented by a projection onto the feasible set. Under a Dennis-Mor\'{e}-type condition we prove that close to a BD-regular solution the trust-region algorithm turns into this projected Newton method, which is shown to converge locally q-superlinearly or quadratically, respectively, depending on the quality of the approximate BD-subdifferentials used. As an important application we discuss in detail how the developed algorithm can be used to solve nonlinear mixed complementarity problems (MCPs). Hereby, the MCP is converted into a bound-constrained semismooth equation by means of an MCP function. We propose and investigate a new class of MCP-functions that are motivated by affine-scaling techniques for nonlinear programming. These functions have attractive theoretical properties and prove to be efficient in practice. This is documented by our numerical results for a subset of the MCPLIB problem collection.}, summary = {A class of trust-region methods for bound-constrained semismooth systems of equations is developed, which is based on a simply constrained differentiable minimization reformulation. Global convergence results are proved that allow for non-monotonicity of the function values at successive iterates. 
Trial steps are computed by a semismooth Newton-like method augmented by a projection onto the feasible set. Under a suitable condition and close to a regular solution, this technique turns into a projected Newton method, which converges locally $Q$-superlinearly or quadratically, depending on the quality of the approximate subdifferentials used. An application of this method to the solution of nonlinear mixed complementarity problems (MCPs) is then discussed, where the MCP is converted into a bound-constrained semismooth equation by means of an MCP function. A new class of MCP-functions is introduced, that is motivated by affine-scaling techniques for nonlinear programming. Numerical results for a subset of the MCPLIB problem collection illustrate the efficiency of this approach.}} @techreport{UlbrUlbr97, author = {M. Ulbrich and S. Ulbrich}, title = {Superlinear Convergence of Affine-scaling Interior-point {N}ewton Methods for Infinite-dimensional Problems with Pointwise Bounds.}, institution = CAAM, address = RICE-ADDRESS, number = {TR97-05}, year = 1997, abstract = {We develop and analyze a superlinearly convergent affine-scaling interior-point Newton method for infinite-dimensional problems with pointwise bounds in $L^p$-space. The problem formulation is motivated by optimal control problems with $L^p$-controls and pointwise control constraints. The finite-dimensional convergence theory by \citebb{ColeLi96b} makes essential use of the equivalence of norms and the exact identifiability of the active constraints close to an optimizer with strict complementarity. Since these features are not available in our infinite-dimensional framework, algorithmic changes are necessary to ensure fast local convergence. The main building block is a Newton-like iteration for an affine-scaling formulation of the KKT-condition. 
We demonstrate in an example that a stepsize rule to obtain an interior iterate may require very small stepsizes even arbitrarily close to a nondegenerate solution. Using a pointwise projection instead we prove superlinear convergence under a weak strict complementarity condition and convergence with $Q-$rate $>1$ under a slightly stronger condition if a smoothing step is available. We discuss how the algorithm can be embedded in the class of globally convergent trust-region interior-point methods recently developed by \citebb{UlbrUlbrHein99}. Numerical results for the control of a heating process confirm our theoretical findings.}, summary = {A superlinearly convergent affine-scaling interior-point Newton method for infinite-dimensional problems with pointwise bounds in $L^p$-space is analysed. The problem formulation is motivated by optimal control problems with $L^p$-controls and pointwise control constraints. Adaptations are made to the proposal by \citebb{ColeLi96b} for the infinite-dimensional setting. The main building block is a Newton-like iteration for an affine-scaling formulation of the KKT-condition. Using a pointwise projection, superlinear convergence under a weak strict complementarity condition and convergence with $Q-$rate $>1$ under a slightly stronger condition if a smoothing step is available are established. It is shown how the algorithm can be embedded in the class of globally convergent trust-region interior-point methods of \citebb{UlbrUlbrHein99}. Numerical results for the control of a heating process confirm the theoretical findings.}} @misc{UlbrUlbr99, author = {S. Ulbrich and M. 
Ulbrich}, title = {Nonmonotone Trust Region Methods for Nonlinear Equality Constrained Optimization Without a Penalty Function}, howpublished = {Presentation at the First Workshop on Nonlinear Optimization ``Interior-Point and Filter Methods'', Coimbra, Portugal}, year = 1999, abstract = {We propose and analyze a class of nonmonotone trust region methods for nonlinear equality constrained optimization problems. The algorithmic framework yields a global convergence without using a merit function like the augmented Lagrangian and allows nonmonotonicity independently for both the constraint violation and the objective function value. Similar to the augmented Lagrangian-based algorithm by \citebb{DennElAlMaci97}, each step is composed of a quasi-normal and a tangential step. Both steps are required to satisfy a fraction of Cauchy decrease condition for their respective trust region subproblems. Our mechanism for accepting steps combines nonmonotone decrease conditions on the constraint violation and/or the objective function, which leads to a flexibility and acceptance behaviour compared to filter-based methods. Preliminary numerical results for the {\sf CUTE} test set confirm that our approach is very promising. The proposed class of algorithms can be extended in a natural way to multilevel trust region algorithms. Hereby, the constraints are grouped in blocks and for each block a normal step is computed that acts tangential to the previous blocks. The generalization of our analysis to this class of algorithms is making progress and we plan to present convergence results also for this extended framework. The extension of our concept to general NLP is under development.}, summary = {A class of non-monotone trust-region methods for nonlinear equality constrained optimization problems is proposed, where each step is composed of a quasi-normal and a tangential step. 
Both steps are required to satisfy a fraction of Cauchy decrease condition for their respective trust-region subproblems and the mechanism for accepting them combines non-monotone decrease conditions on the constraint violation and/or the objective function. Preliminary numerical results show considerable promise.}} @article{UlbrUlbrHein99, author = {M. Ulbrich and S. Ulbrich and M. Heinkenschloss}, title = {Global convergence of trust-region interior-point algorithms for infinite dimensional nonconvex minimization subject to pointwise bounds}, journal = SICON, volume = 37, number = 3, pages = {731--764}, year = 1999, abstract = {A class of interior-point trust-region algorithms for infinite-dimensional nonlinear optimization subject to pointwise bounds in $L^p$-Banach spaces, $2 \leq p \leq \infty$, is formulated and analyzed. The problem formulation is motivated by optimal control problems, with $L^p$-controled, and pointwise control constraints. The interior-point trust-region algorithms are generalizations of those recently introduced by \citebb{ColeLi96b} for finite dimensional problems. Many of the generalizations derived in this paper are also important in the finite dimensional context. They lead to a better understanding of the method and to considerable improvements in their performance. All first- and second-order global convergence results known for trust-region methods in the finite-dimensional setting are extended to the infinite-dimensional framework of this paper.}, summary = {Interior-point trust-region algorithms for infinite-dimensional nonlinear optimization subject to pointwise bounds in $L^p$-Banach spaces, $2 \leq p \leq \infty$, are analyzed. The problem formulation is motivated by optimal control problems, with $L^p$-controled, and pointwise control constraints. The interior-point trust-region algorithms are generalizations of those introduced by \citebb{ColeLi96b} for finite-dimensional problems. 
Many of the generalizations lead to a better understanding of the methods and to considerable improvements in their performance. All first- and second-order global convergence results known in the finite-dimensional setting are extended to the infinite-dimensional framework.}} @article{UlbrUlbrVice00, author = {M. Ulbrich and S. Ulbrich and L. Vicente}, title = {A globally convergent primal-dual interior point filter method for nonconvex nonlinear programming}, institution = COIMBRA, address = COIMBRA-ADDRESS, number = {TR00-11}, year = 2000, abstract = {In this paper, the filter technique of \citebb{FletLeyf97} is used to globalize the primal-dual interior-point algorithm for nonlinear programming, avoiding the use of merit functions and the updating of penalty parameters. The new algorithm decomposes the primal-dual step obtained from the perturbed first-order necessary conditions into a normal and a tangential step, whose sizes are controlled by a trust-region parameter. Each entry in the filter is a pair of coordinates: one resulting from feasibility and centrality, and associated with the normal step; the other resulting from optimality (complementarity and duality) and related with the tangential step. Global convergence to first-order critical points is proved for the new primal-dual interior-point filter algorithm.}, summary = {The filter technique of \citebb{FletLeyf97} is used to globalize the primal-dual interior-point algorithm for nonlinear programming. The new algorithm decomposes the primal-dual step obtained from the perturbed first-order necessary conditions into a normal and a tangential step, whose sizes are controlled by a trust-region parameter. Each entry in the filter is a pair of coordinates: one resulting from feasibility and centrality, and associated with the normal step; the other resulting from optimality (complementarity and duality) and related with the tangential step. 
Global convergence to first-order critical points is proved for the new primal-dual interior-point filter algorithm.}} @techreport{UrbaTitsLawr98, author = {T. Urban and A. L. Tits and C. L. Lawrence}, title = {A primal-dual interior-point method for nonconvex optimization with multiple logarithmic barrier parameters and with strong convergence properties}, institution = {Electrical Engineering and the Institute for Systems Research, University of Maryland}, address = {College Park, USA}, number = {TR 98-27}, year = 1998, abstract = {It is observed that an algorithm proposed in the 1980s for the solution of nonconvex constrained optimization problems is in fact a primal-dual logarithmic barrier interior-point method closely related to methods under current investigation in the research community. Its main distinguishing features are judicious selection and update of the multiple barrier parameters (one per constraint), use of the objective function as merit function, and a careful bending of the search direction. As a pay-off, global convergence and fast local convergence ensue. The purpose of the present note is to describe the algorithm in the interior-point framework and language and to provide a preliminary numerical evaluation. The latter shows that the method compares well with algorithms recently proposed by other research groups.}, summary = {An algorithm proposed in 1988 by Panier, Tits and Herskovits for the solution of non-convex constrained optimization problems is shown to be a primal-dual logarithmic barrier interior-point method. Its distinguishing features are multiple barrier parameters, the use of the objective function as a merit function, and the bending of the search direction. Global convergence and fast local convergence ensue. The algorithm is described in the interior-point framework and preliminary numerical results are discussed.}} %%% V %%% @article{VandBoyd96, author = {L. Vandenberghe and S. 
Boyd}, title = {Semidefinite programming}, journal = SIREV, volume = 38, number = 1, pages = {49--95}, year = 1996} @article{VandWhin69, author = {Van de Panne, C. and A. Whinston}, title = {The symmetric formulation of the simplex method for quadratic programming}, journal = {Econometrica}, volume = 37, number = {}, pages = {507--527}, year = 1969} @inproceedings{Vand85, author = {J. S. Vandergraft}, title = {Efficient optimization methods for maximum likelihood parameter estimation}, booktitle = {Proceedings of the 24th IEEE Conference on Decision and Control}, publisher = {IEEE}, address = {New York, NY, USA}, volume = 3, pages = {1906--1909}, year = {1985}, abstract = {Recent research in numerical optimization has led to development of efficient algorithms based on update methods and model trust region techniques. The update methods are a class of iterative schemes that avoid expensive evaluations of (approximate) Hessians, yet retain the rapid convergence properties of Newton-like methods that require second-derivative (Hessian) information. Model trust region techniques avoid the costly step-length calculations that are required by standard iterative methods based on approximate Newton methods. The author describes the most successful of the update methods and shows how it, or more conventional methods such as scoring, can be combined with a model trust region technique to produce numerical algorithms that are ideally suited to maximum-likelihood (ML) parameter estimation. Specific properties of these algorithms include a fast (superlinear) rate of convergence together with the ability to handle parameter constraints easily and efficiently.}, summary = {It is shown how the most successful of the quasi-Newton methods and more conventional methods such as scoring can be combined with a trust-region technique to produce numerical algorithms that are ideally suited to maximum-likelihood parameter estimation. 
Specific properties of these algorithms include a superlinear rate of convergence and the ability to handle parameter constraints easily and efficiently.}} @book{VanHVand91, author = {Van Huffel, S. and J. Vandewalle}, title = {The Total Least-Squares Problem: Computational Aspects and Analysis}, publisher = SIAM, address = SIAM-ADDRESS, number = 9, series = {Frontiers in Applied Nathematics}, year = 1991} @techreport{Vand94, author = {R. J. Vanderbei}, title = {{LOQO}: an interior point code for quadratic programming}, institution = {Program in Statistics and Operations,Research}, address = {Princeton University, New Jersey, USA}, type = {Technical Report}, number = {SOR 94-15}, year = 1994} @techreport{VandShan97, author = {R. J. Vanderbei and D. F. Shanno}, title = {An interior point algorithm for nonconvex nonlinear programming}, institution = {Program in Statistics and Operations Research}, address = {Princeton University, New Jersey, USA}, type = {Technical Report}, number = {SOR 97-21}, year = 1997} @article{Vard85, author = {A. Vardi}, title = {A trust region algorithm for equality constrained minimization: convergence properties and implementation}, journal = SINUM, volume = 22, number = 3, pages = {575--591}, year = 1985, abstract = {In unconstrained minimization, trust region algorithms use directions that are a combination of the quasi-Newton direction and the steepest descent direction, depending on the fit between the quadratic approximation of the function and the function itself. Algorithms for nonlinear constrained optimization problems usually determine a quasi-Newton direction and use a line search technique to determine the step. Since trust region strategies have proved to be successful in unconstrained minimization, we develop a new trust region strategy for equality constrained minimization. This algorithm is analyzed and global as well as local superlinear convergence theorems are proved for various versions. 
We demonstrate how to implement this algorithm in a numerically stable way. A computer program based on this algorithm has performed very satisfactorily on test problems; numerical results are provided.}, summary = {A trust-region strategy for equality constrained minimization is developed. This algorithm is analyzed and global as well as local superlinear convergence theorems are proved. It is demonstrated how to implement this algorithm in a numerically stable way.}} @article{Vard92, author = {A. Vardi}, title = {New Minimax Algorithms}, journal = JOTA, volume = 75, number = 3, pages = {613--634}, year = 1992, abstract = {The purpose of this paper is to suggest a new, efficient algorithm for the nonlinear minimax problem. The problem is transformed into an equivalent inequality-constraint minimization problem. The algorithm has these features: an active-set strategy with three types of constraints; the use of slack variables to handle inequality constraints; and a trust-region strategy taking advantage of the structure of the problem. Following Tapia, this problem is solved by an active set strategy which uses three types of active constraints (called here nonactive, semiactive and active). Active constraints are treated as equality constraints and are assigned slack variables. This strategy helps to prevent zigzagging. Numerical results are provided.}, summary = {A algorithm for nonlinear min-max is described that reformulates the min-max function as a set of inequality constraints and uses an active-set and a trust-region method that exploits the structure of the problem. Numerical results are presented.}} @book{Vava92, author = {S. A. Vavasis}, title = {Nonlinear Optimization: Complexity Issues}, publisher = OUP, address = OUP-ADDRESS, series = {International Series of Monographs on Computer Science}, year = 1992} @article{Vava92b, author = {S. A. 
Vavasis}, title = {Approximation algorithms for indefinite quadratic programming}, journal = MP, volume = 57, number = 2, pages = {279--311}, year = 1992} @techreport{VavaZipp90, author = {S. A. Vavasis and R. Zippel}, title = {Proving polynomial-time for sphere-constrained quadratic programming}, institution = CS-CORNELL, address = CORNELL-ADDRESS, number = {TR 90-1182}, year = 1990, abstract = {Recently \citebb{Ye89} and Karmarkar have proposed similar algorithms for minimizing a nonconvex quadratic function on a sphere. These algorithms are based on trust-region work going back to \citebb{Leve44} and \citebb{Marq63}. Although both authors state that their algorithm is polynomial time, neither makes estimates necessary to prove that conclusion in a formal sense. In this report we derive estimates for the convergence of the algorithm. Our estimates are based on bounds for separation of roots of polynomials. These bounds prove that the underlying decision problem is polynomial time in the Turing machine sense.}, summary = {\citebb{Ye89} and Karmarkar have proposed similar algorithms for minimizing a non-convex quadratic function on a sphere. Estimates are derived for the convergence of the algorithm, based on bounds for separation of roots of polynomials. These bounds prove that the underlying decision problem is polynomial time in the Turing machine sense.}} @phdthesis{Vice96, author = {L. N. Vicente}, title = {Trust-region interior-point algorithms for a class of nonlinear programming problems}, school = CAAM, address = RICE-ADDRESS, year = 1995, note = {Report TR96-05}, abstract = {This thesis introduces and analyzes a family of trust-region interior-point (TRIP) reduced sequential quadratic programming (SQP) algorithms for the solution of minimization problems with nonlinear equality constraints and simple bounds on some of the variables. These nonlinear programming problems appear in applications in control, design, parameter identification, and inversion. 
In particular they often arise in the discretization of optimal control problems. The TRIP reduced SQP algorithms treat states and controls as independent variables. They are designed to take advantage of the structure of the problem. In particular they do not rely on matrix factorizations of the linearized constraints, but use solutions of the linearized state and adjoint equations. These algorithms result from a successful combination of a reduced SQP algorithm, a trust-region globalization, and a primal-dual affine scaling interior-point method. The TRIP reduced SQP algorithms have very strong theoretical properties. It is shown in this thesis that they converge globally to points satisfying first and second order necessary optimality conditions, and in a neighborhood of a local minimizer the rate of convergence is quadratic. Our algorithms and convergence results reduce to those of \citebb{ColeLi96b} for box-constrained optimization. An inexact analysis is presented to provide a practical way of controlling residuals of linear systems and directional derivatives. Complementing this theory, numerical experiments for two nonlinear optimal control problems are included showing the robustness and effectiveness of these algorithms. Another topic of this dissertation is a specialized analysis of these algorithms for equality-constrained optimization problems. The important feature of the way this family of algorithms specializes for these problems is that they do not require the computation of normal components for the step and an orthogonal basis for the null space of the Jacobian of the equality constraints. 
An extension of \citebb{MoreSore83}'s result for unconstrained optimization is presented, showing global convergence for these algorithms to a point satisfying the second-order necessary optimality conditions}, summary = {A family of trust-region interior-point (TRIP) reduced sequential quadratic programming (SQP) algorithms, for the solution of minimization problems with nonlinear equality constraints and simple bounds, is introduced and analyzed. These problems appear in control, design, parameter identification, and inversion. In particular they often arise in the discretization of optimal control problems. The TRIP reduced SQP algorithms treat states and controls as independent variables: they do not rely on matrix factorizations of the linearized constraints, but use solutions of the linearized state and adjoint equations. These algorithms result from a combination of a reduced SQP algorithm, a trust-region globalization, and a primal-dual affine-scaling interior-point method. They converge globally and quadratically to points satisfying first and second order necessary optimality conditions. The algorithms and convergence results reduce to those of \citebb{ColeLi96b} for box-constrained optimization. An inexact analysis is presented to provide a practical way of controlling residuals of linear systems and directional derivatives. Numerical experiments for two nonlinear optimal control problems illustrate the robustness and effectiveness of these algorithms. A specialized analysis for equality-constrained problems shows that these algorithms do not require the computation of normal components for the step and an orthogonal basis for the null space of the Jacobian of the equality constraints.}} @article{Vice96b, author = {L. N. 
Vicente}, title = {A comparison between line searches and trust regions for nonlinear optimization}, journal = {Investiga\c{c}\~{a}o Operacional}, volume = 16, number = 2, pages = {173--179}, year = 1996, abstract = {Line searches and trust regions are two techniques to globalize nonlinear optimization algorithms. We claim that the trust-region technique has built-in an appropriate regularization of ill-conditioned second-order approximation. The question we ask and then answer in this short paper supports this claim. We force the trust-region technique to act like a line search and we accomplish this by always choosing the step along the quasi-Newton direction. We obtain global convergence to a stationary point as long as the condition number of the second-order approximation is uniformly bounded, a condition that is required in line searches but not in trust regions.}, summary = {It is claimed that the trust-region technique has built-in an appropriate regularization of ill-conditioned second-order approximation which is lacking in linesearch methods. The trust-region technique is forced to act like a linesearch by always choosing the step along the quasi-Newton direction. Global convergence to a critical point is obtained so long as the condition number of the second-order approximation is uniformly bounded, a condition required in linesearches but not in trust regions.}} @article{Voge90, author = {C. R. Vogel}, title = {A constrained least squares regularization method for nonlinear ill-posed problems}, journal = SICON, volume = 28, number = 1, pages = {34--49}, year = 1990, abstract = {This paper deals with a method for solving ill-posed, nonlinear Hilbert space operator equations $F(x)=y$. Regularization is obtained by solving a constrained least squares regularization problem min $\|F(x)-y\|_2$ subject to $J(x) \leq \beta^2$. $\beta$ serves as a regularization parameter, and $J(x)$ is a quadratic penalty functional.
To robustly and efficiently solve this regularization problem, the author applies a trust region method. At each iteration, the quadratic penalty constraint is retained, a Gauss-Newton approximation to the objective functional is taken, and he adds a quadratic trust region constraint. The resulting quadratic subproblem is then reformulated as a nonlinear complementarity problem and solved using Newton's method. He then applies methods to find approximate solutions to a severely ill-posed nonlinear first kind integral equation arising in geophysics. The method of generalized cross validation is used to pick the regularization parameter when random error is present in the discrete data.}, summary = {A trust-region method is applied for regularizing ill-posed, nonlinear Hilbert space operator equations. The subproblem is reformulated as a nonlinear complementarity problem and solved using Newton's method. The method of generalized cross validation is used to pick the regularization parameter when random error is present in the discrete data. The method is applied to find approximate solutions to a severely ill-posed nonlinear first kind integral equation arising in geophysics.}} %%% W %%% @book{Wats80, author = {G. A. Watson}, title = {Approximation Theory and Numerical Methods}, publisher = WILEY, address = WILEY-ADDRESS, year = 1980} @article{WatsBillMorg87, author = {L. T. Watson and S. C. Billups and A. P. Morgan}, title = {{HOMPACK}: a suite of codes for globally convergent homotopy algorithms}, journal = TOMS, volume = 13, number = 3, pages = {281--310}, year = 1987} @article{WatsKamReas85, author = {L. T. Watson and M. P. Kamat and M. H. 
Reaser}, title = {A robust hybrid algorithm for computing multiple equilibrium solutions}, journal = {Engineering Computations}, volume = 2, pages = {30--34}, year = 1985, abstract = {A hybrid method is described that seeks to combine the efficiency of a quasi-Newton method capable of locating stable and unstable equilibrium configurations with a robust homotopy method that is capable of tracking equilibrium paths with turning points while exploiting symmetry and sparsity of the Jacobian matrices. Numerical results are presented for a shallow arch problem.}, summary = {A hybrid method is described that combines the efficiency of a quasi-Newton method capable of locating stable and unstable equilibrium configurations with a robust homotopy method that is capable of tracking equilibrium paths with turning points while exploiting symmetry and sparsity of the Jacobian matrices. The quasi-Newton method uses a double dogleg trust-region strategy. Numerical results are presented for a shallow-arch problem.}} @article{WeihCalzPana87, author = {C. Weihs and G. Calzolari and L. Panattoni}, title = {The behavior of trust-region methods in {FIML}-estimation}, journal = {Computing}, volume = 38, number = 2, pages = {89--100}, year = 1987, abstract = {This paper presents a Monte-Carlo study of the practical reliability of numerical algorithms for FIML- estimation in nonlinear econometric models. The performance of different techniques of Hessian approximation in trust-region algorithms is compared regarding their ``robustness'' against ``bad'' starting points and their ``global'' and ``local'' convergence speed, i.e. the gain in the objective function caused by individual iteration steps far off from and near to the optimum. Concerning robustness and global convergence speed, the crude GLS-type Hessian approximations performed best, efficiently exploiting the special structure of the likelihood function. 
But, concerning local speed, general purpose techniques were strongly superior. So, some appropriate mixture of these two types of approximations turned out to be the only techniques to be recommended.}, summary = {The reliability of numerical algorithms for FIML-estimation in nonlinear econometric models is explored by a Monte-Carlo study. Techniques of Hessian approximation in trust-region algorithms are compared regarding their robustness and their global and local convergence speed. Concerning robustness and global convergence speed, the crude GLS-type Hessian approximations performed best, efficiently exploiting the special structure of the likelihood function. Concerning local speed, general purpose techniques were strongly superior. Some appropriate mixture of these two types of approximations is recommended.}} @book{Wilk63, author = {J. H. Wilkinson}, title = {Rounding Errors in Algebraic Processes}, publisher = {Her Majesty's Stationery Office}, address = {London}, year = 1963} @book{Wilk65, author = {J. H. Wilkinson}, title = {The Algebraic Eigenvalue Problem}, publisher = OUP, address = OUP-ADDRESS, year = 1965} @inproceedings{Wilk68, author = {J. H. Wilkinson}, title = {A priori error analysis of algebraic processes}, booktitle = {Proceedings of the International Congress of Mathematicians}, editor = {I. G. Petrovsky}, publisher = {Mir Publishers}, address = {Moscow, USSR}, pages = {629--640}, year = 1968} @article{Will64, author = {J. W. J. Williams}, title = {Algorithm 232, {H}eapsort}, journal = {Communications of the ACM}, volume = 7, pages = {347--348}, year = 1964} @techreport{Will90, author = {K. A. Williamson}, title = {A Robust Trust Region Algorithm for Nonlinear Programming}, institution = CAAM, address = RICE-ADDRESS, number = {TR90-22}, year = 1990, abstract = {This work develops and tests a trust region algorithm for the nonlinear equality constrained optimization problem. 
Our goal is to develop a robust algorithm that can handle lack of second-order sufficiency away from the solution in a natural way. \citebb{CeliDennTapi85} give a trust region algorithm for this problem, but in certain situations their trust region subproblem is too difficult to solve. The algorithm given here is based on the restriction of the trust region subproblem given by \citebb{CeliDennTapi85} to a relevant two dimensional subspace. This restriction greatly facilitates the solution of the subproblem. The trust region subproblem that is the focus of this work requires the minimization of a possibly non-convex quadratic subject to two quadratic constraints in two dimensions. The solution of this problem requires the determination of all the global solutions, and the non-global solution, if it exists, to the standard unconstrained trust region subproblem. Algorithms for approximating a single global solution to the unconstrained trust region subproblem have been well-established. Analytical expressions for all of the solutions will be derived for a number of special cases, and necessary and sufficient conditions are given for the existence of a non-global solution for the general case of the two-dimensional unconstrained trust region subproblem. Finally, numerical results are presented for a preliminary implementation of the nonlinear programming algorithm, and these results verify that it is indeed robust.}, summary = {A variant of the trust-region algorithm by \citebb{CeliDennTapi85} for general nonlinear programming is developed that uses a two-dimensional subproblem. This subproblem consists of globally minimizing a possibly non-convex quadratic subject to two quadratic constraints in two dimensions. A detailed study of this subproblem is supplied for a number of special cases. Preliminary numerical experiments illustrate the robustness of the resulting algorithm.}} @phdthesis{Wils63, author = {R. B. 
Wilson}, title = {A simplicial algorithm for concave programming}, school = {Harvard University}, address = {Massachusetts, USA}, year = 1963} @phdthesis{Winf69, author = {D. Winfield}, title = {Function and functional optimization by interpolation in data tables}, school = {Harvard University}, address = {Cambridge, USA}, year = 1969} @article{Winf73, author = {D. Winfield}, title = {Function Minimization by Interpolation in a Data Table}, journal = JIMA, volume = 12, pages = {339--347}, year = 1973, abstract = {A method is described for unconstrained function minimization using function values and no derivatives. A quadratic model of the function is formed by interpolation to points in a table of function values. The quadratic model (not necessarily positive definite) is minimized over a constraining region of validity to locate the next trial point. The points of interpolation are chosen from a data table containing function values at an initial grid and at subsequent trial points. The method is efficient in its use of function evaluations, but expensive in computation required to choose new trial points.}, summary = {A method is described for unconstrained function minimization using function values and no derivatives. A quadratic model of the function is formed by interpolation to points in a table of function values. The quadratic model (not necessarily positive definite) is minimized over a constraining region of validity to locate the next trial point. The points of interpolation are chosen from a data table containing function values at an initial grid and at subsequent trial points. The method is efficient in its use of function evaluations, but expensive in computation required to choose new trial points.}} @book{Wlok87, author = {J. Wloka}, title = {Partial Differential Equations}, publisher = CUP, address = CUP-ADDRESS, year = 1987} @article{Wolf59, author = {P. 
Wolfe}, title = {The {S}implex method for quadratic programming}, journal = {Econometrica}, volume = 27, pages = {382--398}, year = 1959} @article{Wome82, author = {R. S. Womersley}, title = {Optimality conditions for piecewise smooth functions}, journal = MPS, volume = 17, pages = {13--27}, year = 1982} @book{WonnWonn90, author = {T. H. Wonnacott and R. J. Wonnacott}, title = {Introductory Statistics}, publisher = WILEY, address = WILEY-ADDRESS, edition = {fifth}, year = 1990} @phdthesis{Wrig76, author = {M. H. Wright}, title = {Numerical methods for nonlinearly constrained optimization}, school = STANFORD, address = STANFORD-ADDRESS, year = 1976} @article{Wrig92, author = {M. H. Wright}, title = {Interior methods for constrained optimization}, journal = {Acta Numerica}, volume = 1, pages = {341--407}, year = 1992} @article{Wrig95, author = {M. H. Wright}, title = {Why a pure primal {N}ewton barrier step may be infeasible}, journal = SIOPT, volume = 5, number = 1, pages = {1--12}, year = 1995} @inproceedings{Wrig98b, author = {M. H. Wright}, title = {The interior-point revolution in constrained optimization}, crossref = {DeLeMurlPardTora98}, pages = {359--381}} @article{Wrig99, author = {M. H. Wright}, title = {Ill-conditioning and computational error in interior methods for nonlinear programming}, journal = SIOPT, volume = 9, number = 1, pages = {84--111}, year = 1999} @article{WrigHolt85, author = {S. J. Wright and J. N. Holt}, title = {Algorithms for nonlinear least squares with linear inequality constraints}, journal = SISSC, volume = 6, number = 4, pages = {1033--1048}, year = 1985, abstract = {Two algorithms for solving nonlinear least squares problems with general linear inequality constraints are described. At each step, the problem is reduced to an unconstrained linear least squares problem in the subspace defined by the active constraints, which is solved using the Levenberg-Marquardt method.
The desirability of leaving an active constraint is evaluated at each step, using a different technique for each of the two algorithms. Each step is constrained to be within a circular region of trust about the current approximate minimiser, whose radius is updated according to the quality of the step after each iteration. Comparisons of the relative performance of the two algorithms on small problems and on a larger exponential data-fitting problem are presented.}, summary = {Two algorithms for solving nonlinear least squares problems with general linear inequality constraints are described. At each step, the problem is reduced to an unconstrained linear least squares problem in the subspace defined by the active constraints, which is solved using the Levenberg-Morrison-Marquardt method. The desirability of leaving an active constraint is evaluated at each step, using a different technique for each of the two algorithms. Comparisons of the relative performance of the two algorithms on small problems and on a larger exponential data-fitting problem are presented.}} @article{Wrig87, author = {S. J. Wright}, title = {Local properties of inexact methods for minimizing nonsmooth composite functions}, journal = MP, volume = 37, number = 2, pages = {232--252}, year = 1987} @article{Wrig89, author = {S. J. Wright}, title = {Convergence of {SQP}-like methods for constrained optimization}, journal = SICON, volume = 27, number = 1, pages = {13--26}, year = 1989} @article{Wrig89b, author = {S. J. Wright}, title = {An inexact algorithm for composite nondifferentiable optimization}, journal = MP, volume = 44, number = 2, pages = {221--234}, year = 1989, abstract = {We describe an inexact version of \citebb{Flet87}'s $QL$ algorithm with second-order corrections for minimizing composite nonsmooth functions. The method is shown to retain the global and local convergence properties of the original version, if the parameters are chosen appropriately. 
It is shown how the inexact method can be implemented for the case in which the function to be minimized is an exact penalty function arising from the standard nonlinear programming problem. The method can also be applied to the problems of nonlinear $\ell_1$- and $\ell_{\infty}$- approximation.}, summary = {An inexact version of \citebb{Flet87}'s $QL$ algorithm with second-order corrections for minimizing composite non-smooth functions, which retains the global and local convergence properties of the original version, is given. It is shown how the inexact method can be implemented for exact penalty functions arising from nonlinear programming problems, as well as problems of nonlinear $\ell_1$- and $\ell_{\infty}$- approximation.}} @article{Wrig90, author = {S. J. Wright}, title = {Convergence of an inexact algorithm for composite nonsmooth optimization}, journal = IMAJNA, volume = 10, number = 3, pages = {299--321}, year = 1990, abstract = {This paper describes an inexact version of \citebb{Flet82}'s second-order correction algorithm for minimizing composite nondifferentiable functions, and adds a test which allows global convergence to be demonstrated without the assumption that a global minimum of the model function is found at each iteration. Implementable criteria for accepting inexact solutions of the subproblem, while retaining local convergence properties, are also given.}, summary = {An inexact version of \citebb{Flet82}'s second-order correction algorithm for minimizing composite non-differentiable functions is described. A test is suggested, which allows global convergence to be proved without the assumption that a global minimum of the model function is found at each iteration. Implementable criteria for accepting inexact solutions of the subproblem, while retaining local convergence properties, are also given.}} @book{Wrig97, author = {S. J. 
Wright}, title = {Primal-Dual Interior-Point Methods}, publisher = SIAM, address = SIAM-ADDRESS, year = 1997} @techreport{Wrig98, author = {S. J. Wright}, title = {Effects of finite-precision arithmetic on interior-point methods for nonlinear programming}, institution = ANL, address = ANL-ADDRESS, number = {MCS-P705-0198}, year = 1998} @article{Wrig99b, author = {S. J. Wright}, title = {Superlinear convergence of a stabilized {SQP} method to a degenerate solution}, journal = COAP, volume = 11, number = 3, pages = {253--275}, year = 1999} @misc{WrigOrba99, author = {S. J. Wright and D. Orban}, title = {Properties of the Log-Barrier Function for Degenerate Nonlinear Programs}, howpublished = {Presentation at the First Workshop on Nonlinear Optimization ``Interior-Point and Filter Methods'', Coimbra, Portugal}, year = 1999} @article{Wome85, author = {R. S. Womersley}, title = {Local properties of algorithms for minimizing nonsmooth composite functions}, journal = MP, volume = 32, number = 1, pages = {69--89}, year = 1985} %%% X %%% @article{XiaoZhou92, author = {Y. Xiao and F. Zhou}, title = {Nonmonotone Trust Region Methods with Curvilinear Path in Unconstrained Optimization}, journal = {Computing}, volume = 48, number = {3--4}, pages = {303--317}, year = 1992, abstract = {A general nonmonotone trust region method with curvilinear path for unconstrained optimization problem is presented. Although this method allows the sequence of objective function values to be nonmonotone, convergence properties similar to those of the usual trust region methods with curvilinear path are proved under certain conditions. Some numerical results are reported which show the superiority of the nonmonotone trust region method with respect to the numbers of gradient evaluations and function evaluations.}, summary = {A non-monotone trust-region algorithm is proposed for unconstrained optimization, whose convergence properties are similar to those of the monotone versions. 
Numerical experiments show the potential benefits of the approach.}} @techreport{XiaoChu95, author = {Y. Xiao and E. K. W. Chu}, title = {Nonmonotone Trust Region Methods}, institution = {Monash University}, address = {Clayton, Australia}, number = {95/17}, year = 1995, abstract = {In this report, we study non-monotone techniques in trust region methods for unconstrained minimization problems. Two non-monotone trust region methods are developed and assessed based on extensive numerical experiments with {\sf CUTE}. Strategies for automatic selection and adjustment of parameters are discussed, which enable switching between non-monotone and monotone algorithms at different stages of calculation according to the intermediate information obtained. Numerical results show that these strategies improve the efficiency and act as safeguards against the possible inferior behavior of non-monotone algorithms. In addition, the global convergence of the algorithms are proved, and further modifications and possible improvements are discussed.}, summary = {Two non-monotone trust-region methods are developed and assessed based on extensive numerical experiments with {\sf CUTE}. Strategies for automatic adjustment of parameters are discussed, which enable switching between non-monotone and monotone algorithms at different stages of calculation according to the intermediate information obtained. Numerical results show that these strategies improve the efficiency of non-monotone algorithms. Global convergence of the algorithms is proved, and further modifications discussed.}} @phdthesis{Xiao96, author = {Y. Xiao}, title = {Non-monotone algorithms in optimization and their applications}, school = {Monash University}, address = {Clayton, Australia}, year = 1996} @article{XuZhan95, author = {C. Xu and J. 
Zhang}, title = {An active set method for general $\ell_1$ linear problem subject to box constraints}, journal = {Optimization}, volume = 34, number = 1, pages = {67--80}, year = 1995, abstract = {An active set algorithm is presented for the solution of general $\ell_1$ linear problem with simple bound constraints on variables. These problems appear as subproblems when trust region type linear approximation methods are used to minimize an unconstrained nonsmooth composite function. The method finds an optimal solution among dead points of the problem and eventually terminates at an optimal solution in a finite number of steps.}, summary = {An active set algorithm is presented for the solution of general $\ell_1$ linear problem with simple bound constraints on its variables.}} @article{XuZhan99, author = {C. Xu and J. Zhang}, title = {A Scaled Optimal Path Trust Region Algorithm}, journal = JOTA, volume = 102, number = 1, year = 1999, abstract = {Optimal path trust algorithm intends to determine a trajectory along which the solution to a trust region subproblem at a given point with any trust region radius is located. Although its idea is attractive, the existing optimal path method seems impractical because it requires, in addition to a factorization, the calculation of full eigensystem of the working matrix. We propose a scaled optimal path trust region algorithm which finds a solution of the subproblem in full dimensional space by just one \citebb{BuncParl71} factorization for working matrix at each iteration and by using the resulting unit lower triangular factor to scale the variables in the problem. A scaled optimal path can then be formed easily. The algorithm has good convergence properties under commonly used conditions. 
Computational results are presented to show that this algorithm is robust and effective.}, summary = {A scaled optimal path trust-region algorithm is proposed, which finds a solution of the subproblem in full-dimensional space by just one \citebb{BuncParl71} factorization and by using the resulting unit lower triangular factor to scale the variables. The resulting algorithm has good convergence properties. Computational results show that this algorithm is robust and effective.}} %%% Y %%% @article{YabeYama97, author = {H. Yabe and H. Yamashita}, title = {Q-superlinear Convergence of Primal-Dual Interior Point Quasi-{N}ewton Methods for Constrained Optimization}, journal = {Journal of the Operations Research Society of Japan}, volume = 40, number = 3, pages = {415--436}, year = 1997} @article{YamaFukuIbar89, author = {E. Yamakawa and M. Fukushima and T. Ibaraki}, title = {An efficient trust region algorithm for minimizing nondifferentiable composite functions}, journal = SISSC, volume = 10, number = 3, pages = {562--580}, year = 1989, abstract = {This paper presents a trust region method for solving the following problem. Minimize $\phi(x)=f(x)+h(c(x))$ over $x \in \Re^n$, where $f$ and $c$ are smooth functions and $h$ is a polyhedral convex function. Problems of this form include various important applications such as min-max optimization, Chebychev approximation and minimization of exact penalty functions in nonlinear programming. The algorithm is an adaptation of a recently proposed successive quadratic programming method for nonlinear programming and makes use of second-order approximations to both $f$ and $c$ in order to avoid the Maratos effect. It is proved under appropriate assumptions that the algorithm is globally and quadratically convergent to a solution of the problem.
Some numerical results exhibiting the effectiveness of the algorithm are also reported.}, summary = {A trust-region method for solving the problem of minimizing $\phi(x)=f(x)+h(c(x))$ over $x \in \Re^n$, where $f$ and $c$ are smooth functions and $h$ is a polyhedral convex function. The algorithm is an adaptation of the SQP method by \citebb{Flet82b} and makes use of second-order approximations to both $f$ and $c$ in order to avoid the Maratos effect. Global and quadratic convergence is proved. Numerical results illustrate the effectiveness of the algorithm.}} @article{Yama82, author = {H. Yamashita}, title = {A globally convergent constrained quasi-{N}ewton method with an augmented {L}agrangian type penalty-function}, journal = MP, volume = 23, number = 1, pages = {75--86}, year = 1982} @techreport{YamaYabe96, author = {H. Yamashita and H. Yabe}, title = {Nonmonotone {SQP} methods with global and superlinear convergence properties}, institution = {Mathematical Systems, Inc.}, address = {Sinjuku-ku, Tokyo, Japan}, year = 1996} @article{YamaYabe96b, author = {H. Yamashita and H. Yabe}, title = {Superlinear and quadratic convergence of some primal-dual interior point methods for constrained optimization}, journal = MPA, volume = 75, number = 3, pages = {377--397}, year = 1996} @techreport{YamaYabeTana97, author = {H. Yamashita and H. Yabe and T. Tanabe}, title = {A globally and superlinearly convergent primal-dual interior point trust region method for large scale constrained optimization}, institution = {Mathematical Systems, Inc.}, address = {Sinjuku-ku, Tokyo, Japan}, year = 1997, abstract = {This paper proposes a primal-dual interior point method for solving large scale nonlinearly constrained optimization problems. To solve large scale problems, we use a trust region method that uses second derivatives of functions for minimizing the barrier-penalty function instead of the usual line search strategies.
By carefully controlling parameters in the algorithm, superlinear convergence of the iteration is also proved. A nonmonotone strategy is adopted to avoid Maratos effect as in the nonmonotone SQP method of \citebb{YamaYabe96}. The method is implemented and tested with a variety of problems given by Hock and Schittkowski's book and by {\sf CUTE}. The results of our numerical experiment show that the given method is efficient for solving large scale nonlinearly constrained optimization problems.}, summary = {A primal-dual interior-point method is proposed, where the barrier function is minimized by a trust-region method that uses second derivatives. Superlinear convergence is proved. A non-monotone strategy is adopted to avoid the Maratos effect as in \citebb{YamaYabe96}. Results are reported for a variety of problems given by Hock and Schittkowski and by {\sf CUTE}. }} @article{YamaFuku95, author = {N. Yamashita and M. Fukushima}, title = {On Stationary Points of the Implicit {L}agrangian for Nonlinear Complementarity Problems}, journal = JOTA, volume = 84, number = 3, pages = {653--663}, year = 1995} @techreport{YangLiZhou98, author = {Y. Yang and D. Li and S. Zhou}, title = {A trust region method for a semismooth reformulation to variational inequality problems}, institution = {Department of Applied Mathematics, Hunan University}, address = {Changsha, China}, number = {May, 15}, year = 1998, abstract = {We consider the variational inequality problem (denoted by $VI(X,F)$): find an $x^* \in X$ such that $F(x^*)^T(x-x^*)\geq 0, \forall x \in X$, where $F:\Re^n \rightarrow \Re^n$ is continuously differentiable and the set $X$ has the following form $X:= \{x \in \Re^n \mid g(x) \geq 0, h(x) = 0 \}$ where $g: \Re^n \rightarrow \Re^m$ and $h: \Re^n \rightarrow \Re^l$ are twice continuously differentiable.
The KKT system of the above $VI(X,F)$ is \[F(x)-g'(x)^Ty + h'(x)^Tz = 0,\] \[ h(x) = 0,\ms g(x) \geq 0,\ms y \geq 0,\ms y^Tg(x) = 0.\] In this paper, we present a well-defined trust region method for solving this system based on its semismooth reformulation. The proposed method solves subproblems inexactly. We show that the proposed method converges globally without monotone assumption on the function $F$. Moreover, the rate of convergence is Q-superlinear/Q-quadratic even if strict complementarity does not hold at the solution.}, summary = {A trust region method is proposed for solving general nonlinearly constrained variational inequality problems. It is based on the semi-smooth reformulation of the first-order optimality conditions, allows for inexact solution of subproblems and is globally convergent even in the non-monotone case. Its rate of convergence is Q-superlinear or Q-quadratic even without strict complementarity.}} @article{YangToll91, author = {E. K. Yang and J. W. Tolle}, title = {A class of methods for solving large, convex quadratic programs subject to box constraints}, journal = MP, volume = 51, number = 2, pages = {223--228}, year = 1991} @article{YangZhanYou96, author = {B. Yang and K. Zhang and Z. You}, title = {A successive quadratic programming method that uses new corrections for search directions}, journal = JCM, volume = 71, number = 1, pages = {15--31}, year = 1996} @inproceedings{Ye89, author = {Y. Ye}, title = {An extension of {K}armarkar's algorithm and the trust region method for quadratic programming}, booktitle = {Progress in Mathematical Programming}, editor = {N. Megiddo}, publisher = SPRINGER, address = SPRINGER-ADDRESS, pages = {49--63}, year = 1989, abstract = {An extension of \citebbs{Karm84} algorithm and the trust region method is developed for solving quadratic programming problems. This extension is based on the affine scaling technique, followed by optimization over a trust ellipsoidal region.
It creates a sequence of interior feasible points that converge to the optimal feasible solution. The initial computational results reported here suggest the potential usefulness of this algorithm in practice.}, summary = {An extension of \citebbs{Karm84} algorithm and the trust-region method is developed for solving quadratic programming problems. It is based on the affine scaling technique, followed by optimization over a trust ellipsoidal region, and creates a sequence of interior feasible points that converge to the optimal solution. Computational results suggest its potential usefulness.}} @article{YeTse89, author = {Y. Ye and E. Tse}, title = {An extension of {K}armarkar's projective algorithm for convex quadratic programming}, journal = MP, volume = 44, number = 2, pages = {157--179}, year = 1989} @article{Ye92, author = {Y. Ye}, title = {On an affine scaling algorithm for nonconvex quadratic programming}, journal = MP, volume = 56, pages = {285--300}, year = 1992, abstract = {We investigate the use of interior algorithms, especially the affine-scaling algorithm, to solve nonconvex---indefinite or negative definite---quadratic programming (QP) problems. Although the nonconvex QP with a polytope constraint is a "hard" problem, we show that the problem with an ellipsoidal constraint is "easy". When the "hard" QP is solved by successively solving the "easy" QP, the sequence of points monotonically converge to a feasible point satisfying both the first and the second order optimality conditions.}, summary = {The use of interior algorithms, especially the affine-scaling algorithm, to solve non-convex---indefinite or negative definite---quadratic programming (QP) problems is investigated. Although the non-convex QP with a polytope constraint is a ``hard'' problem, it is shown that the problem with an ellipsoidal constraint is ``easy''. 
When the ``hard'' QP is solved by successively solving the "easy" QP, the sequence of points monotonically converge to a feasible point satisfying both the first and the second order optimality conditions.}} @techreport{Ye97, author = {Y. Ye}, title = {Approximating quadratic programming with bound constraints}, institution = {Department of Management Sciences}, address = {The University of Iowa, USA}, type = {Working Paper}, year = 1997, abstract = {We consider the problem of approximating the global maximum of a quadratic program with $n$ variables subject to bound constraints. Based on the results of Goemans and Williamson (1996) and Nesterov (1997), we show that a $4/7$ approximate solution can be obtained in polynomial time}, summary = {The problem of approximating the global maximum of a quadratic program with $n$ variables subject to bound constraints is considered. Based on the results of Goemans and Williamson (1996) and Nesterov (1997), it is shown that a $4/7$ approximate solution can be obtained in polynomial time.}} @misc{YinHan98, author = {H. Yin and J. Han}, title = {A new interior-point trust-region algorithm for nonlinear minimization problems with simple bound constraints}, howpublished = {Presentation at the International Conference on Nonlinear Programming and Variational Inequalities, Hong Kong}, year = 1998, abstract = {In the paper, a new interior-point trust-region algorithm is given for nonlinear minimization problems with simple bound constraints. The objective function of the quadratic subproblem model is obtained based on the Newton step for the first-order KKT condition of the problem, and the constraints are a sphere trust-region constraints and a system of bound constraints. We accept or reject the trial steps only depending on the objective function and its approximation. The global convergence to a KKT point is obtained for our algorithm under some mild conditions. 
The second-order convergence is also hold under some assumptions}, summary = {In the method of \citebb{ColeLi96b}, feasibility of the trial step is obtained by backtracking from a possibly infeasible step into the interior of the feasible region. A variant of this method is discussed, in which the initial trial step is not allowed to be infeasible, therefore avoiding the need of backtracking.}} @book{Yosi70, author = {K. Yosida}, title = {Functional Analysis}, publisher = SPRINGER, address = SPRINGER-ADDRESS, year = 1970} @techreport{Yuan83, author = {Y. Yuan}, title = {Global convergence of trust region algorithms for nonsmooth optimization}, institution = DAMTP, address = DAMTP-ADDRESS, number = {DAMTP/NA13}, year = 1983, summary = {See \citebb{Yuan85a}.}} @article{Yuan84, author = {Y. Yuan}, title = {An example of only linear convergence of trust region algorithms for nonsmooth optimization}, journal = IMAJNA, volume = 4, number = 3, pages = {327--335}, year = 1984, abstract = {Most superlinear convergence results about trust-region algorithms for non-smooth optimization are dependent on the inactivity of trust region restrictions. An example is constructed to show that it is possible that at every iteration the trust region bound is active and the rate of convergence is only linear, though strict complementarity and second order sufficiency conditions are satisfied.}, summary = {An example is constructed where, at every iteration of a trust-region method for non-smooth optimization the trust-region bound is active and the rate of convergence is only linear, though strict complementarity and second order sufficiency conditions hold.}} @article{Yuan85a, author = {Y. Yuan}, title = {Conditions for convergence of trust region algorithms for nonsmooth optimization}, journal = MP, volume = 31, number = 2, pages = {220--228}, year = 1985, abstract = {This paper discusses some properties of trust-region algorithms for nonsmooth optimization. 
The problem is expressed as the minimization of a function $h(f(x))$ where $h(\cdot)$ is convex and where $f$ is a continuously differentiable mapping from $\Re^n$ to $\Re^n$. Bounds for the second order derivative approximation matrices are discussed. It is shown that the results of \citebb{Powe75} and \citebb{Powe84} hold for nonsmooth optimization.}, summary = {Properties of trust-region algorithms for non-smooth optimization are discussed. The problem is expressed as the minimization of a function $h(f(x))$ where $h(\cdot)$ is convex and where $f$ is a continuously differentiable mapping from $\Re^n$ to $\Re^n$. It is shown that the results of \citebb{Powe75} and \citebb{Powe84} hold for non-smooth optimization.}} @article{Yuan85b, author = {Y. Yuan}, title = {On the superlinear convergence of a trust region algorithm for nonsmooth optimization}, journal = MP, volume = 31, number = 3, pages = {269--285}, year = 1985, abstract = {It is proved that the second-order correction trust-region algorithm of \citebb{Flet82} ensures superlinear convergence if some mild conditions are satisfied.}, summary = {It is proved that the second-order correction trust-region algorithm of \citebb{Flet82} ensures superlinear convergence if some mild conditions are satisfied.}} @article{Yuan90, author = {Y. Yuan}, title = {On a Subproblem of Trust Region Algorithms for Constrained Optimization}, journal = MP, volume = 47, number = 1, pages = {53--63}, year = 1990, abstract = {We study a subproblem that arises in some trust region algorithms for equality constrained optimization. It is the minimization of a general quadratic function with two special quadratic constraints. Properties of such subproblems are given. 
It is proved that the Hessian of the Lagrangian has at most one negative eigenvalue, and an example is presented to show that the Hessian may have a negative eigenvalue when one constraint is inactive at the solution.}, summary = {A subproblem that arises in some trust-region algorithms for equality constrained optimization is studied. It is the minimization of a general quadratic function with two special quadratic constraints. Properties of such subproblems are given. It is proved that the Hessian of the Lagrangian has at most one negative eigenvalue, and an example is presented to show that the Hessian may have a negative eigenvalue when one constraint is inactive at the solution.}} @article{Yuan91, author = {Y. Yuan}, title = {A dual algorithm for minimizing a quadratic function with two quadratic constraints}, journal = {Journal of Computational Mathematics}, volume = 9, number = 4, pages = {348--359}, year = 1991, abstract = {In this paper, we present a dual algorithm for minimizing a convex quadratic function with two quadratic constraints. Such a minimization problem is a subproblem that appears in some trust region algorithms for general nonlinear programming. Some theoretical properties of the dual problem are given. Global convergence of the algorithm is proved and a local superlinear result is presented. Numerical examples are also provided.}, summary = {A dual globally and superlinearly convergent algorithm is proposed for minimizing a convex quadratic objective subject to two quadratic constraints. Numerical examples are provided.}} @inproceedings{Yuan93, author = {Y. Yuan}, title = {A new trust-region algorithm for nonlinear optimization}, booktitle = {Proceedings of the First International Colloquium on Numerical Analysis}, editor = {D. Bainov and V. Covachev}, publisher = {VSP}, address = {Zeist, The Netherlands}, pages = {141--152}, year = 1993, abstract = {Trust region algorithms are a class of numerical algorithms for optimization. 
In this paper we present a new trust region algorithm for general nonlinear constrained optimization problems. The algorithm is based on the $\ell_{\infty}$ exact penalty function. Under very mild conditions, global convergence results for the algorithm are given.}, summary = {Trust-region algorithms are a class of numerical algorithms for optimization. A trust-region algorithm for general nonlinear constrained optimization problems is presented. The algorithm is based on the $\ell_{\infty}$ exact penalty function. Under very mild conditions, global convergence results for the algorithm are given.}} @inproceedings{Yuan94, author = {Y. Yuan}, title = {Trust region algorithms for nonlinear programming}, booktitle = {Contemporary Mathematics}, editor = {Z. C. Shi}, publisher = AMS, address = AMS-ADDRESS, volume = 163, pages = {205--225}, year = 1994, abstract = {Nonlinear programming, or nonlinear optimization, is to minimize or maximize a nonlinear function, possibly subject to finitely many algebraic equations and inequalities. Trust region algorithms are a class of numerical algorithms for optimization. In this paper we review some main results of trust region algorithms for nonlinear optimization.}, summary = {A review of the many results in the domain of trust-region methods for nonlinear optimization and the solution of nonlinear systems of algebraic equations is presented.}} @article{Yuan94b, author = {Y. Yuan}, title = {On the convergence of trust region algorithms}, journal = {Mathematica Numerica Sinica}, volume = 16, number = 3, pages = {333--346}, year = 1994, abstract = {Trust region algorithms for nonlinear optimization and their convergence properties are discussed. Convergence results and techniques for convergence analysis are studied. 
An $A ( \delta , \eta)$ descent trial step is defined, and is used to obtain a unified proof for global convergence of trust region algorithms.}, summary = {Trust region algorithms for nonlinear optimization and their convergence properties are discussed. An $A(\delta ,\eta)$ descent trial step is defined, and is used to obtain a unified proof for global convergence of such algorithms.}, note = {(in Chinese)}} @inproceedings{Yuan94c, author = {Y. Yuan}, title = {Trust region algorithms for constrained optimization}, booktitle = {Proceedings of Conference on Scientific and Engineering Computing for Young Chinese Scientists}, editor = {J.Z. Cui and Z.C. Shi and D.L. Wang}, publisher = {National Defence Industry Press}, address = {Beijing, China}, pages = {105--110}, year = 1994, abstract = {In this paper, we review the trust region algorithms for nonlinear optimization and the fundamental ideas of trust region algorithms are discussed. Model algorithms for unconstrained optimization, constrained optimization, and nonsmooth optimization are given. Main techniques for global convergence and local superlinear convergence are analyzed.}, summary = {A survey of trust-region methods for nonlinear optimization is given, with emphasis on global and locally superlinear convergence properties.}} @inproceedings{Yuan94d, author = {Y. Yuan}, title = {Nonlinear Programming: trust region algorithms}, editor = {S. T. Xiao and F. Wu}, booktitle = {Proceedings of Chinese SIAM annual meeting}, publisher = {Tsinghua University Press}, address= {Beijing, China}, pages = {83--97}, year = 1994, abstract = {We review the main techniques used in trust region algorithms for nonlinear constrained optimization}, summary = {A brief survey of trust-region methods for constrained nonlinear optimization is presented.}} @article{Yuan95, author = {Y. 
Yuan}, title = {On the convergence of a new trust region algorithm}, journal = NUMMATH, volume = 70, number = 4, pages = {515--539}, year = 1995, abstract = {In this paper we present a new trust region algorithm for general nonlinear constrained optimization problems. The algorithm is based on the $L_{\infty}$ exact penalty function. Under very mild conditions, global convergence results for the algorithm are given. Local convergence properties are also studied. It is shown that the penalty parameter generated by the algorithm will be eventually not less than the $l_1$ norm of the Lagrange multipliers at the accumulation point. It is proved that the method is equivalent to the sequential quadratic programming method for all large $k$, hence superlinearly convergent results of the SQP method can be applied. Numerical results are also reported.}, summary = {A trust-region algorithm is presented for general nonlinear constrained problems, based on the $L_{\infty}$ exact penalty function. Under very mild conditions, global and local convergence results for the algorithm are given. It is shown that the penalty parameter generated by the algorithm will be eventually not less than the $l_1$ norm of the Lagrange multipliers at the accumulation point. It is proved that the method is equivalent to the sequential quadratic programming method for all large $k$, hence superlinearly convergent results for the SQP method can be applied. Numerical results are reported.}} @article{Yuan96, author = {Y. Yuan}, title = {A short note on the {D}uff-{N}ocedal-{R}eid algorithm}, journal = {SEA Bull. Math.}, volume = 20, number = 3, pages = {137--144}, year = 1996, abstract = {In this short note, an example is given to show that the algorithm of \citebb{DuffNoceReid87} for nonlinear equations may converge to a non-optimal solution. 
It is also shown that a slight modification can ensure the global convergence of the algorithm.}, summary = {An example is given to show that the algorithm of \citebb{DuffNoceReid87} for nonlinear equations may converge to a non-optimal solution. It is also shown that a slight modification can ensure the global convergence of the algorithm.}} @misc{Yuan97, author = {Y. Yuan}, title = {Some properties of a trust region subproblem}, howpublished = {Presentation at the XVIth International Symposium on Mathematical Programming, Lausanne}, year = 1997, summary = {It is shown that the model reduction obtained by applying the Steihaug-Toint algorithm on a convex quadratic model in dimension two is at least half of that obtained by the exact minimizer of the model within the trust region.}} @article{Yuan98a, author = {Y. Yuan}, title = {Trust region algorithms for nonlinear equations}, journal = {Information}, volume = 1, pages = {7--20}, year = 1998, abstract = {In this paper, we consider the problem of solving nonlinear equations $F(x)=0$, where $F(x)$ from $\Re^n$ to $\Re^m$ is continuously differentiable. We study a class of general trust region algorithms for solving nonlinear equations by minimizing a given norm $\|F(x)\|$. The trust region algorithm for nonlinear equations can be viewed as an extension of the Levenberg-Marquardt algorithm for nonlinear least squares. Global convergence of trust region algorithms for nonlinear equations are studied and local convergence analyses are also given.}, summary = {The problem of solving nonlinear equations $F(x)=0$, where $F(x)$ from $\Re^n$ to $\Re^m$ is continuously differentiable, is considered. We study a class of general trust-region algorithms for solving nonlinear equations by minimizing a given norm $\|F(x)\|$. The trust-region algorithm for nonlinear equations can be viewed as an extension of the Levenberg-Morrison-Marquardt algorithm for nonlinear least-squares. 
Global convergence of trust-region algorithms for nonlinear equations are studied and local convergence analyses are also given.}} @inproceedings{Yuan98b, author = {Y. Yuan}, title = {An example of non-convergence of trust region algorithms}, crossref = {Yuan98}, pages = {205--218}, abstract = {It is well known that trust region algorithms have very nice convergence properties. Trust region algorithms can be classified into two kinds: one requires sufficient reduction in objective function value (merit function value, in the case of constrained optimization), the other only needs reduction in objective function value. In general, it can be shown that the algorithms that require sufficient reductions have strong convergence result, namely all accumulation points are stationary points. The algorithms that do not require sufficient reductions have the nice properties of accepting any better iterates, but the convergence result is weak, only one accumulation point is a stationary point. In this paper, we construct an example to show that it can happen that for a class of trust region algorithms that do not require sufficient reductions the whole sequence need not to converge. In our example, only one accumulation point is a stationary point while all other accumulation points are non-stationary points.}, summary = {An example is constructed which shows that it can happen that for a class of trust-region algorithms that do not require sufficient reductions the whole sequence need not to converge. In the example, only one accumulation point is a stationary point while all other accumulation points are non-stationary.}} @misc{Yuan98c, author = {Y. 
Yuan}, title = {Optimality conditions for the {C}elis-{D}ennis-{T}apia subproblems}, howpublished = {Presentation at the Optimization 98 Conference, Coimbra}, year = 1998, abstract = {We give necessary and sufficient optimality conditions which can be easily verified for local solutions of Celis-Dennis-Tapia subproblem, which is a subproblem in trust region algorithms for nonlinear constrained optimization. If the CDT subproblem has no global solution with the Hessian of Lagrangian positive semi-definite, the Hessian at any local solution has at least one negative eigenvalue. Some other characters of local solutions are also given. We also discuss the gap between necessary conditions and sufficient conditions.}, summary = {Easily verifiable necessary and sufficient optimality conditions are given for local solutions of the Celis-Dennis-Tapia (CDT) subproblem, which is a subproblem in trust-region algorithms for nonlinear constrained optimization. If the CDT subproblem has no global solution at which the Hessian of the Lagrangian is positive semi-definite, the Hessian at any local solution has at least one negative eigenvalue. Some other characteristics of local solutions are also given. The gap between necessary and sufficient conditions is also discussed.}} @inproceedings{Yuan98d, author = {Y. Yuan}, title = {Matrix computation problems in trust region algorithms for optimization}, booktitle = {Proceedings of the 5th CSIAM annual meeting}, editor = {Q. C. Zeng and T. Q. Li and Z. S. Xue and Q. S. Cheng}, publisher = {Tsinghua University Press}, address = {Beijing, China}, pages = {54--64}, year = 1998, abstract = {Trust region algorithms are a class of recently developed algorithms for solving optimization problems. The subproblems appeared in trust region algorithms are usually minimizing a quadratic function subject to one or two quadratic constraints. 
In this paper we review some of the widely used trust region subproblems and some matrix computation problems related to these trust region subproblems.}, summary = {The linear algebra aspects of the solution methods for various trust-region subproblems are reviewed.}} @techreport{Yuan99, author = {Y. Yuan}, title = {On the Truncated Conjugate-Gradient Method}, institution = ICMSEC, address = ICMSEC-ADDRESS, number = {ICM-99-003}, year = 1999, abstract = {In this paper, we consider the truncated conjugate-gradient method for minimizing a convex quadratic function subject to a ball trust region constraint. It is shown that the reduction in the objective function by the solution obtained by the truncated CG method is at least half of the reduction by the global minimizer in the trust region.}, summary = {It is shown that the model reduction obtained by applying the Steihaug-Toint algorithm on a convex quadratic model is at least half of that obtained by the exact minimizer of the model within the trust region.}} @techreport{Yuan99b, author = {Y. Yuan}, title = {A review of trust region algorithms for optimization}, institution = ICMSEC, address = ICMSEC-ADDRESS, number = {ICM-99-038}, year = 1999, abstract = {Iterative methods for optimization can be classified into two categories: line search methods and trust region methods. In this paper we give a review on trust region algorithms for nonlinear optimization. Trust region methods are robust, and can be applied to ill-conditioned problems. A model trust region algorithm is presented to demonstrate the trust region approaches. Various trust region subproblems and their properties are presented. Convergence properties of trust region algorithms are given. Techniques such as backtracking, non-monotone and second-order correction are also briefly discussed.}, summary = {A survey of trust-region methods is presented, with special emphasis on the possible subproblem solution techniques. 
Convergence properties for the unconstrained case are reviewed and techniques such as backtracking, non-monotone and second-order correction are also briefly discussed.}} %%% Z %%% @inproceedings{Zara71, author = {E. H. Zarantonello}, title = {Projections on convex sets in {H}ilbert space and spectral theory}, booktitle = {Contributions to Nonlinear Functional Analysis}, editor = {E. H. Zarantonello}, publisher = AP, address = AP-ADDRESS, pages = {237--424}, year = 1971} @article{Zhan89, author = {J. Zhang}, title = {Superlinear convergence of a trust region-type successive linear-programming method}, journal = JOTA, volume = 61, number = 2, pages = {295--310}, year = 1989, abstract = {The convergence rate of the SLP method suggested by \citebb{ZhanKimLasd85} is discussed for composite nondifferentiable optimization problems. A superlinear rate is assured under a growth condition, and it is further strengthened to a quadratic rate if the inside function is twice differentiable. Several sufficient conditions are given which make the growth condition true. The conditions can be relaxed considerably in practical use.}, summary = {The convergence rate of the SLP method suggested by \citebb{ZhanKimLasd85} is discussed for composite non-differentiable optimization problems. A superlinear rate is assured under a growth condition, and it is further strengthened to a quadratic rate if the inside function is twice differentiable. Several sufficient conditions are given which make the growth condition true. The conditions can be relaxed considerably in practical use.}} @article{ZhanZhu90, author = {J. Zhang and D. Zhu}, title = {Projected quasi-{N}ewton algorithm with trust-region for constrained optimization}, journal = JOTA, volume = 67, pages = {369--393}, year = 1990, abstract = {\citebb{NoceOver85} proposed a two-sided projected Hessian updating technique for equality constrained optimization problems. 
Although local two-step Q-superlinear rate was proved, its global convergence is not assured. In this paper, we suggest a trust-region-type, two-sided, projected quasi-Newton method, which preserves the local two-step superlinear convergence of the original algorithm and also ensures global convergence. The subproblem that we propose is as simple as the one often used when solving unconstrained optimization problems by trust-region strategies and therefore is easy to implement.}, summary = {A trust-region-type, two-sided, projected quasi-Newton method is proposed, which preserves the local two-step superlinear convergence of the original algorithm of \citebb{NoceOver85} and also ensures global convergence. The proposed subproblem is as simple as the one used when solving unconstrained problems by trust-region strategies.}} @article{ZhanZhu94, author = {J. Zhang and D. Zhu}, title = {A projective quasi-{N}ewton method for nonlinear optimization}, journal = JCAM, volume = 53, number = 3, pages = {291--307}, year = 1994, abstract = {A trust region method for nonlinear optimization problems with equality constraints is proposed in this paper. This method incorporates quadratic subproblems in which orthogonal projective matrices of the Jacobian of constraint functions are used to replace QR decompositions. As QR decomposition does not ensure continuity, but projective matrix does, convergence behaviour of the new method can be discussed under more reasonable assumptions. The method maintains a two-step feature: one movement in the range space of the Jacobian, whereas the other one in the null space. 
It is proved that all accumulation points of iterates are KKT (Karush-Kuhn-Tucker) points and the method has a one-step superlinear convergence rate.}, summary = {A trust region method for nonlinear optimization problems with equality constraints is proposed that incorporates quadratic subproblems in which orthogonal projective matrices of the Jacobian of constraint functions are used to replace QR decompositions. As QR decomposition does not ensure continuity, but projective matrix does, convergence behaviour of the new method is studied by exploiting the continuity of these matrices. A one-step superlinear convergence rate is also proved.}} @article{ZhanZhu99, author = {J. Zhang and D. Zhu}, title = {A nonmonotonic trust region method for constrained optimization problems}, journal = {Journal of the Australian Mathematical Society (Series B)}, volume = 40, number = 4, pages = {542--567}, year = 1999, abstract = {In this paper we propose an easy-to-implement algorithm for solving general nonlinear optimization problems with nonlinear equality constraints. A nonmonotonic trust region strategy is suggested which does not require the merit function to reduce its value in every iteration. In order to deal with large problems, a reduced Hessian is used to replace a full Hessian matrix. To avoid solving quadratic trust region subproblems exactly which usually takes substantial computation, we only require an approximate solution which requires less computation. The calculation of correction steps, necessary from a theoretical view point to overcome the Maratos effect but which often brings in negative results in practice, is avoided in most cases by setting a criterion to judge its necessity. Global convergence and. a local superlinear rate are then proved. This algorithm has a good performance. }, summary = {A nonmonotonic, reduced Hessian trust-region method is used to solve equality constrained nonlinear optimization problems. 
Approximate solutions to the trust-region subproblems are allowed. Although theory dictates that second-order correction steps be used to overcome the Maratos effect, a suitable scheme is developed to ensure that they are only used when absolutely necessary. Global convergence at a local superlinear rate is established, and the resulting algorithm performs well in practice.}} @article{ZhanZhuFan93, author = {J. Zhang and D. Zhu and Y. Fan}, title = {A practical trust region method for equality constrained optimization problems}, journal = OMS, volume = 2, number = 1, pages = {45--68}, year = 1993, abstract = {In this paper we propose an easy-to-implement algorithm for solving general nonlinear optimization problems with nonlinear equality constraints. In order to deal with large scale problems, a reduced Hessian is used to replace the full Hessian matrix. To avoid solving quadratic trust region subproblems exactly, which usually takes most computing time, we only require an approximate solution with less computation. The calculation of correction steps, that is necessary from the theoretical point of view to overcome the \citebb{Mara78} effect but often brings in negative results in practice, is avoided in most cases by setting a criterion to judge its necessity. Global convergence and a local superlinear rate are proved. Numerical results are reported to show that this algorithm has good performance.}, summary = {An easy-to-implement algorithm for solving general nonlinear optimization problems with nonlinear equality constraints is proposed, that uses a reduced Hessian matrix. The quadratic trust-region subproblems are solved approximately. The calculation of correction steps, that are necessary from the theoretical point of view to overcome the Maratos effect but prove costly in practice, is avoided in most cases by a suitable test. Global and superlinear convergence are proved. Numerical results are reported.}} @article{ZhanKimLasd85, author = {J. Zhang and N. 
H. Kim and L. S. Lasdon}, title = {An improved successive linear programming algorithm}, journal = {Management Science}, volume = 31, pages = {1312--1331}, year = 1985} @article{ZhanXu99, author = {J. Zhang and C. Xu}, title = {A Class of Indefinite Dogleg Path Methods for Unconstrained Optimization}, journal = SIOPT, volume = 9, number = 3, pages = {646--676}, year = 1999, abstract = {In this paper we propose a convenient curvilinear search method to solve the trust region subproblems arising from unconstrained optimization problems. The curvilinear paths we set forth are dogleg paths, generated mainly by employing Bunch-Parlett factorizations for general symmetric matrices that may be indefinite. This method is easy to implement and globally convergent. It is proved that the method satisfies the first- and second-order stationary point convergence properties and that the convergence rate is quadratic under commonly used conditions on functions. Numerical experiments are conducted to compare this method with some existing methods.}, summary = {A variant of the dogleg method for approximately solving the trust-region subproblem is proposed. This variant is adequate for the case where the subproblem is non-convex. In this case, it defines a family of path that use a direction of negative curvature obtained from the Bunch-Parlett factorization of the Hessian. Global convergence to unconstrained second-order stationary points is proved for the resulting trust-region method, as well as quadratic convergence of the associated version of Newton's method. Numerical results are shown.}} @article{ZhanXu99b, author = {J. Zhang and C. Xu}, title = {Trust region dogleg path algorithms for unconstrained minimization}, journal = AOR, volume = 87, pages = {407--418}, year = 1999, abstract = {In this paper, we propose a class of convenient curvilinear search algorithms to solve trust region problems arising from unconstrained optimization. 
The curvilinear paths we set are dogleg paths, generated mainly by employing the Bunch-Parlett factorization for general symmetric matrices which may be indefinite. These algorithms are easy to use and globally convergent. It is proved that these algorithms satisfy the first- and second-order stationary point convergence properties and that the rate of convergence is quadratic under commonly used assumptions}, summary = {Trust-region algorithms using curvilinear search to approximately solve the possibly nonconvex subproblem are proved to be globally convergent to first- and second-order critical points. The rate of convergence is quadratic under typical assumptions.}} @article{ZhanXu99c, author = {J. Zhang and C. Xu}, title = {A projected indefinite dogleg-path method for equality constrained optimization}, journal = BIT, volume = 39, number = 3, pages = {555--578}, year = 1999, abstract = {In this paper, we propose a 2-step trust-region indefinite dogleg path method for the solution of nonlinear equality constrained optimization problems. The method is a globally convergent Fontecilla method and an indefinite dogleg path method is proposed to get approximate solutions of quadratic programming subproblems even if the Hessian of the model is indefinite. The dogleg path lie in the null space of the Jacobian matrix of the constraints. An $\ell_1$ exact penalty function method is used in the method to determine if a trial point is accepted. The global convergence and the local two-step superlinear convergence rate is proved. Some numerical results are presented.}, summary = {A two-step trust-region algorithm is presented for the solution of optimization problems with nonlinear equality constraints. The method is based on that of \citebb{Font90}, but uses an indefinite dogleg strategy in the nullspace of the constraints Jacobian as a way to obtain an approximate solution of the quadratic programming subproblem. 
Global and locally two-step superlinear convergence are proved and some numerical experiments shown.}} @inproceedings{ZhanXuDu98, author = {J. Zhang and C. Xu and L. Du}, title = {A more efficient variation of an indefinite dogleg path method}, booktitle = {Operations Research and its Applications. Third International Symposium, ISORA'98.}, publisher = {World Publishing Corp}, address = {Beijing, China}, pages = {428--434}, year = 1998, abstract = {The authors previously suggested a class of indefinite dogleg path methods that can be used to find approximate solutions of a trust region model in which the working matrix is not necessary positive definite. In this paper we propose a scaled version of the indefinite dogleg path method. The main advantage of the revision is that the scaled subproblem has a 1 by 1 or 2 by 2 block diagonal matrix as its working matrix so that the solution of the Newton equation when the working matrix is positive definite and the calculation of a direction of negative curvature when the matrix is indefinite become much easier. Also, it can save a substantial number of matrix factorizations.}, summary = {A scaled version of the authors' previous indefinite dogleg path method is considered. The main advantage is that the scaled subproblem has a 1 by 1 or 2 by 2 block diagonal Hessian, so that the solution of the Newton equations and directions of negative curvature are simple to obtain.}} @article{Zhan92, author = {Y. Zhang}, title = {Computing a {C}elis-{D}ennis-{T}apia trust-region step for equality constrained optimization}, journal = MP, volume = 55, number = 1, pages = {109--124}, year = 1992, abstract = {We study an approach for minimizing a convex quadratic function subject to two quadratic constraints. This problem stems from computing a trust-region step for an SQP algorithm proposed by \citebb{CeliDennTapi85} for equality constraint optimization. 
Our approach is to reformulate the problem into a univariate nonlinear equation $\phi(\mu)=0$, where the function $\phi(\mu)$ is continuous, at least piecewise differentiable and monotone. Well-established methods then can be readily applied. We also consider an extension of our approach to a class of non-convex quadratic functions and show that our approach is applicable to reduced Hessian SQP algorithms. Numerical results are presented indicating that our algorithm is reliable, robust and has the potential to be used as a building block to construct trust-region algorithms for small-sized problems in constrained optimization.}, summary = {An approach to minimizing a convex quadratic function subject to two quadratic constraints is studied. This problem stems from computing a trust-region step for an SQP algorithm proposed by \citebb{CeliDennTapi85} for equality constraint optimization. The approach taken is to reformulate the problem as a univariate nonlinear equation $\phi(\mu)=0$, where the function $\phi(\mu)$ is continuous, at least piecewise differentiable and monotone. Well-established methods then can be readily applied. An extension of this approach to a class of non-convex quadratic functions is considered, and it is shown that the approach is applicable to reduced Hessian SQP algorithms. Numerical results are presented.}} @article{Zhan94, author = {Y. Zhang}, title = {On the convergence of infeasible interior-point methods for the horizontal linear complementarity problem}, journal = SIOPT, volume = 4, number = 1, pages = {208--227}, year = 1994} @inproceedings{ZhaoWang93, author = {M. Zhao and X. 
Wang}, title = {Model trust region technique in parallel {N}ewton method for training neural networks}, booktitle = {IEEE International Symposium on Circuits and Systems (ISCAS 93)}, publisher = {IEEE}, address = {New York}, volume = 4, pages = {2399--2402}, year = 1993, abstract = {In this article, the double dogleg trust region approach of unconstrained minimization is introduced in the parallel Newton's (PN) algorithm proposed in Zhao (1993). The PN algorithm uses a recursive procedure for computing both the Hessian matrix and the Newton direction. The input weights of each neuron in the network are updated after each presentation of the training data with a global strategy. Experimental results indicate that the double dogleg trust region approach is superior to the line search technique in the PN algorithm, and that the PN algorithm with both global strategies exhibits better convergence performance than the well-known backpropagation algorithm.}, summary = {The double-dogleg trust-region approach of unconstrained minimization is introduced into the parallel Newton's (PN) algorithm, which uses a recursive procedure for computing both the Hessian matrix and the Newton direction. The input weights of each neuron in the network are updated after each presentation of the training data with a global strategy. Experimental results indicate that the double-dogleg trust-region approach is superior to the linesearch technique in the PN algorithm, and that the PN algorithm with both global strategies exhibits better convergence performance than backpropagation.}} @inproceedings{ZhouSi98, author = {G. Zhou and J. 
Si}, title = {Subset based training and pruning of sigmoid neural networks}, booktitle = {Proceedings of the 1998 American Control Conference, Evanston, IL, USA}, pages = {58--62}, year = 1998, abstract = {In the present paper we develop two algorithms, subset based training (SBT) and subset based training and pruning (SBTP), using the fact that the Jacobian matrices in sigmoid network training problems are usually rank deficient. The weight vectors are divided into two parts during training, according to the Jacobian rank sizes. Both SBT and SBTP are trust region methods. Comparing to the standard Levenberg-Marquardt (LM) method, these two algorithms can achieve similar convergence properties as the LM but with less memory requirements. Furthermore the SBTP combines training and pruning of a network into one comprehensive procedure. Some convergence properties of the two algorithms are given to qualitatively evaluate the performance of the algorithms.}, summary = {Two trust-region algorithms, subset based training (SBT) and subset based training and pruning (SBTP), are developed using the fact that the Jacobian matrices in sigmoid network training problems are usually rank deficient. The weight vectors are divided into two parts during training, according to the Jacobian rank sizes. These two algorithms have convergence properties similar to those of the Levenberg-Morrison-Marquardt method but with less memory requirements. Furthermore the SBTP combines training and pruning of a network into one comprehensive procedure. }} @article{ZhouSi98b, author = {G. Zhou and J. Si}, title = {Advanced neural-network training algorithm with reduced complexity based on Jacobian deficiency}, journal = {IEEE Transactions on Neural Networks}, volume = 9, number = 3, pages = {448--453}, year = 1998, abstract = {We introduce an advanced supervised training method for neural networks. 
It is based on Jacobian rank deficiency and it is formulated, in some sense, in the spirit of the Gauss-Newton algorithm. The Levenberg-Marquardt algorithm, as a modified Gauss-Newton, has been used successfully in solving nonlinear least squares problems including neural-network training. It outperforms the basic backpropagation and its variations with variable learning rate significantly, but with higher computation and memory complexities within each iteration. The new method developed in this paper is aiming at improving convergence properties, while reducing the memory and computation complexities in supervised training of neural networks. Extensive simulation results are provided to demonstrate the superior performance of the new algorithm over the Levenberg-Marquardt algorithm.}, summary = {A supervised training method for neural networks based on Jacobian rank deficiency is formulated in the spirit of the Gauss-Newton algorithm. The new method aims at improving convergence properties compared to the Levenberg-Morrison-Marquardt method, while reducing the memory and computation complexities in supervised training of neural networks. Extensive simulation results demonstrate the superior performance of the new algorithm over the Levenberg-Morrison-Marquardt algorithm.}} @article{ZhouSi99, author = {G. Zhou and J. Si}, title = {Subset-based training and pruning of sigmoid neural networks}, journal = {Neural Networks}, volume = 12, number = 1, pages = {79--89}, year = 1999, abstract = {In the present paper we develop two algorithms, subset-based training (SBT) and subset-based training and pruning (SBTP), using the fact that the Jacobian matrices in sigmoid network training problems are usually rank deficient. The weight vectors are divided into two parts during training, according to the Jacobian rank sizes. Both SBT and SBTP are trust-region methods. 
Compared with the standard Levenberg-Marquardt (LM) method, these two algorithms can achieve similar convergence properties as the LM but with fewer memory requirements. Furthermore the SBTP combines training and pruning of a network into one comprehensive procedure. The effectiveness of the two algorithms is evaluated using three examples. Comparisons are made with some existing algorithms. Some convergence properties of the two algorithms are given to qualitatively evaluate the performance of the algorithms.}, summary = {Subset-based training (SBT) and subset-based training and pruning (SBTP) trust-region algorithms are developed to cope with the fact that the Jacobian matrices in sigmoid network training problems are usually rank deficient. Both methods prove to be as effective as the Levenberg-Morrison-Marquardt approach, but have significantly smaller memory requirements. Additionally SBTP combines training and pruning of a network into one comprehensive procedure. The effectiveness of the two algorithms is evaluated using three examples. Comparisons are made with existing algorithms, and convergence properties are investigated.}} @article{ZhouXiao94, author = {F. Zhou and Y. Xiao}, title = {A Class of nonmonotone stabilization trust region methods}, journal = {Computing}, volume = 53, number = 2, pages = {119--136}, year = 1994, abstract = {A class of trust region methods in unconstrained optimization is presented, by adopting a nonmonotone stabilization strategy. Under some regularity conditions, the convergence properties of these methods are discussed. Extensive numerical results which are reported show that these methods are very efficient.}, summary = {A class of trust-region methods for unconstrained optimization is presented which use a non-monotone stabilization strategy. Under some regularity conditions, the convergence properties of these methods are discussed. Extensive numerical results are reported.}} @article{ZhouTits93, author = {J. 
Zhou and A. L. Tits}, title = {Nonmonotone line search for minimax problems}, journal = JOTA, volume = 76, number = 3, pages = {455--476}, year = 1993} @article{Zhu92, author = {D. Zhu}, title = {Convergence of a projected gradient method with trust region for nonlinear constrained optimization}, journal = {Optimization}, volume = 23, number = 3, pages = {215--235}, year = 1992, abstract = {Describes a projected gradient algorithm with trust region, introducing nondifferentiable merit function for solving nonlinear constrained optimization problems. The author shows that this method is globally convergent even if conditions are weak. It is also proved that, when the strict complementarity condition holds, the proposed algorithm can be solved by an equality constrained problem, allowing locally rate of superlinear convergence.}, summary = {A globally convergent trust-region projected-gradient algorithm is described that uses a non-differentiable merit function.}} @article{Zhu95, author = {D. Zhu}, title = {A Nonmonotonic Trust Region Technique for nonlinear Constrained Optimization}, journal = JCM, volume = 13, number = 1, pages = {20--31}, year = 1995, abstract = {In this paper, a nonmonotonic trust region method for optimization problems with equality constraints is proposed by introducing a nonsmooth merit function and adopting a correction step. It is proved that all accumulation points of the iterates generated by the proposed algorithm are Kuhn-Tucker points and that the algorithm is q-superlinearly convergent.}, summary = {A non-monotonic method for problems with equality constraints is proposed by introducing a non-smooth merit function and a correction step. It is proved that all accumulation points of the iterates generated are Kuhn-Tucker points and that the algorithm is Q-superlinearly convergent.}} @techreport{Zhu99, author = {D. 
Zhu}, title = {A Family of Generalized Projected Gradient Methods with Mixing Strategy for Convex Constrained Optimization}, institution = {Department of Mathematics, Shanghai Normal University}, address = {Shanghai, China}, number = {(na)}, year = 1999, abstract = {A family of generalized projected gradient algorithms is proposed for convex constrained optimization problems. The mixed strategy in association with nonmonotone technique is adopted in which projected gradient methods switch to back tracking steps in trust region subproblems. The globally convergent theoretical analysis of the proposed algorithms are given and the local convergence rate of the proposed algorithms are proved under some reasonable conditions.}, summary = {A globally convergent trust-region method is proposed for problems with convex constraints, that combines the strategies of \citebb{ConnGoulSartToin93} with the non-monotone technique of \citebb{DengXiaoZhou93}.}} @article{Zhu96, author = {C. Zhu}, title = {Asymptotic convergence analysis of some inexact proximal point algorithms for minimization}, journal = SIOPT, volume = 6, number = 3, pages = {626--637}, year = 1996} @article{ZhuByrdLuNoce97, author = {C. Zhu and R. H. Byrd and P. Lu and J. Nocedal}, title = {Algorithm 778: {L-BFGS-B}: Fortran subroutines for large-scale bound constrained optimization}, journal = TOMS, volume = 23, number = 4, pages = {550--560}, year = 1997} @article{ZhuBrow87, author = {T. Zhu and L. D. Brown}, title = {Two-dimensional velocity inversion and synthetic seismogram computation}, journal = {Geophysics}, volume = 52, number = 1, pages = {37--50}, year = 1987, abstract = {A traveltime inversion scheme has been developed to estimate velocity and interface geometries of two-dimensional media from deep reflection data. 
The velocity structure is represented by finite elements, and the inversion is formulated as an iterative, constrained, linear least-squares problem which can be solved by either the singular value truncation method or the Levenberg-Marquardt method. The damping factor of the Levenberg-Marquardt method is chosen by the model-trust region approach. The traveltimes and derivative matrix required to solve the least-squares problem are computed by ray tracing. To aid seismic interpretation, the authors also include in the inversion scheme a fast algorithm based on asymptotic ray theory for calculating synthetic seismograms from the derived velocity model.}, summary = {A trust-region method is used to estimate velocity and interface geometries of two-dimensional media from deep-reflection data, where the velocity structure is represented by finite elements. The traveltimes and derivative matrix required to solve the least-squares problem are computed by ray tracing.}} @mastersthesis{Zupk97, author = {M. Zupke}, title = {{T}rust-{R}egion-{V}erfahren zur {L}\"{o}sung nichtlinearer {K}ompl\-em\-ent\-ar\-it\-\"{a}ts\-probleme}, school = HAMBURG, address = HAMBURG-ADDRESS, year = 1997, summary = {The nonlinear complementarity problem is reformulated as a non-smooth system of equations by using a recently introduced NCP-function. A trust-region-type method is then applied to the resulting system of equations, that allows an inexact solution of the trust-region subproblem. It is shown that the algorithm is well-defined for a general nonlinear complementarity problem and that it has some nice global and local convergence properties. Numerical results show the advantage of using the non-monotone technique proposed by \citebb{Toin96b}.}} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% PROCEEDINGS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @proceedings{BachGrotKort83, editor = {A. Bachem and M. Gr\"{o}tschel and B. 
Korte}, title = {Mathematical Programming: The State of the Art}, booktitle = {Mathematical Programming: The State of the Art}, publisher = SPRINGER, address = SPRINGER-ADDRESS, year = 1983} @proceedings{BalaThom84, editor = {A. V. Balakrishnan and M. Thomas}, title = {11th IFIP Conference on System Modelling and Optimization}, booktitle = {11th IFIP Conference on System Modelling and Optimization}, publisher = SPRINGER, address = SPRINGER-ADDRESS, number = 59, series = {Lecture Notes in Control and Information Sciences}, year = 1984} @proceedings{BoggByrdSchn85, editor = {P. T. Boggs and R. H. Byrd and R. B. Schnabel}, title = {Numerical Optimization 1984}, booktitle = {Numerical Optimization 1984}, publisher = SIAM, address = SIAM-ADDRESS, year = 1985} @proceedings{ColeLi90, editor = {T. F. Coleman and Y. Li}, title = {Large Scale Numerical Optimization}, booktitle = {Large Scale Numerical Optimization}, publisher = SIAM, address = SIAM-ADDRESS, year = 1990} @proceedings{CoxHamm90, editor = {M. G. Cox and S. J. Hammarling}, title = {Reliable Scientific Computation}, booktitle = {Reliable Scientific Computation}, publisher = OUP, address = OUP-ADDRESS, year = 1990} @proceedings{DeLeMurlPardTora98, editor = {R. De Leone and A. Murli and P. M. Pardalos and G. Toraldo}, title = {High Performance Algorithms and Software in Nonlinear Optimization}, booktitle = {High Performance Algorithms and Software in Nonlinear Optimization}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1998} @proceedings{DiPiGian96, editor = {Di Pillo, G. and F. Gianessi}, title = {Nonlinear Optimization and Applications}, booktitle = {Nonlinear Optimization and Applications}, publisher = {Plenum Publishing}, address = {New York}, year = 1996} @proceedings{DiPiGian99, editor = {Di Pillo, G. and F. 
Gianessi}, title = {Nonlinear Optimization and Applications 2}, booktitle = {Nonlinear Optimization and Applications 2}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1999} @proceedings{DixoSpedSzeg80, editor = {L. C. W. Dixon and E. Spedicato and G. P. Szego}, title = {Nonlinear Optimization: Theory and Algorithms}, booktitle = {Nonlinear Optimization: Theory and Algorithms}, publisher = {Birkhauser}, address = {Boston}, year = 1980} @proceedings{DuQiWome95, editor = {D. Du and L. Qi and R. Womersley}, title = {Recent Advances in Nonsmooth Optimization}, booktitle = {Recent Advances in Nonsmooth Optimization}, publisher = WSP, address = WSP-ADDRESS, year = 1995} @proceedings{DuffWats97, editor = {I. Duff and A. Watson}, title = {The State of the Art in Numerical Analysis}, booktitle = {The State of the Art in Numerical Analysis}, publisher = OUP, address = OUP-ADDRESS, year = 1997} @proceedings{FerrPang97, editor = {M. C. Ferris and J. S. Pang}, title = {Complementarity and variational problems: State of the Art}, booktitle = {Complementarity and variational problems: State of the Art}, publisher = SIAM, address = SIAM-ADDRESS, year = 1997} @proceedings{Flet69, editor = {R. Fletcher}, title = {Optimization}, booktitle = {Optimization}, publisher = AP, address = AP-ADDRESS, year = 1969} @proceedings{FukuQi98, editor = {M. Fukushima and L. Qi}, title = {Reformulation: Nonsmooth, Piecewise Smooth, Semismooth and Smoothing Methods}, booktitle = {Reformulation: Nonsmooth, Piecewise Smooth, Semismooth and Smoothing Methods}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1998} @proceedings{GillMurr74a, editor = {P. E. Gill and W. Murray}, title = {Numerical Methods for Constrained Optimization}, booktitle = {Numerical Methods for Constrained Optimization}, publisher = AP, address = AP-ADDRESS, year = 1974} @proceedings{GomeHenn94, editor = {S. Gomez and J. P. 
Hennart}, title = {Advances in {O}ptimization and {N}umerical {A}nalysis, Proceedings of the Sixth Workshop on Optimization and Numerical Analysis, Oaxaca, Mexico}, booktitle = {Advances in {O}ptimization and {N}umerical {A}nalysis, Proceedings of the Sixth Workshop on Optimization and Numerical Analysis, Oaxaca, Mexico}, publisher = KLUWER, address = KLUWER-ADDRESS, volume = 275, year = 1994} @proceedings{GrifWats92, editor = {D. F. Griffiths and G. A. Watson}, title = {Numerical Analysis 1991}, booktitle = {Numerical Analysis 1991}, publisher = LONGMAN, address = LONGMAN-ADDRESS, number = 260, series = {Pitman Research Notes in Mathematics Series}, year = 1992} @proceedings{HageHearPard94, editor = {W. W. Hager and D. W. Hearn and P. M. Pardalos}, title = {Large Scale Optimization: State of the Art}, booktitle = {Large Scale Optimization: State of the Art}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1994} @proceedings{Henn82, editor = {J. P. Hennart}, title = {Numerical Analysis}, booktitle = {Numerical Analysis}, publisher = SPRINGER, address = SPRINGER-ADDRESS, number = 909, series = {Lecture Notes in Mathematics}, year = 1982} @proceedings{IserPowe87, editor = {A. Iserles and M. J. D. Powell}, title = {The State of the Art in Numerical Analysis}, booktitle = {The State of the Art in Numerical Analysis}, publisher = OUP, address = OUP-ADDRESS, year = 1987} @proceedings{IriTana89, editor = {M. Iri and K. Tanabe}, title = {Mathematical Programming: Recent Developments and Applications}, booktitle = {Mathematical Programming: Recent Developments and Applications}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1989} @proceedings{LabbLapoTancToin98, editor = {M. Labb\'{e} and G. Laporte and K. Tanczos and Ph. L. 
Toint}, title = {Operations Research and Decision Aid Methodologies in Traffic and Transportation Management}, booktitle = {Operations Research and Decision Aid Methodologies in Traffic and Transportation Management}, publisher = SPRINGER, address = SPRINGER-ADDRESS, year = 1998} @proceedings{Loot72, editor = {F. A. Lootsma}, title = {Numerical Methods for Nonlinear Optimization}, booktitle = {Numerical Methods for Nonlinear Optimization}, publisher = AP, address = AP-ADDRESS, year = 1972} @proceedings{MangMeyeRobi75, editor = {O. L. Mangasarian and R. R. Meyer and S. M. Robinson}, title = {Nonlinear Programming, 2}, booktitle = {Nonlinear Programming, 2}, publisher = AP, address = AP-ADDRESS, year = 1975} @proceedings{Powe82, editor = {M. J. D. Powell}, title = {Nonlinear Optimization 1981}, booktitle = {Nonlinear Optimization 1981}, publisher = AP, address = AP-ADDRESS, year = 1982} @proceedings{Rabi70, editor = {P. Rabinowitz}, title = {Numerical Methods for Nonlinear Algebraic Equations}, booktitle = {Numerical Methods for Nonlinear Algebraic Equations}, publisher = {Gordon and Breach}, address = {London}, year = 1970} @proceedings{RoseMangRitt70, editor = {J. B. Rosen and O. L. Mangasarian and K. Ritter}, title = {Nonlinear Programming}, booktitle = {Nonlinear Programming}, publisher = AP, address = AP-ADDRESS, year = 1970} @proceedings{Sped94, editor = {E. Spedicato}, title = {Algorithms for Continuous Optimization: The State of the Art}, booktitle = {Algorithms for Continuous Optimization: The State of the Art}, publisher = KLUWER, address = KLUWER-ADDRESS, number = 434, series = {NATO ASI Series C: Mathematical and Physical Sciences}, year = 1994} @proceedings{Terl96, editor = {T. 
Terlaky}, title = {Interior Point Methods in Mathematical Programming}, booktitle = {Interior Point Methods in Mathematical Programming}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1996} @proceedings{VanH97, editor = {Van Huffel, S.}, title = {Recent Advances in Total Least-Squares Techniques and Error-in-Variables Modeling}, booktitle = {Recent Advances in Total Least-Squares Techniques and Error-in-Variables Modeling}, publisher = SIAM, address = SIAM-ADDRESS, year = 1997} @proceedings{Wats78, editor = {G. A. Watson}, title = {Numerical Analysis, Dundee 1977}, booktitle = {Numerical Analysis, Dundee 1977}, publisher = SPRINGER, address = SPRINGER-ADDRESS, number = 630, series = {Lecture Notes in Mathematics}, year = 1978} @proceedings{Yuan98, editor = {Y. Yuan}, title = {Advances in Nonlinear Programming}, booktitle = {Advances in Nonlinear Programming}, publisher = KLUWER, address = KLUWER-ADDRESS, year = 1998}