* Bug #9691 fixed - "imp" option of optim was poorly documented.
[scilab.git] / scilab / modules / optimization / help / en_US / optim.xml
1 <?xml version="1.0" encoding="UTF-8"?>
2 <!--
3  * Scilab ( http://www.scilab.org/ ) - This file is part of Scilab
4  * Copyright (C) 2008 - INRIA
5  * Copyright (C) 2008 - 2009 - INRIA - Michael Baudin
6  * Copyright (C) 2010 - 2011 - DIGITEO - Michael Baudin
7  *
8  * This file must be used under the terms of the CeCILL.
9  * This source file is licensed as described in the file COPYING, which
10  * you should have received as part of this distribution.  The terms
11  * are also available at
12  * http://www.cecill.info/licences/Licence_CeCILL_V2-en.txt
13  *
14  -->
15 <refentry xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:svg="http://www.w3.org/2000/svg" xmlns:ns3="http://www.w3.org/1999/xhtml" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:db="http://docbook.org/ns/docbook" xmlns:scilab="http://www.scilab.org"  xml:id="optim" xml:lang="en">
16     <refnamediv>
17         <refname>optim</refname>
18         <refpurpose>non-linear optimization routine</refpurpose>
19     </refnamediv>
20     <refsynopsisdiv>
21         <title>Calling Sequence</title>
22         <synopsis>
23             fopt = optim(costf, x0)
24             fopt = optim(costf [,&lt;contr&gt;],x0 [,algo] [,df0 [,mem]] [,work] [,&lt;stop&gt;] [,&lt;params&gt;] [,imp=iflag])
25             [fopt, xopt] = optim(...)
26             [fopt, xopt, gopt] = optim(...)
27             [fopt, xopt, gopt, work] = optim(...)
28         </synopsis>
29     </refsynopsisdiv>
30     <refsection>
31         <title>Arguments</title>
32         <variablelist>
33             <varlistentry>
34                 <term>costf</term>
35                 <listitem>
36                     <para>a function, a list or a string, the objective function.</para>
37                 </listitem>
38             </varlistentry>
39             <varlistentry>
40                 <term>x0</term>
41                 <listitem>
42                     <para>real vector, the initial guess for
43                         <literal>x</literal>.
44                     </para>
45                 </listitem>
46             </varlistentry>
47             <varlistentry>
48                 <term>&lt;contr&gt;</term>
49                 <listitem>
50                     <para>an optional sequence of arguments containing the lower and
51                         upper bounds on <literal>x</literal>. If bounds are required, this
52                         sequence of arguments must be <literal>"b",binf,bsup</literal> where
53                         <literal>binf</literal> and <literal>bsup</literal> are real vectors
54                         with same dimension as <literal>x0</literal>.
55                     </para>
56                 </listitem>
57             </varlistentry>
58             <varlistentry>
59                 <term>algo</term>
60                 <listitem>
61                     <para>a string, the algorithm to use (default
62                         <literal>algo="qn"</literal>).
63                     </para>
64                     <para>The available algorithms are:</para>
65                     <itemizedlist>
66                         <listitem>
67                             <para>
68                                 <literal>"qn"</literal>: Quasi-Newton with BFGS
69                             </para>
70                         </listitem>
71                         <listitem>
72                             <para>
73                                 <literal>"gc"</literal>: limited memory BFGS
74                             </para>
75                         </listitem>
76                         <listitem>
77                             <para>
78                                 <literal>"nd"</literal>: non-differentiable.
79                             </para>
80                             <para>
81                                 The <literal>"nd"</literal> algorithm does not accept
82                                 bounds on <literal>x</literal>.
83                             </para>
84                         </listitem>
85                     </itemizedlist>
86                 </listitem>
87             </varlistentry>
88             <varlistentry>
89                 <term>df0</term>
90                 <listitem>
91                     <para>
92                         real scalar, a guess of the decreasing of <literal>f</literal>
93                         at first iteration. (default <literal>df0=1</literal>).
94                     </para>
95                 </listitem>
96             </varlistentry>
97             <varlistentry>
98                 <term>mem</term>
99                 <listitem>
100                     <para>integer, the number of variables used to approximate the
101                         Hessian (default <literal>mem=10</literal>). This feature is
102                         available for the <literal>"gc"</literal> algorithm without
103                         constraints and the non-smooth algorithm <literal>"nd"</literal>
104                         without constraints.
105                     </para>
106                 </listitem>
107             </varlistentry>
108             <varlistentry>
109                 <term>&lt;stop&gt;</term>
110                 <listitem>
111                     <para>a sequence of arguments containing the parameters controlling
112                         the convergence of the algorithm. The following sequences are
113                         available: <screen>              "ar",nap
114                             "ar",nap,iter
115                             "ar",nap,iter,epsg
116                             "ar",nap,iter,epsg,epsf
117                             "ar",nap,iter,epsg,epsf,epsx            
118                         </screen>
119                     </para>
120                     <para>where:</para>
121                     <variablelist>
122                         <varlistentry>
123                             <term>nap</term>
124                             <listitem>
125                                 <para>
126                                     maximum number of calls to <literal>costf</literal>
127                                     allowed (default <literal>nap=100</literal>).
128                                 </para>
129                             </listitem>
130                         </varlistentry>
131                         <varlistentry>
132                             <term>iter</term>
133                             <listitem>
134                                 <para>maximum number of iterations allowed (default
135                                     <literal>iter=100</literal>).
136                                 </para>
137                             </listitem>
138                         </varlistentry>
139                         <varlistentry>
140                             <term>epsg</term>
141                             <listitem>
142                                 <para>threshold on gradient norm (default
143                                     <literal>epsg= %eps</literal>).
144                                 </para>
145                             </listitem>
146                         </varlistentry>
147                         <varlistentry>
148                             <term>epsf</term>
149                             <listitem>
150                                 <para>
151                                     threshold controlling decreasing of <literal>f</literal>
152                                     (default <literal>epsf=0</literal>).
153                                 </para>
154                             </listitem>
155                         </varlistentry>
156                         <varlistentry>
157                             <term>epsx</term>
158                             <listitem>
159                                 <para>
160                                     threshold controlling variation of <literal>x</literal>
161                                     (default <literal>epsx=0</literal>). This vector (possibly
162                                     matrix) with same size as <literal>x0</literal> can be used to
163                                     scale <literal>x</literal>.
164                                 </para>
165                             </listitem>
166                         </varlistentry>
167                     </variablelist>
168                 </listitem>
169             </varlistentry>
170             <varlistentry>
171                 <term>&lt;params&gt;</term>
172                 <listitem>
173                     <para>in the case where the objective function is a C or Fortran
174                         routine, a sequence of arguments containing the method to
175                         communicate with the objective function. This option has no meaning
176                         when the cost function is a Scilab script.
177                     </para>
178                     <para>The available values for &lt;params&gt; are the
179                         following.
180                     </para>
181                     <itemizedlist>
182                         <listitem>
183                             <para>
184                                 <literal>"in"</literal>
185                             </para>
186                             <para>This mode makes it possible to allocate memory in the internal
187                                 Scilab workspace so that the objective function can get arrays with
188                                 the required size, but without directly allocating the memory. The
189                                 <literal>"in"</literal> value stands for "initialization". In
190                                 that mode, before the value and derivative of the objective
191                                 function is to be computed, there is a dialog between the
192                                 <literal>optim</literal> Scilab primitive and the objective
193                                 function <literal>costf</literal>. In this dialog, the objective
194                                 function is called two times, with particular values of the
195                                 <literal>ind</literal> parameter. The first time,
196                                 <literal>ind</literal> is set to 10 and the objective function
197                                 is expected to set the <literal>nizs</literal>,
198                                 <literal>nrzs</literal> and <literal>ndzs</literal> integer
199                                 parameters of the <literal>nird</literal> common, which is
200                                 defined as:
201                             </para>
202                             <screen>common /nird/ nizs,nrzs,ndzs    </screen>
203                             <para>This allows Scilab to allocate memory inside its internal
204                                 workspace. The second time the objective function is called,
205                                 <literal>ind</literal> is set to 11 and the objective function
206                                 is expected to set the <literal>ti</literal>,
207                                 <literal>tr</literal> and <literal>tz</literal> arrays. After
208                                 this initialization phase, each time it is called, the objective
209                                 function is ensured that the <literal>ti</literal>,
210                                 <literal>tr</literal> and <literal>tz</literal> arrays which are
211                                 passed to it have the values that have been previously
212                                 initialized.
213                             </para>
214                         </listitem>
215                         <listitem>
216                             <para>
217                                 <literal>"ti",valti</literal>
218                             </para>
219                             <para>
220                                 In this mode, <literal>valti</literal> is expected to be a
221                                 Scilab vector variable containing integers. Whenever the
222                                 objective function is called, the <literal>ti</literal> array it
223                                 receives contains the values of the Scilab variable.
224                             </para>
225                         </listitem>
226                         <listitem>
227                             <para>
228                                 <literal>"td", valtd</literal>
229                             </para>
230                             <para>
231                                 In this mode, <literal>valtd</literal> is expected to be a
232                                 Scilab vector variable containing double values. Whenever the
233                                 objective function is called, the <literal>td</literal> array it
234                                 receives contains the values of the Scilab variable.
235                             </para>
236                         </listitem>
237                         <listitem>
238                             <para>
239                                 <literal>"ti",valti,"td",valtd</literal>
240                             </para>
241                             <para>This mode combines the two previous modes.</para>
242                         </listitem>
243                     </itemizedlist>
244                     <para>
245                         The <literal>ti, td</literal> arrays may be used so that the
246                         objective function can be computed. For example, if the objective
247                         function is a polynomial, the ti array may be used to store the
248                         coefficients of that polynomial.
249                     </para>
250                     <para>Users should choose carefully between the
251                         <literal>"in"</literal> mode and the <literal>"ti"</literal> and
252                         <literal>"td"</literal> mode, depending on the fact that the arrays
253                         are Scilab variables or not. If the data is available as Scilab
254                         variables, then the <literal>"ti", valti, "td", valtd</literal> mode
255                         should be chosen. If the data is available directly from the
256                         objective function, the <literal>"in"</literal> mode should be
257                         chosen. Notice that there is no <literal>"tr"</literal> mode, since,
258                         in Scilab, all real values are doubles.
259                     </para>
260                     <para>If neither the "in" mode, nor the "ti", "td" mode is chosen,
261                         that is, if &lt;params&gt; is not present as an option of the optim
262                         primitive, the user should not assume that the ti, tr and td
263                         arrays can be used: reading or writing the arrays may generate
264                         unpredictable results.
265                     </para>
266                 </listitem>
267             </varlistentry>
268             <varlistentry>
269                 <term>"imp=iflag"</term>
270                 <listitem>
271                     <para>named argument used to set the trace mode (default
272                         <literal>imp=0</literal>, which prints no messages). If <varname>imp</varname>
273                         is greater than or equal to 1, more information is printed, depending on the
274                         algorithm chosen. More precisely:
275                     </para>
276                     <itemizedlist>
277                         <listitem>
278                             <para><literal>"qn"</literal> without constraints: from <literal>iflag=1</literal>
279                             to <literal>iflag=3</literal>.</para>
280                             <itemizedlist>
281                                 <listitem>
282                                     <para><literal>iflag>=1</literal>: initial and final print,</para>
283                                 </listitem>
284                                 <listitem>
285                                     <para><literal>iflag>=2</literal>: one line per iteration (number of iterations,
286                                     number of calls to f, value of f),</para>
287                                 </listitem>
288                                 <listitem>
289                                     <para><literal>iflag>=3</literal>: extra information on line searches.</para>
290                                 </listitem>
291                             </itemizedlist>
292                         </listitem>
293                         <listitem>
294                             <para><literal>"qn"</literal> with bounds constraints: from <literal>iflag=1</literal>
295                             to <literal>iflag=4</literal>.</para>
296                             <itemizedlist>
297                                 <listitem>
298                                     <para><literal>iflag>=1</literal>: initial and final print,</para>
299                                 </listitem>
300                                 <listitem>
301                                     <para><literal>iflag>=2</literal>: one line per iteration (number of iterations,
302                                     number of calls to f, value of f),</para>
303                                 </listitem>
304                                 <listitem>
305                                     <para><literal>iflag>=3</literal>: extra information on line searches.</para>
306                                 </listitem>
307                             </itemizedlist>
308                         </listitem>
309                         <listitem>
310                             <para><literal>"gc"</literal> without constraints: from <literal>iflag=1</literal>
311                             to <literal>iflag=5</literal>.</para>
312                             <itemizedlist>
313                                 <listitem>
314                                     <para><literal>iflag>=1</literal> and <literal>iflag>=2</literal>: initial and final print,</para>
315                                 </listitem>
316                                 <listitem>
317                                     <para><literal>iflag=3</literal>: one line per iteration (number of iterations,
318                                     number of calls to f, value of f),</para>
319                                 </listitem>
320                                 <listitem>
321                                     <para><literal>iflag>=4</literal>: extra information on line searches.</para>
322                                 </listitem>
323                             </itemizedlist> 
324                         </listitem>
325                         <listitem>
326                             <para><literal>"gc"</literal> with bounds constraints: from <literal>iflag=1</literal>
327                             to <literal>iflag=3</literal>.</para>
328                             <itemizedlist>
329                                 <listitem>
330                                     <para><literal>iflag>=1</literal>: initial and final print,</para>
331                                 </listitem>
332                                 <listitem>
333                                     <para><literal>iflag>=2</literal>: one print per iteration,</para>
334                                 </listitem>
335                                 <listitem>
336                                     <para><literal>iflag=3</literal>: extra information.</para>
337                                 </listitem>
338                             </itemizedlist>
339                         </listitem>
340                         <listitem>
341                             <para><literal>"nd"</literal> with bounds constraints: from <literal>iflag=1</literal>
342                             to <literal>iflag=8</literal>.</para>
343                             <itemizedlist>
344                                 <listitem>
345                                     <para><literal>iflag>=1</literal>: initial and final print,</para>
346                                 </listitem>
347                                 <listitem>
348                                     <para><literal>iflag>=2</literal>: one print on each convergence,</para>
349                                 </listitem>
350                                 <listitem>
351                                     <para><literal>iflag>=3</literal>: one print per iteration,</para>
352                                 </listitem>
353                                 <listitem>
354                                     <para><literal>iflag>=4</literal>: line search,</para>
355                                 </listitem>
356                                 <listitem>
357                                     <para><literal>iflag>=5</literal>: various tolerances,</para>
358                                 </listitem>
359                                 <listitem>
360                                     <para><literal>iflag>=6</literal>: weight and information on the computation of direction.</para>
361                                 </listitem>
362                             </itemizedlist>
363                         </listitem>
364                     </itemizedlist>
365                 </listitem>
366             </varlistentry>
367             <varlistentry>
368                 <term>fopt</term>
369                 <listitem>
370                     <para>the value of the objective function at the point
371                         <literal>xopt</literal>
372                     </para>
373                 </listitem>
374             </varlistentry>
375             <varlistentry>
376                 <term>xopt</term>
377                 <listitem>
378                     <para>
379                         best value of <literal>x</literal> found.
380                     </para>
381                 </listitem>
382             </varlistentry>
383             <varlistentry>
384                 <term>gopt</term>
385                 <listitem>
386                     <para>the gradient of the objective function at the point
387                         <literal>xopt</literal>
388                     </para>
389                 </listitem>
390             </varlistentry>
391             <varlistentry>
392                 <term>work</term>
393                 <listitem>
394                     <para>working array for hot restart for quasi-Newton method. This
395                         array is automatically initialized by <literal>optim</literal> when
396                         <literal>optim</literal> is invoked. It can be used as input
397                         parameter to speed-up the calculations.
398                     </para>
399                 </listitem>
400             </varlistentry>
401         </variablelist>
402     </refsection>
403     <refsection>
404         <title>Description</title>
405         <para>This function solves unconstrained nonlinear optimization
406             problems:
407         </para>
408         <screen>min f(x)      </screen>
409         <para>
410             where <literal>x</literal> is a vector and <literal>f(x)</literal>
411             is a function that returns a scalar. This function can also solve bound
412             constrained nonlinear optimization problems:
413         </para>
414         <screen>min f(x)
415             binf &lt;= x &lt;= bsup      
416         </screen>
417         <para>
418             where <literal>binf</literal> is the lower bound and
419             <literal>bsup</literal> is the upper bound on <literal>x</literal>.
420         </para>
421         <para>
422             The <literal>costf</literal> argument can be a Scilab function, a
423             list or a string giving the name of a C or Fortran routine (see
424             "external"). This external must return the value <literal>f</literal> of
425             the cost function at the point <literal>x</literal> and the gradient
426             <literal>g</literal> of the cost function at the point
427             <literal>x</literal>.
428         </para>
429         <variablelist>
430             <varlistentry>
431                 <term>Scilab function case</term>
432                 <listitem>
433                     <para>
434                         If <literal>costf</literal> is a Scilab function, its calling
435                         sequence must be:
436                     </para>
437                     <screen>[f, g, ind] = costf(x, ind)      </screen>
438                     <para>
439                         where <literal>x</literal> is the current point,
440                         <literal>ind</literal> is an integer flag described below,
441                         <literal>f</literal> is the real value of the objective function at
442                         the point <literal>x</literal> and <literal>g</literal> is a vector
443                         containing the gradient of the objective function at
444                         <literal>x</literal>. The variable <literal>ind</literal> is
445                         described below.
446                     </para>
447                 </listitem>
448             </varlistentry>
449             <varlistentry>
450                 <term>List case</term>
451                 <listitem>
452                     <para>It may happen that the objective function requires extra
453                         arguments. In this case, we can use the following feature. The
454                         <literal>costf</literal> argument can be the list
455                         <literal>(real_costf, arg1,...,argn)</literal>. In this case,
456                         <literal>real_costf</literal>, the first element in the list, must
457                         be a Scilab function with calling sequence: <screen>        [f,g,ind]=real_costf(x,ind,arg1,...,argn)      </screen>
458                         The <literal>x</literal>, <literal>f</literal>,
459                         <literal>g</literal>, <literal>ind</literal> arguments have the same
460                         meaning as before. In this case, each time the objective function is
461                         called back, the arguments <literal>arg1,...,argn</literal> are
462                         automatically appended at the end of the calling sequence of
463                         <literal>real_costf</literal>.
464                     </para>
465                 </listitem>
466             </varlistentry>
467             <varlistentry>
468                 <term>String case</term>
469                 <listitem>
470                     <para>
471                         If <literal>costf</literal> is a string, it refers to the name
472                         of a C or Fortran routine which must be linked to Scilab
473                     </para>
474                     <variablelist>
475                         <varlistentry>
476                             <term>Fortran case</term>
477                             <listitem>
478                                 <para>The calling sequence of the Fortran subroutine computing
479                                     the objective must be:
480                                 </para>
481                                 <screen>subroutine costf(ind,n,x,f,g,ti,tr,td)      </screen>
482                                 <para>with the following declarations:</para>
483                                 <screen>integer ind,n,ti(*)
484                                     double precision x(n),f,g(n),td(*)
485                                     real tr(*)      
486                                 </screen>
487                                 <para>
488                                     The argument <literal>ind</literal> is described
489                                     below.
490                                 </para>
491                                 <para>If ind = 2, 3 or 4, the inputs of the routine are :
492                                     <literal>x, ind, n, ti, tr,td</literal>.
493                                 </para>
494                                 <para>If ind = 2, 3 or 4, the outputs of the routine are :
495                                     <literal>f</literal> and <literal>g</literal>.
496                                 </para>
497                             </listitem>
498                         </varlistentry>
499                         <varlistentry>
500                             <term>C case</term>
501                             <listitem>
502                                 <para>The calling sequence of the C function computing the
503                                     objective must be:
504                                 </para>
505                                 <screen>void costf(int *ind, int *n, double *x, double *f, double *g, int *ti, float *tr, double *td)      </screen>
506                                 <para>
507                                     The argument <literal>ind</literal> is described
508                                     below.
509                                 </para>
510                                 <para>The inputs and outputs of the function are the same as
511                                     in the Fortran case.
512                                 </para>
513                             </listitem>
514                         </varlistentry>
515                     </variablelist>
516                 </listitem>
517             </varlistentry>
518         </variablelist>
519         <para>
520             On output, <literal>ind&lt;0</literal> means that
521             <literal>f</literal> cannot be evaluated at <literal>x</literal> and
522             <literal>ind=0</literal> interrupts the optimization.
523         </para>
524     </refsection>
525     <refsection>
526         <title>Termination criteria</title>
527         <para>Each algorithm has its own termination criteria, which may use the
528             parameters given by the user, that is <literal>nap</literal>,
529             <literal>iter</literal>, <literal>epsg</literal>, <literal>epsf</literal>
530             and <literal>epsx</literal>. Not all the parameters are taken into
531             account. In the table below, we present the specific termination
532             parameters which are taken into account by each algorithm. The
533             unconstrained solver is identified by "UNC" while the bound constrained
534             solver is identified by "BND". An empty entry means that the parameter is
535             ignored by the algorithm.
536         </para>
537         <para>
538             <informaltable border="1">
539                 <tr>
540                     <td>Solver</td>
541                     <td>nap</td>
542                     <td>iter</td>
543                     <td>epsg</td>
544                     <td>epsf</td>
545                     <td>epsx</td>
546                 </tr>
547                 <tr>
548                     <td>optim/"qn" UNC</td>
549                     <td>X</td>
550                     <td>X</td>
551                     <td>X</td>
552                     <td/>
553                     <td/>
554                 </tr>
555                 <tr>
556                     <td>optim/"qn" BND</td>
557                     <td>X</td>
558                     <td>X</td>
559                     <td>X</td>
560                     <td>X</td>
561                     <td>X</td>
562                 </tr>
563                 <tr>
564                     <td>optim/"gc" UNC</td>
565                     <td>X</td>
566                     <td>X</td>
567                     <td>X</td>
568                     <td/>
569                     <td/>
570                 </tr>
571                 <tr>
572                     <td>optim/"gc" BND</td>
573                     <td>X</td>
574                     <td>X</td>
575                     <td>X</td>
576                     <td>X</td>
577                     <td>X</td>
578                 </tr>
579                 <tr>
580                     <td>optim/"nd" UNC</td>
581                     <td>X</td>
582                     <td>X</td>
583                     <td/>
584                     <td>X</td>
585                     <td>X</td>
586                 </tr>
587             </informaltable>
588         </para>
589     </refsection>
590     <refsection>
591         <title>Example: Scilab function</title>
        <para>The following is an example with a Scilab function. Notice that, for
            the sake of simplicity, the Scilab function "cost" of the following
            example computes the objective function f and its derivative regardless
            of the value of ind. This keeps the example simple. In practical
            situations though, the computation of "f" and "g" may raise performance
            issues, so that a direct optimization may be to use the value of "ind" to
            compute "f" and "g" only when needed.
        </para>
600         <programlisting role="example">function [f, g, ind] = cost(x, ind)
601             xref = [1; 2; 3];
602             f = 0.5 * norm(x - xref)^2;
603             g = x - xref;
604             endfunction
605             
606             // Simplest call
607             x0 = [1; -1; 1];
608             [fopt, xopt] = optim(cost, x0)
609             
610             // Use "gc" algorithm
611             [fopt, xopt, gopt] = optim(cost, x0, "gc")
612             
613             // Use "nd" algorithm
614             [fopt, xopt, gopt] = optim(cost, x0, "nd")
615             
616             // Upper and lower bounds on x
617             [fopt, xopt, gopt] = optim(cost, "b", [-1;0;2], [0.5;1;4], x0)
618             
619             // Upper and lower bounds on x and setting up the algorithm to "gc"
620             [fopt, xopt, gopt] = optim(cost, "b", [-1; 0; 2], [0.5; 1; 4], x0, "gc")
621             
622             // Bound on the number of call to the objective function
623             [fopt, xopt, gopt] = optim(cost, "b", [-1; 0; 2], [0.5; 1; 4], x0, "gc", "ar", 3)
624             
625             // Set max number of call to the objective function (3)
626             // Set max number of iterations (100)
627             // Set stopping threshold on the value of f (1e-6),
628             // on the value of the norm of the gradient of the objective function (1e-6)
629             // on the improvement on the parameters x_opt (1e-6;1e-6;1e-6)
630             [fopt, xopt, gopt] = optim(cost, "b", [-1; 0; 2], [0.5; 1; 4], x0, "gc", "ar", 3, 100, 1e-6, 1e-6, [1e-3; 1e-3; 1e-3])
631             
            // Additional messages are printed in the console.
633             [fopt, xopt] = optim(cost, x0, imp = 3)    
634         </programlisting>
635     </refsection>
636     <refsection>
637         <title>Example: Print messages</title>
        <para>
            The <literal>imp</literal> flag may take negative integer values,
            say k. In that case, the cost function is called once every -k iterations.
            This allows one to plot the function value or to write a log file.
        </para>
643         <para>
644             This feature is available only with the <literal>"qn"</literal>
645             algorithm without constraints.
646         </para>
647         <para>In the following example, we solve the Rosenbrock test case. For
648             each iteration of the algorithm, we print the value of x, f and g.
649         </para>
650         <programlisting role="example">function [f, g, ind] = cost(x, ind)
651             xref = [1; 2; 3];
652             f = 0.5 * norm(x - xref)^2;
653             g = x - xref;
654             if (ind == 1) then
655             mprintf("f(x) = %s, |g(x)|=%s\n", string(f), string(norm(g)))
656             end
657             endfunction
658             
659             x0 = [1; -1; 1];
660             [fopt, xopt] = optim(cost, x0, imp = -1)   
661         </programlisting>
662         <para>The previous script produces the following output.</para>
663         <screen>--&gt;[fopt, xopt] = optim(cost, x0, imp = -1)
664             f(x) = 6.5, |g(x)|=3.6055513
665             f(x) = 2.8888889, |g(x)|=2.4037009
666             f(x) = 9.861D-31, |g(x)|=1.404D-15
667             f(x) = 0, |g(x)|=0
668             Norm of projected gradient lower than   0.0000000D+00.
669             xopt  =
670             1.
671             2.
672             3.
673             fopt  =
674             0.    
675         </screen>
        <para>In the following example, we solve the Rosenbrock test case. For
            each iteration of the algorithm, we plot the current value of x into a 2D
            graph containing the contours of Rosenbrock's function. This allows us to
            see the progress of the algorithm while it is running. We could as well
            write the value of x, f and g into a log file if needed.
        </para>
682         <programlisting role="example">// 1. Define Rosenbrock for optimization
683             function [f , g , ind] = rosenbrock (x , ind)
684             f = 100.0 *(x(2) - x(1)^2)^2 + (1 - x(1))^2;
685             g(1) = - 400. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
686             g(2) = 200. * ( x(2) - x(1)**2 )
687             endfunction
688             
689             // 2. Define rosenbrock for contouring
690             function f = rosenbrockC ( x1 , x2 )
691             x = [x1 x2]
692             ind = 4
693             [ f , g , ind ] = rosenbrock ( x , ind )
694             endfunction
695             
696             // 3. Define Rosenbrock for plotting
697             function [ f , g , ind ] = rosenbrockPlot ( x , ind )
698             [ f , g , ind ] = rosenbrock ( x , ind )
699             if (ind == 1) then
700             plot ( x(1) , x(2) , "g." )
701             end
702             endfunction
703             
704             // 4. Draw the contour of Rosenbrock's function
705             x0 = [-1.2 1.0];
706             xopt = [1.0 1.0];
707             xdata = linspace(-2,2,100);
708             ydata = linspace(-2,2,100);
709             contour ( xdata , ydata , rosenbrockC , [1 10 100 500 1000])
710             plot(x0(1) , x0(2) , "b.")
711             plot(xopt(1) , xopt(2) , "r*")
712             
713             // 5. Plot the optimization process, during optimization
714             [fopt, xopt] = optim ( rosenbrockPlot , x0 , imp = -1)    
715         </programlisting>
716         <scilab:image>
717             function [f, g, ind]=rosenbrock(x, ind)
718             f = 100.0 *(x(2) - x(1)^2)^2 + (1 - x(1))^2;
719             g(1) = - 400. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
720             g(2) = 200. * ( x(2) - x(1)**2 )
721             endfunction
722             
723             function f=rosenbrockC(x1, x2)
724             x = [x1 x2]
725             ind = 4
726             [ f , g , ind ] = rosenbrock ( x , ind )
727             endfunction
728             
729             function [f, g, ind]=rosenbrockPlot(x, ind)
730             [ f , g , ind ] = rosenbrock ( x , ind )
731             if (ind == 1) then
732             plot ( x(1) , x(2) , "g." )
733             end
734             endfunction
735             
736             x0 = [-1.2 1.0];
737             xopt = [1.0 1.0];
738             xdata = linspace(-2,2,100);
739             ydata = linspace(-2,2,100);
740             contour ( xdata , ydata , rosenbrockC , [1 10 100 500 1000])
741             plot(x0(1) , x0(2) , "b.")
742             plot(xopt(1) , xopt(2) , "r*")
743             [fopt, xopt] = optim ( rosenbrockPlot , x0 , imp = -1)
744         </scilab:image>
745         
746     </refsection>
747     <refsection>
748         <title>Example: Optimizing with numerical derivatives</title>
        <para>It is possible to optimize a problem without explicit knowledge
            of the derivative of the cost function. For this purpose, we can use the
            numdiff or derivative function to compute a numerical derivative of the
            cost function.
        </para>
754         <para>In the following example, we use the numdiff function to solve
755             Rosenbrock's problem.
756         </para>
757         <programlisting role="example">function f = rosenbrock ( x )
758             f = 100.0 *(x(2)-x(1)^2)^2 + (1-x(1))^2;
759             endfunction
760             
761             function [ f , g , ind ] = rosenbrockCost ( x , ind )
762             f = rosenbrock ( x );
763             g= numdiff ( rosenbrock , x );
764             endfunction
765             
766             x0 = [-1.2 1.0];
767             
768             [ fopt , xopt ] = optim ( rosenbrockCost , x0 )    
769         </programlisting>
770         <para>In the following example, we use the derivative function to solve
771             Rosenbrock's problem. Given that the step computation strategy is not the
772             same in numdiff and derivative, this might lead to improved
773             results.
774         </para>
775         <programlisting role="example">function f = rosenbrock ( x )
776             f = 100.0 *(x(2)-x(1)^2)^2 + (1-x(1))^2;
777             endfunction
778             
779             function [ f , g , ind ] = rosenbrockCost2 ( x , ind )
780             f = rosenbrock ( x );
781             g = derivative ( rosenbrock , x.' , order = 4 );
782             endfunction
783             
784             x0 = [-1.2 1.0];
785             [fopt , xopt] = optim ( rosenbrockCost2 , x0 )    
786         </programlisting>
787     </refsection>
788     <refsection>
789         <title>Example: Counting function evaluations and number of
790             iterations
791         </title>
        <para>
            The <literal>imp</literal> option can take negative values. If
            <literal>imp</literal> is equal to <literal>m</literal> where
            <literal>m</literal> is a negative integer, then the cost function is
            evaluated every -<literal>m</literal> iterations, with the
            <literal>ind</literal> input argument equal to 1. The following example
            uses this feature to compute the number of iterations. The global variable
            <literal>_MYDATA_</literal> is used to store the number of function
            evaluations as well as the number of iterations.
        </para>
802         <programlisting role="example">function [f, g, ind] = cost(x, ind,xref)
803             global _MYDATA_
804             if ( ind == 1 )
805             _MYDATA_.niter = _MYDATA_.niter + 1
806             end
807             _MYDATA_.nfevals = _MYDATA_.nfevals + 1
808             f = 0.5 * norm(x - xref)^2;
809             g = x - xref;
810             endfunction
811             xref = [1; 2; 3];
812             x0 = [1; -1; 1];
813             global _MYDATA_
814             _MYDATA_ = tlist ( ["T_MYDATA", "niter", "nfevals"])
815             _MYDATA_.niter = 0
816             _MYDATA_.nfevals = 0
817             [f, xopt] = optim(list(cost, xref), x0, imp = -1)
818             mprintf("Number of function evaluations:%d\n", _MYDATA_.nfevals)
819             mprintf("Number of iterations:%d\n", _MYDATA_.niter) 
820         </programlisting>
        <para>While the previous example works perfectly, there is a risk that the
            same variable <literal>_MYDATA_</literal> is used by some internal
            function used by <literal>optim</literal>. In this case, the value may be
            wrong. This is why a sufficiently unusual variable name has been
            used.
        </para>
827     </refsection>
828     <refsection>
        <title>Example: Passing extra parameters</title>
830         <para>In most practical situations, the cost function depends on extra
831             parameters which are required to evaluate the cost function. There are
832             several methods to achieve this goal.
833         </para>
        <para>In the following example, the cost function uses 4 parameters
            <literal>a, b, c</literal> and <literal>d</literal>. We define the cost
            function with additional input arguments, which are declared after the
            index argument. Then we pass a list as the first input argument of the
            <literal>optim</literal> solver. The first element of the list is the cost
            function. The additional variables are directly passed to the cost
            function.
        </para>
842         <programlisting role="example">function [ f , g , ind ] = costfunction ( x , ind , a , b , c , d )
843             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
844             g(1) = 2 * a * ( x(1) - c )
845             g(2) = 2 * b * ( x(2) - d )
846             endfunction
847             
848             x0 = [1 1];
849             a = 1.0;
850             b = 2.0;
851             c = 3.0;
852             d = 4.0;
853             costf = list ( costfunction , a , b , c, d );
854             [fopt , xopt] = optim ( costf , x0 , imp = 2)    
855         </programlisting>
856         <para>In complex cases, the cost function may have so many parameters,
857             that having a function which takes all arguments as inputs is not
858             convenient. For example, consider the situation where the cost function
859             needs 12 parameters. Then, designing a function with 14 input arguments
860             (x, index and the 12 parameters) is difficult to manage. Instead, we can
861             use a more complex data structure to store our data. In the following
862             example, we use a tlist to store the 4 input arguments. This method can
863             easily be expanded to an arbitrary number of parameters.
864         </para>
865         <programlisting role="example">function [f , g , ind] = costfunction ( x , ind , parameters)
866             // Get the parameters
867             a = parameters.a
868             b = parameters.b
869             c = parameters.c
870             d = parameters.d
871             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
872             g(1) = 2 * a * ( x(1) - c )
873             g(2) = 2 * b * ( x(2) - d )
874             endfunction
875             
876             x0 = [1 1];
877             a = 1.0;
878             b = 2.0;
879             c = 3.0;
880             d = 4.0;
881             // Store the parameters
882             parameters = tlist ( [
883             "T_MYPARAMS"
884             "a"
885             "b"
886             "c"
887             "d"
888             ]);
889             
890             parameters.a = a;
891             parameters.b = b;
892             parameters.c = c;
893             parameters.d = d;
894             costf = list ( costfunction , parameters );
895             [fopt , xopt] = optim ( costf , x0 , imp = 2)    
896         </programlisting>
897         <para>In the following example, the parameters are defined before the
898             optimizer is called. They are directly used in the cost function.
899         </para>
900         <programlisting role="example">// The example NOT to follow
901             function [ f , g , ind ] = costfunction ( x , ind )
902             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
903             g(1) = 2 * a * ( x(1) - c )
904             g(2) = 2 * b * ( x(2) - d )
905             endfunction
906             x0 = [1 1];
907             a = 1.0;
908             b = 2.0;
909             c = 3.0;
910             d = 4.0;
911             [ fopt , xopt ] = optim ( costfunction , x0 , imp = 2 )   
912         </programlisting>
        <para>While the previous example works perfectly, there is a risk that the
            same variables are used by some internal function used by
            <literal>optim</literal>. In this case, the values of the parameters are
            not what is expected and the optimization can fail or, worse, give a wrong
            result. It is also difficult to manage such a function, which requires
            that all the parameters are defined in the calling context.
        </para>
920         <para>In the following example, we define the cost function with the
921             classical header. Inside the function definition, we declare that the
922             parameters <literal>a, b, c</literal> and <literal>d</literal> are global
923             variables. Then we declare and set the global variables.
924         </para>
925         <programlisting role="example">// Another example NOT to follow
926             function [ f , g , ind ] = costfunction ( x , ind )
927             global a b c d
928             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
929             g(1) = 2 * a * ( x(1) - c )
930             g(2) = 2 * b * ( x(2) - d )
931             endfunction
932             x0 = [1 1];
933             global a b c d
934             a = 1.0;
935             b = 2.0;
936             c = 3.0;
937             d = 4.0;
938             [ fopt , xopt ] = optim ( costfunction , x0 , imp = 2 )    
939         </programlisting>
        <para>While the previous example works perfectly, there is a risk that the
            same variables are used by some internal function used by
            <literal>optim</literal>. In this case, the values of the parameters are
            not what is expected and the optimization can fail or, worse, give a wrong
            result.
        </para>
946     </refsection>
947     <refsection>
        <title>Example: Checking that derivatives are correct</title>
        <para>Many optimization problems can be avoided if the derivatives are
            computed correctly. One common reason for failure in the step-length
            procedure is an error in the calculation of the cost function and its
            gradient. Incorrect calculation of derivatives is by far the most common
            user error.
        </para>
955         <para>In the following example, we give a false implementation of
956             Rosenbrock's gradient. In order to check the computation of the
957             derivatives, we use the <literal>derivative</literal> function. We define
958             the <literal>simplified</literal> function, which delegates the
959             computation of <literal>f</literal> to the rosenbrock function. The
960             <literal>simplified</literal> function is passed as an input argument of
961             the <literal>derivative</literal> function.
962         </para>
963         <programlisting role="example">function [ f , g , index ] = rosenbrock ( x , index )
964             f = 100.0 *(x(2)-x(1)^2)^2 + (1-x(1))^2;
965             // Exact :
966             g(1) = - 400. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
967             // Wrong :
968             g(1) = - 1200. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
969             g(2) = 200. * ( x(2) - x(1)**2 )
970             endfunction
971             
972             function f = simplified ( x )
973             index = 1;
974             [ f , g , index ] = rosenbrock ( x , index )
975             endfunction
976             
977             x0 = [-1.2 1];
978             index = 1;
979             [ f , g , index ] = rosenbrock ( x0 , index );
980             gnd = derivative ( simplified , x0.' );
981             mprintf("Exact derivative:[%s]\n" , strcat ( string(g) , " " ));
982             mprintf("Numerical derivative:[%s]\n" , strcat ( string(gnd) , " " ));    
983         </programlisting>
        <para>The previous script produces the following output. Obviously, the
            difference between the two gradients is enormous, which shows that the
            wrong formula has been used in the gradient.
        </para>
988         <programlisting role="example">      Exact derivative:[-638 -88]
989             Numerical derivative:[-215.6 -88]   
990         </programlisting>
991     </refsection>
992     <refsection>
993         <title>Example: C function</title>
        <para>The following is an example with a C function, where a C source code
            is written into a file, dynamically compiled and loaded into Scilab, and
            then used by the "optim" solver. The interface of the "rosenc" function is
            fixed, even if the arguments are not really used in the cost function.
            This is because the underlying optimization solvers must assume that the
            objective function has a known, constant interface. In the following
            example, the arrays ti and tr are not used, only the array "td" is used,
            as a parameter of the Rosenbrock function. Notice that the content of the
            arrays ti and td is the same as the content of the Scilab variables, as
            expected.
        </para>
1005         <programlisting role="example">// External function written in C (C compiler required)
1006             // write down the C code (Rosenbrock problem)
1007             C=['#include &lt;math.h&gt;'
1008             'double sq(double x)'
1009             '{ return x*x;}'
1010             'void rosenc(int *ind, int *n, double *x, double *f, double *g, '
1011             '                                int *ti, float *tr, double *td)'
1012             '{'
1013             '  double p;'
1014             '  int i;'
1015             '  p=td[0];'
1016             '  if (*ind==2||*ind==4) {'
1017             '    *f=1.0;'
1018             '    for (i=1;i&lt;*n;i++)'
1019             '      *f+=p*sq(x[i]-sq(x[i-1]))+sq(1.0-x[i]);'
1020             '  }'
1021             '  if (*ind==3||*ind==4) {'
1022             '    g[0]=-4.0*p*(x[1]-sq(x[0]))*x[0];'
1023             '    for (i=1;i&lt;*n-1;i++)'
1024             '      g[i]=2.0*p*(x[i]-sq(x[i-1]))-4.0*p*(x[i+1]-sq(x[i]))*x[i]-2.0*(1.0-x[i]);'
1025             '    g[*n-1]=2.0*p*(x[*n-1]-sq(x[*n-2]))-2.0*(1.0-x[*n-1]);'
1026             '  }'
1027             '}'];
1028             cd TMPDIR;
1029             mputl(C, TMPDIR+'/rosenc.c')
1030             
1031             // compile the C code
1032             l = ilib_for_link('rosenc', 'rosenc.c', [], 'c');
1033             
1034             // incremental linking
1035             link(l, 'rosenc', 'c')
1036             
1037             //solve the problem
1038             x0 = [40; 10; 50];
1039             p = 100;
1040             [f, xo, go] = optim('rosenc', x0, 'td', p)
1041         </programlisting>
1042     </refsection>
1043     <refsection>
1044         <title>Example: Fortran function</title>
1045         <para>The following is an example with a Fortran function.</para>
1046         <programlisting role="example">// External function written in Fortran (Fortran compiler required)
1047             // write down the Fortran  code (Rosenbrock problem)
1048             F = [ '      subroutine rosenf(ind, n, x, f, g, ti, tr, td)'
1049             '      integer ind,n,ti(*)'
1050             '      double precision x(n),f,g(n),td(*)'
1051             '      real tr(*)'
1052             'c'
1053             '      double precision y,p'
1054             '      p=td(1)'
1055             '      if (ind.eq.2.or.ind.eq.4) then'
1056             '        f=1.0d0'
1057             '        do i=2,n'
1058             '          f=f+p*(x(i)-x(i-1)**2)**2+(1.0d0-x(i))**2'
1059             '        enddo'
1060             '      endif'
1061             '      if (ind.eq.3.or.ind.eq.4) then'
1062             '        g(1)=-4.0d0*p*(x(2)-x(1)**2)*x(1)'
1063             '        if(n.gt.2) then'
1064             '          do i=2,n-1'
1065             '            g(i)=2.0d0*p*(x(i)-x(i-1)**2)-4.0d0*p*(x(i+1)-x(i)**2)*x(i)'
1066             '     &amp;           -2.0d0*(1.0d0-x(i))'
1067             '          enddo'
1068             '        endif'
1069             '        g(n)=2.0d0*p*(x(n)-x(n-1)**2)-2.0d0*(1.0d0-x(n))'
1070             '      endif'
1071             '      return'
1072             '      end'];
1073             cd TMPDIR;
1074             mputl(F, TMPDIR+'/rosenf.f')
1075             
1076             // compile the Fortran code
1077             l = ilib_for_link('rosenf', 'rosenf.f', [], 'f');
1078             
1079             // incremental linking
1080             link(l, 'rosenf', 'f')
1081             
1082             //solve the problem
1083             x0 = [40; 10; 50];
1084             p = 100;
1085             [f, xo, go] = optim('rosenf', x0, 'td', p)    
1086         </programlisting>
1087     </refsection>
1088     <refsection>
1089         <title>Example: Fortran function with initialization</title>
1090         <para>The following is an example with a Fortran function in which the
1091             "in" option is used to allocate memory inside the Scilab environment. In
1092             this mode, there is a dialog between Scilab and the objective function.
1093             The goal of this dialog is to initialize the parameters of the objective
1094             function. Each part of this dialog is based on a specific value of the
1095             "ind" parameter.
1096         </para>
        <para>At the beginning, Scilab calls the objective function, with the ind
            parameter equal to 10. This tells the objective function to initialize
            the sizes of the arrays it needs by setting the nizs, nrzs and ndzs
            integer parameters of the "nird" common. Then the objective function
            returns. At this point, Scilab creates internal variables and allocates
            memory for the variables izs, rzs and dzs. Scilab calls the objective
            function back again, this time with ind equal to 11. This tells the
            objective function to initialize the arrays izs, rzs and dzs. When the
            objective function has done so, it returns. Then Scilab enters the real
            optimization mode and calls the optimization solver the user requested.
            Whenever the objective function is called, the izs, rzs and dzs arrays
            have the values that have been previously initialized.
        </para>
1110         <programlisting role="example">//
1111             // Define a fortran source code and compile it (fortran compiler required)
1112             //
1113             fortransource = ['      subroutine rosenf(ind,n,x,f,g,izs,rzs,dzs)'
1114             'C     -------------------------------------------'
1115             'c     Example of cost function given by a subroutine'
1116             'c     if n&lt;=2 returns ind=0'
1117             'c     f.bonnans, oct 86'
1118             '      implicit double precision (a-h,o-z)'
1119             '      real rzs(1)'
1120             '      double precision dzs(*)'
1121             '      dimension x(n),g(n),izs(*)'
1122             '      common/nird/nizs,nrzs,ndzs'
1123             '      if (n.lt.3) then'
1124             '        ind=0'
1125             '        return'
1126             '      endif'
1127             '      if(ind.eq.10) then'
1128             '         nizs=2'
1129             '         nrzs=1'
1130             '         ndzs=2'
1131             '         return'
1132             '      endif'
1133             '      if(ind.eq.11) then'
1134             '         izs(1)=5'
1135             '         izs(2)=10'
1136             '         dzs(2)=100.0d+0'
1137             '         return'
1138             '      endif'
1139             '      if(ind.eq.2)go to 5'
1140             '      if(ind.eq.3)go to 20'
1141             '      if(ind.eq.4)go to 5'
1142             '      ind=-1'
1143             '      return'
1144             '5     f=1.0d+0'
1145             '      do 10 i=2,n'
1146             '        im1=i-1'
1147             '10      f=f + dzs(2)*(x(i)-x(im1)**2)**2 + (1.0d+0-x(i))**2'
1148             '      if(ind.eq.2)return'
1149             '20    g(1)=-4.0d+0*dzs(2)*(x(2)-x(1)**2)*x(1)'
1150             '      nm1=n-1'
1151             '      do 30 i=2,nm1'
1152             '        im1=i-1'
1153             '        ip1=i+1'
1154             '        g(i)=2.0d+0*dzs(2)*(x(i)-x(im1)**2)'
1155             '30      g(i)=g(i) -4.0d+0*dzs(2)*(x(ip1)-x(i)**2)*x(i) - '
1156             '     &amp;        2.0d+0*(1.0d+0-x(i))'
1157             '      g(n)=2.0d+0*dzs(2)*(x(n)-x(nm1)**2) - 2.0d+0*(1.0d+0-x(n))'
1158             '      return'
1159             '      end'];
1160             cd TMPDIR;
1161             mputl(fortransource, TMPDIR + '/rosenf.f')
1162             
            // compile the Fortran code
1164             libpath = ilib_for_link('rosenf', 'rosenf.f', [], 'f');
1165             
1166             // incremental linking
1167             linkid = link(libpath, 'rosenf', 'f');
1168             
1169             x0 = 1.2 * ones(1, 5);
1170             //
1171             // Solve the problem
1172             //
1173             [f, x, g] = optim('rosenf', x0, 'in');    
1174         </programlisting>
1175     </refsection>
1176     <refsection>
1177         <title>Example: Fortran function with initialization on Windows with Intel
1178             Fortran Compiler
1179         </title>
1180         <para>Under the Windows operating system with Intel Fortran Compiler, one
1181             must carefully design the fortran source code so that the dynamic link
1182             works properly. On Scilab's side, the optimization component is
1183             dynamically linked and the symbol "nird" is exported out of the
1184             optimization dll. On the cost function's side, which is also dynamically
1185             linked, the "nird" common must be imported in the cost function
1186             dll.
1187         </para>
        <para>The following example is a re-writing of the previous example, with
            special attention for the Windows operating system with the Intel Fortran
            compiler as example. In that case, we introduce additional compiling
            instructions, which allow the compiler to import the "nird"
            symbol.
        </para>
1194         <programlisting role="example">fortransource = ['subroutine rosenf(ind,n,x,f,g,izs,rzs,dzs)'
1195             'cDEC$ IF DEFINED (FORDLL)'
1196             'cDEC$ ATTRIBUTES DLLIMPORT:: /nird/'
1197             'cDEC$ ENDIF'
1198             'C     -------------------------------------------'
1199             'c     Example of cost function given by a subroutine'
1200             'c     if n&lt;=2 returns ind=0'
1201             'c     f.bonnans, oct 86'
1202             '      implicit double precision (a-h,o-z)'
1203             [etc...]    
1204         </programlisting>
1205     </refsection>
1206     <refsection role="see also">
1207         <title>See Also</title>
1208         <simplelist type="inline">
1209             <member>
1210                 <link linkend="external">external</link>
1211             </member>
1212             <member>
1213                 <link linkend="qpsolve">qpsolve</link>
1214             </member>
1215             <member>
1216                 <link linkend="datafit">datafit</link>
1217             </member>
1218             <member>
1219                 <link linkend="leastsq">leastsq</link>
1220             </member>
1221             <member>
1222                 <link linkend="numdiff">numdiff</link>
1223             </member>
1224             <member>
1225                 <link linkend="derivative">derivative</link>
1226             </member>
1227             <member>
1228                 <link linkend="NDcost">NDcost</link>
1229             </member>
1230         </simplelist>
1231     </refsection>
1232     <refsection>
1233         <title>References</title>
1234         <para>The following is a map from the various options to the underlying
1235             solvers.
1236         </para>
1237         <variablelist>
1238             <varlistentry>
1239                 <term>"qn" without constraints</term>
1240                 <listitem>
1241                     <para>n1qn1 : a quasi-Newton method with a Wolfe-type line
1242                         search
1243                     </para>
1244                 </listitem>
1245             </varlistentry>
1246             <varlistentry>
1247                 <term>"qn" with bounds constraints</term>
1248                 <listitem>
1249                     <para>qnbd : a quasi-Newton method with projection</para>
1250                     <para>RR-0242 - A variant of a projected variable metric method for
1251                         bound constrained optimization problems, Bonnans Frederic, Rapport
1252                         de recherche de l'INRIA - Rocquencourt, Octobre 1983
1253                     </para>
1254                 </listitem>
1255             </varlistentry>
1256             <varlistentry>
1257                 <term>"gc" without constraints</term>
1258                 <listitem>
                    <para>n1qn3 : a quasi-Newton limited memory method with BFGS.</para>
1260                 </listitem>
1261             </varlistentry>
1262             <varlistentry>
1263                 <term>"gc" with bounds constraints</term>
1264                 <listitem>
1265                     <para>gcbd : a BFGS-type method with limited memory and
1266                         projection
1267                     </para>
1268                 </listitem>
1269             </varlistentry>
1270             <varlistentry>
1271                 <term>"nd" without constraints</term>
1272                 <listitem>
1273                     <para>n1fc1 : a bundle method</para>
1274                 </listitem>
1275             </varlistentry>
1276             <varlistentry>
1277                 <term>"nd" with bounds constraints</term>
1278                 <listitem>
1279                     <para>not available</para>
1280                 </listitem>
1281             </varlistentry>
1282         </variablelist>
1283     </refsection>
1284 </refentry>