3810fd192275777a2652fcd55b1c5b2c2366d8eb
[scilab.git] / scilab / modules / optimization / help / en_US / optim.xml
1 <?xml version="1.0" encoding="UTF-8"?>
2 <!--
3  * Scilab ( http://www.scilab.org/ ) - This file is part of Scilab
4  * Copyright (C) 2008 - INRIA
5  * Copyright (C) 2008 - 2009 - INRIA - Michael Baudin
6  * Copyright (C) 2010 - 2011 - DIGITEO - Michael Baudin
7  *
8  * This file must be used under the terms of the CeCILL.
9  * This source file is licensed as described in the file COPYING, which
10  * you should have received as part of this distribution.  The terms
11  * are also available at
12  * http://www.cecill.info/licences/Licence_CeCILL_V2.1-en.txt
13  *
14  -->
15 <refentry xmlns="http://docbook.org/ns/docbook" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:svg="http://www.w3.org/2000/svg" xmlns:ns3="http://www.w3.org/1999/xhtml" xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:db="http://docbook.org/ns/docbook" xmlns:scilab="http://www.scilab.org"  xml:id="optim" xml:lang="en">
16     <refnamediv>
17         <refname>optim</refname>
18         <refpurpose>non-linear optimization routine</refpurpose>
19     </refnamediv>
20     <refsynopsisdiv>
21         <title>Calling Sequence</title>
22         <synopsis>
23             fopt = optim(costf, x0)
24             fopt = optim(costf [,&lt;contr&gt;],x0 [,algo] [,df0 [,mem]] [,work] [,&lt;stop&gt;] [,&lt;params&gt;] [,imp=iflag])
25             [fopt, xopt] = optim(...)
26             [fopt, xopt, gopt] = optim(...)
27             [fopt, xopt, gopt, work] = optim(...)
28             [fopt, xopt, gopt, work, iters] = optim(...)
29             [fopt, xopt, gopt, work, iters, evals] = optim(...)
30             [fopt, xopt, gopt, work, iters, evals, err] = optim(...)
31         </synopsis>
32     </refsynopsisdiv>
33     <refsection>
34         <title>Arguments</title>
35         <variablelist>
36             <varlistentry>
37                 <term>costf</term>
38                 <listitem>
39                     <para>a function, a list or a string, the objective function.</para>
40                 </listitem>
41             </varlistentry>
42             <varlistentry>
43                 <term>x0</term>
44                 <listitem>
45                     <para>real vector, the initial guess for
46                         <literal>x</literal>.
47                     </para>
48                 </listitem>
49             </varlistentry>
50             <varlistentry>
51                 <term>&lt;contr&gt;</term>
52                 <listitem>
53                     <para>an optional sequence of arguments containing the lower and
54                         upper bounds on <literal>x</literal>. If bounds are required, this
55                         sequence of arguments must be <literal>"b",binf,bsup</literal> where
56                         <literal>binf</literal> and <literal>bsup</literal> are real vectors
57                         with same dimension as <literal>x0</literal>.
58                     </para>
59                 </listitem>
60             </varlistentry>
61             <varlistentry>
62                 <term>algo</term>
63                 <listitem>
64                     <para>a string, the algorithm to use (default
65                         <literal>algo="qn"</literal>).
66                     </para>
67                     <para>The available algorithms are:</para>
68                     <itemizedlist>
69                         <listitem>
70                             <para>
71                                 <literal>"qn"</literal>: Quasi-Newton with BFGS
72                             </para>
73                         </listitem>
74                         <listitem>
75                             <para>
76                                 <literal>"gc"</literal>: limited memory BFGS
77                             </para>
78                         </listitem>
79                         <listitem>
80                             <para>
81                                 <literal>"nd"</literal>: non-differentiable.
82                             </para>
83                             <para>
84                                 The <literal>"nd"</literal> algorithm does not accept
85                                 bounds on <literal>x</literal>.
86                             </para>
87                         </listitem>
88                     </itemizedlist>
89                 </listitem>
90             </varlistentry>
91             <varlistentry>
92                 <term>df0</term>
93                 <listitem>
94                     <para>
95                         real scalar, a guess of the decreasing of <literal>f</literal>
96                         at first iteration. (default <literal>df0=1</literal>).
97                     </para>
98                 </listitem>
99             </varlistentry>
100             <varlistentry>
101                 <term>mem</term>
102                 <listitem>
103                     <para>integer, the number of variables used to approximate the
104                         Hessian (default <literal>mem=10</literal>). This feature is
105                         available for the <literal>"gc"</literal> algorithm without
106                         constraints and the non-smooth algorithm <literal>"nd"</literal>
107                         without constraints.
108                     </para>
109                 </listitem>
110             </varlistentry>
111             <varlistentry>
112                 <term>&lt;stop&gt;</term>
113                 <listitem>
114                     <para>a sequence of arguments containing the parameters controlling
115                         the convergence of the algorithm. The following sequences are
116                         available: <screen>              "ar",nap
117                             "ar",nap,iter
118                             "ar",nap,iter,epsg
119                             "ar",nap,iter,epsg,epsf
120                             "ar",nap,iter,epsg,epsf,epsx
121                         </screen>
122                     </para>
123                     <para>where:</para>
124                     <variablelist>
125                         <varlistentry>
126                             <term>nap</term>
127                             <listitem>
128                                 <para>
129                                     maximum number of calls to <literal>costf</literal>
130                                     allowed (default <literal>nap=100</literal>).
131                                 </para>
132                             </listitem>
133                         </varlistentry>
134                         <varlistentry>
135                             <term>iter</term>
136                             <listitem>
137                                 <para>maximum number of iterations allowed (default
138                                     <literal>iter=100</literal>).
139                                 </para>
140                             </listitem>
141                         </varlistentry>
142                         <varlistentry>
143                             <term>epsg</term>
144                             <listitem>
145                                 <para>threshold on gradient norm (default
146                                     <literal>epsg= %eps</literal>).
147                                 </para>
148                             </listitem>
149                         </varlistentry>
150                         <varlistentry>
151                             <term>epsf</term>
152                             <listitem>
153                                 <para>
154                                     threshold controlling decreasing of <literal>f</literal>
155                                     (default <literal>epsf=0</literal>).
156                                 </para>
157                             </listitem>
158                         </varlistentry>
159                         <varlistentry>
160                             <term>epsx</term>
161                             <listitem>
162                                 <para>
163                                     threshold controlling variation of <literal>x</literal>
164                                     (default <literal>epsx=0</literal>). This vector (possibly
165                                     matrix) with same size as <literal>x0</literal> can be used to
166                                     scale <literal>x</literal>.
167                                 </para>
168                             </listitem>
169                         </varlistentry>
170                     </variablelist>
171                 </listitem>
172             </varlistentry>
173             <varlistentry>
174                 <term>&lt;params&gt;</term>
175                 <listitem>
176                     <para>in the case where the objective function is a C or Fortran
177                         routine, a sequence of arguments containing the method to
178                         communicate with the objective function. This option has no meaning
179                         when the cost function is a Scilab script.
180                     </para>
181                     <para>The available values for &lt;params&gt; are the
182                         following.
183                     </para>
184                     <itemizedlist>
185                         <listitem>
186                             <para>
187                                 <literal>"in"</literal>
188                             </para>
189                             <para>This mode allows memory to be allocated in the internal Scilab
190                                 workspace so that the objective function can get arrays with the
191                                 required size, but without directly allocating the memory. The
192                                 <literal>"in"</literal> value stands for "initialization". In
193                                 that mode, before the value and derivative of the objective
194                                 function is to be computed, there is a dialog between the
195                                 <literal>optim</literal> Scilab primitive and the objective
196                                 function <literal>costf</literal>. In this dialog, the objective
197                                 function is called two times, with particular values of the
198                                 <literal>ind</literal> parameter. The first time,
199                                 <literal>ind</literal> is set to 10 and the objective function
200                                 is expected to set the <literal>nizs</literal>,
201                                 <literal>nrzs</literal> and <literal>ndzs</literal> integer
202                                 parameters of the <literal>nird</literal> common, which is
203                                 defined as:
204                             </para>
205                             <screen>common /nird/ nizs,nrzs,ndzs    </screen>
206                             <para>This allows Scilab to allocate memory inside its internal
207                                 workspace. The second time the objective function is called,
208                                 <literal>ind</literal> is set to 11 and the objective function
209                                 is expected to set the <literal>ti</literal>,
210                                 <literal>tr</literal> and <literal>tz</literal> arrays. After
211                                 this initialization phase, each time it is called, the objective
212                                 function is ensured that the <literal>ti</literal>,
213                                 <literal>tr</literal> and <literal>tz</literal> arrays which are
214                                 passed to it have the values that have been previously
215                                 initialized.
216                             </para>
217                         </listitem>
218                         <listitem>
219                             <para>
220                                 <literal>"ti",valti</literal>
221                             </para>
222                             <para>
223                                 In this mode, <literal>valti</literal> is expected to be a
224                                 Scilab vector variable containing integers. Whenever the
225                                 objective function is called, the <literal>ti</literal> array it
226                                 receives contains the values of the Scilab variable.
227                             </para>
228                         </listitem>
229                         <listitem>
230                             <para>
231                                 <literal>"td", valtd</literal>
232                             </para>
233                             <para>
234                                 In this mode, <literal>valtd</literal> is expected to be a
235                                 Scilab vector variable containing double values. Whenever the
236                                 objective function is called, the <literal>td</literal> array it
237                                 receives contains the values of the Scilab variable.
238                             </para>
239                         </listitem>
240                         <listitem>
241                             <para>
242                                 <literal>"ti",valti,"td",valtd</literal>
243                             </para>
244                             <para>This mode combines the two previous modes.</para>
245                         </listitem>
246                     </itemizedlist>
247                     <para>
248                         The <literal>ti, td</literal> arrays may be used so that the
249                         objective function can be computed. For example, if the objective
250                                 function is a polynomial, the ti array may be used to store the
251                         coefficients of that polynomial.
252                     </para>
253                     <para>Users should choose carefully between the
254                         <literal>"in"</literal> mode and the <literal>"ti"</literal> and
255                         <literal>"td"</literal> mode, depending on the fact that the arrays
256                         are Scilab variables or not. If the data is available as Scilab
257                         variables, then the <literal>"ti", valti, "td", valtd</literal> mode
258                         should be chosen. If the data is available directly from the
259                         objective function, the <literal>"in"</literal> mode should be
260                         chosen. Notice that there is no <literal>"tr"</literal> mode, since,
261                         in Scilab, all real values are doubles.
262                     </para>
263                     <para>If neither the "in" mode, nor the "ti", "td" mode is chosen,
264                         that is, if &lt;params&gt; is not present as an option of the optim
265                         primitive, the user should not assume that the ti, tr and td
266                         arrays can be used: reading or writing the arrays may generate
267                         unpredictable results.
268                     </para>
269                 </listitem>
270             </varlistentry>
271             <varlistentry>
272                 <term>"imp=iflag"</term>
273                 <listitem>
274                     <para>named argument used to set the trace mode (default
275                         <literal>imp=0</literal>, which prints no messages). If <varname>imp</varname>
276                         is greater than or equal to 1, more information is printed, depending on the
277                         algorithm chosen. More precisely:
278                     </para>
279                     <itemizedlist>
280                         <listitem>
281                             <para>
282                                 <literal>"qn"</literal> without constraints: from <literal>iflag=1</literal>
283                                 to <literal>iflag=3</literal>.
284                             </para>
285                             <itemizedlist>
286                                 <listitem>
287                                     <para>
288                                         <literal>iflag>=1</literal>: initial and final print,
289                                     </para>
290                                 </listitem>
291                                 <listitem>
292                                     <para>
293                                         <literal>iflag>=2</literal>: one line per iteration (number of iterations,
294                                         number of calls to f, value of f),
295                                     </para>
296                                 </listitem>
297                                 <listitem>
298                                     <para>
299                                         <literal>iflag>=3</literal>: extra information on line searches.
300                                     </para>
301                                 </listitem>
302                             </itemizedlist>
303                         </listitem>
304                         <listitem>
305                             <para>
306                                 <literal>"qn"</literal> with bounds constraints: from <literal>iflag=1</literal>
307                                 to <literal>iflag=4</literal>.
308                             </para>
309                             <itemizedlist>
310                                 <listitem>
311                                     <para>
312                                         <literal>iflag>=1</literal>: initial and final print,
313                                     </para>
314                                 </listitem>
315                                 <listitem>
316                                     <para>
317                                         <literal>iflag>=2</literal>: one line per iteration (number of iterations,
318                                         number of calls to f, value of f),
319                                     </para>
320                                 </listitem>
321                                 <listitem>
322                                     <para>
323                                         <literal>iflag>=3</literal>: extra information on line searches.
324                                     </para>
325                                 </listitem>
326                             </itemizedlist>
327                         </listitem>
328                         <listitem>
329                             <para>
330                                 <literal>"gc"</literal> without constraints: from <literal>iflag=1</literal>
331                                 to <literal>iflag=5</literal>.
332                             </para>
333                             <itemizedlist>
334                                 <listitem>
335                                     <para>
336                                         <literal>iflag>=1</literal> and <literal>iflag>=2</literal>: initial and final print,
337                                     </para>
338                                 </listitem>
339                                 <listitem>
340                                     <para>
341                                         <literal>iflag=3</literal>: one line per iteration (number of iterations,
342                                         number of calls to f, value of f),
343                                     </para>
344                                 </listitem>
345                                 <listitem>
346                                     <para>
347                                         <literal>iflag>=4</literal>: extra information on line searches.
348                                     </para>
349                                 </listitem>
350                             </itemizedlist>
351                         </listitem>
352                         <listitem>
353                             <para>
354                                 <literal>"gc"</literal> with bounds constraints: from <literal>iflag=1</literal>
355                                 to <literal>iflag=3</literal>.
356                             </para>
357                             <itemizedlist>
358                                 <listitem>
359                                     <para>
360                                         <literal>iflag>=1</literal>: initial and final print,
361                                     </para>
362                                 </listitem>
363                                 <listitem>
364                                     <para>
365                                         <literal>iflag>=2</literal>: one print per iteration,
366                                     </para>
367                                 </listitem>
368                                 <listitem>
369                                     <para>
370                                         <literal>iflag=3</literal>: extra information.
371                                     </para>
372                                 </listitem>
373                             </itemizedlist>
374                         </listitem>
375                         <listitem>
376                             <para>
377                                 <literal>"nd"</literal> without constraints: from <literal>iflag=1</literal>
378                                 to <literal>iflag=8</literal>.
379                             </para>
380                             <itemizedlist>
381                                 <listitem>
382                                     <para>
383                                         <literal>iflag>=1</literal>: initial and final print,
384                                     </para>
385                                 </listitem>
386                                 <listitem>
387                                     <para>
388                                         <literal>iflag>=2</literal>: one print on each convergence,
389                                     </para>
390                                 </listitem>
391                                 <listitem>
392                                     <para>
393                                         <literal>iflag>=3</literal>: one print per iteration,
394                                     </para>
395                                 </listitem>
396                                 <listitem>
397                                     <para>
398                                         <literal>iflag>=4</literal>: line search,
399                                     </para>
400                                 </listitem>
401                                 <listitem>
402                                     <para>
403                                         <literal>iflag>=5</literal>: various tolerances,
404                                     </para>
405                                 </listitem>
406                                 <listitem>
407                                     <para>
408                                         <literal>iflag>=6</literal>: weight and information on the computation of direction.
409                                     </para>
410                                 </listitem>
411                             </itemizedlist>
412                         </listitem>
413                     </itemizedlist>
414                 </listitem>
415             </varlistentry>
416             <varlistentry>
417                 <term>fopt</term>
418                 <listitem>
419                     <para>the value of the objective function at the point
420                         <literal>xopt</literal>
421                     </para>
422                 </listitem>
423             </varlistentry>
424             <varlistentry>
425                 <term>xopt</term>
426                 <listitem>
427                     <para>
428                         best value of <literal>x</literal> found.
429                     </para>
430                 </listitem>
431             </varlistentry>
432             <varlistentry>
433                 <term>gopt</term>
434                 <listitem>
435                     <para>the gradient of the objective function at the point
436                         <literal>xopt</literal>
437                     </para>
438                 </listitem>
439             </varlistentry>
440             <varlistentry>
441                 <term>work</term>
442                 <listitem>
443                     <para>working array for hot restart for quasi-Newton method. This
444                         array is automatically initialized by <literal>optim</literal> when
445                         <literal>optim</literal> is invoked. It can be used as input
446                         parameter to speed-up the calculations.
447                     </para>
448                 </listitem>
449             </varlistentry>
450             <varlistentry>
451                 <term>iters</term>
452                 <listitem>
453                     <para>
454                         scalar, the number of iterations that is displayed when <literal>imp=2</literal>.
455                     </para>
456                 </listitem>
457             </varlistentry>
458             <varlistentry>
459                 <term>evals</term>
460                 <listitem>
461                     <para>
462                         scalar, the number of <literal>cost</literal> function evaluations
463                         that is displayed when <literal>imp=2</literal>.
464                     </para>
465                 </listitem>
466             </varlistentry>
467             <varlistentry>
468                 <term>err</term>
469                 <listitem>
470                     <para>
471                         scalar, a termination indicator.
472                         The success flag is <literal>9</literal>.
473                         <literal>err=1</literal>: Norm of projected gradient lower than...
474                         <literal>err=2</literal>: At last iteration f decreases by less than...
475                         <literal>err=3</literal>: Optimization stops because of too small variations for x.
476                         <literal>err=4</literal>: Optim stops: maximum number of calls to f is reached.
477                         <literal>err=5</literal>: Optim stops: maximum number of iterations is reached.
478                         <literal>err=6</literal>: Optim stops: too small variations in gradient direction.
479                         <literal>err=7</literal>: Stop during calculation of descent direction.
480                         <literal>err=8</literal>: Stop during calculation of estimated hessian.
481                         <literal>err=9</literal>: End of optimization, successful completion.
482                         <literal>err=10</literal>: End of optimization (linear search fails).
483                     </para>
484                 </listitem>
485             </varlistentry>
486         </variablelist>
487     </refsection>
488     <refsection>
489         <title>Description</title>
490         <para>This function solves unconstrained nonlinear optimization
491             problems:
492         </para>
493         <screen>min f(x)      </screen>
494         <para>
495             where <literal>x</literal> is a vector and <literal>f(x)</literal>
496             is a function that returns a scalar. This function can also solve bound
497             constrained nonlinear optimization problems:
498         </para>
499         <screen>min f(x)
500             binf &lt;= x &lt;= bsup
501         </screen>
502         <para>
503             where <literal>binf</literal> is the lower bound and
504             <literal>bsup</literal> is the upper bound on <literal>x</literal>.
505         </para>
506         <para>
507             The <literal>costf</literal> argument can be a Scilab function, a
508             list or a string giving the name of a C or Fortran routine (see
509             "external"). This external must return the value <literal>f</literal> of
510             the cost function at the point <literal>x</literal> and the gradient
511             <literal>g</literal> of the cost function at the point
512             <literal>x</literal>.
513         </para>
514         <variablelist>
515             <varlistentry>
516                 <term>Scilab function case</term>
517                 <listitem>
518                     <para>
519                         If <literal>costf</literal> is a Scilab function, its calling
520                         sequence must be:
521                     </para>
522                     <screen>[f, g, ind] = costf(x, ind)      </screen>
523                     <para>
524                         where <literal>x</literal> is the current point,
525                         <literal>ind</literal> is an integer flag described below,
526                         <literal>f</literal> is the real value of the objective function at
527                         the point <literal>x</literal> and <literal>g</literal> is a vector
528                         containing the gradient of the objective function at
529                         <literal>x</literal>. The variable <literal>ind</literal> is
530                         described below.
531                     </para>
532                 </listitem>
533             </varlistentry>
534             <varlistentry>
535                 <term>List case</term>
536                 <listitem>
537                     <para>It may happen that the objective function requires extra
538                         arguments. In this case, we can use the following feature. The
539                         <literal>costf</literal> argument can be the list
540                         <literal>(real_costf, arg1,...,argn)</literal>. In this case,
541                         <literal>real_costf</literal>, the first element in the list, must
542                         be a Scilab function with calling sequence: <screen>        [f,g,ind]=real_costf(x,ind,arg1,...,argn)      </screen>
543                         The <literal>x</literal>, <literal>f</literal>,
544                         <literal>g</literal>, <literal>ind</literal> arguments have the same
545                         meaning as before. In this case, each time the objective function is
546                         called back, the arguments <literal>arg1,...,argn</literal> are
547                         automatically appended at the end of the calling sequence of
548                         <literal>real_costf</literal>.
549                     </para>
550                 </listitem>
551             </varlistentry>
552             <varlistentry>
553                 <term>String case</term>
554                 <listitem>
555                     <para>
556                         If <literal>costf</literal> is a string, it refers to the name
557                         of a C or Fortran routine which must be linked to Scilab.
558                     </para>
559                     <variablelist>
560                         <varlistentry>
561                             <term>Fortran case</term>
562                             <listitem>
563                                 <para>The calling sequence of the Fortran subroutine computing
564                                     the objective must be:
565                                 </para>
566                                 <screen>subroutine costf(ind,n,x,f,g,ti,tr,td)      </screen>
567                                 <para>with the following declarations:</para>
568                                 <screen>integer ind,n,ti(*)
569                                     double precision x(n),f,g(n),td(*)
570                                     real tr(*)
571                                 </screen>
572                                 <para>
573                                     The argument <literal>ind</literal> is described
574                                     below.
575                                 </para>
576                                 <para>If ind = 2, 3 or 4, the inputs of the routine are :
577                                     <literal>x, ind, n, ti, tr,td</literal>.
578                                 </para>
579                                 <para>If ind = 2, 3 or 4, the outputs of the routine are :
580                                     <literal>f</literal> and <literal>g</literal>.
581                                 </para>
582                             </listitem>
583                         </varlistentry>
584                         <varlistentry>
585                             <term>C case</term>
586                             <listitem>
587                                 <para>The calling sequence of the C function computing the
588                                     objective must be:
589                                 </para>
590                                 <screen>void costf(int *ind, int *n, double *x, double *f, double *g, int *ti, float *tr, double *td)      </screen>
591                                 <para>
592                                     The argument <literal>ind</literal> is described
593                                     below.
594                                 </para>
595                                 <para>The inputs and outputs of the function are the same as
596                                     in the Fortran case.
597                                 </para>
598                             </listitem>
599                         </varlistentry>
600                     </variablelist>
601                 </listitem>
602             </varlistentry>
603         </variablelist>
604         <para>
605             On output, <literal>ind&lt;0</literal> means that
606             <literal>f</literal> cannot be evaluated at <literal>x</literal> and
607             <literal>ind=0</literal> interrupts the optimization.
608         </para>
609     </refsection>
610     <refsection>
611         <title>Termination criteria</title>
612         <para>Each algorithm has its own termination criteria, which may use the
613             parameters given by the user, that is <literal>nap</literal>,
614             <literal>iter</literal>, <literal>epsg</literal>, <literal>epsf</literal>
615             and <literal>epsx</literal>. Not all the parameters are taken into
616             account. In the table below, we present the specific termination
617             parameters which are taken into account by each algorithm. The
618             unconstrained solver is identified by "UNC" while the bound constrained
619             solver is identified by "BND". An empty entry means that the parameter is
620             ignored by the algorithm.
621         </para>
622         <para>
623             <informaltable border="1">
624                 <tr>
625                     <td>Solver</td>
626                     <td>nap</td>
627                     <td>iter</td>
628                     <td>epsg</td>
629                     <td>epsf</td>
630                     <td>epsx</td>
631                 </tr>
632                 <tr>
633                     <td>optim/"qn" UNC</td>
634                     <td>X</td>
635                     <td>X</td>
636                     <td>X</td>
637                     <td/>
638                     <td/>
639                 </tr>
640                 <tr>
641                     <td>optim/"qn" BND</td>
642                     <td>X</td>
643                     <td>X</td>
644                     <td>X</td>
645                     <td>X</td>
646                     <td>X</td>
647                 </tr>
648                 <tr>
649                     <td>optim/"gc" UNC</td>
650                     <td>X</td>
651                     <td>X</td>
652                     <td>X</td>
653                     <td/>
654                     <td/>
655                 </tr>
656                 <tr>
657                     <td>optim/"gc" BND</td>
658                     <td>X</td>
659                     <td>X</td>
660                     <td>X</td>
661                     <td>X</td>
662                     <td>X</td>
663                 </tr>
664                 <tr>
665                     <td>optim/"nd" UNC</td>
666                     <td>X</td>
667                     <td>X</td>
668                     <td/>
669                     <td>X</td>
670                     <td>X</td>
671                 </tr>
672             </informaltable>
673         </para>
674     </refsection>
675     <refsection>
676         <title>Example: Scilab function</title>
677             <para>The following is an example with a Scilab function. Notice that, for
678                 the sake of simplicity, the Scilab function "cost" of the following
679                 example computes the objective function f and its derivative regardless of
680                 the value of ind. This keeps the example simple. In practical
681                 situations though, the computation of "f" and "g" may raise performance
682                 issues, so a straightforward optimization is to use the value of "ind" to
683                 compute "f" and "g" only when needed.
684         </para>
685         <programlisting role="example">function [f, g, ind] = cost(x, ind)
686             xref = [1; 2; 3];
687             f = 0.5 * norm(x - xref)^2;
688             g = x - xref;
689             endfunction
690             
691             // Simplest call
692             x0 = [1; -1; 1];
693             [fopt, xopt] = optim(cost, x0)
694             
695             // Use "gc" algorithm
696             [fopt, xopt, gopt] = optim(cost, x0, "gc")
697             
698             // Use "nd" algorithm
699             [fopt, xopt, gopt] = optim(cost, x0, "nd")
700             
701             // Upper and lower bounds on x
702             [fopt, xopt, gopt] = optim(cost, "b", [-1;0;2], [0.5;1;4], x0)
703             
704             // Upper and lower bounds on x and setting up the algorithm to "gc"
705             [fopt, xopt, gopt] = optim(cost, "b", [-1; 0; 2], [0.5; 1; 4], x0, "gc")
706             
707             // Bound on the number of call to the objective function
708             [fopt, xopt, gopt] = optim(cost, "b", [-1; 0; 2], [0.5; 1; 4], x0, "gc", "ar", 3)
709             
710             // Set max number of call to the objective function (3)
711             // Set max number of iterations (100)
712             // Set stopping threshold on the value of f (1e-6),
713             // on the value of the norm of the gradient of the objective function (1e-6)
714             // on the improvement on the parameters x_opt (1e-6;1e-6;1e-6)
715             [fopt, xopt, gopt] = optim(cost, "b", [-1; 0; 2], [0.5; 1; 4], x0, "gc", "ar", 3, 100, 1e-6, 1e-6, [1e-3; 1e-3; 1e-3])
716             
717             // Additionnal messages are printed in the console.
718             [fopt, xopt] = optim(cost, x0, imp = 3)
719         </programlisting>
720     </refsection>
721     <refsection>
722         <title>Example: Print messages</title>
723         <para>
724             The <literal>imp</literal> flag may take negative integer values,
725             say k. In that case, the cost function is called once every -k iterations.
726             This makes it possible to plot the function value or to write a log file.
727         </para>
728         <para>
729             This feature is available only with the <literal>"qn"</literal>
730             algorithm without constraints.
731         </para>
732         <para>In the following example, we solve a simple quadratic test case. For
733             each iteration of the algorithm, we print the value of x, f and g.
734         </para>
735         <programlisting role="example">function [f, g, ind] = cost(x, ind)
736             xref = [1; 2; 3];
737             f = 0.5 * norm(x - xref)^2;
738             g = x - xref;
739             if (ind == 1) then
740             mprintf("f(x) = %s, |g(x)|=%s\n", string(f), string(norm(g)))
741             end
742             endfunction
743             
744             x0 = [1; -1; 1];
745             [fopt, xopt] = optim(cost, x0, imp = -1)
746         </programlisting>
747         <para>The previous script produces the following output.</para>
748         <screen>--&gt;[fopt, xopt] = optim(cost, x0, imp = -1)
749             f(x) = 6.5, |g(x)|=3.6055513
750             f(x) = 2.8888889, |g(x)|=2.4037009
751             f(x) = 9.861D-31, |g(x)|=1.404D-15
752             f(x) = 0, |g(x)|=0
753             Norm of projected gradient lower than   0.0000000D+00.
754             xopt  =
755             1.
756             2.
757             3.
758             fopt  =
759             0.
760         </screen>
761         <para>In the following example, we solve the Rosenbrock test case. For
762             each iteration of the algorithm, we plot the current value of x into a 2D
763             graph containing the contours of Rosenbrock's function. This makes it
764             possible to watch the progress of the algorithm while it is running. We could
765             as well write the value of x, f and g into a log file if needed.
766         </para>
767         <programlisting role="example">// 1. Define Rosenbrock for optimization
768             function [f , g , ind] = rosenbrock (x , ind)
769             f = 100.0 *(x(2) - x(1)^2)^2 + (1 - x(1))^2;
770             g(1) = - 400. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
771             g(2) = 200. * ( x(2) - x(1)**2 )
772             endfunction
773             
774             // 2. Define rosenbrock for contouring
775             function f = rosenbrockC ( x1 , x2 )
776             x = [x1 x2]
777             ind = 4
778             [ f , g , ind ] = rosenbrock ( x , ind )
779             endfunction
780             
781             // 3. Define Rosenbrock for plotting
782             function [ f , g , ind ] = rosenbrockPlot ( x , ind )
783             [ f , g , ind ] = rosenbrock ( x , ind )
784             if (ind == 1) then
785             plot ( x(1) , x(2) , "g." )
786             end
787             endfunction
788             
789             // 4. Draw the contour of Rosenbrock's function
790             x0 = [-1.2 1.0];
791             xopt = [1.0 1.0];
792             xdata = linspace(-2,2,100);
793             ydata = linspace(-2,2,100);
794             contour ( xdata , ydata , rosenbrockC , [1 10 100 500 1000])
795             plot(x0(1) , x0(2) , "b.")
796             plot(xopt(1) , xopt(2) , "r*")
797             
798             // 5. Plot the optimization process, during optimization
799             [fopt, xopt] = optim ( rosenbrockPlot , x0 , imp = -1)
800         </programlisting>
801         <scilab:image>
802             function [f, g, ind]=rosenbrock(x, ind)
803             f = 100.0 *(x(2) - x(1)^2)^2 + (1 - x(1))^2;
804             g(1) = - 400. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
805             g(2) = 200. * ( x(2) - x(1)**2 )
806             endfunction
807             
808             function f=rosenbrockC(x1, x2)
809             x = [x1 x2]
810             ind = 4
811             [ f , g , ind ] = rosenbrock ( x , ind )
812             endfunction
813             
814             function [f, g, ind]=rosenbrockPlot(x, ind)
815             [ f , g , ind ] = rosenbrock ( x , ind )
816             if (ind == 1) then
817             plot ( x(1) , x(2) , "g." )
818             end
819             endfunction
820             
821             x0 = [-1.2 1.0];
822             xopt = [1.0 1.0];
823             xdata = linspace(-2,2,100);
824             ydata = linspace(-2,2,100);
825             contour ( xdata , ydata , rosenbrockC , [1 10 100 500 1000])
826             plot(x0(1) , x0(2) , "b.")
827             plot(xopt(1) , xopt(2) , "r*")
828             [fopt, xopt] = optim ( rosenbrockPlot , x0 , imp = -1)
829         </scilab:image>
830         
831     </refsection>
832     <refsection>
833         <title>Example: Optimizing with numerical derivatives</title>
834         <para>It is possible to optimize a problem without an explicit knowledge
835             of the derivative of the cost function. For this purpose, we can use the
836             numdiff or derivative function to compute a numerical derivative of the
837             cost function.
838         </para>
839         <para>In the following example, we use the numdiff function to solve
840             Rosenbrock's problem.
841         </para>
842         <programlisting role="example">function f = rosenbrock ( x )
843             f = 100.0 *(x(2)-x(1)^2)^2 + (1-x(1))^2;
844             endfunction
845             
846             function [ f , g , ind ] = rosenbrockCost ( x , ind )
847             f = rosenbrock ( x );
848             g= numdiff ( rosenbrock , x );
849             endfunction
850             
851             x0 = [-1.2 1.0];
852             
853             [ fopt , xopt ] = optim ( rosenbrockCost , x0 )
854         </programlisting>
855         <para>In the following example, we use the derivative function to solve
856             Rosenbrock's problem. Given that the step computation strategy is not the
857             same in numdiff and derivative, this might lead to improved
858             results.
859         </para>
860         <programlisting role="example">function f = rosenbrock ( x )
861             f = 100.0 *(x(2)-x(1)^2)^2 + (1-x(1))^2;
862             endfunction
863             
864             function [ f , g , ind ] = rosenbrockCost2 ( x , ind )
865             f = rosenbrock ( x );
866             g = derivative ( rosenbrock , x.' , order = 4 );
867             endfunction
868             
869             x0 = [-1.2 1.0];
870             [fopt , xopt] = optim ( rosenbrockCost2 , x0 )
871         </programlisting>
872     </refsection>
873     <refsection>
874         <title>Example: Counting function evaluations and number of
875             iterations
876         </title>
877         <para>
878             The <literal>imp</literal> option can take negative values. If
879             <literal>imp</literal> is equal to <literal>m</literal> where
880             <literal>m</literal> is a negative integer, then the cost function is
881             evaluated every -<literal>m</literal> iterations, with the
882             <literal>ind</literal> input argument equal to 1. The following example
883             uses this feature to compute the number of iterations. The global variable
884             <literal>mydata</literal> is used to store the number of function
885             evaluations as well as the number of iterations.
886         </para>
887         <programlisting role="example">
888             function [f, g, ind] = cost(x, ind)
889             global _MYDATA_
890             if ( ind == 1 )
891             _MYDATA_.niter = _MYDATA_.niter + 1;
892             else
893             _MYDATA_.nfevals = _MYDATA_.nfevals + 1;
894             end
895             xref = [1; 2; 3];
896             if ( ind == 2 | ind == 4 ) then
897             f = 0.5*norm(x-xref)^2;
898             else
899             f = 0;
900             end
901             if ( ind == 3 | ind == 4 ) then
902             g = x-xref;
903             else
904             g = zeros(3, 1);
905             end
906             endfunction
907             x0 = [1; -1; 1];
908             global _MYDATA_
909             _MYDATA_ = tlist ( ["MYDATA", "niter", "nfevals"]);
910             _MYDATA_.niter = 0;
911             _MYDATA_.nfevals = 0;
912             [f, xopt] = optim(cost, x0, imp=-1);
913             mprintf ( "Number of function evaluations: %d\n", _MYDATA_.nfevals );
914             mprintf ( "Number of iterations: %d\n", _MYDATA_.niter );
915         </programlisting>
916         <para>While the previous example works perfectly, there is a risk that the
917             same variable <literal>_MYDATA_</literal> is used by some internal
918             function used by <literal>optim</literal>. In this case, the value may be
919             wrong. This is why a sufficiently weird variable name has been
920             used.
921         </para>
922     </refsection>
923     <refsection>
924         <title>Example: Passing extra parameters</title>
925         <para>In most practical situations, the cost function depends on extra
926             parameters which are required to evaluate the cost function. There are
927             several methods to achieve this goal.
928         </para>
929         <para>In the following example, the cost function uses 4 parameters
930             <literal>a, b, c</literal> and <literal>d</literal>. We define the cost
931             function with additional input arguments, which are declared after the
932             index argument. Then we pass a list as the first input argument of the
933             <literal>optim</literal> solver. The first element of the list is the cost
934             function. The additional variables are directly passed to the cost
935             function.
936         </para>
937         <programlisting role="example">function [ f , g , ind ] = costfunction ( x , ind , a , b , c , d )
938             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
939             g(1) = 2 * a * ( x(1) - c )
940             g(2) = 2 * b * ( x(2) - d )
941             endfunction
942             
943             x0 = [1 1];
944             a = 1.0;
945             b = 2.0;
946             c = 3.0;
947             d = 4.0;
948             costf = list ( costfunction , a , b , c, d );
949             [fopt , xopt] = optim ( costf , x0 , imp = 2)
950         </programlisting>
951         <para>In complex cases, the cost function may have so many parameters,
952             that having a function which takes all arguments as inputs is not
953             convenient. For example, consider the situation where the cost function
954             needs 12 parameters. Then, designing a function with 14 input arguments
955             (x, index and the 12 parameters) is difficult to manage. Instead, we can
956             use a more complex data structure to store our data. In the following
957             example, we use a tlist to store the 4 input arguments. This method can
958             easily be expanded to an arbitrary number of parameters.
959         </para>
960         <programlisting role="example">function [f , g , ind] = costfunction ( x , ind , parameters)
961             // Get the parameters
962             a = parameters.a
963             b = parameters.b
964             c = parameters.c
965             d = parameters.d
966             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
967             g(1) = 2 * a * ( x(1) - c )
968             g(2) = 2 * b * ( x(2) - d )
969             endfunction
970             
971             x0 = [1 1];
972             a = 1.0;
973             b = 2.0;
974             c = 3.0;
975             d = 4.0;
976             // Store the parameters
977             parameters = tlist ( [
978             "T_MYPARAMS"
979             "a"
980             "b"
981             "c"
982             "d"
983             ]);
984             
985             parameters.a = a;
986             parameters.b = b;
987             parameters.c = c;
988             parameters.d = d;
989             costf = list ( costfunction , parameters );
990             [fopt , xopt] = optim ( costf , x0 , imp = 2)
991         </programlisting>
992         <para>In the following example, the parameters are defined before the
993             optimizer is called. They are directly used in the cost function.
994         </para>
995         <programlisting role="example">// The example NOT to follow
996             function [ f , g , ind ] = costfunction ( x , ind )
997             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
998             g(1) = 2 * a * ( x(1) - c )
999             g(2) = 2 * b * ( x(2) - d )
1000             endfunction
1001             x0 = [1 1];
1002             a = 1.0;
1003             b = 2.0;
1004             c = 3.0;
1005             d = 4.0;
1006             [ fopt , xopt ] = optim ( costfunction , x0 , imp = 2 )
1007         </programlisting>
1008         <para>While the previous example works perfectly, there is a risk that the
1009             same variables are used by some internal function used by
1010             <literal>optim</literal>. In this case, the value of the parameters are
1011             not what is expected and the optimization can fail or, worse, give a wrong
1012             result. It is also difficult to manage such a function, which requires
1013             that all the parameters are defined in the calling context.
1014         </para>
1015         <para>In the following example, we define the cost function with the
1016             classical header. Inside the function definition, we declare that the
1017             parameters <literal>a, b, c</literal> and <literal>d</literal> are global
1018             variables. Then we declare and set the global variables.
1019         </para>
1020         <programlisting role="example">// Another example NOT to follow
1021             function [ f , g , ind ] = costfunction ( x , ind )
1022             global a b c d
1023             f = a * ( x(1) - c ) ^2 + b * ( x(2) - d )^2
1024             g(1) = 2 * a * ( x(1) - c )
1025             g(2) = 2 * b * ( x(2) - d )
1026             endfunction
1027             x0 = [1 1];
1028             global a b c d
1029             a = 1.0;
1030             b = 2.0;
1031             c = 3.0;
1032             d = 4.0;
1033             [ fopt , xopt ] = optim ( costfunction , x0 , imp = 2 )
1034         </programlisting>
1035         <para>While the previous example works perfectly, there is a risk that the
1036             same variables are used by some internal function used by
1037             <literal>optim</literal>. In this case, the value of the parameters are
1038             not what is expected and the optimization can fail or, worse, give a wrong
1039             result.
1040         </para>
1041     </refsection>
1042     <refsection>
1043         <title>Example: Checking that derivatives are correct</title>
1044         <para>Many optimization problems can be avoided if the derivatives are
1045             computed correctly. One common reason for failure in the step-length
1046             procedure is an error in the calculation of the cost function and its
1047             gradient. Incorrect calculation of derivatives is by far the most common
1048             user error.
1049         </para>
1050         <para>In the following example, we give an incorrect implementation of
1051             Rosenbrock's gradient. In order to check the computation of the
1052             derivatives, we use the <literal>derivative</literal> function. We define
1053             the <literal>simplified</literal> function, which delegates the
1054             computation of <literal>f</literal> to the rosenbrock function. The
1055             <literal>simplified</literal> function is passed as an input argument of
1056             the <literal>derivative</literal> function.
1057         </para>
1058         <programlisting role="example">function [ f , g , index ] = rosenbrock ( x , index )
1059             f = 100.0 *(x(2)-x(1)^2)^2 + (1-x(1))^2;
1060             // Exact :
1061             g(1) = - 400. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
1062             // Wrong :
1063             g(1) = - 1200. * ( x(2) - x(1)**2 ) * x(1) -2. * ( 1. - x(1) )
1064             g(2) = 200. * ( x(2) - x(1)**2 )
1065             endfunction
1066             
1067             function f = simplified ( x )
1068             index = 1;
1069             [ f , g , index ] = rosenbrock ( x , index )
1070             endfunction
1071             
1072             x0 = [-1.2 1];
1073             index = 1;
1074             [ f , g , index ] = rosenbrock ( x0 , index );
1075             gnd = derivative ( simplified , x0.' );
1076             mprintf("Exact derivative:[%s]\n" , strcat ( string(g) , " " ));
1077             mprintf("Numerical derivative:[%s]\n" , strcat ( string(gnd) , " " ));
1078         </programlisting>
1079         <para>The previous script produces the following output. Obviously, the
1080             difference between the two gradients is enormous, which shows that the
1081             wrong formula has been used in the gradient.
1082         </para>
1083         <programlisting role="example">      Exact derivative:[-638 -88]
1084             Numerical derivative:[-215.6 -88]
1085         </programlisting>
1086     </refsection>
1087     <refsection>
1088         <title>Example: C function</title>
1089         <para>The following is an example with a C function, where a C source code
1090             is written into a file, dynamically compiled and loaded into Scilab, and
1091             then used by the "optim" solver. The interface of the "rosenc" function is
1092             fixed, even if the arguments are not really used in the cost function.
1093             This is because the underlying optimization solvers must assume that the
1094             objective function has a known, constant interface. In the following
1095             example, the arrays ti and tr are not used, only the array "td" is used,
1096             as a parameter of the Rosenbrock function. Notice that the content of the
1097             arrays ti and td is the same as the content of the Scilab variables, as
1098             expected.
1099         </para>
1100         <programlisting role="example">// External function written in C (C compiler required)
1101             // write down the C code (Rosenbrock problem)
1102             C=['#include &lt;math.h&gt;'
1103             'double sq(double x)'
1104             '{ return x*x;}'
1105             'void rosenc(int *ind, int *n, double *x, double *f, double *g, '
1106             '                                int *ti, float *tr, double *td)'
1107             '{'
1108             '  double p;'
1109             '  int i;'
1110             '  p=td[0];'
1111             '  if (*ind==2||*ind==4) {'
1112             '    *f=1.0;'
1113             '    for (i=1;i&lt;*n;i++)'
1114             '      *f+=p*sq(x[i]-sq(x[i-1]))+sq(1.0-x[i]);'
1115             '  }'
1116             '  if (*ind==3||*ind==4) {'
1117             '    g[0]=-4.0*p*(x[1]-sq(x[0]))*x[0];'
1118             '    for (i=1;i&lt;*n-1;i++)'
1119             '      g[i]=2.0*p*(x[i]-sq(x[i-1]))-4.0*p*(x[i+1]-sq(x[i]))*x[i]-2.0*(1.0-x[i]);'
1120             '    g[*n-1]=2.0*p*(x[*n-1]-sq(x[*n-2]))-2.0*(1.0-x[*n-1]);'
1121             '  }'
1122             '}'];
1123             cd TMPDIR;
1124             mputl(C, TMPDIR+'/rosenc.c')
1125             
1126             // compile the C code
1127             l = ilib_for_link('rosenc', 'rosenc.c', [], 'c');
1128             
1129             // incremental linking
1130             link(l, 'rosenc', 'c')
1131             
1132             //solve the problem
1133             x0 = [40; 10; 50];
1134             p = 100;
1135             [f, xo, go] = optim('rosenc', x0, 'td', p)
1136         </programlisting>
1137     </refsection>
1138     <refsection>
1139         <title>Example: Fortran function</title>
1140         <para>The following is an example with a Fortran function.</para>
1141         <programlisting role="example">// External function written in Fortran (Fortran compiler required)
1142             // write down the Fortran  code (Rosenbrock problem)
1143             F = [ '      subroutine rosenf(ind, n, x, f, g, ti, tr, td)'
1144             '      integer ind,n,ti(*)'
1145             '      double precision x(n),f,g(n),td(*)'
1146             '      real tr(*)'
1147             'c'
1148             '      double precision y,p'
1149             '      p=td(1)'
1150             '      if (ind.eq.2.or.ind.eq.4) then'
1151             '        f=1.0d0'
1152             '        do i=2,n'
1153             '          f=f+p*(x(i)-x(i-1)**2)**2+(1.0d0-x(i))**2'
1154             '        enddo'
1155             '      endif'
1156             '      if (ind.eq.3.or.ind.eq.4) then'
1157             '        g(1)=-4.0d0*p*(x(2)-x(1)**2)*x(1)'
1158             '        if(n.gt.2) then'
1159             '          do i=2,n-1'
1160             '            g(i)=2.0d0*p*(x(i)-x(i-1)**2)-4.0d0*p*(x(i+1)-x(i)**2)*x(i)'
1161             '     &amp;           -2.0d0*(1.0d0-x(i))'
1162             '          enddo'
1163             '        endif'
1164             '        g(n)=2.0d0*p*(x(n)-x(n-1)**2)-2.0d0*(1.0d0-x(n))'
1165             '      endif'
1166             '      return'
1167             '      end'];
1168             cd TMPDIR;
1169             mputl(F, TMPDIR+'/rosenf.f')
1170             
1171             // compile the Fortran code
1172             l = ilib_for_link('rosenf', 'rosenf.f', [], 'f');
1173             
1174             // incremental linking
1175             link(l, 'rosenf', 'f')
1176             
1177             //solve the problem
1178             x0 = [40; 10; 50];
1179             p = 100;
1180             [f, xo, go] = optim('rosenf', x0, 'td', p)
1181         </programlisting>
1182     </refsection>
1183     <refsection>
1184         <title>Example: Fortran function with initialization</title>
1185         <para>The following is an example with a Fortran function in which the
1186             "in" option is used to allocate memory inside the Scilab environment. In
1187             this mode, there is a dialog between Scilab and the objective function.
1188             The goal of this dialog is to initialize the parameters of the objective
1189             function. Each part of this dialog is based on a specific value of the
1190             "ind" parameter.
1191         </para>
1192         <para>At the beginning, Scilab calls the objective function, with the ind
1193             parameter equal to 10. This tells the objective function to initialize
1194             the sizes of the arrays it needs by setting the nizs, nrzs and ndzs
1195             integer parameters of the "nird" common. Then the objective function
1196             returns. At this point, Scilab creates internal variables and allocate
1197             memory for the variable izs, rzs and dzs. Scilab calls the objective
1198             function back again, this time with ind equal to 11. This tells the
1199             objective function to initialize the arrays izs, rzs and dzs. When the
1200             objective function has done so, it returns. Then Scilab enters in the real
1201             optimization mode and calls the optimization solver the user requested.
1202             Whenever the objective function is called, the izs, rzs and dzs arrays
1203             have the values that have been previously initialized.
1204         </para>
1205         <programlisting role="example">//
1206             // Define a fortran source code and compile it (fortran compiler required)
1207             //
1208             fortransource = ['      subroutine rosenf(ind,n,x,f,g,izs,rzs,dzs)'
1209             'C     -------------------------------------------'
1210             'c     Example of cost function given by a subroutine'
1211             'c     if n&lt;=2 returns ind=0'
1212             'c     f.bonnans, oct 86'
1213             '      implicit double precision (a-h,o-z)'
1214             '      real rzs(1)'
1215             '      double precision dzs(*)'
1216             '      dimension x(n),g(n),izs(*)'
1217             '      common/nird/nizs,nrzs,ndzs'
1218             '      if (n.lt.3) then'
1219             '        ind=0'
1220             '        return'
1221             '      endif'
1222             '      if(ind.eq.10) then'
1223             '         nizs=2'
1224             '         nrzs=1'
1225             '         ndzs=2'
1226             '         return'
1227             '      endif'
1228             '      if(ind.eq.11) then'
1229             '         izs(1)=5'
1230             '         izs(2)=10'
1231             '         dzs(2)=100.0d+0'
1232             '         return'
1233             '      endif'
1234             '      if(ind.eq.2)go to 5'
1235             '      if(ind.eq.3)go to 20'
1236             '      if(ind.eq.4)go to 5'
1237             '      ind=-1'
1238             '      return'
1239             '5     f=1.0d+0'
1240             '      do 10 i=2,n'
1241             '        im1=i-1'
1242             '10      f=f + dzs(2)*(x(i)-x(im1)**2)**2 + (1.0d+0-x(i))**2'
1243             '      if(ind.eq.2)return'
1244             '20    g(1)=-4.0d+0*dzs(2)*(x(2)-x(1)**2)*x(1)'
1245             '      nm1=n-1'
1246             '      do 30 i=2,nm1'
1247             '        im1=i-1'
1248             '        ip1=i+1'
1249             '        g(i)=2.0d+0*dzs(2)*(x(i)-x(im1)**2)'
1250             '30      g(i)=g(i) -4.0d+0*dzs(2)*(x(ip1)-x(i)**2)*x(i) - '
1251             '     &amp;        2.0d+0*(1.0d+0-x(i))'
1252             '      g(n)=2.0d+0*dzs(2)*(x(n)-x(nm1)**2) - 2.0d+0*(1.0d+0-x(n))'
1253             '      return'
1254             '      end'];
1255             cd TMPDIR;
1256             mputl(fortransource, TMPDIR + '/rosenf.f')
1257             
1258             // compile the Fortran code
1259             libpath = ilib_for_link('rosenf', 'rosenf.f', [], 'f');
1260             
1261             // incremental linking
1262             linkid = link(libpath, 'rosenf', 'f');
1263             
1264             x0 = 1.2 * ones(1, 5);
1265             //
1266             // Solve the problem
1267             //
1268             [f, x, g] = optim('rosenf', x0, 'in');
1269         </programlisting>
1270     </refsection>
1271     <refsection>
1272         <title>Example: Fortran function with initialization on Windows with Intel
1273             Fortran Compiler
1274         </title>
1275         <para>Under the Windows operating system with Intel Fortran Compiler, one
1276             must carefully design the fortran source code so that the dynamic link
1277             works properly. On Scilab's side, the optimization component is
1278             dynamically linked and the symbol "nird" is exported out of the
1279             optimization dll. On the cost function's side, which is also dynamically
1280             linked, the "nird" common must be imported in the cost function
1281             dll.
1282         </para>
1283         <para>The following example is a re-writing of the previous example, with
1284             special attention to the Windows operating system with the Intel Fortran
1285             compiler. In that case, we introduce additional compiling
1286             instructions, which allow the compiler to import the "nird"
1287             symbol.
1288         </para>
1289         <programlisting role="example">fortransource = ['subroutine rosenf(ind,n,x,f,g,izs,rzs,dzs)'
1290             'cDEC$ IF DEFINED (FORDLL)'
1291             'cDEC$ ATTRIBUTES DLLIMPORT:: /nird/'
1292             'cDEC$ ENDIF'
1293             'C     -------------------------------------------'
1294             'c     Example of cost function given by a subroutine'
1295             'c     if n&lt;=2 returns ind=0'
1296             'c     f.bonnans, oct 86'
1297             '      implicit double precision (a-h,o-z)'
1298             [etc...]
1299         </programlisting>
1300     </refsection>
1301     <refsection role="see also">
1302         <title>See Also</title>
1303         <simplelist type="inline">
1304             <member>
1305                 <link linkend="external">external</link>
1306             </member>
1307             <member>
1308                 <link linkend="qpsolve">qpsolve</link>
1309             </member>
1310             <member>
1311                 <link linkend="datafit">datafit</link>
1312             </member>
1313             <member>
1314                 <link linkend="leastsq">leastsq</link>
1315             </member>
1316             <member>
1317                 <link linkend="numdiff">numdiff</link>
1318             </member>
1319             <member>
1320                 <link linkend="derivative">derivative</link>
1321             </member>
1322             <member>
1323                 <link linkend="NDcost">NDcost</link>
1324             </member>
1325         </simplelist>
1326     </refsection>
1327     <refsection>
1328         <title>References</title>
1329         <para>The following is a map from the various options to the underlying
1330             solvers.
1331         </para>
1332         <variablelist>
1333             <varlistentry>
1334                 <term>"qn" without constraints</term>
1335                 <listitem>
1336                     <para>n1qn1 : a quasi-Newton method with a Wolfe-type line
1337                         search
1338                     </para>
1339                 </listitem>
1340             </varlistentry>
1341             <varlistentry>
1342                 <term>"qn" with bounds constraints</term>
1343                 <listitem>
1344                     <para>qnbd : a quasi-Newton method with projection</para>
1345                     <para>RR-0242 - A variant of a projected variable metric method for
1346                         bound constrained optimization problems, Bonnans Frederic, Rapport
1347             de recherche de l'INRIA - Rocquencourt, October 1983
1348                     </para>
1349                 </listitem>
1350             </varlistentry>
1351             <varlistentry>
1352                 <term>"gc" without constraints</term>
1353                 <listitem>
1354                     <para>n1qn3 : a quasi-Newton limited memory method with BFGS.</para>
1355                 </listitem>
1356             </varlistentry>
1357             <varlistentry>
1358                 <term>"gc" with bounds constraints</term>
1359                 <listitem>
1360                     <para>gcbd : a BFGS-type method with limited memory and
1361                         projection
1362                     </para>
1363                 </listitem>
1364             </varlistentry>
1365             <varlistentry>
1366                 <term>"nd" without constraints</term>
1367                 <listitem>
1368                     <para>n1fc1 : a bundle method</para>
1369                 </listitem>
1370             </varlistentry>
1371             <varlistentry>
1372                 <term>"nd" with bounds constraints</term>
1373                 <listitem>
1374                     <para>not available</para>
1375                 </listitem>
1376             </varlistentry>
1377         </variablelist>
1378     </refsection>
1379 </refentry>