mlp_gen.cc
1 #include <math.h>
2 #include <string.h>
3 #include <stdio.h>
4 #include <stdlib.h>
5 
6 #include "mlp_gen.h"
7 #include "mlp_sigmoide.h"
8 
9 #ifdef __cplusplus
10 extern "C" {
11 #endif
12 
13 #define NPMAX 100000
14 #define NLMAX 1000
15 
16 struct net_ net_ MLP_HIDDEN;
17 struct learn_ learn_ MLP_HIDDEN;
18 struct pat_ pat_ MLP_HIDDEN;
19 struct divers_ divers_ MLP_HIDDEN;
20 struct stat_ stat_ MLP_HIDDEN;
21 
22 int MessLang = 0;
23 int OutputWeights = 100;
25 int WeightsMemory = 0;
26 int PatMemory[2] = {0,0};
27 int BFGSMemory = 0;
29 int LearnMemory = 0;
30 int NetMemory = 0;
31 float MLPfitVersion = (float) 1.40;
34 
35 dbl ***dir; /* search direction in weight space */
36 dbl *delta; /* delta = w(t+1)-w(t), used by BFGS */
37 dbl **BFGSH; /* BFGS approximate inverse Hessian */
38 dbl *Gamma; /* Gamma = grad(t+1)-grad(t), used by BFGS */
39 int *ExamplesIndex; /* shuffled indices of the learning examples */
40 dbl LastAlpha = 0; /* last optimal step from the line search */
41 
42 
43 /* The following lines are needed to use the dgels routine from the LAPACK
44  library in Reslin() */
45 
46 #include "mlp_lapack.h"
47 /* Subroutine */ int dgels_(char *trans, integer *m, integer *n, integer *
48  nrhs, doublereal *a, integer *lda, doublereal *b, integer *ldb,
49  doublereal *work, integer *lwork, integer *info);
50 
51 /***********************************************************/
52 /* MLP_Out */
53 /* */
54 /* computes the output of the Neural Network */
55 /* inputs: double *rrin = inputs of the MLP */
56 /* outputs: double *rrout = outputs of the MLP */
57 /* */
58 /* Author: J.Schwindling 29-Mar-99 */
59 /* Modified: J.Schwindling 16-Jul-99 unrolled loops */
60 /* Modified: J.Schwindling 20-Jul-99 fast sigmoid */
61 /***********************************************************/
62 
63 /* extern "C"Dllexport */void MLP_Out(type_pat *rrin, dbl *rrout)
64 {
65 // static int i, il, in, j, ilm1, m, mp1;
66  static int i, il, in, j, m, mp1;
67  dbl **deriv1;
68 
69 /* input layer */
70 
71  deriv1 = NET.Deriv1;
72  m = NET.Nneur[0]%4;
73  if(m==0) goto L10;
74  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
75 L10:
76  mp1 = m+1;
77  for(i=mp1; i<=NET.Nneur[0]; i+=4)
78  {
79  NET.Outn[0][i-1] = rrin[i-1];
80  NET.Outn[0][i] = rrin[i];
81  NET.Outn[0][i+1] = rrin[i+1];
82  NET.Outn[0][i+2] = rrin[i+2];
83  }
84 
85 /* hidden and output layers */
86 
87  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
88  NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);
89 
90  for(il=2; il<NET.Nlayer; il++)
91  {
92  MLP_vSigmoideDeriv(NET.Outn[il-1],
93  deriv1[il-1],NET.Nneur[il-1]);
94  MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
95  NET.Outn[il],NET.Nneur[il],
96  NET.Nneur[il-1]);
97  }
98  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
99  {
100  deriv1[NET.Nlayer-1][in] = 1;
101  }
102 }
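/* --- Added illustration (not part of the original source): the same
   forward pass written without the loop unrolling and the vectorized
   helpers used above. It assumes the conventions visible in this file:
   Weights[il][in][0] is the bias of neuron `in` of layer `il`, hidden
   layers use the sigmoid y = 1/(1+exp(-x)) and the output layer is
   linear. Unlike MLP_Out, which relies on callers passing
   NET.Outn[NET.Nlayer-1] as rrout, this sketch copies the result. --- */
static void MLP_Out_plain(type_pat *rrin, dbl *rrout)
{
   int il, in, j;
   dbl a;
   for(in=0; in<NET.Nneur[0]; in++) NET.Outn[0][in] = rrin[in];
   for(il=1; il<NET.Nlayer; il++)
      for(in=0; in<NET.Nneur[il]; in++)
      {
         a = NET.Weights[il][in][0];                 /* bias term */
         for(j=0; j<NET.Nneur[il-1]; j++)
            a += NET.Weights[il][in][j+1]*NET.Outn[il-1][j];
         NET.Outn[il][in] = (il<NET.Nlayer-1) ? 1./(1.+exp(-a)) : a;
      }
   for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
      rrout[in] = NET.Outn[NET.Nlayer-1][in];
}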
103 
104 
105 /***********************************************************/
106 /* MLP_Out_T */
107 /* */
108 /* computes the output of the Neural Network when called */
109 /* from MLP_Test */
110 /* inputs: double *rrin = inputs of the MLP */
111 /* */
112 /* Author: J.Schwindling 23-Jul-1999 */
113 /***********************************************************/
114 
115 /* extern "C"Dllexport */void MLP_Out_T(type_pat *rrin)
116 {
117  static int i, il, in, j, ilm1, m, mp1;
118  register dbl a;
119 
120 /* input layer */
121 
122  m = NET.Nneur[0]%4;
123  if(m==0) goto L10;
124  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
125 L10:
126  mp1 = m+1;
127  for(i=mp1; i<=NET.Nneur[0]; i+=4)
128  {
129  NET.Outn[0][i-1] = rrin[i-1];
130  NET.Outn[0][i] = rrin[i];
131  NET.Outn[0][i+1] = rrin[i+1];
132  NET.Outn[0][i+2] = rrin[i+2];
133  }
134 
135 /* hidden and output layers */
136 
137 /* for(in=0;in<NET.Nneur[0]; in++) printf("%e %e\n",
138  NET.Outn[0][in],NET.Weights[1][0][in]);
139  printf("\n"); */
140  for(il=1; il<NET.Nlayer; il++)
141  {
142  ilm1 = il-1;
143  m = NET.Nneur[ilm1]%4;
144  for(in=0; in<NET.Nneur[il]; in++)
145  {
146  a = NET.Weights[il][in][0];
147  if(m==0) goto L20;
148  for(j=1;j<=m;j++) a +=
149  NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
150 L20:
151  mp1 = m+1;
152  for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
153  {
154  a +=
155  NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
156  NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
157  NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
158  NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
159  }
160  switch(NET.T_func[il][in])
161  {
162  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
163  break;
164  case 1: NET.Outn[il][in] = a;
165  break;
166  case 0: NET.Outn[il][in] = 0;
167  break;
168  }
169  }
170  }
171 }
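/* --- Added note: the m = n%4 prologue plus the stride-4 loop in
   MLP_Out_T computes exactly the dot product below; the unrolling only
   exists to help the compiler pipeline the multiply-adds. A minimal
   scalar equivalent (added sketch, not in the original): --- */
static dbl MLP_DotPlusBias(const dbl *w, const dbl *x, int n)
{
   int j;
   dbl a = w[0];                  /* w[0] is the bias, as above */
   for(j=0; j<n; j++) a += w[j+1]*x[j];
   return a;
}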
172 
173 
174 /***********************************************************/
175 /* MLP_Out2 */
176 /* */
177 /* computes the output of the Neural Network */
178 /* inputs: double *rrin = inputs of the MLP */
179 /* outputs: double *rrout = outputs of the MLP */
180 /* */
181 /* Author: J.Schwindling 29-Mar-99 */
182 /* Modified: J.Schwindling 16-Jul-99 unrolled loops */
183 /* Modified: J.Schwindling 20-Jul-99 fast sigmoid */
184 /***********************************************************/
185 
186 /* extern "C"Dllexport */void MLP_Out2(type_pat *rrin)
187 {
188 // static int il, in, m, mp1, i0, ilm1;
189  static int il, in, m, mp1;
190  register int i;
191  dbl **rrout, **deriv1;
192  register dbl *prrout;
193  type_pat *prrin;
194  int nhid = NET.Nneur[1];
195  int nin = NET.Nneur[0];
196 
197  rrout = NET.Outn;
198  deriv1 = NET.Deriv1;
199 
200  m = NET.Nneur[0]%4;
201  if(m==0) goto L10;
202  if(m==1)
203  {
204  rrout[0][0] = rrin[1];
205  goto L10;
206  }
207  else if(m==2)
208  {
209  rrout[0][0] = rrin[1];
210  rrout[0][1] = rrin[2];
211  goto L10;
212  }
213  else if(m==3)
214  {
215  rrout[0][0] = rrin[1];
216  rrout[0][1] = rrin[2];
217  rrout[0][2] = rrin[3];
218  goto L10;
219  }
220 L10:
221  mp1 = m+1;
222  prrout = &(rrout[0][mp1]);
223  prrin = &(rrin[mp1+1]);
224  for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
225  {
226  *(prrout-1) = *(prrin-1);
227  *prrout = *prrin;
228  *(prrout+1)= *(prrin+1);
229  *(prrout+2) = *(prrin+2);
230  }
231 
232 /* input layer */
233 
234  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
235  NET.Outn[1],nhid,nin);
236 
237 
238 /* hidden and output layers */
239 
240  for(il=2; il<NET.Nlayer; il++)
241  {
242  MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
243  MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
244  NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
245  }
246  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
247  deriv1[NET.Nlayer-1][in] = 1;
248 }
249 
250 
251 /***********************************************************/
252 /* MLP_Test_MM */
253 /* */
254 /* computes the MLP error function using matrix-matrix */
255 /* multiplications */
256 /* inputs: int ifile = file number: 0=learn, 1=test */
257 /* dbl *tmp = a pointer to an array of size */
258 /* 2 x number of neurons in first */
259 /* hidden layer */
260 /* */
261 /* return value (dbl) = error value */
262 /* */
263 /* Author: J.Schwindling 25-Jan-2000 */
264 /***********************************************************/
265 
266 /* extern "C"Dllexport */dbl MLP_Test_MM(int ifile, dbl *tmp)
267 {
268  int ipat;
269  int npat = PAT.Npat[ifile];
270  int nhid = NET.Nneur[1];
271  int nin = NET.Nneur[0];
272  int jpat, j, il, ilm1, m, in, mp1;
273  dbl err, a, rrans;
274  dbl *pweights, *ptmp;
275 
276  err = 0;
277  for(ipat=0; ipat<npat-1; ipat+=2)
278  {
279  MLP_MM2rows(tmp, &(PAT.vRin[ifile][ipat*(nin+1)]),
280  NET.vWeights[1], 2, nhid, nin+1,
281  nin+1, nin+1);
282 
283  switch(NET.T_func[1][0])
284  {
285  case 2:
286  ptmp = &(tmp[0]);
287  MLP_vSigmoide(ptmp,2*nhid);
288  break;
289 
290  case 1:
291  break;
292 
293  case 0:
294  for(jpat=0; jpat<2; jpat++)
295  {
296  for(j=0; j<nhid; j++)
297  {
298  tmp[j+jpat*nhid] = 0;
299  }
300  }
301  break;
302  }
303 
304  for(jpat=0; jpat<2; jpat++)
305  {
306  for(in=0; in<nhid; in++)
307  {
308  NET.Outn[1][in] = tmp[jpat*nhid+in];
309  }
310  for(il=2; il<NET.Nlayer; il++)
311  {
312  ilm1 = il-1;
313  m = NET.Nneur[ilm1]%4;
314  for(in=0; in<NET.Nneur[il]; in++)
315  {
316  pweights = &(NET.Weights[il][in][0]);
317  a = *pweights;
318  pweights++;
319  if(m==0) goto L20;
320  for(j=1;j<=m;j++,pweights++) a +=
321  (*pweights)*NET.Outn[ilm1][j-1];
322 L20:
323  mp1 = m+1;
324  for(j=mp1; j<=NET.Nneur[ilm1];
325  j+=4, pweights+=4)
326  {
327  a +=
328  *(pweights+3)*NET.Outn[ilm1][j+2]+
329  *(pweights+2)*NET.Outn[ilm1][j+1]+
330  *(pweights+1)*NET.Outn[ilm1][j]+
331  *(pweights )*NET.Outn[ilm1][j-1];
332  }
333  switch(NET.T_func[il][in])
334  {
335  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
336  break;
337  case 1: NET.Outn[il][in] = a;
338  break;
339  case 0: NET.Outn[il][in] = 0;
340  break;
341  }
342  }
343  if(il == NET.Nlayer-1)
344  {
345  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
346  {
347  rrans = (dbl) PAT.Rans[ifile][ipat+jpat][in];
348  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
349  (rrans-NET.Outn[NET.Nlayer-1][in])*
350  PAT.Pond[ifile][ipat+jpat];
351  }
352  }
353  }
354  }
355  }
356 
357 /* case where the number of patterns is odd */
358  for(ipat=ipat; ipat<npat; ipat++)
359  {
360  MLP_MatrixVector(NET.vWeights[1],
361  &(PAT.vRin[ifile][ipat*(nin+1)]),tmp,
362  nhid,nin+1);
363 
364  switch(NET.T_func[1][0])
365  {
366  case 2:
367  ptmp = &(tmp[0]);
368  MLP_vSigmoide(ptmp,2*nhid);
369  break;
370 
371  case 1:
372  break;
373 
374  case 0:
375  for(j=0; j<nhid; j++)
376  {
377  tmp[j] = 0;
378  }
379  break;
380  }
381 
382  for(in=0; in<nhid; in++)
383  {
384  NET.Outn[1][in] = tmp[in];
385  }
386  for(il=2; il<NET.Nlayer; il++)
387  {
388  ilm1 = il-1;
389  m = NET.Nneur[ilm1]%4;
390  for(in=0; in<NET.Nneur[il]; in++)
391  {
392  pweights = &(NET.Weights[il][in][0]);
393  a = *pweights;
394  pweights++;
395  if(m==0) goto L25;
396  for(j=1;j<=m;j++,pweights++) a +=
397  (*pweights)*NET.Outn[ilm1][j-1];
398 L25:
399  mp1 = m+1;
400  for(j=mp1; j<=NET.Nneur[ilm1];
401  j+=4, pweights+=4)
402  {
403  a +=
404  *(pweights+3)*NET.Outn[ilm1][j+2]+
405  *(pweights+2)*NET.Outn[ilm1][j+1]+
406  *(pweights+1)*NET.Outn[ilm1][j]+
407  *(pweights )*NET.Outn[ilm1][j-1];
408  }
409  switch(NET.T_func[il][in])
410  {
411  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
412  break;
413  case 1: NET.Outn[il][in] = a;
414  break;
415  case 0: NET.Outn[il][in] = 0;
416  break;
417  }
418  }
419  if(il == NET.Nlayer-1)
420  {
421  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
422  {
423  rrans = (dbl) PAT.Rans[ifile][ipat][in];
424  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
425  (rrans-NET.Outn[NET.Nlayer-1][in])*
426  PAT.Pond[ifile][ipat];
427  }
428  }
429  }
430  }
431  return(err);
432 }
433 
434 
435 /***********************************************************/
436 /* MLP_Test */
437 /* */
438 /* computes the MLP error function */
439 /* inputs: int ifile = file number: 0=learn, 1=test */
440 /* int regul = 0: no regularisation term */
441 /* 1: regularisation term */
442 /* (for hybrid learning method) */
443 /* */
444 /* return value (dbl) = error value */
445 /* */
446 /* Author: J.Schwindling 31-Mar-99 */
447 /***********************************************************/
448 
449 /* extern "C"Dllexport */dbl MLP_Test(int ifile,int regul)
450 {
451  dbl err, rrans;
452  int in,jn,ipat,ipati;
453 
454  dbl *tmp;
455 
456  tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
457  if(tmp == 0) /* not enough memory */
458  {
459  printf("not enough memory in MLP_Test\n");
460  err = 0;
461  for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
462  {
463  if(ifile==0)
464  {
465  ipati = ExamplesIndex[ipat];
466  }
467  else
468  {
469  ipati = ipat;
470  }
471  MLP_Out_T(PAT.Rin[ifile][ipati]);
472  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
473  {
474  rrans = (dbl) PAT.Rans[ifile][ipati][in];
475  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
476  (rrans-NET.Outn[NET.Nlayer-1][in])*
477  PAT.Pond[ifile][ipati];
478  }
479  }
480 
481  if(regul>=1)
482  {
483  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
484  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
485  {
486  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
487  NET.Weights[NET.Nlayer-1][in][jn];
488  }
489  }
490  free(tmp);
491  return(err);
492  }
493  else /* computation using matrix - matrix multiply */
494  {
495  err = MLP_Test_MM(ifile, tmp);
496  if(regul>=1)
497  {
498  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
499  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
500  {
501  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
502  NET.Weights[NET.Nlayer-1][in][jn];
503  }
504  }
505  free(tmp);
506  return(err);
507  }
508 }
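/* --- Added note: both branches above compute the weighted sum-of-squares
   error
      E = sum_pat Pond[pat] * sum_out (Rans - Outn)^2
   and, when regul>=1, add the hybrid-method penalty
      Alambda * sum (last-layer weights)^2 ,
   i.e. only the linear output layer is regularised, consistent with the
   SetLambda and MLP_ResLin routines below. --- */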
509 
510 
511 /***********************************************************/
512 /* MLP_Stochastic */
513 /* */
514 /* one epoch of MLP stochastic training */
515 /* (optimized for speed) */
516 /* */
517 /* Author: J.Schwindling 08-Jul-99 */
518 /* Modified: J.Schwindling 20-Jul-99 remove unused cases */
519 /***********************************************************/
520 
521 /* extern "C"Dllexport */dbl MLP_Stochastic()
522 {
523  int ipat, ii, inm1;
524  dbl err = 0;
525  int il, in1, in, itest2;
526  dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
527  dbl eta, eps;
528  register dbl a, b, dd, a1, a2, a3, a4;
529  dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
530  dbl ***weights;
531 
532  if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
533  weights = NET.Weights;
534 /* shuffle patterns */
535  ShuffleExamples(PAT.Npat[0],ExamplesIndex);
536 
537 /* reduce learning parameter */
538  if(LEARN.Decay<1) EtaDecay();
539 
540  eta = -LEARN.eta;
541  eps = LEARN.epsilon;
542 
543 /* loop on the examples */
544  for(ipat=0;ipat<PAT.Npat[0];ipat++)
545  {
546  ii = ExamplesIndex[ipat];
547  pond = PAT.Pond[0][ii];
548 
549  MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)]));
550 
551 /* next lines are equivalent to DeDwSum */
552  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
553  {
554  deriv = NET.Deriv1[NET.Nlayer-1][in];
555  a = (dbl) PAT.Rans[0][ii][in];
556  b = NET.Outn[NET.Nlayer-1][in]-a;
557  err += b*b*pond;
558  NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
559  }
560 
561  for(il=NET.Nlayer-2; il>0; il--)
562  {
563  dd = NET.Delta[il+1][0];
564  for(in=0; in<NET.Nneur[il]-3; in+=4)
565  {
566  deriv1 = NET.Deriv1[il][in];
567  deriv2 = NET.Deriv1[il][in+1];
568  deriv3 = NET.Deriv1[il][in+2];
569  deriv4 = NET.Deriv1[il][in+3];
570  itest2 = (NET.Nneur[il+1]==1);
571  a1 = dd*weights[il+1][0][in+1];
572  a2 = dd*weights[il+1][0][in+2];
573  a3 = dd*weights[il+1][0][in+3];
574  a4 = dd*weights[il+1][0][in+4];
575  if(itest2) goto L1;
576  pdelta = &(NET.Delta[il+1][1]);
577  for(in1=1; in1<NET.Nneur[il+1];
578  in1++, pdelta++)
579  {
580  a1 += *pdelta * weights[il+1][in1][in+1];
581  a2 += *pdelta * weights[il+1][in1][in+2];
582  a3 += *pdelta * weights[il+1][in1][in+3];
583  a4 += *pdelta * weights[il+1][in1][in+4];
584  }
585 L1: NET.Delta[il][in] = a1*deriv1;
586  NET.Delta[il][in+1] = a2*deriv2;
587  NET.Delta[il][in+2] = a3*deriv3;
588  NET.Delta[il][in+3] = a4*deriv4;
589  }
590  for(in=in; in<NET.Nneur[il]; in++)
591  {
592  deriv = NET.Deriv1[il][in];
593  itest2 = (NET.Nneur[il+1]==1);
594  a = dd*weights[il+1][0][in+1];
595  if(itest2) goto L2;
596  pdelta = &(NET.Delta[il+1][1]);
597  for(in1=1; in1<NET.Nneur[il+1];
598  in1++, pdelta++)
599  {
600  a += *pdelta *
601  weights[il+1][in1][in+1];
602  }
603 L2: NET.Delta[il][in] = a*deriv;
604  }
605 
606  } /* end of loop on layers */
607 
608 
609 /* update the weights */
610  if(eps==0)
611  {
612  for(il=1; il<NET.Nlayer; il++)
613  {
614  inm1 = NET.Nneur[il-1];
615  for(in=0; in<NET.Nneur[il]-3; in+=4)
616  {
617  a1 = NET.Delta[il][in];
618  a2 = NET.Delta[il][in+1];
619  a3 = NET.Delta[il][in+2];
620  a4 = NET.Delta[il][in+3];
621  pout = &(NET.Outn[il-1][0]);
622  weights[il][in][0] += a1;
623  weights[il][in+1][0] += a2;
624  weights[il][in+2][0] += a3;
625  weights[il][in+3][0] += a4;
626  weights[il][in][1] += a1* (*pout);
627  weights[il][in+1][1] += a2* (*pout);
628  weights[il][in+2][1] += a3* (*pout);
629  weights[il][in+3][1] += a4* (*pout);
630  pout++;
631  pw1 = &(weights[il][in][2]);
632  pw2 = &(weights[il][in+1][2]);
633  pw3 = &(weights[il][in+2][2]);
634  pw4 = &(weights[il][in+3][2]);
635  for(in1=2; in1<=inm1;
636  ++in1, ++pout, ++pw1, ++pw2,
637  ++pw3, ++pw4)
638  {
639  *pw1 += a1 * *pout;
640  *pw2 += a2 * *pout;
641  *pw3 += a3 * *pout;
642  *pw4 += a4 * *pout;
643  }
644  }
645  for(in=in; in<NET.Nneur[il]; in++)
646  {
647  a1 = NET.Delta[il][in];
648  pout = &(NET.Outn[il-1][0]);
649  weights[il][in][0] += a1;
650  weights[il][in][1] += a1* (*pout);
651  pout++;
652  pw1 = &(weights[il][in][2]);
653  for(in1=2; in1<=inm1;
654  ++in1, ++pout, ++pw1)
655  {
656  *pw1 += a1 * *pout;
657  }
658  }
659  }
660  }
661  else
662  {
663  for(il=1; il<NET.Nlayer; il++)
664  {
665  for(in=0; in<NET.Nneur[il]; in++)
666  {
667 
668  a = NET.Delta[il][in];
669  LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
670  NET.Weights[il][in][0] += LEARN.Odw[il][in][0];
671 
672  b = a*NET.Outn[il-1][0];
673  LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
674  NET.Weights[il][in][1] += LEARN.Odw[il][in][1];
675 
676  for(in1=2; in1<=NET.Nneur[il-1]; in1++)
677  {
678  b = a*NET.Outn[il-1][in1-1];
679  LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
680  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
681  }
682  }
683  }
684  }
685 
686  } /* end of loop on examples */
687  return(err);
688 }
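/* --- Added illustration: per example, MLP_Stochastic performs plain
   backprop with momentum. NET.Delta already carries the factor
   -LEARN.eta, so the eps!=0 branch above is equivalent to this compact
   per-neuron update (added sketch, not in the original): --- */
static void MLP_StochUpdateOne(int il, int in)
{
   int in1;
   dbl a = NET.Delta[il][in];     /* contains -eta * dE/d(activation) */
   dbl x;
   for(in1=0; in1<=NET.Nneur[il-1]; in1++)
   {
      x = (in1==0) ? 1 : NET.Outn[il-1][in1-1];    /* index 0 = bias */
      LEARN.Odw[il][in][in1] = a*x + LEARN.epsilon*LEARN.Odw[il][in][in1];
      NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
   }
}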
689 
690 
691 /***********************************************************/
692 /* MLP_Epoch */
693 /* */
694 /* one epoch of MLP training */
695 /* inputs: int iepoch = epoch number */
696 /* dbl *alpmin = optimal alpha from Line Search*/
697 /* */
698 /* return value (dbl) = error value on learning sample */
699 /* BEFORE changing the weights */
700 /* */
701 /* Author: J.Schwindling 31-Mar-99 */
702 /* Modified: J.Schwindling 09-Apr-99 */
703 /* re-organize routine */
704 /* J.Schwindling 13-Apr-99 */
705 /* remove Quickprop algorithm */
706 /* implement Ribiere-Polak conjugate grad. */
707 /***********************************************************/
708 
709 /* extern "C"Dllexport */dbl MLP_Epoch(int iepoch, dbl *alpmin, int *Ntest)
710 {
711  dbl err, ONorm, beta, prod, ddir;
712 /* int *index;*/
713  int Nweights, Nlinear, ipat, ierr;
714  int nn;
715 
716  err = 0;
717  *alpmin = 0.;
718 
719  Nweights = NET.Nweights;
720  Nlinear = NET.Nneur[NET.Nlayer-2] + 1;
721 
722  if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
723 /* stochastic minimization */
724  if(LEARN.Meth==1)
725  {
726 
727  err = MLP_Stochastic();
728 
729  }
730  else
731  {
732  if(iepoch==1 && LEARN.Meth==7)
733  {
734  SetLambda(10000);
735  MLP_ResLin();
736  if(NET.Debug>=2) PrintWeights();
737  }
738 
739 /* save previous gradient and reset current one */
740  DeDwSaveZero();
741  if(LEARN.Meth==16)
742  {
743  ShuffleExamples(PAT.Npat[0],ExamplesIndex);
744  nn = PAT.Npat[0];
745  PAT.Npat[0] = nn/10;
746  for(ipat=0;ipat<nn;ipat++)
747  {
748  ierr = MLP_Train(&ExamplesIndex[ipat],&err);
749  if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
750  }
751  }
752  else
753  {
754  for(ipat=0;ipat<PAT.Npat[0];ipat++)
755  {
756  ierr = MLP_Train(&ipat,&err);
757  if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
758  }
759  }
760  DeDwScale(PAT.Npat[0]);
761  if(LEARN.Meth==2) StochStep();
762  if(LEARN.Meth==3)
763  {
764  SteepestDir();
765  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
766  }
767 
768 /* Conjugate Gradients Ribiere - Polak */
769  if(LEARN.Meth==4)
770  {
771  if((iepoch-1)%LEARN.Nreset==0)
772  {
773  LEARN.Norm = DeDwNorm(); /* for next epoch */
774  SteepestDir();
775  }
776  else
777  {
778  ONorm = LEARN.Norm;
779  LEARN.Norm = DeDwNorm();
780  prod = DeDwProd();
781  beta = (LEARN.Norm-prod)/ONorm;
782  CGDir(beta);
783  }
784  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
785  }
786 
787 /* Conjugate Gradients Fletcher - Reeves */
788  if(LEARN.Meth==5)
789  {
790  if((iepoch-1)%LEARN.Nreset==0)
791  {
792  LEARN.Norm = DeDwNorm(); /* for next epoch */
793  SteepestDir();
794  }
795  else
796  {
797  ONorm = LEARN.Norm;
798  LEARN.Norm = DeDwNorm();
799  beta = LEARN.Norm/ONorm;
800  CGDir(beta);
801  }
802  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
803  }
804  if(LEARN.Meth==6)
805  {
806  if((iepoch-1)%LEARN.Nreset==0)
807  {
808  SteepestDir();
809  InitBFGSH(Nweights);
810  }
811  else
812  {
813  GetGammaDelta();
814  ierr = GetBFGSH(Nweights);
815  if(ierr)
816  {
817  SteepestDir();
818  InitBFGSH(Nweights);
819  }
820  else
821  {
822  BFGSdir(Nweights);
823  }
824  }
825  ddir = DerivDir();
826  if(ddir>0)
827  {
828  SteepestDir();
829  InitBFGSH(Nweights);
830  ddir = DerivDir();
831  }
832  if(LineSearch(alpmin,Ntest,err)==1)
833  {
834  InitBFGSH(Nweights);
835  SteepestDir();
836  if(LineSearch(alpmin,Ntest,err)==1)
837  {
838  printf("Line search fail \n");
839  }
840  }
841  }
842  if(LEARN.Meth==7)
843  {
844  if((iepoch-1)%LEARN.Nreset==0)
845  {
846  SteepestDir();
847  InitBFGSH(Nweights-Nlinear);
848  }
849  else
850  {
851  if(NET.Debug>=5) printf("Before GetGammaDelta \n");
852  GetGammaDelta();
853  if(NET.Debug>=5) printf("After GetGammaDelta \n");
854  ierr = GetBFGSH(Nweights-Nlinear);
855  if(NET.Debug>=5) printf("After GetBFGSH \n");
856  if(ierr)
857  {
858  SteepestDir();
859  InitBFGSH(Nweights-Nlinear);
860  }
861  else
862  {
863  BFGSdir(Nweights-Nlinear);
864  }
865  if(NET.Debug>=5) printf("After BFGSdir \n");
866  }
867  SetLambda(10000);
868  if(LineSearchHyb(alpmin,Ntest)==1)
869  {
870  InitBFGSH(Nweights-Nlinear);
871  SteepestDir();
872  if(LineSearchHyb(alpmin,Ntest)==1)
873  {
874  printf("Line search fail \n");
875  }
876  }
877  }
878  }
879 
880  if(NET.Debug>=5) printf(" End MLP_Epoch\n");
881  return(err);
882 }
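/* --- Added summary of the LEARN.Meth values dispatched above:
      1  stochastic minimization (MLP_Stochastic)
      2  steepest descent with fixed step (StochStep)
      3  steepest descent + line search
      4  conjugate gradients, Polak-Ribiere beta
      5  conjugate gradients, Fletcher-Reeves beta
      6  BFGS
      7  hybrid: BFGS on the hidden weights + exact solution of the
         linear output layer (SetLambda / MLP_ResLin / LineSearchHyb) --- */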
883 
884 
885 /***********************************************************/
886 /* MLP_Train */
887 /* */
888 /* Train Network: compute output, update DeDw */
889 /* inputs: int *ipat = pointer to pattern number */
890 /* input/output: dbl *err = current error */
891 /* */
892 /* return value (int) = error value: 1 wrong pattern number*/
893 /* 2 *ipat < 0 */
894 /* */
895 /* Author: J.Schwindling 09-Apr-99 */
896 /***********************************************************/
897 
898 /* extern "C"Dllexport */int MLP_Train(int *ipat, dbl *err)
899 {
900  int in;
901 
902 /* if(*ipat>=PAT.Npat[0]) return(1);*/
903  if(*ipat<0) return(2);
904 
905 /* MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
906  MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
907  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
908  {
909  *err += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
910  *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
911  PAT.Pond[0][*ipat];
912  }
913  DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
914  return(0);
915 }
916 
917 
918 /***********************************************************/
919 /* StochStepHyb */
920 /* */
921 /* Update the weights according to stochastic minimization */
922 /* formula (for hybrid methods) */
923 /* */
924 /* return value (int) = 0 */
925 /* */
926 /* Author: J.Schwindling 09-Apr-99 */
927 /***********************************************************/
928 
929 /* extern "C"Dllexport */int StochStepHyb()
930 {
931  int il, in1, in;
932  dbl eta, eps;
933 
934  eta = LEARN.eta;
935  eps = LEARN.epsilon;
936  for(il=NET.Nlayer-2; il>0; il--) {
937 
938  for(in=0; in<NET.Nneur[il]; in++) {
939 
940  /* compute delta weights */
941  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
942  LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
943  + eps * LEARN.Odw[il][in][in1];
944  }
945 
946  /* update weights */
947  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
948  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
949  }
950  }
951  }
952  MLP_ResLin();
953  return(0);
954 }
955 
956 
957 /***********************************************************/
958 /* StochStep */
959 /* */
960 /* Update the weights according to stochastic minimization */
961 /* formula */
962 /* */
963 /* return value (int) = 0 */
964 /* */
965 /* Author: J.Schwindling 09-Apr-99 */
966 /***********************************************************/
967 
968 /* extern "C"Dllexport */int StochStep()
969 {
970  int il, in1, in;
971  dbl eta, eps, epseta;
972 
973  eta = -LEARN.eta;
974  eps = LEARN.epsilon;
975  epseta = eps/eta;
976  for(il=NET.Nlayer-1; il>0; il--) {
977  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
978 
979  /* compute delta weights */
980  for(in=0; in<NET.Nneur[il]; in++) {
981  LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
982  + epseta * LEARN.Odw[il][in][in1]);
983  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
984  }
985 
986  }
987  }
988 
989  return(0);
990 }
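/* --- Added note: since eta = -LEARN.eta and epseta = eps/eta, the update
   above reduces to
      Odw = -LEARN.eta * DeDw + eps * Odw ;  Weights += Odw
   i.e. batch gradient descent with momentum eps. --- */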
991 
992 
993 /***********************************************************/
994 /* DeDwNorm */
995 /* */
996 /* computes the norm of the gradient */
997 /* */
998 /* Author: J.Schwindling 31-Mar-99 */
999 /***********************************************************/
1000 
1001 /* extern "C"Dllexport */dbl DeDwNorm()
1002 {
1003  int il,in,jn;
1004  dbl dd=0;
1005  for(il=1; il<NET.Nlayer; il++)
1006  for(in=0; in<NET.Nneur[il]; in++)
1007  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1008  dd += LEARN.DeDw[il][in][jn]*
1009  LEARN.DeDw[il][in][jn];
1010  return(dd);
1011 }
1012 
1013 
1014 /***********************************************************/
1015 /* DeDwProd */
1016 /* */
1017 /* scalar product between new gradient and previous one */
1018 /* (used in Polak-Ribiere Conjugate Gradient method */
1019 /* */
1020 /* Author: J.Schwindling 26-Mar-99 */
1021 /***********************************************************/
1022 
1023 /* extern "C"Dllexport */dbl DeDwProd()
1024 {
1025  int il,in,jn;
1026  dbl dd=0;
1027  for(il=1; il<NET.Nlayer; il++)
1028  for(in=0; in<NET.Nneur[il]; in++)
1029  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1030  dd += LEARN.DeDw[il][in][jn]*
1031  LEARN.ODeDw[il][in][jn];
1032  return(dd);
1033 }
1034 
1035 
1036 /***********************************************************/
1037 /* DeDwZero */
1038 /* */
1039 /* resets to 0 the gradient (should be done before DeDwSum)*/
1040 /* */
1041 /* Author: J.Schwindling 31-Mar-99 */
1042 /***********************************************************/
1043 
1044 /* extern "C"Dllexport */void DeDwZero()
1045 {
1046  int il, in, jn;
1047  for(il=1; il<NET.Nlayer; il++)
1048  for(in=0; in<NET.Nneur[il]; in++)
1049  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1050  LEARN.DeDw[il][in][jn] = 0;
1051 }
1052 
1053 
1054 /***********************************************************/
1055 /* DeDwScale */
1056 /* */
1057 /* divides the gradient by the number of examples */
1058 /* inputs: int Nexamples = number of examples */
1059 /* */
1060 /* Author: J.Schwindling 31-Mar-99 */
1061 /***********************************************************/
1062 
1063 /* extern "C"Dllexport */void DeDwScale(int Nexamples)
1064 {
1065  int il, in, jn;
1066  for(il=1; il<NET.Nlayer; il++)
1067  for(in=0; in<NET.Nneur[il]; in++)
1068  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1069  LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
1070 }
1071 
1072 
1073 /***********************************************************/
1074 /* DeDwSave */
1075 /* */
1076 /* copies the gradient DeDw into ODeDw */
1077 /* */
1078 /* Author: J.Schwindling 31-Mar-99 */
1079 /***********************************************************/
1080 
1081 /* extern "C"Dllexport */void DeDwSave()
1082 {
1083  int il, in, jn;
1084  for(il=1; il<NET.Nlayer; il++)
1085  for(in=0; in<NET.Nneur[il]; in++)
1086  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1087  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1088 }
1089 
1090 
1091 /***********************************************************/
1092 /* DeDwSaveZero */
1093 /* */
1094 /* copies the gradient DeDw into ODeDw */
1095 /* resets DeDw to 0 */
1096 /* */
1097 /* Author: J.Schwindling 23-Apr-99 */
1098 /***********************************************************/
1099 
1100 /* extern "C"Dllexport */void DeDwSaveZero()
1101 {
1102  int il, in, jn;
1103  for(il=1; il<NET.Nlayer; il++)
1104  for(in=0; in<NET.Nneur[il]; in++)
1105  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1106  {
1107  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1108  LEARN.DeDw[il][in][jn] = 0;
1109  }
1110 }
1111 
1112 
1113 /***********************************************************/
1114 /* DeDwSum */
1115 /* */
1116 /* adds to the total gradient the gradient in the current */
1117 /* example */
1118 /* inputs: int Nexamples = number of examples */
1119 /* */
1120 /* Author: J.Schwindling 31-Mar-99 */
1121 /* Modified: B.Mansoulie 23-Apr-99 */
1122 /* faster and still correct way to compute the */
1123 /* sigmoid derivative */
1124 /***********************************************************/
1125 
1126 /* extern "C"Dllexport */int DeDwSum(type_pat *ans, dbl *out, int ipat)
1127 {
1128  int il, in1, in, ii;
1129 /* dbl err[NMAX][4]; */
1130  dbl deriv;
1131  dbl *pout, *pdedw, *pdelta;
1132  register dbl a, b;
1133 /* char buf[50];*/
1134 
1135 /* output layer */
1136  b = (dbl) PAT.Pond[0][ipat];
1137  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
1138  {
1139  deriv = NET.Deriv1[NET.Nlayer-1][in];
1140  NET.Delta[NET.Nlayer-1][in] =
1141  (out[in] - (dbl) ans[in])*deriv*b;
1142  }
1143 
1144  for(il=NET.Nlayer-2; il>0; il--)
1145  {
1146 
1147  for(in=0; in<NET.Nneur[il]; in++)
1148  {
1149  deriv = NET.Deriv1[il][in];
1150  a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
1151  pdelta = &(NET.Delta[il+1][1]);
1152  for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++)
1153  {
1154  a += *pdelta * NET.Weights[il+1][in1][in+1];
1155  }
1156  NET.Delta[il][in] = a * deriv;
1157  }
1158  }
1159 
1160  for(il=1; il<NET.Nlayer; il++)
1161  {
1162  ii = NET.Nneur[il-1];
1163  for(in=0; in<NET.Nneur[il]; in++)
1164  {
1165  a = NET.Delta[il][in];
1166  LEARN.DeDw[il][in][0] += a;
1167  LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
1168  pout = &(NET.Outn[il-1][1]);
1169  pdedw = &(LEARN.DeDw[il][in][2]);
1170  for(in1=1; in1<ii; ++in1, ++pout, ++pdedw)
1171  {
1172  (*pdedw) += a * (*pout);
1173  }
1174  }
1175  }
1176 
1177  return(0);
1178 }
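/* --- Added note: DeDwSum is one backpropagation pass. The output-layer
   Delta is (out - ans) * deriv * pond; the hidden Deltas are obtained by
   propagating the next layer's Deltas back through the weights; finally
   dE/dw accumulates Delta times the input of each connection (index 0
   being the bias input, fixed to 1). --- */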
1179 
1180 
1181 /***********************************************************/
1182 /* SetTransFunc */
1183 /* */
1184 /* to set the transfer function of a neuron */
1185 /* inputs: int layer = layer number (1 -> Nlayer) */
1186 /* int neuron = neuron number (1 -> Nneur) */
1187 /* int func = transfer function: */
1188 /* 0: y=0 */
1189 /* 1: y=x */
1190 /* 2: y=1/(1+exp(-x)) */
1191 /* 3: y=tanh(x) */
1192 /* 4: y=delta*x+1/(1+exp(-x)) */
1193 /* 5: y=exp(-x**2) */
1194 /* */
1195 /* return value (int) = error value: */
1196 /* 0: no error */
1197 /* 1: layer > NLMAX */
1198 /* 2: neuron > NMAX */
1199 /* */
1200 /* Author: J.Schwindling 02-Apr-99 */
1201 /***********************************************************/
1202 
1203 /* extern "C"Dllexport */int SetTransFunc(int layer, int neuron,
1204  int func)
1205 {
1206  if(layer>NLMAX) return(1);
1207 /* if(neuron>NMAX) return(2);*/
1208 
1209  NET.T_func[layer-1][neuron-1] = func;
1210 
1211  return(0);
1212 }
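/* --- Added illustration: the transfer-function codes documented above,
   written out. Only cases 0, 1 and 2 are used by the evaluation routines
   in this file; 3-5 come from the header comment, and the slope `d` of
   case 4 is an assumed parameter (added sketch, not in the original): --- */
static dbl MLP_Transfer(int func, dbl x, dbl d)
{
   switch(func)
   {
      case 0: return 0;                      /* y = 0 */
      case 1: return x;                      /* y = x */
      case 2: return 1./(1.+exp(-x));        /* sigmoid */
      case 3: return tanh(x);                /* y = tanh(x) */
      case 4: return d*x + 1./(1.+exp(-x));  /* linear + sigmoid */
      case 5: return exp(-x*x);              /* gaussian */
   }
   return 0;
}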
1213 
1214 
1215 /***********************************************************/
1216 /* SetDefaultFuncs */
1217 /* */
1218 /* - sets the default transfer functions to sigmoid for */
1219 /* hidden layers and linear for output layer */
1220 /* - sets temperatures to 1 */
1221 /* */
1222 /* */
1223 /* Author: J.Schwindling 08-Apr-99 */
1224 /***********************************************************/
1225 
1226 /* extern "C"Dllexport */void SetDefaultFuncs()
1227 {
1228  int il,in;
1229  for(il=0; il<NET.Nlayer; il++) {
1230  for(in=0; in<NET.Nneur[il]; in++) {
1231  NET.T_func[il][in] = 2;
1232  if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
1233  }
1234  }
1235 
1236 }
1237 
1238 
1239 /***********************************************************/
1240 /* SteepestDir */
1241 /* */
1242 /* sets the search direction to steepest descent */
1243 /* */
1244 /* Author: J.Schwindling 02-Apr-99 */
1245 /***********************************************************/
1246 
1247 /* extern "C"Dllexport */void SteepestDir()
1248 {
1249  int il,in,jn;
1250  for(il=1; il<NET.Nlayer; il++)
1251  for(in=0; in<NET.Nneur[il]; in++)
1252  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1253  dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
1254 }
1255 
1256 
1257 /***********************************************************/
1258 /* CGDir */
1259 /* */
1260 /* sets the search direction to conjugate gradient dir */
1261 /* inputs: dbl beta : d(t+1) = -g(t+1) + beta d(t) */
1262 /* beta should be: */
1263 /* ||g(t+1)||^2 / ||g(t)||^2 (Fletcher-Reeves) */
1264 /* g(t+1) (g(t+1)-g(t)) / ||g(t)||^2 (Polak-Ribiere) */
1265 /* */
1266 /* Author: J.Schwindling 02-Apr-99 */
1267 /***********************************************************/
1268 
1269 /* extern "C"Dllexport */void CGDir(dbl beta)
1270 {
1271  int il,in,jn;
1272  for(il=1; il<NET.Nlayer; il++)
1273  for(in=0; in<NET.Nneur[il]; in++)
1274  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1275  {
1276  dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
1277  beta*dir[il][in][jn];
1278  }
1279 }
1280 
1281 
1282 /***********************************************************/
1283 /* DerivDir */
1284 /* */
1285 /* scalar product between gradient and direction */
1286 /* = derivative along direction */
1287 /* */
1288 /* Author: J.Schwindling 01-Jul-99 */
1289 /***********************************************************/
1290 
1291 /* extern "C"Dllexport */dbl DerivDir()
1292 {
1293  int il,in,jn;
1294  dbl ddir = 0;
1295 
1296  for(il=1; il<NET.Nlayer; il++)
1297  for(in=0; in<NET.Nneur[il]; in++)
1298  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1299  {
1300  ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
1301  }
1302  return(ddir);
1303 }
1304 
1305 
1306 /***********************************************************/
1307 /* GetGammaDelta */
1308 /* */
1309 /* sets the vectors Gamma (=g(t+1)-g(t)) */
1310 /* and delta (=w(t+1)-w(t)) */
1311 /* (for BFGS and Hybrid learning methods) */
1312 /* */
1313 /* Author: J.Schwindling 02-Apr-99 */
1314 /***********************************************************/
1315 
1316 /* extern "C"Dllexport */void GetGammaDelta()
1317 {
1318  int i=0;
1319  int il,in,jn;
1320  for(il=1; il<NET.Nlayer; il++)
1321  for(in=0; in<NET.Nneur[il]; in++)
1322  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1323  {
1324  Gamma[i] = LEARN.DeDw[il][in][jn]-
1325  LEARN.ODeDw[il][in][jn];
1326  delta[i] = LEARN.Odw[il][in][jn];
1327  i++;
1328  }
1329 }
1330 
1331 
1332 /***********************************************************/
1333 /* BFGSdir */
1334 /* */
1335 /* sets the search direction to BFGS direction from the */
1336 /* BFGS matrix */
1337 /* */
1338 /* inputs: int Nweights = number of weights */
1339 /* */
1340 /* Author: J.Schwindling 02-Apr-99 */
1341 /***********************************************************/
1342 
1343 /* extern "C"Dllexport */void BFGSdir(int Nweights)
1344 {
1345  dbl *g, *s;
1346  int kk=0;
1347  int il,i,j,in,jn;
1348 
1349  g = (dbl*) malloc(NET.Nweights*sizeof(dbl));
1350  s = (dbl*) malloc(Nweights*sizeof(dbl));
1351 
1352  for(il=1; kk<Nweights; il++)
1353  for(in=0; in<NET.Nneur[il]; in++)
1354  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1355  {
1356  g[kk] = LEARN.DeDw[il][in][jn];
1357  kk++;
1358  }
1359  for(i=0; i<Nweights; i++)
1360  {
1361  s[i] = 0;
1362  for(j=0; j<Nweights; j++)
1363  {
1364  s[i] += BFGSH[i][j] * g[j];
1365  }
1366  }
1367 
1368  kk = 0;
1369  for(il=1; kk<Nweights; il++)
1370  for(in=0; in<NET.Nneur[il]; in++)
1371  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1372  {
1373  dir[il][in][jn] = -s[kk];
1374  kk++;
1375  }
1376  free(g);
1377  free(s);
1378 }
1379 
1380 
1381 /***********************************************************/
1382 /* InitBFGSH */
1383 /* */
1384 /* initializes BFGS matrix to identity */
1385 /* */
1386 /* inputs: int Nweights = number of weights */
1387 /* */
1388 /* Author: J.Schwindling 02-Apr-99 */
1389 /***********************************************************/
1390 
1391 /* extern "C"Dllexport */void InitBFGSH(int Nweights)
1392 {
1393  int i,j;
1394  for(i=0; i<Nweights; i++)
1395  for(j=0; j<Nweights; j++)
1396  {
1397  BFGSH[i][j] = 0;
1398  if(i==j) BFGSH[i][j] = 1;
1399  }
1400 }
1401 
1402 
1403 /***********************************************************/
1404 /* GetBFGSH */
1405 /* */
1406 /* sets the BFGS matrix */
1407 /* */
1408 /* inputs: int Nweights = number of weights */
1409 /* */
1410 /* return value (int) = 0 if no problem */
1411 /* 1 if deltaTgamma = 0 -> switch to */
1412 /* steepest dir */
1413 /* */
1414 /* Author: J.Schwindling 02-Apr-99 */
1415 /* Modified: J.Schwindling 04-May-99 */
1416 /* computations as Nw^2 , matrices removed */
1417 /* Modified: J.Schwindling 11-Jun-99 */
1418 /* return value */
1419 /***********************************************************/
1420 
1421 /* extern "C"Dllexport */int GetBFGSH(int Nweights)
1422 {
1423  typedef double dble;
1424  dble deltaTgamma=0;
1425  dble factor=0;
1426  dble *Hgamma;
1427  dble *tmp;
1428  register dble a, b;
1429  int i,j;
1430 
1431  Hgamma = (dble *) malloc(Nweights*sizeof(dble));
1432  tmp = (dble *) malloc(Nweights*sizeof(dble));
1433 
1434  for(i=0; i<Nweights; i++)
1435  {
1436  deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
1437  a = 0;
1438  b = 0;
1439  for(j=0; j<Nweights; j++)
1440  {
1441  a += (dble) BFGSH[i][j] * (dble) Gamma[j];
1442  b += (dble) Gamma[j] * (dble) BFGSH[j][i];
1443  }
1444  Hgamma[i] = a;
1445  tmp[i] = b;
1446  factor += (dble) Gamma[i]*Hgamma[i];
1447  }
1448 if(deltaTgamma == 0) { free(Hgamma); free(tmp); return 1; } /* free work arrays before bailing out */
1449  a = 1 / deltaTgamma;
1450  factor = 1 + factor*a;
1451 
1452  for(i=0; i<Nweights; i++)
1453  {
1454  b = (dble) delta[i];
1455  for(j=0; j<Nweights; j++)
1456  BFGSH[i][j] += (dbl) (factor*b* (dble)
1457  delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;
1458  }
1459  free(Hgamma);
1460  free(tmp);
1461  return 0;
1462 }
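/* --- Added note: GetBFGSH implements the standard BFGS update of the
   approximate inverse Hessian H,
      H' = H + (1 + g.Hg/d.g) * (d d^T)/(d.g) - (H g d^T + d g^T H)/(d.g)
   with d = delta = w(t+1)-w(t) and g = Gamma = grad(t+1)-grad(t);
   in the code "a" is 1/(d.g), "factor" is 1 + g.Hg/(d.g), Hgamma holds
   H g and tmp holds g^T H. --- */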
1463 
1464 
1465 /***********************************************************/
1466 /* LineSearch */
1467 /* */
1468 /* search along the line defined by dir */
1469 /* */
1470 /* outputs: dbl *alpmin = optimal step length */
1471 /* */
1472 /* Author: B.Mansoulie 01-Jul-98 */
1473 /***********************************************************/
1474 
1475 /* extern "C"Dllexport */
1476 int LineSearch(dbl *alpmin, int *Ntest, dbl Err0)
1477 {
1478  dbl ***w0;
1479  dbl alpha1, alpha2, alpha3;
1480  dbl err1, err2, err3;
1481  dbl tau;
1482  int icount, il, in, jn;
1483 
1484  tau=LEARN.Tau;
1485 
1486 /* store weights before line search */
1487 
1488  *Ntest = 0;
1489  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1490  for(il=1; il<NET.Nlayer; il++)
1491  {
1492  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1493  for(in=0; in<NET.Nneur[il]; in++)
1494  {
1495  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1496  sizeof(dbl));
1497  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1498  {
1499  w0[il][in][jn] = NET.Weights[il][in][jn];
1500  }
1501  }
1502  }
1503 
1504 /* compute error(w0) */
1505 
1506 /* err1 = MLP_Test(0,0);
1507  (*Ntest) ++;*/
1508  err1 = Err0;
1509 
1510 if(NET.Debug>=4) printf("starting err= %f\n",err1);
1511 
1512  *alpmin = 0;
1513  alpha1 = 0;
1514 /* alpha2 = 0.05;
1515  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1516  alpha2 = LastAlpha;
1517  if(alpha2 < 0.01) alpha2 = 0.01;
1518  if(alpha2 > 2.0) alpha2 = 2.0;
1519  MLP_Line(w0,alpha2);
1520  err2 = MLP_Test(0,0);
1521  (*Ntest) ++;
1522  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1523 
1524  alpha3 = alpha2;
1525  err3 = err2;
1526 
1527 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1528  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1529 
1530  if(err1>err2)
1531  {
1532  for(icount=1;icount<=100;icount++)
1533  {
1534  alpha3 = alpha3*tau;
1535  MLP_Line(w0,alpha3);
1536  err3 =MLP_Test(0,0);
1537  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);
1538  (*Ntest) ++;
1539  if(err3>err2) break;
1540  alpha1 = alpha2;
1541  err1 = err2;
1542  alpha2 = alpha3;
1543  err2 = err3;
1544  }
1545  if(icount>=100) /* line search fails */
1546  {
1547  MLP_Line(w0,0); /* reset weights */
1548 for(il=1; il<NET.Nlayer; il++) {for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]);} free(w0); /* free all levels, not just the top array */
1549  return(1);
1550  }
1551  }
1552  else
1553  {
1554  for(icount=1;icount<=100;icount++)
1555  {
1556  alpha2 = alpha2/tau;
1557  MLP_Line(w0,alpha2);
1558  err2 = MLP_Test(0,0);
1559  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1560  (*Ntest) ++;
1561  if(err1>err2) break;
1562  alpha3 = alpha2;
1563  err3 = err2;
1564  }
1565  if(icount>=100) /* line search fails */
1566  {
1567  MLP_Line(w0,0); /* reset weights */
1568 for(il=1; il<NET.Nlayer; il++) {for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]);} free(w0); /* free all levels, not just the top array */
1569  LastAlpha = 0.05; /* try to be safe */
1570  return(1);
1571  }
1572  }
1573 
1574 /* find bottom of parabola */
1575 
1576  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1577  -(err2-err1)/(alpha2-alpha1)));
1578  if(*alpmin>10000) *alpmin=10000;
1579 
1580 /* set the weights */
1581  MLP_Line(w0,*alpmin);
1582  LastAlpha = *alpmin;
1583 
1584 /* store weight changes */
1585  for(il=1; il<NET.Nlayer; il++)
1586  for(in=0; in<NET.Nneur[il]; in++)
1587  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1588  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1589  - w0[il][in][jn];
1590 
1591  for(il=1; il<NET.Nlayer; il++)
1592  for(in=0; in<NET.Nneur[il]; in++)
1593  free(w0[il][in]);
1594  for(il=1; il<NET.Nlayer; il++)
1595  free(w0[il]);
1596  free(w0);
1597 
1598  return(0);
1599 }
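/* --- Added note: the "bottom of parabola" step fits a parabola through
   (alpha1,err1), (alpha2,err2), (alpha3,err3) and jumps to its vertex:
      alpmin = 0.5*( a1 + a3 - (e3-e1) /
               ( (e3-e2)/(a3-a2) - (e2-e1)/(a2-a1) ) )
   which is well defined because the search above guarantees the
   bracketing err1 > err2 < err3. --- */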
1600 
1601 
1602 /***********************************************************/
1603 /* DecreaseSearch */
1604 /* */
1605 /* search along the line defined by dir for a point where */
1606 /* error is decreased (faster than full line search) */
1607 /* */
1608 /* outputs: dbl *alpmin = step length */
1609 /* */
1610 /* Author: J.Schwindling 11-May-99 */
1611 /***********************************************************/
1612 
1613 /* extern "C"Dllexport */
1614 int DecreaseSearch(dbl *alpmin, int *Ntest, dbl Err0)
1615 {
1616  dbl ***w0;
1617  dbl alpha2;
1618  dbl err1, err2;
1619  dbl tau;
1620  int icount, il, in, jn;
1621 
1622  tau=LEARN.Tau;
1623 
1624 /* store weights before line search */
1625 
1626  *Ntest = 0;
1627  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1628  for(il=1; il<NET.Nlayer; il++)
1629  {
1630  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1631  for(in=0; in<NET.Nneur[il]; in++)
1632  {
1633  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1634  sizeof(dbl));
1635  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1636  {
1637  w0[il][in][jn] = NET.Weights[il][in][jn];
1638  }
1639  }
1640  }
1641 
1642 /* compute error(w0) */
1643 
1644 /* err1 = MLP_Test(0,0);
1645  (*Ntest) ++;*/
1646  err1 = Err0;
1647 
1648 if(NET.Debug>=4) printf("starting err= %f\n",err1);
1649 
1650  *alpmin = 0;
1651  alpha2 = 0.05;
1652  MLP_Line(w0,alpha2);
1653  err2 = MLP_Test(0,0);
1654  (*Ntest) ++;
1655 
1656  if(err2<err1)
1657  {
1658  *alpmin = alpha2;
1659  }
1660  else
1661  {
1662 
1663 
1664  for(icount=1;icount<=100;icount++)
1665  {
1666  alpha2 = alpha2/tau;
1667  MLP_Line(w0,alpha2);
1668  err2 = MLP_Test(0,0);
1669  (*Ntest) ++;
1670  if(err1>err2) break;
1671  }
1672  if(icount>=100) /* line search fails */
1673  {
1674  MLP_Line(w0,0); /* reset weights */
1675 for(il=1; il<NET.Nlayer; il++) {for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]);} free(w0); /* free all levels, not just the top array */
1676  return(1);
1677  }
1678  *alpmin = alpha2;
1679  }
1680 
1681 /* set the weights */
1682  MLP_Line(w0,*alpmin);
1683 
1684 /* store weight changes */
1685  for(il=1; il<NET.Nlayer; il++)
1686  for(in=0; in<NET.Nneur[il]; in++)
1687  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1688  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1689  - w0[il][in][jn];
1690 
1691  for(il=1; il<NET.Nlayer; il++)
1692  for(in=0; in<NET.Nneur[il]; in++)
1693  free(w0[il][in]);
1694  for(il=1; il<NET.Nlayer; il++)
1695  free(w0[il]);
1696  free(w0);
1697 
1698  return(0);
1699 }
1700 
1701 
1702 /* extern "C"Dllexport */int FixedStep(dbl alpha)
1703 {
1704  dbl ***w0;
1705  int il, in, jn;
1706 
1707  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1708  for(il=1; il<NET.Nlayer; il++)
1709  {
1710  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1711  for(in=0; in<NET.Nneur[il]; in++)
1712  {
1713  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1714  sizeof(dbl));
1715  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1716  {
1717  w0[il][in][jn] = NET.Weights[il][in][jn];
1718  }
1719  }
1720  }
1721 
1722 
1723 /* set the weights */
1724  MLP_Line(w0,alpha);
1725 
1726 /* store weight changes */
1727  for(il=1; il<NET.Nlayer; il++)
1728  for(in=0; in<NET.Nneur[il]; in++)
1729  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1730  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1731  - w0[il][in][jn];
1732 
1733  for(il=1; il<NET.Nlayer; il++)
1734  for(in=0; in<NET.Nneur[il]; in++)
1735  free(w0[il][in]);
1736  for(il=1; il<NET.Nlayer; il++)
1737  free(w0[il]);
1738  free(w0);
1739 
1740  return(0);
1741 }
1742 
1743 /***********************************************************/
1744 /* MLP_Line */
1745 /* */
1746 /* sets the weights to a point along a line */
1747 /* */
1748 /* inputs: dbl ***w0 = initial point */
1749 /* dbl alpha = step length */
1750 /* */
1751 /* Author: B.Mansoulie 01-Jul-98 */
1752 /***********************************************************/
1753 
1754 /* extern "C"Dllexport */
1755 void MLP_Line(dbl ***w0, dbl alpha)
1756 {
1757  register int il,in,jn;
1758 
1759  for(il=1; il<NET.Nlayer; il++)
1760  for(in=0; in<NET.Nneur[il]; in++)
1761  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1762  NET.Weights[il][in][jn] = w0[il][in][jn]+
1763  alpha*dir[il][in][jn];
1764 
1765 }
1766 
1767 
1768 /***********************************************************/
1769 /* LineSearchHyb */
1770 /* */
1771 /* search along the line defined by dir */
1772 /* */
1773 /* outputs: dbl *alpmin = optimal step length */
1774 /* */
1775 /* Author: B.Mansoulie 01-Jul-98 */
1776 /***********************************************************/
1777 
1778 /* extern "C"Dllexport */
1779 int LineSearchHyb(dbl *alpmin, int *Ntest)
1780 {
1781  dbl ***w0;
1782  dbl alpha1, alpha2, alpha3;
1783  dbl err1, err2, err3;
1784  dbl tau;
1785  int icount, il, in, jn;
1786 
1787 /* char buf [50];
1788  sprintf (buf,"entree linesearchhyb\n");
1789  MessageBoxA (0,buf,"dans FreePatterns",MB_OK);*/
1790 
1791  if(NET.Debug>=4){
1792  printf(" entry LineSearchHyb \n");
1793  }
1794  tau=LEARN.Tau;
1795 
1796 /* store weights before line search */
1797 
1798  *Ntest = 0;
1799  w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
1800  for(il=1; il<NET.Nlayer-1; il++)
1801  {
1802  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1803  for(in=0; in<NET.Nneur[il]; in++)
1804  {
1805  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1806  sizeof(dbl));
1807  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1808  {
1809  w0[il][in][jn] = NET.Weights[il][in][jn];
1810  }
1811  }
1812  }
1813 
1814 /* compute error(w0) */
1815  err1 = MLP_Test(0,1);
1816  (*Ntest) ++;
1817 if(NET.Debug>=4) printf("LinesearchHyb starting err= %f\n",err1);
1818 
1819  *alpmin = 0;
1820  alpha1 = 0;
1821 /* alpha2 = 0.05;
1822  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1823  alpha2 = LastAlpha;
1824  if(alpha2 < 0.01) alpha2 = 0.01;
1825  if(alpha2 > 2.0) alpha2 = 2.0;
1826  MLP_LineHyb(w0,alpha2);
1827  err2 = MLP_Test(0,1);
1828  (*Ntest) ++;
1829 
1830  alpha3 = alpha2;
1831  err3 = err2;
1832 
1833 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1834  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1835 
1836  if(err1>err2)
1837  {
1838  for(icount=1;icount<=100;icount++)
1839  {
1840  alpha3 = alpha3*tau;
1841  MLP_LineHyb(w0,alpha3);
1842  err3 = MLP_Test(0,1);
1843  (*Ntest) ++;
1844  if(err3>err2) break;
1845  alpha1 = alpha2;
1846  err1 = err2;
1847  alpha2 = alpha3;
1848  err2 = err3;
1849  }
1850  if(icount>=100) /* line search fails */
1851  {
1852  MLP_LineHyb(w0,0); /* reset weights */
1853 for(il=1; il<NET.Nlayer-1; il++) {for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]);} free(w0); /* free all levels, not just the top array */
1854  return(1);
1855  }
1856  }
1857  else
1858  {
1859  for(icount=1;icount<=100;icount++)
1860  {
1861  alpha2 = alpha2/tau;
1862  MLP_LineHyb(w0,alpha2);
1863  err2 = MLP_Test(0,1);
1864  (*Ntest) ++;
1865  if(err1>err2) break;
1866  alpha3 = alpha2;
1867  err3 = err2;
1868  }
1869  if(icount>=100) /* line search fails */
1870  {
1871  MLP_LineHyb(w0,0); /* reset weights */
1872 for(il=1; il<NET.Nlayer-1; il++) {for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]);} free(w0); /* free all levels, not just the top array */
1873  return(1);
1874  }
1875  }
1876 
1877 /* find bottom of parabola */
1878 
1879  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1880  -(err2-err1)/(alpha2-alpha1)));
1881  if(*alpmin>10000) *alpmin=10000;
1882 
1883 /* set the weights */
1884  MLP_LineHyb(w0,*alpmin);
1885  LastAlpha = *alpmin;
1886 
1887 /* store weight changes */
1888  for(il=1; il<NET.Nlayer-1; il++)
1889  for(in=0; in<NET.Nneur[il]; in++)
1890  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1891  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1892  - w0[il][in][jn];
1893 
1894  for(il=1; il<NET.Nlayer-1; il++)
1895  for(in=0; in<NET.Nneur[il]; in++)
1896  free(w0[il][in]);
1897  for(il=1; il<NET.Nlayer-1; il++)
1898  free(w0[il]);
1899  free(w0);
1900  if(NET.Debug>=4){
1901  printf(" exit LineSearchHyb \n");
1902  }
1903 
1904  return(0);
1905 }
1906 
1907 
1908 /***********************************************************/
1909 /* MLP_LineHyb */
1910 /* */
1911 /* sets the weights to a point along a line */
1912 /* (for hybrid methods) */
1913 /* */
1914 /* inputs: dbl ***w0 = initial point */
1915 /* dbl alpha = step length */
1916 /* */
1917 /* Author: B.Mansoulie 01-Jul-98 */
1918 /***********************************************************/
1919 
1920 /* extern "C"Dllexport */
1921 void MLP_LineHyb(dbl ***w0, dbl alpha)
1922 {
1923  int il,in,jn;
1924  for(il=1; il<NET.Nlayer-1; il++)
1925  for(in=0; in<NET.Nneur[il]; in++)
1926  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1927  {
1928  NET.Weights[il][in][jn] = w0[il][in][jn]+
1929  alpha*dir[il][in][jn];
1930  }
1931  MLP_ResLin();
1932 }
1933 
1934 
1935 /***********************************************************/
1936 /* SetLambda */
1937 /* */
1938 /* sets the coefficient of the regularisation term for */
1939 /* hybrid learning method */
1940 /* */
1941 /* input: double Wmax = typical maximum weight */
1942 /* */
1943 /* Author: J.Schwindling 13-Apr-99 */
1944 /***********************************************************/
1945 
1946 /* extern "C"Dllexport */
1947 void SetLambda(double Wmax)
1948 {
1949  dbl err;
1950  err = MLP_Test(0,0);
1951  LEARN.Alambda =
1952  LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
1953 }
1954 
1955 
1956 /***********************************************************/
1957 /* MLP_ResLin */
1958 /* */
1959 /* resolution of linear system of equations for hybrid */
1960 /* training method */
1961 /* */
1962 /* */
1963 /* Author: B.Mansoulie end-98 */
1964 /* Modified: J.Schwindling 29-APR-99 */
1965 /* use dgels from LAPACK */
1966 /***********************************************************/
1967 
1968 /* extern "C"Dllexport */
1969 void MLP_ResLin()
1970 {
1971 /* dbl rrans[NMAX], rrout[NMAX];*/
1972 /* type_pat rrin[NMAX];*/
1973  doublereal *HR,*dpat; //,*wlin,*SV;
1974  double err,lambda,lambda2;
1975  integer Nl,M,Nhr,khr,nrhs,iret,ierr;
1976  int il, in, inl, ipat;
1977  /*register dbl a;*/ //a unused
1978  char Trans = 'N';
1979 
1980 
1981 /* integer rank; */
1982 // doublereal rcond = -1; /* use machine precision */
1983 
1984  lambda2 = LEARN.Alambda;
1985 
1986 /* Nl = number of linear weights
1987  M = number of terms in linear system = number of examples + regularisation*/
1988  Nl = NET.Nneur[NET.Nlayer-2] + 1;
1989  M = PAT.Npat[0]+Nl;
1990 
1991  integer Lwork = 5 * M;
1992  doublereal *Work = (doublereal*) malloc((int) Lwork*sizeof(doublereal));
1993 
1994 /* memory allocation */
1995  dpat = (doublereal*) malloc((int) M*sizeof(doublereal));
1996 // wlin = (doublereal*) malloc((int) Nl*sizeof(doublereal));
1997 // SV = (doublereal*) malloc((int) Nl*sizeof(doublereal));
1998 
1999  Nhr = M * Nl;
2000  HR = (doublereal*) malloc((int) Nhr*sizeof(doublereal));
2001  err = 0.;
2002  for(ipat=0;ipat<PAT.Npat[0];ipat++)
2003  {
2004 /* *** Filling dpat and HR *** */
2005 /* for(in=0; in<NET.Nneur[0]; in++)
2006  {
2007  rrin[in] = PAT.Rin[0][ipat][in];
2008  }*/
2009 
2010  MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
2011 /* MLP_Out(rrin,rrout);*/
2012  /*for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
2013  {
2014  a = (dbl) PAT.Rans[0][ipat][in]; //a was not used
2015  } */
2016  il = NET.Nlayer-2;
2017  dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
2018  khr = ipat;
2019  HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
2020  for(in=0;in<NET.Nneur[il];in++)
2021  {
2022  khr = M *(in+1) + ipat;
2023  HR[khr] = NET.Outn[il][in]*
2024  (dbl) sqrt(PAT.Pond[0][ipat]);
2025  }
2026  }
2027  il = NET.Nlayer-2;
2028  lambda = sqrt(lambda2);
2029  for(ipat=0;ipat<=NET.Nneur[il];ipat++)
2030  {
2031  dpat[ipat+PAT.Npat[0]] = 0;
2032  for(in=0;in<=NET.Nneur[il];in++)
2033  {
2034  khr = M *in + ipat + PAT.Npat[0];
2035  HR[khr] = 0;
2036  if(in==ipat) HR[khr]=lambda;
2037  }
2038  }
2039  if(NET.Debug>=4)
2040  {
2041  err = MLP_Test(0,0);
2042  printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err);
2043  }
2044 /* */
2045 /* Find the linear weights by solving the linear system */
2046 /* */
2047  nrhs = 1;
2048  ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
2049  &Lwork,&iret);
2050  if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
2051  if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);
2052 
2053 /* ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
2054  &iret);
2055  if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
2056  if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/
2057 
2058  il = NET.Nlayer-1;
2059  for (inl=0; inl<=NET.Nneur[il-1];inl++)
2060  {
2061  NET.Weights[il][0][inl] = dpat[inl];
2062  }
2063  if(NET.Debug>=4)
2064  {
2065  err = MLP_Test(0,0);
2066  printf("ResLin, apres tlsfor, err= %f\n",err);
2067  }
2068  free(Work);
2069  free(dpat);
2070 // free(wlin);
2071  free(HR);
2072 // free(SV);
2073 }
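/* --- Added note: MLP_ResLin solves for the output-layer weights w the
   regularised weighted least-squares problem
      min_w || sqrt(Pond) * (Rans - H w) ||^2 + lambda2 * ||w||^2
   where the columns of H are a constant 1 plus the last hidden layer
   outputs. The penalty is implemented by appending sqrt(lambda2) * I as
   extra rows of HR (with zero right-hand side) before the single call
   to LAPACK dgels. --- */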
2074 
2075 /***********************************************************/
2076 /* EtaDecay */
2077 /* */
2078 /* decreases the learning parameter eta by the factor */
2079 /* LEARN.Decay */
2080 /* */
2081 /* Author: J.Schwindling 14-Apr-99 */
2082 /***********************************************************/
2083 
2084 void EtaDecay()
2085 {
2086  LEARN.eta *= LEARN.Decay;
2087 }
2088 
2089 
2090 /***********************************************************/
2091 /* ShuffleExamples */
2092 /* */
2093 /* Shuffles the learning examples (for stochastic method) */
2094 /* */
2095 /* Author: J.Schwindling 14-Apr-1999 */
2096 /* Modified: J.Schwindling 21-Jul-1999 inline MLP_Rand */
2097 /***********************************************************/
2098 
2099 /* extern "C"Dllexport */
2100 int ShuffleExamples(int n, int *index)
2101 {
2102  int i,ii,itmp;
2103  dbl a = (dbl) (n-1);
2104 
2105  for(i=0;i<n;i++)
2106  {
2107  ii = (int) MLP_Rand(0.,a);
2108  itmp = index[ii];
2109  index[ii] = index[i];
2110  index[i] = itmp;
2111  }
2112  return 0;
2113 }
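/* --- Added note: the loop above swaps every entry with one chosen
   uniformly over the whole array, which is a slightly biased shuffle.
   An unbiased Fisher-Yates variant shrinks the candidate range instead
   (added sketch, not in the original): --- */
static void MLP_ShuffleFY(int n, int *index)
{
   int i, ii, itmp;
   for(i=n-1; i>0; i--)
   {
      ii = (int) MLP_Rand(0., (dbl) i);    /* position in 0..i */
      itmp = index[ii];
      index[ii] = index[i];
      index[i] = itmp;
   }
}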
2114 
2115 
2116 /***********************************************************/
2117 /* MLP_Rand */
2118 /* */
2119 /* Random Numbers (to initialize weights) */
2120 /* */
2121 /* inputs : dbl min, dbl max = random number between */
2122 /* min and max */
2123 /* return value: (double) random number */
2124 /* */
2125 /* Author: J.Schwindling 14-Apr-99 */
2126 /***********************************************************/
2127 
2128 /* extern "C"Dllexport */
2129 double MLP_Rand(dbl mini, dbl maxi)
2130 {
2131 return mini+(maxi-mini)*random()/RAND_MAX;
2132 }
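/* --- Added note: this relies on RAND_MAX matching the range of
   random(). That holds with glibc, where both are 2^31-1, but strictly
   RAND_MAX is specified for rand(); with random() the portable upper
   bound is 2147483647. --- */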
2133 
2134 
2135 /***********************************************************/
2136 /* InitWeights */
2137 /* */
2138 /* initializes the weights to random numbers between */
2139 /* -0.5 : 0.5 */
2140 /* */
2141 /* Author: J.Schwindling 14-Apr-99 */
2142 /***********************************************************/
2143 
2144 /* extern "C"Dllexport */
2145 void InitWeights()
2146 {
2147  int ilayer,ineur,i;
2148 
2149  for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
2150  for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
2151  for(i=0;i<=NET.Nneur[ilayer-1];i++)
2152  NET.Weights[ilayer][ineur][i]=
2153  (dbl) MLP_Rand(-0.5, 0.5);
2154 }
2155 
2156 
2157 /***********************************************************/
2158 /* PrintWeights */
2159 /* */
2160 /* Print weights on the screen */
2161 /* */
2162 /* Author: J.Schwindling 14-Apr-99 */
2163 /***********************************************************/
2164 
2165 /* extern "C"Dllexport */
2166 void PrintWeights()
2167 {
2168  int ilayer,ineur,i;
2169 
2170  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2171  {
2172  if(MessLang==1)
2173  {
2174  printf("Couche %d\n",ilayer);
2175  }
2176  else
2177  {
2178  printf("Layer %d\n",ilayer);
2179  }
2180  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2181  {
2182  if(MessLang==1)
2183  {
2184  printf("Neurone %d",ineur);
2185  }
2186  else
2187  {
2188  printf("Neuron %d",ineur);
2189  }
2190  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2191  {
2192  printf(" %f",
2193  (double) NET.Weights[ilayer][ineur][i]);
2194  }
2195  printf("\n");
2196  }
2197  printf("\n");
2198  }
2199 }
2200 
2201 
2202 /***********************************************************/
2203 /* ReadPatterns */
2204 /* */
2205 /* parser for learn.pat or test.pat files */
2206 /* */
2207 /* inputs: char *filename = name of the file to read */
2208 /* int ifile = 0: learning examples */
2209 /* 1: test examples */
2210 /* */
2211 /* outputs: int *inet = 1 if a network is defined */
2212 /* int *ilearn = 1 if a learning method is defined*/
2213 /* int *iexamples = 1 if examples are defined */
2214 /* */
2215 /* return value (int) = 0: no error */
2216 /* = -1: file could not be opened */
2217 /* */
2218 /* Author: J.Schwindling 20-Apr-99 */
2219 /* Modified: J.Schwindling 01-Jun-99 return inet, ilearn */
2220 /* iexamples */
2221 /* J.Schwindling 21-Sep-99 return value = -1 */
2222 /***********************************************************/
2223 
2224 #define CLEN 1024
2225 
2226 /* extern "C"Dllexport */
2227 int ReadPatterns(char *filename, int ifile,
2228  int *inet, int *ilearn, int *iexamples)
2229 {
2230 char s[CLEN], s2[CLEN], cc[6], cc2[6];
2231 char otherfile[CLEN];
2232 double p;
2233 //int line,i,j;
2234 int line,i;
2235 //int l,ll,ipat,nmax,il,in,tf;
2236 int l,ll,ipat,nmax;
2237 int np=0; /* number of examples */
2238 int nin=0; /* number of inputs */
2239 int nout=0; /* number of outputs */
2240 int npon=0;
2241 int ntot, ierr;
2242 //char **ss;
2243 char **ss=0;
2244 FILE *LVQpat;
2245 int nlayer, nneur[NLMAX];
2246 
2247 printf("\nLoading file %s\n",filename);
2248 LVQpat=fopen(filename,"r");
2249 if(LVQpat == 0) return -1;
2250 
2251 line=0;
2252 
2253 while(fgets(s,CLEN,LVQpat))
2254  {
2255  if(*s=='N')
2256  {
2257  if(*(s+1)=='N') /* NNEU */
2258  {
2259  printf("Number of neurons %s",s);
2260  *inet = 1;
2261  sscanf(s,"%s %s",cc,s2);
2262  ierr = GetNetStructure(s2,&nlayer,nneur);
2263  if(ierr != 0) return ierr;
2264  ierr = MLP_SetNet(&nlayer,nneur);
2265  if(ierr != 0) return ierr;
2266  }
2267  else
2268  {
2269  sscanf(s,"%s %d",cc,&l);
2270  if(*(cc+1)=='P') /* NPAT */
2271  {
2272  np=l;
2273  printf("Number of patterns %d\n",np);
2274  }
2275  else if(*(cc+1)=='I') /* NINP */
2276  {
2277  nin=l;
2278  PAT.Nin = nin;
2279  printf("Number of inputs %d\n",nin);
2280  }
2281  else if(*(cc+1)=='O' && *(cc+2)=='U') /* NOUT */
2282  {
2283  nout=l;
2284  PAT.Nout = nout;
2285  printf("Number of outputs %d\n",nout);
2286  }
2287  else if(*(cc+1)=='O' && *(cc+2)=='R') /* NORM */
2288  {
2289  DIVERS.Norm=l;
2290  if(l==1) printf("Normalize inputs\n");
2291  }
2292 /* obsolete datacard NLAY */
2293  else if(*(cc+1)=='L')
2294  {
2295  printf("NLAY datacard is no longer needed\n");
2296  }
2297  else if(*(cc+1)=='E') /* NEPO */
2298  {
2299  LEARN.Nepoch=l;
2300  printf("Number of epochs %d\n",l);
2301  }
2302  else if(*(cc+1)=='R') /* NRES */
2303  {
2304  LEARN.Nreset=l;
2305  printf(
2306  "Reset to steepest descent every %d epochs\n",
2307  l);
2308  }
2309  }
2310  }
2311  else if(*s=='L')
2312  {
2313  if(*(s+1)=='P') /* LPAR */
2314  {
2315  sscanf(s,"%s %le",cc,&p);
2316  printf("Learning parameter %f\n",p);
2317  LEARN.eta = (dbl) p;
2318  }
2319  else if(*(s+1)=='M') /* LMET */
2320  {
2321  *ilearn = 1;
2322  sscanf(s,"%s %d",cc,&(LEARN.Meth));
2323  printf("Learning method = ");
2324  switch(LEARN.Meth)
2325  {
2326  case 1: printf("Stochastic Minimization\n");
2327  break;
2328  case 2: printf("Steepest descent with fixed step\n");
2329  break;
2330  case 3: printf("Steepest descent with line search\n"); break;
2331  case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
2332  case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
2333  break;
2334  case 6: printf("BFGS\n");
2335  break;
2336  case 7: printf("Hybrid BFGS-linear\n");
2337  break;
2338  default: printf("Error: unknown method\n"); break;
2339  }
2340 
2341  }
2342  else if(*(s+1)=='T') /* LTAU */
2343  {
2344  sscanf(s,"%s %lf",cc,&p);
2345  printf("Tau %f\n",p);
2346  LEARN.Tau = (dbl) p;
2347  }
2348  else if(*(s+1)=='A') /* LAMB */
2349  {
2350  sscanf(s,"%s %lf",cc,&p);
2351  printf("Lambda %f\n",p);
2352  LEARN.Lambda = (dbl) p;
2353  }
2354  }
2355  else if(*s=='F')
2356  {
2357  if(*(s+1)=='S') /* FSPO */
2358  {
2359  sscanf(s,"%s %le",cc,&p);
2360  printf("Flat spot elimination parameter %f\n",p);
2361  LEARN.delta = (dbl) p;
2362  }
2363  else if(*(s+1)=='I') /* FILE */
2364  {
2365  sscanf(s,"%s %s",cc,otherfile);
2366  ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
2367  if(ierr != 0) return ierr;
2368  }
2369  }
2370  else if(*s=='M') /* momentum */
2371  {
2372  sscanf(s,"%s %le",cc,&p);
2373  printf("Momentum term %f\n",p);
2374  LEARN.epsilon = (dbl) p;
2375  }
2376  else if(*s=='O') /* OUTx */
2377  {
2378  if(*(s+3)=='W') /* OUTW */
2379  {
2380  sscanf(s,"%s %d",cc,&OutputWeights);
2381  if(OutputWeights == 0)
2382  {
2383  printf("Never write file weights.out\n");
2384  }
2385  else if(OutputWeights == -1)
2386  {
2387  printf("Write weights to output file at the end\n");
2388  }
2389  else
2390  {
2391  printf("Write weights to file every %d epochs\n",
2392  OutputWeights);
2393  }
2394  }
2395  else if(*(s+3)=='F') /* OUTF */
2396  {
2397  sscanf(s,"%s %s",cc,cc2);
2398  if(*cc2=='F' || *cc2=='C')
2399  {
2400  DIVERS.Outf = *cc2;
2401  }
2402  else
2403  {
2404  printf(" *** Error while loading file %s at line %s :",
2405  filename,s);
2406  printf(" unknown language\n");
2407  }
2408  }
2409  else
2410  {
2411  printf(" *** Error while loading file %s at line %s\n",
2412  filename,s);
2413  }
2414  }
2415  else if(*s=='R') /* RDWT */
2416  {
2417  sscanf(s,"%s %d",cc,&(NET.Rdwt));
2418  if(NET.Rdwt == 0)
2419  {
2420  printf("Random weights \n");
2421  }
2422  else
2423  {
2424  printf("Read weights from file weights.in\n");
2425  }
2426  }
2427  else if(*s=='S') /* STAT */
2428  {
2429  sscanf(s,"%s %d",cc,&(DIVERS.Stat));
2430  }
2431 /* else if(*s=='T') TFUN
2432  {
2433  sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
2434  SetTransFunc(il,in,tf);
2435  } */
2436  else if(*s=='H') /* HESS */
2437  {
2438  sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
2439  }
2440  else if(*s=='D')
2441  {
2442  if(*(s+1)=='C') /* DCAY */
2443  {
2444  sscanf(s,"%s %le",cc,&p);
2445  LEARN.Decay = p;
2446  printf("Learning parameter decay %f\n",
2447  (double) LEARN.Decay);
2448  }
2449  if(*(s+1)=='B') /* DBIN */
2450  {
2451  sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
2452  printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
2453  }
2454  if(*(s+1)=='E') /* DEBU */
2455  {
2456  sscanf(s,"%s %d",cc,&(NET.Debug));
2457  printf("Debug mode %d\n",NET.Debug);
2458  }
2459  }
2460  else if(*s=='P') /* POND */
2461  {
2462  npon = CountLexemes(s);
2463  if(npon==2)
2464  {
2465  sscanf(s,"%s %d",cc,&(PAT.Iponde));
2466  }
2467  else
2468  {
2469  ss = (char**) malloc((npon+1)*sizeof(char*));
2470  for(i=0;i<=npon;i++)
2471  ss[i]=(char*) malloc(40*sizeof(char));
2472  getnLexemes(npon,s,ss);
2473  sscanf(ss[1],"%d",&(PAT.Iponde));
2474  for(i=2;i<npon;i++)
2475  {
2476  sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
2477  }
2478  }
2479  if(PAT.Iponde==0)
2480  {
2481  npon = 0;
2482  }
2483  else
2484  {
2485  npon = 1;
2486  }
2487  }
2488  else if(*s=='#') /* comments */
2489  {
2490  }
2491  else /* example itself */
2492  {
2493  if(np==0) return 1;
2494  if(nin==0) return 2;
2495  if(nout==0) return 3;
2496 
2497 
2498 /* store number of examples and allocate memory */
2499  if(line==0)
2500  {
2501  PAT.Npat[ifile] = np;
2502  ierr = AllocPatterns(ifile,np,nin,nout,0);
2503  if(ierr != 0) return ierr;
2504  *iexamples = 1;
2505  }
2506 
2507 /* now get example */
2508 
2509  line++;
2510  ll = (line-1)%2;
2511  ipat = (line-1)/2;
2512  /* printf("Loading event \t %d\r",ipat);*/
2513 /* if(ipat>NPMAX)
2514  {
2515  printf("Too many examples in file\n");
2516  printf("Loading %d examples\n",NPMAX);
2517  PAT.Npat[ifile] = NPMAX;
2518  break;
2519  }
2520 */
2521 
2522 /* allocate the number of lines */
2523 
2524  if(line==1)
2525  {
2526 
2527  nmax = nin;
2528  if(nout>nin) nmax=nout;
2529  ss = (char**) malloc((nmax+1)*sizeof(char*));
2530  if(ss == 0) return -111;
2531  for(i=0;i<=nmax;i++)
2532  {
2533  ss[i]=(char*) malloc(40*sizeof(char));
2534  if(ss[i] == 0) return -111;
2535  }
2536  }
2537 
2538  if(ll==0) /* inputs */
2539  {
2540  getnLexemes(nin,s,ss);
2541  for(i=0;i<nin;i++)
2542  {
2543  sscanf(ss[i],"%le",&p);
2544  PAT.Rin[ifile][ipat][i] = (type_pat) p;
2545  }
2546  }
2547  else /* answers */
2548  {
2549  ntot=nout+npon;
2550  getnLexemes(ntot,s,ss);
2551  for(i=0;i<ntot;i++)
2552  {
2553  sscanf(ss[i],"%le",&p);
2554  if(i<nout)
2555  {
2556  PAT.Rans[ifile][ipat][i] = (type_pat) p;
2557  }
2558  else
2559  {
2560  if(PAT.Iponde==1)
2561  {
2562  PAT.Pond[ifile][ipat] =
2563  (type_pat) p;
2564  }
2565  else
2566  {
2567  PAT.Pond[ifile][ipat] =
2568  (type_pat) PAT.Ponds[(int) p -1];
2569  }
2570  }
2571  }
2572  }
2573  }
2574  }
2575  printf("%d examples loaded \n\n",PAT.Npat[ifile]);
2576  fclose(LVQpat);
2577  return 0;
2578 }
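/* A hedged illustration of the datacard format parsed above (the file name
   and all values are invented for the example):

     # learn.pat -- a 2-input, 1-output problem with 3 examples
     NNEU 2,4,1
     NINP 2
     NOUT 1
     NPAT 3
     LMET 7
     NEPO 100
     0.5 1.2
     1
     -0.3 0.8
     0
     1.1 -2.0
     1

   Each example takes two lines: first the inputs, then the answers
   (followed by an event weight when POND is active). NNEU, NPAT, NINP and
   NOUT must all appear before the first example line, or the parser
   returns an error code. */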
2579 
2580 
2581 /* CountLexemes returns the number of lexemes in s separated by blanks.*/
2582 /* extern "C"Dllexport */
2583 int CountLexemes(char *s)
2584 {
2585  char tmp[1024];
2586  int i=0;
2587 
2588  strcpy(tmp,s);
2589  if (strtok(tmp," "))
2590  {
2591  i=1;
2592  while (strtok(NULL," ")) i++;
2593  }
2594  return i;
2595 }
2596 
2597 /* splits s in substrings ss separated by blanks*/
2598 /* extern "C"Dllexport */
2599 void getnLexemes(int n, char *s, char **ss)
2600 {
2601  char tmp[1024];
2602  int i;
2603  strcpy(tmp,s);
2604  if (n>0)
2605  {
2606  strcpy(ss[0],strtok(tmp," "));
2607  for (i=1;i<n;i++)
2608  strcpy(ss[i],strtok(NULL," "));
2609  }
2610 }
2611 
2612 /* extern "C"Dllexport */
2613 void getLexemes(char *s,char **ss)
2614 {
2615  char tmp[1024];
2616  int i,n;
2617 
2618  strcpy(tmp,s);
2619  n=CountLexemes(tmp);
2620  if (n>0)
2621  {
2622  strcpy(ss[0],strtok(tmp," "));
2623  for (i=1;i<n;i++)
2624  strcpy(ss[i],strtok(NULL," "));
2625  }
2626 }
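/* A small sketch of the helpers above: CountLexemes("1.0 2.5 0.3") returns
   3, after which getnLexemes(3,s,ss) copies "1.0", "2.5" and "0.3" into
   ss[0..2]. Each ss[i] must already point to storage of its own (40 bytes
   in the callers above), since strcpy() does not allocate. */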
2627 
2628 
2629 /***********************************************************/
2630 /* LearnFree */
2631 /* */
2632 /* frees memory allocated for learning */
2633 /* */
2634 /* Author: J.Schwindling 28-May-99 */
2635 /***********************************************************/
2636 
2637 /* extern "C"Dllexport */
2638 void LearnFree()
2639 {
2640  int il,in;
2641  if(LearnMemory==0) return;
2642  LearnMemory = 0;
2643  for(il=0; il<NET.Nlayer; il++)
2644  {
2645  for(in=0; in<NET.Nneur[il]; in++)
2646  {
2647  free(dir[il][in]);
2648  }
2649  free(dir[il]);
2650  }
2651  free(dir);
2652  if(BFGSMemory==0) return;
2653  BFGSMemory = 0;
2654  for(il=0; il<NET.Nweights; il++)
2655  {
2656  free(BFGSH[il]);
2657  }
2658  free(BFGSH);
2659  free(Gamma);
2660  free(delta);
2661 
2662 /* if(JacobianMemory == 0) return;
2663  JacobianMemory = 0;
2664  for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
2665  free(JacobianMatrix); */
2666 }
2667 
2668 
2669 /***********************************************************/
2670 /* LearnAlloc */
2671 /* */
2672 /* memory allocation for vectors and matrices used in */
2673 /* conjugate gradients or BFGS like methods */
2674 /* */
2675 /* return value (int) = error code: 0 no error */
2676 /* -111 no memory */
2677 /* */
2678 /* Author: J.Schwindling 20-Apr-99 */
2679 /* Modified: J.Schwindling 31-Jan-2000 error code */
2680 /***********************************************************/
2681 
2682 /* extern "C"Dllexport */
2683 int LearnAlloc()
2684 {
2685  int il,in,i;
2686  int Nweights = 0;
2687 
2688  if(LearnMemory != 0) LearnFree();
2689  LearnMemory = 1;
2690  dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
2691  if(dir == 0) return -111;
2692 
2693  for(il=0; il<NET.Nlayer; il++)
2694  {
2695  dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
2696  if(dir[il] == 0) return -111;
2697  for(in=0; in<NET.Nneur[il]; in++)
2698  {
2699  if(il==0)
2700  {
2701 /* TODO: understand implications of hard-coded 101 */
2702  dir[0][in] = (dbl *)
2703  malloc(101*sizeof(dbl));
2704  if(dir[0][in] == 0) return -111;
2705  }
2706  else
2707  {
2708  dir[il][in] = (dbl *)
2709  malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
2710  if(dir[il][in] == 0) return -111;
2711  Nweights += NET.Nneur[il-1]+1;
2712  }
2713  }
2714  }
2715  NET.Nweights = Nweights;
2716 
2717  if(BFGSMemory==0 && LEARN.Meth>= 6)
2718  {
2719  BFGSMemory = 1;
2720  Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
2721  delta = (dbl*) malloc(Nweights*sizeof(dbl));
2722  BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
2723  if(Gamma == 0 || delta == 0 || BFGSH == 0)
2724  return -111;
2725 
2726  for(i=0; i<Nweights; i++)
2727  {
2728  BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
2729  if(BFGSH[i] == 0) return -111;
2730  }
2731  }
2732 
2733 /* if(JacobianMemory==0)
2734  {
2735  JacobianMemory = 1;
2736  printf("JacobianMemory = %d\n",JacobianMemory);
2737  JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
2738  for(i=0; i<PAT.Npat[0]; i++)
2739  JacobianMatrix[i] =
2740  (dbl*) malloc(Nweights*sizeof(dbl));
2741  printf("end memory alloc\n");
2742  }
2743 
2744  if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
2745 
2746  return 0;
2747 }
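/* A rough memory budget, assuming 8-byte dbl: LearnAlloc() must follow
   MLP_SetNet() and precede training. For LEARN.Meth >= 6 the BFGS matrix
   costs Nweights^2 doubles; a 50-25-10-1 network has
   Nweights = 25*51 + 10*26 + 1*11 = 1546, so BFGSH alone takes
   1546*1546*8 bytes ~= 19 MB. */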
2748 
2749 
2750 /***********************************************************/
2751 /* MLP_PrFFun */
2752 /* */
2753 /* writes the MLP function to file as a fortran function */
2754 /* */
2755 /* inputs : char *filename = name of the output file */
2756 /* */
2757 /* return value (int) = 0: no error */
2758 /* -1: could not open file */
2759 /* */
2760 /* Author: J.Schwindling 20-Apr-99 */
2761 /* Modified: J.Schwindling 05-May-99 */
2762 /* add normalization of inputs */
2763 /* J.Schwindling 30-Nov-99 return code */
2764 /***********************************************************/
2765 
2766 /* extern "C"Dllexport */
2767 int MLP_PrFFun(char *filename)
2768 {
2769  int il,in,jn;
2770  FILE *W;
2771 
2772  W=fopen(filename,"w");
2773  if(W==0) return -1;
2774  fprintf(W," SUBROUTINE RNNFUN(rin,rout)\n");
2775  fprintf(W," DIMENSION RIN(%d)\n",NET.Nneur[0]);
2776  fprintf(W," DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
2777  fprintf(W,"C\n");
2778 
2779  for(in=0; in<NET.Nneur[0]; in++)
2780  {
2781  if(DIVERS.Norm==0)
2782  {
2783  fprintf(W," OUT%d = RIN(%d)\n",in+1,in+1);
2784  }
2785  else
2786  {
2787  fprintf(W," OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
2788  STAT.mean[in],STAT.sigma[in]);
2789  }
2790  }
2791  for(il=1; il<NET.Nlayer-1; il++)
2792  {
2793  fprintf(W,"C\n");
2794  fprintf(W,"C layer %d\n",il+1);
2795  for(in=0; in<NET.Nneur[il]; in++)
2796  {
2797  fprintf(W," RIN%d = %e\n",in+1,
2798  (double) NET.Weights[il][in][0]);
2799  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2800  fprintf(W," > +(%e) * OUT%d\n",
2801  (double) NET.Weights[il][in][jn],jn);
2802  }
2803  fprintf(W,"C\n");
2804  for(in=0; in<NET.Nneur[il]; in++)
2805  {
2806  if(NET.T_func[il][in]==0)
2807  {
2808  fprintf(W," OUT%d = 0\n",in+1);
2809  }
2810  else if(NET.T_func[il][in]==1)
2811  {
2812  fprintf(W," OUT%d = RIN%d\n",in+1,in+1);
2813  }
2814  else if(NET.T_func[il][in]==2)
2815  {
2816  fprintf(W," OUT%d = SIGMOID(RIN%d)\n",
2817  in+1,in+1);
2818  }
2819  }
2820  }
2821  il = NET.Nlayer-1;
2822  fprintf(W,"C\n");
2823  fprintf(W,"C layer %d\n",il+1);
2824  for(in=0; in<NET.Nneur[il]; in++)
2825  {
2826  fprintf(W," RIN%d = %e\n",in+1,
2827  (double) NET.Weights[il][in][0]);
2828  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2829  fprintf(W," > +(%e) * OUT%d\n",
2830  (double) NET.Weights[il][in][jn],jn);
2831  }
2832  fprintf(W,"C\n");
2833  for(in=0; in<NET.Nneur[il]; in++)
2834  {
2835  if(NET.T_func[il][in]==0)
2836  {
2837  fprintf(W," ROUT(%d) = 0\n",in+1);
2838  }
2839  else if(NET.T_func[il][in]==1)
2840  {
2841  fprintf(W," ROUT(%d) = RIN%d\n",in+1,in+1);
2842  }
2843  else if(NET.T_func[il][in]==2)
2844  {
2845  fprintf(W," ROUT(%d) = SIGMOID(RIN%d)\n",
2846  in+1,in+1);
2847  }
2848  }
2849 
2850  fprintf(W,"C\n");
2851  fprintf(W," END\n");
2852  fprintf(W," REAL FUNCTION SIGMOID(X)\n");
2853  fprintf(W," SIGMOID = 1./(1.+EXP(-X))\n");
2854  fprintf(W," END\n");
2855 
2856  fclose(W);
2857  return 0;
2858 }
2859 
2860 
2861 /***********************************************************/
2862 /* MLP_PrCFun */
2863 /* */
2864 /* writes the MLP function to file as a C function */
2865 /* */
2866 /* inputs : char *filename = name of the output file */
2867 /* */
2868 /* return value (int) = 0: no error */
2869 /* -1: could not open file */
2870 /* */
2871 /* Author: J.Schwindling 23-Apr-99 */
2872 /* Modified: J.Schwindling 30-Nov-99 return code */
2873 /***********************************************************/
2874 
2875 /* extern "C"Dllexport */
2876 int MLP_PrCFun(char *filename)
2877 {
2878  int il,in,jn;
2879  FILE *W;
2880 
2881  W=fopen(filename,"w");
2882  if(W==0) return -1;
2883 
2884  fprintf(W,"double sigmoid(double x)\n");
2885  fprintf(W,"{\n");
2886  fprintf(W,"return 1/(1+exp(-x));\n");
2887  fprintf(W,"}\n");
2888  fprintf(W,"void rnnfun(double *rin,double *rout)\n");
2889  fprintf(W,"{\n");
2890  fprintf(W," double out1[%d];\n",NET.Nneur[0]);
2891  fprintf(W," double out2[%d];\n",NET.Nneur[1]);
2892  if(NET.Nlayer>=3) fprintf(W," double out3[%d];\n",NET.Nneur[2]);
2893  if(NET.Nlayer>=4) fprintf(W," double out4[%d];\n",NET.Nneur[3]);
2894  fprintf(W,"\n");
2895 
2896  for(in=0; in<NET.Nneur[0]; in++)
2897  {
2898  if(DIVERS.Norm==0)
2899  {
2900  fprintf(W," out1[%d] = rin[%d];\n",in,in);
2901  }
2902  else
2903  {
2904  fprintf(W," out1[%d] = (rin[%d]-%e)/%e;\n",
2905  in,in,
2906  STAT.mean[in],STAT.sigma[in]);
2907  }
2908  }
2909 
2910  for(il=1; il<=NET.Nlayer-1; il++)
2911  {
2912  fprintf(W,"\n");
2913  fprintf(W,"/* layer %d */\n",il+1);
2914  for(in=0; in<NET.Nneur[il]; in++)
2915  {
2916  fprintf(W," out%d[%d] = %e\n",il+1,in,
2917  (double) NET.Weights[il][in][0]);
2918  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2919  fprintf(W," +(%e) * out%d[%d]\n",
2920  (double) NET.Weights[il][in][jn],il,jn-1);
2921  fprintf(W," ;\n");
2922  }
2923  fprintf(W,"\n");
2924  for(in=0; in<NET.Nneur[il]; in++)
2925  {
2926  if(NET.T_func[il][in]==0)
2927  {
2928  fprintf(W," out%d[%d] = 0;\n",il+1,in);
2929  }
2930  else if(NET.T_func[il][in]==1)
2931  {
2932  }
2933  else if(NET.T_func[il][in]==2)
2934  {
2935  fprintf(W," out%d[%d] = sigmoid(out%d[%d]);\n",
2936  il+1,in,il+1,in);
2937  }
2938  }
2939  }
2940  il = NET.Nlayer-1;
2941  for(in=0; in<NET.Nneur[il]; in++)
2942  {
2943  fprintf(W," rout[%d] = out%d[%d];\n",in,il+1,in);
2944  }
2945  fprintf(W,"}\n");
2946  fclose(W);
2947  return 0;
2948 }
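/* For a hypothetical 2-3-1 network the generated file starts like this
   (weight values abridged):

     double sigmoid(double x)
     {
     return 1/(1+exp(-x));
     }
     void rnnfun(double *rin,double *rout)
     {
      double out1[2];
      double out2[3];
      double out3[1];
      ...
     }

   Note that the generated source calls exp() but does not emit an
   #include <math.h>, so whoever compiles it must add the include (or a
   declaration) and link with -lm. */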
2949 
2950 
2951 /***********************************************************/
2952 /* SaveWeights */
2953 /* */
2954 /* writes the weights to file */
2955 /* */
2956 /* inputs : char *filename = name of the output file */
2957 /* int iepoch = epoch number */
2958 /* */
2959 /* return value (int): 0 if OK */
2960 /* -1 if file could not be opened */
2961 /* */
2962 /* Author: J.Schwindling 20-Apr-99 */
2963 /* Modified: J.Schwindling 11-Jun-99 */
2964 /* print net structure in header */
2965 /* Modified: J.Schwindling 05-Nov-99 */
2966 /* return error code */
2967 /***********************************************************/
2968 
2969 /* extern "C"Dllexport */
2970 int SaveWeights(char *filename, int iepoch)
2971 {
2972  FILE *W;
2973  int ilayer,ineur,i;
2974 
2975  W=fopen(filename,"w");
2976  if(W==0) return -1;
2977 
2978  fprintf(W,"# network structure ");
2979  for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
2980  {
2981  fprintf(W,"%d ",NET.Nneur[ilayer]);
2982  }
2983 
2984  fprintf(W,"\n %d\n",iepoch);
2985  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2986  {
2987  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2988  {
2989  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2990  {
2991  fprintf(W," %1.15e\n",
2992  (double) NET.Weights[ilayer][ineur][i]);
2993  }
2994  }
2995  }
2996  fclose(W);
2997  return 0;
2998 }
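/* Resulting file layout, sketched for a 2-3-1 network saved at epoch 42
   (weight values invented):

     # network structure 2 3 1 
      42
      1.234567890123457e-01
      ...

   One line per weight, layer by layer; for each neuron the bias weight
   (i=0) comes first. LoadWeights() below reads exactly this layout back. */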
2999 
3000 
3001 /***********************************************************/
3002 /* LoadWeights */
3003 /* */
3004 /* reads the weights from file */
3005 /* */
3006 /* input : char *filename = name of the input file */
3007 /* output : int *iepoch = epoch number */
3008 /* */
3009 /* return value (int): 0 if OK */
3010 /* -1 if file could not be opened */
3011 /* */
3012 /* Author: J.Schwindling 20-Apr-99 */
3013 /* Modified: J.Schwindling 11-Jun-99 */
3014 /* lines starting with # are skipped */
3015 /* Modified: J.Schwindling 05-Nov-99 */
3016 /* return error code */
3017 /***********************************************************/
3018 
3019 /* extern "C"Dllexport */
3020 int LoadWeights(char *filename, int *iepoch)
3021 {
3022  FILE *W;
3023  int ilayer,ineur,i;
3024  double p;
3025  char s[80];
3026 
3027  W=fopen(filename,"r");
3028  if(W==0) return -1;
3029  do
3030  {
3031  fgets(s,80,W);
3032  }
3033  while(*s == '#');
3034  sscanf(s," %d",iepoch);
3035  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
3036  {
3037  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
3038  {
3039  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
3040  {
3041  fscanf(W," %le",&p);
3042  NET.Weights[ilayer][ineur][i] = (dbl) p;
3043  }
3044  }
3045  }
3046 
3047  fclose(W);
3048  return 0;
3049 }
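/* A minimal round-trip sketch (assumption: the network structure is the
   same at save and load time -- the '#' header line is skipped, not
   checked; MLP_GEN_EXAMPLES is a hypothetical guard): */
#ifdef MLP_GEN_EXAMPLES
static int example_checkpoint_roundtrip(void)
{
 int iepoch = 0;
 if(SaveWeights("weights.out",100) != 0) return -1;
 if(LoadWeights("weights.out",&iepoch) != 0) return -1;
 /* iepoch is now 100 and NET.Weights holds the saved values */
 return 0;
}
#endif /* MLP_GEN_EXAMPLES */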
3050 
3051 
3052 /***********************************************************/
3053 /* AllocPatterns */
3054 /* */
3055 /* allocate memory for the examples */
3056 /* */
3057 /* input : int ifile = file number (0 or 1) */
3058 /* int npat = number of examples */
3059 /* int nin = number of inputs */
3060 /* int nout = number of outputs */
3061 /* int iadd = 0: new examples */
3062 /* 1: add to existing ones */
3063 /* */
3064 /* return value (int) = error code: 0 = no error */
3065 /* 1 = wrong file number */
3066 /* -111 = no memory */
3067 /* */
3068 /* Author: J.Schwindling 21-Apr-99 */
3069 /* Modified: J.Schwindling 26-Apr-99 */
3070 /* - frees memory if already booked and iadd=0 */
3071 /* (should remove need to call mlpfree) */
3072 /* - implement iadd = 1 */
3073 /***********************************************************/
3074 
3075 /* extern "C"Dllexport */int AllocPatterns(int ifile, int npat, int nin, int nout, int iadd)
3076 {
3077  int j;
3078  type_pat *tmp, *tmp3;
3079  type_pat **tmp2;
3080  int ntot;
3081 
3082  if(ifile>1 || ifile<0) return(1);
3083 /* scanf("%d",&j); */
3084  if(ExamplesMemory==0)
3085  {
3086  ExamplesMemory=1;
3087  PAT.Pond = (type_pat **) malloc(2*sizeof(dbl*));
3088  PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
3089  PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
3090  PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
3091  if(PAT.Pond == 0 || PAT.Rin == 0
3092  || PAT.Rans == 0 || PAT.vRin == 0) return -111;
3093  }
3094 
3095 
3096 /* if iadd=0, check that memory not already allocated. Otherwise free it */
3097  if(iadd==0 && PatMemory[ifile]!=0)
3098  {
3099  FreePatterns(ifile);
3100  }
3101 
3102 /* allocate memory and initialize ponderations */
3103  if(iadd==0 || PatMemory[ifile]==0)
3104  {
3105  PatMemory[ifile] = 1;
3106  PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
3107  if(PAT.Pond[ifile] == 0) return -111;
3108  for(j=0; j<npat; j++)
3109  PAT.Pond[ifile][j] = 1;
3110 
3111  PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3112  if(PAT.Rin[ifile] == 0) return -111;
3113  PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3114  if(PAT.Rans[ifile] == 0) return -111;
3115 
3116  PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
3117  sizeof(type_pat));
3118  if(PAT.vRin[ifile] == 0) return -111;
3119 
3120  for(j=0; j<npat; j++)
3121  {
3122  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3123  PAT.vRin[ifile][j*(nin+1)] = 1;
3124  }
3125  for(j=0; j<npat; j++)
3126  {
3127  PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
3128  if(PAT.Rans[ifile][j] == 0) return -111;
3129  }
3130  PAT.Npat[ifile] = npat;
3131 
3132  if(ifile==0)
3133  {
3134  ExamplesIndex = (int *) malloc(npat*sizeof(int));
3135  if(ExamplesIndex == 0) return -111;
3136  for(j=0; j<npat; j++) ExamplesIndex[j] = j;
3137  }
3138  }
3139  else /* add examples */
3140  {
3141  ntot = PAT.Npat[ifile]+npat;
3142 
3143 /* event weighting */
3144  tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
3145  if(tmp == 0) return -111;
3146 
3147  for(j=0; j<PAT.Npat[ifile]; j++)
3148  {
3149  tmp[j] = PAT.Pond[ifile][j];
3150  }
3151  for(j=PAT.Npat[ifile];j<ntot;j++)
3152  {
3153  tmp[j] = 1;
3154  }
3155  if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
3156  PAT.Pond[ifile] = tmp;
3157 
3158 /* examples */
3159 /* tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3160  for(j=0; j<PAT.Npat[ifile]; j++)
3161  {
3162  tmp2[j] = PAT.Rin[ifile][j];
3163  }
3164  for(j=PAT.Npat[ifile];j<ntot;j++)
3165  {
3166  tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
3167  }
3168  if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
3169  PAT.Rin[ifile] = tmp2; */
3170 
3171  tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
3172  if(tmp3 == 0) return -111;
3173 
3174  for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
3175  {
3176  tmp3[j] = PAT.vRin[ifile][j];
3177  }
3178  if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
3179  PAT.vRin[ifile] = tmp3;
3180  for(j=0; j<ntot; j++)
3181  {
3182  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3183  PAT.vRin[ifile][j*(nin+1)] = 1;
3184  }
3185 
3186  tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3187  if(tmp2 == 0) return -111;
3188  for(j=0; j<PAT.Npat[ifile]; j++)
3189  {
3190  tmp2[j] = PAT.Rans[ifile][j];
3191  }
3192  for(j=PAT.Npat[ifile];j<ntot;j++)
3193  {
3194  tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
3195  if(tmp2[j] == 0) return -111;
3196  }
3197  if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
3198  PAT.Rans[ifile] = tmp2;
3199  PAT.Npat[ifile] = ntot;
3200  PatMemory[ifile] = 1;
3201 
3202 /* indices */
3203  if(ifile==0)
3204  {
3205  free(ExamplesIndex);
3206  ExamplesIndex = (int *) malloc(ntot*sizeof(int));
3207  if(ExamplesIndex == 0) return -111;
3208  for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
3209  }
3210  }
3211 
3212  return 0;
3213 }
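/* Layout sketch: PAT.vRin[ifile] is one contiguous block of npat rows of
   nin+1 values. Element j*(nin+1) holds the constant 1 used as the bias
   input, and PAT.Rin[ifile][j] points just past it, so for example j:
   PAT.Rin[ifile][j][-1] == 1 and PAT.Rin[ifile][j][0..nin-1] are the
   inputs. This lets the matrix routines consume whole rows, bias
   included. */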
3214 
3215 
3216 /***********************************************************/
3217 /* FreePatterns */
3218 /* */
3219 /* frees memory for the examples */
3220 /* */
3221 /* input : int ifile = file number (0 or 1) */
3222 /* */
3223 /* return value (int) = error code: 0 = no error */
3224 /* 1 = wrong file number */
3225 /* 2 = no mem allocated */
3226 /* */
3227 /* Author: J.Schwindling 26-Apr-99 */
3228 /***********************************************************/
3229 
3230 /* extern "C"Dllexport */int FreePatterns(int ifile)
3231 {
3232  int i;
3233 
3234  if(ifile>1 || ifile<0) return 1;
3235 /* printf("%d %d \n",ifile,PatMemory[ifile]);*/
3236  if(PatMemory[ifile]==0) return 2;
3237 
3238  free(PAT.Pond[ifile]);
3239  for(i=0; i<PAT.Npat[ifile]; i++)
3240  {
3241 /* free(PAT.Rin[ifile][i]); */
3242  free(PAT.Rans[ifile][i]);
3243  }
3244  free(PAT.Rin[ifile]);
3245  free(PAT.Rans[ifile]);
3246  free(PAT.vRin[ifile]);
3247  PatMemory[ifile] = 0;
3248  PAT.Npat[ifile] = 0;
3249 
3250  return 0;
3251 }
3252 
3253 
3254 /***********************************************************/
3255 /* MLP_StatInputs */
3256 /* */
3257 /* compute statistics about the inputs: mean, RMS, min, max*/
3258 /* */
3259 /* inputs: int Nexamples = number of examples */
3260 /* int Ninputs = number of quantities */
3261 /* type_pat **inputs = input values */
3262 /* */
3263 /* outputs: dbl *mean = mean value */
3264 /* dbl *sigma = RMS */
3265 /* dbl *minimum = minimum */
3266 /* dbl *maximum = maximum */
3267 /* */
3268 /* return value (int): always = 0 */
3269 /* */
3270 /* Author: J.Schwindling 11-Oct-99 */
3271 /***********************************************************/
3272 
3273 int MLP_StatInputs(int Nexamples, int Ninputs, type_pat **inputs,
3274  dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)
3275 {
3276  dbl *fmean;
3277  int j, ipat, nmax;
3278 
3279 /* first compute a fast rough mean using the first 100 events */
3280  fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
3281  nmax = 100;
3282  if(Nexamples<100) nmax=Nexamples;
3283 
3284  for(j=0;j<Ninputs;j++)
3285  {
3286  fmean[j] = 0;
3287  for(ipat=0;ipat<nmax;ipat++)
3288  {
3289  fmean[j] += (dbl) inputs[ipat][j];
3290  }
3291  fmean[j] = fmean[j]/(dbl) nmax;
3292 
3293 /* now compute real mean and sigma, min and max */
3294  mean[j] = 0;
3295  sigma[j] = 0;
3296  minimum[j] = 99999;
3297  maximum[j] = -99999;
3298  for(ipat=0;ipat<Nexamples;ipat++)
3299  {
3300  mean[j] += (dbl) inputs[ipat][j];
3301  sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
3302  ((dbl) inputs[ipat][j]-fmean[j]);
3303  if((dbl) inputs[ipat][j] > maximum[j])
3304  maximum[j]=(dbl) inputs[ipat][j];
3305  if((dbl) inputs[ipat][j] < minimum[j])
3306  minimum[j]=(dbl) inputs[ipat][j];
3307  }
3308  mean[j] = mean[j]/(dbl) Nexamples;
3309  sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples -
3310  (mean[j]-fmean[j])*
3311  (mean[j]-fmean[j]));
3312  }
3313  free(fmean);
3314  return 0;
3315 }
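/* The two passes above implement the shifted-data variance formula: with
   shift f (the rough mean of the first 100 events),
   Var(x) = E[(x-f)^2] - (E[x]-f)^2,
   which matches the sqrt() argument in the code and is numerically safer
   than E[x^2] - E[x]^2 because f sits close to the true mean. */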
3316 
3317 /***********************************************************/
3318 /* MLP_PrintInputStat */
3319 /* */
3320 /* prints statistics about the inputs: mean, RMS, min, max */
3321 /* */
3322 /* return value (int) = error code: 0 = OK */
3323 /* -111 = could not */
3324 /* allocate memory */
3325 /* */
3326 /* Author: J.Schwindling 11-Oct-99 */
3327 /* Modified: J.Schwindling 31-Jan-2000: return value */
3328 /***********************************************************/
3329 
3330 int MLP_PrintInputStat()
3331 {
3332  int j;
3333  dbl *mean, *sigma, *minimum, *maximum;
3334 
3335 /* allocate memory */
3336  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3337  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3338  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3339  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3340 
3341  if(mean == 0 || sigma == 0 || minimum == 0
3342  || maximum == 0) return -111;
3343 
3344  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
3345  mean,sigma,minimum,maximum);
3346 
3347  printf("\t mean \t\t RMS \t\t min \t\t max\n");
3348  for(j=0;j<NET.Nneur[0];j++)
3349  {
3350  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3351  mean[j],sigma[j],minimum[j],maximum[j]);
3352  }
3353 
3354  free(mean);
3355  free(sigma);
3356  free(minimum);
3357  free(maximum);
3358  printf("\n");
3359  return 0;
3360 }
3361 
3362 
3363 /***********************************************************/
3364 /* NormalizeInputs */
3365 /* */
3366 /* normalize the inputs: I' = (I - <I>) / RMS(I) */
3367 /* */
3368 /* return value (int) = error code: 0 = OK */
3369 /* -111 = could not */
3370 /* allocate memory */
3371 /* */
3372 /* Author: J.Schwindling 04-May-1999 */
3373 /* Modified: J.Schwindling 31-Jan-2000: return value */
3374 /***********************************************************/
3375 
3376 /* extern "C"Dllexport */int NormalizeInputs()
3377 {
3378  int j, ipat;
3379  dbl *mean, *sigma, *minimum, *maximum;
3380 
3381 /* allocate memory */
3382  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3383  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3384  STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3385  STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3386  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3387  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3388 
3389  if(mean == 0 || sigma == 0 || minimum == 0
3390  || maximum == 0 || STAT.mean == 0 ||
3391  STAT.sigma == 0) return -111;
3392 
3393  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
3394  mean,sigma,minimum,maximum);
3395 
3396  if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
3397  for(j=0;j<NET.Nneur[0];j++)
3398  {
3399  if(NET.Debug>=1)
3400  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3401  mean[j],sigma[j],minimum[j],maximum[j]);
3402 
3403 /* store mean and sigma for output function */
3404  STAT.mean[j] = mean[j];
3405  STAT.sigma[j] = sigma[j];
3406 
3407 /* finally apply the normalization */
3408  for(ipat=0;ipat<PAT.Npat[0];ipat++)
3409  {
3410  PAT.Rin[0][ipat][j] =
3411  (PAT.Rin[0][ipat][j]-(float) mean[j])/
3412  (float) sigma[j];
3413  }
3414  for(ipat=0;ipat<PAT.Npat[1];ipat++)
3415  {
3416  PAT.Rin[1][ipat][j] =
3417  (PAT.Rin[1][ipat][j]-(float) mean[j])/
3418  (float) sigma[j];
3419  }
3420  }
3421 
3422  free(mean);
3423  free(sigma);
3424  free(minimum);
3425  free(maximum);
3426  if(NET.Debug>=1) printf("\n");
3427  return 0;
3428 }
3429 
3430 
3431 /***********************************************************/
3432 /* AllocNetwork */
3433 /* */
3434 /* memory allocation for weights, etc */
3435 /* */
3436 /* inputs: int Nlayer: number of layers */
3437 /* int *Neurons: number of neurons per layer */
3438 /* */
3439 /* return value (int): error = 0: no error */
3440 /* = -111: could not allocate mem*/
3441 /* */
3442 /* Author: J.Schwindling 28-Sep-99 */
3443 /***********************************************************/
3444 
3445 int AllocNetwork(int Nlayer, int *Neurons)
3446 {
3447  int i, j, k, l;
3448 
3449  if(NetMemory != 0) FreeNetwork();
3450  NetMemory = 1;
3451 
3452  NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
3453  if(NET.Nneur == 0) return -111;
3454 
3455  NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
3456  NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
3457  NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3458  NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3459  NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
3460  if(NET.T_func == 0 || NET.Deriv1 == 0
3461  || NET.Inn == 0 || NET.Outn == 0
3462  || NET.Delta == 0) return -111;
3463 
3464  for(i=0; i<Nlayer; i++)
3465  {
3466  NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
3467  NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3468  NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3469  NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3470  NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3471  if(NET.T_func[i] == 0 || NET.Deriv1[i] == 0
3472  || NET.Inn[i] == 0 || NET.Outn[i] == 0
3473  || NET.Delta[i] ==0 ) return -111;
3474  }
3475 
3476  NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3477  NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
3478  LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3479  LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3480  LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3481  if(NET.Weights == 0 || NET.vWeights == 0
3482  || LEARN.Odw == 0 || LEARN.ODeDw == 0
3483  || LEARN.DeDw == 0) return -111;
3484 
3485  for(i=1; i<Nlayer; i++)
3486  {
3487  k = Neurons[i-1]+1;
3488  NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
3489  sizeof(dbl));
3490  NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3491  LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3492  LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3493  LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3494  if(NET.Weights[i] == 0 || NET.vWeights[i] == 0
3495  || LEARN.Odw[i] == 0 || LEARN.ODeDw[i] == 0
3496  || LEARN.DeDw[i] == 0) return -111;
3497 
3498  for(j=0; j<Neurons[i]; j++)
3499  {
3500  NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
3501  LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3502  LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3503  LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3504  if(LEARN.Odw[i][j] == 0
3505  || LEARN.ODeDw[i][j] == 0
3506  || LEARN.DeDw[i][j] == 0) return -111;
3507 
3508  for(l=0; l<k; l++)
3509  {
3510  LEARN.Odw[i][j][l] = 0;
3511  LEARN.ODeDw[i][j][l] = 0;
3512  }
3513  }
3514  }
3515  return 0;
3516 }
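/* Indexing sketch: for layer i the weights are one contiguous row-major
   block; with k = Neurons[i-1]+1,
   NET.Weights[i][j][l] == NET.vWeights[i][j*k+l]
   and l = 0 is the bias. MLP_MatrixVectorBias() below assumes exactly this
   stride of m+1 values per neuron. */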
3517 
3518 
3519 /***********************************************************/
3520 /* FreeNetwork */
3521 /* */
3522 /* frees the memory allocated for the network */
3523 /* */
3524 /* Author: J.Schwindling 06-Oct-99 */
3525 /***********************************************************/
3526 
3527 void FreeNetwork()
3528 {
3529  int i, j;
3530  for(i=1; i<NET.Nlayer; i++)
3531  {
3532  for(j=0; j<NET.Nneur[i]; j++)
3533  {
3534 /* free(NET.Weights[i][j]); */
3535  free(LEARN.Odw[i][j]);
3536  free(LEARN.ODeDw[i][j]);
3537  free(LEARN.DeDw[i][j]);
3538  }
3539  free(NET.vWeights[i]);
3540  free(NET.Weights[i]);
3541  free(LEARN.Odw[i]);
3542  free(LEARN.ODeDw[i]);
3543  free(LEARN.DeDw[i]);
3544  }
3545  free(NET.Weights);
3546  free(LEARN.Odw);
3547  free(LEARN.ODeDw);
3548  free(LEARN.DeDw);
3549 
3550  free(NET.Nneur);
3551 
3552  for(i=0; i<NET.Nlayer; i++)
3553  {
3554  free(NET.T_func[i]);
3555  free(NET.Deriv1[i]);
3556  free(NET.Inn[i]);
3557  free(NET.Outn[i]);
3558  free(NET.Delta[i]);
3559  }
3560  free(NET.T_func);
3561  free(NET.Deriv1);
3562  free(NET.Inn);
3563  free(NET.Outn);
3564  free(NET.Delta);
3565 
3566  NetMemory = 0;
3567 }
3568 
3569 /***********************************************************/
3570 /* GetNetStructure */
3571 /* */
3572 /* given a string like "3,4,1" returns the network */
3573 /* structure */
3574 /* */
3575 /* inputs: char *s: input string */
3576 /* */
3577 /* outputs: int *Nlayer: number of layers */
3578 /* int *Nneur: number of neurons per layer */
3579 /* */
3580 /* return value (int): error = 0: no error */
3581 /* = -1: s is empty */
3582 /* = -2: s is too long */
3583 /* = -3: too many layers */
3584 /* */
3585 /* Author: J.Schwindling 04-Oct-99 */
3586 /***********************************************************/
3587 
3588 int GetNetStructure(char *s, int *Nlayer, int *Nneur)
3589 {
3590  int i=0;
3591  char tmp[1024];
3592 
3593  if(strlen(s)==0) return -1;
3594  if(strlen(s)>=1024) return -2; /* tmp[1024] must also hold the null */
3595 
3596  strcpy(tmp,s);
3597  if (strtok(tmp,","))
3598  {
3599  i=1;
3600  while (strtok(NULL,",")) i++;
3601  }
3602  *Nlayer = i;
3603  if(i > NLMAX) return -3;
3604 
3605  strcpy(tmp,s);
3606  if (*Nlayer>0)
3607  {
3608  sscanf(strtok(tmp,","),"%d",&(Nneur[0]));
3609  for (i=1;i<*Nlayer;i++)
3610  sscanf(strtok(NULL,","),"%d",&(Nneur[i]));
3611  }
3612 
3613  return 0;
3614 }
3615 
3616 
3617 /***********************************************************/
3618 /* MLP_SetNet */
3619 /* */
3620 /* to set the structure of a neural network */
3621 /* inputs: int *nl = number of layers */
3622 /* int *nn = number of neurons */
3623 /* */
3624 /* return value (int) = error value: */
3625 /* 0: no error */
3626 /* 1: N layers > NLMAX */
3627 /* 2: N layers < 2 */
3628 /* -111: Error allocating memory */
3629 /* */
3630 /* Author: J.Schwindling 14-Apr-99 */
3631 /* Modified: J.Schwindling 05-Oct-99 allocate memory */
3632 /* Modified: J.Schwindling 29-Nov-99 LearnFree, LearnAlloc */
3633 /***********************************************************/
3634 
3635 int MLP_SetNet(int *nl, int *nn)
3636 {
3637  int il,ierr;
3638 
3639  if((*nl)>NLMAX) return(1);
3640  if((*nl)<2) return(2);
3641 
3642 /* LearnFree(); */
3643 /* allocate memory */
3644  ierr = AllocNetwork(*nl,nn);
3645  if(ierr != 0) return ierr;
3646 
3647 /* set number of layers */
3648  NET.Nlayer = (int) *nl;
3649 
3650 /* set number of neurons */
3651  for(il=0; il<NET.Nlayer; il++) {
3652  NET.Nneur[il] = nn[il];
3653  }
3654 
3655 /* set transfer functions */
3656  SetDefaultFuncs();
3657 /* LearnAlloc(); */
3658 
3659  return(0);
3660 }
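/* A sketch of the two-call sequence that ReadPatterns() performs for the
   NNEU datacard (MLP_GEN_EXAMPLES is a hypothetical guard): */
#ifdef MLP_GEN_EXAMPLES
static int example_define_network(void)
{
 int nlayer, nneur[NLMAX];

 if(GetNetStructure("2,10,1",&nlayer,nneur) != 0) return -1;
 /* nlayer == 3, nneur == {2,10,1} */
 return MLP_SetNet(&nlayer,nneur);
}
#endif /* MLP_GEN_EXAMPLES */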
3661 
3662 
3663 /***********************************************************/
3664 /* MLP_MatrixVectorBias */
3665 /* */
3666 /* computes a Matrix-Vector product */
3667 /* r[j] = M[j][0] + Sum_i M[j][i] v[i] */
3668 /* */
3669 /* inputs: dbl *M = matrix (n lines, m+1 columns) */
3670 /* dbl *v = vector (dimension m) */
3671 /* dbl *r = resulting vector (dimension n) */
3672 /* int n */
3673 /* int m */
3674 /* */
3675 /* Author: J.Schwindling 24-Jan-00 */
3676 /***********************************************************/
3677 
3678 void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
3679 {
3680  int i,j;
3681  register dbl a1, a2, a3, a4, c, d;
3682  dbl *pM1 = M;
3683  dbl *pM2 = &(M[m+1]);
3684  dbl *pM3 = &(M[2*(m+1)]);
3685  dbl *pM4 = &(M[3*(m+1)]);
3686  dbl *pr = r;
3687  int mp1 = m+1;
3688 
3689  for(i=0; i<n-3;
3690  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3691  pr+=4)
3692  {
3693  a1 = *pM1;
3694  a2 = *pM2;
3695  a3 = *pM3;
3696  a4 = *pM4;
3697  pM1++; pM2++; pM3++; pM4++;
3698  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3699  {
3700  c = v[j];
3701  d = v[j+1];
3702  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3703  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3704  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3705  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3706  }
3707  for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3708  {
3709  c = v[j];
3710  a1 = a1 + *pM1 * c;
3711  a2 = a2 + *pM2 * c;
3712  a3 = a3 + *pM3 * c;
3713  a4 = a4 + *pM4 * c;
3714  }
3715  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3716  }
3717  for(i=i; i<n; i++)
3718  {
3719  pM1 = &(M[i*(m+1)]);
3720  a1 = *pM1;
3721  pM1++;
3722  for(j=0; j<m; j++, pM1++)
3723  {
3724  a1 = a1 + *pM1 * v[j];
3725  }
3726  r[i] = a1;
3727  }
3728 }
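/* The loop nest above is a 4-row by 2-column unrolled version of this
   reference form (a sketch of the semantics, not the code that runs):

     for(i=0; i<n; i++)
     {
      r[i] = M[i*(m+1)];
      for(j=0; j<m; j++) r[i] += M[i*(m+1)+1+j] * v[j];
     }
*/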
3729 /***********************************************************/
3730 /* MLP_MatrixVector */
3731 /* */
3732 /* computes a Matrix-Vector product */
3733 /* r[j] = Sum_i M[j][i] v[i] */
3734 /* */
3735 /* inputs: dbl *M = matrix (n lines, m+1 columns) */
3736 /* dbl *v = vector (dimension m) */
3737 /* dbl *r = resulting vector (dimension n) */
3738 /* int n */
3739 /* int m */
3740 /* */
3741 /* Author: J.Schwindling 24-Jan-00 */
3742 /***********************************************************/
3743 
3744 void MLP_MatrixVector(dbl *M, type_pat *v, dbl *r, int n, int m)
3745 {
3746  int i,j;
3747  register dbl a1, a2, a3, a4, c, d;
3748  dbl *pM1 = M;
3749  dbl *pM2 = &(M[m]);
3750  dbl *pM3 = &(M[2*m]);
3751  dbl *pM4 = &(M[3*m]);
3752  dbl *pr = r;
3753  int mp1 = m;
3754 
3755  for(i=0; i<n-3;
3756  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3757  pr+=4)
3758  {
3759  a1 = 0;
3760  a2 = 0;
3761  a3 = 0;
3762  a4 = 0;
3763  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3764  {
3765  c = v[j];
3766  d = v[j+1];
3767  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3768  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3769  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3770  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3771  }
3772  for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3773  {
3774  c = v[j];
3775  a1 = a1 + *pM1 * c;
3776  a2 = a2 + *pM2 * c;
3777  a3 = a3 + *pM3 * c;
3778  a4 = a4 + *pM4 * c;
3779  }
3780  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3781  }
3782  for(i=i; i<n; i++)
3783  {
3784  pM1 = &(M[i*m]);
3785  a1 = 0;
3786  for(j=0; j<m; j++, pM1++)
3787  {
3788  a1 = a1 + *pM1 * v[j];
3789  }
3790  r[i] = a1;
3791  }
3792 }
3793 
3794 
3795 /***********************************************************/
3796 /* MLP_MM2rows */
3797 /* */
3798 /* computes a Matrix-Matrix product, with the first matrix */
3799 /* having 2 rows */
3800 /* */
3801 /* inputs: dbl *c = resulting matrix (Nj * Nk) */
3802 /* dbl *a = first matrix (Ni * Nj) */
3803 /* dbl *b = second matrix (Nj * Nk) */
3804 /* int Ni */
3805 /* int Nj */
3806 /* int Nk */
3807 /* int NaOffs */
3808 /* int NbOffs */
3809 /* */
3810 /* Author: J.Schwindling 24-Jan-00 */
3811 /***********************************************************/
3812 
3813 void MLP_MM2rows(dbl *c, type_pat *a, dbl *b,
3814  int Ni, int Nj, int Nk, int NaOffs, int NbOffs)
3815 {
3816 //int i,j,k;
3817 int j,k;
3818 dbl s00,s01,s10,s11;
3819 type_pat *pa0,*pa1;
3820 dbl *pb0,*pb1,*pc0,*pc1;
3821 
3822  for (j=0; j<=Nj-2; j+=2)
3823  {
3824  pc0 = c+j;
3825  pc1 = c+j+Nj;
3826  s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;
3827 
3828  for (k=0,pb0=b+k+NbOffs*j,
3829  pb1=b+k+NbOffs*(j+1),
3830  pa0=a+k,
3831  pa1=a+k+NaOffs;
3832  k<Nk;
3833  k++,pa0++,
3834  pa1++,
3835  pb0++,
3836  pb1++)
3837  {
3838  s00 += (*pa0)*(*pb0);
3839  s01 += (*pa0)*(*pb1);
3840  s10 += (*pa1)*(*pb0);
3841  s11 += (*pa1)*(*pb1);
3842  }
3843  *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
3844  }
3845  for (j=j; j<Nj; j++)
3846  {
3847  pc0 = c+j;
3848  pc1 = c+j+Nj;
3849  s00 = 0.0; s10 = 0.0;
3850  for (k=0,pb0=b+k+NbOffs*j,
3851  pa0=a+k,
3852  pa1=a+k+NaOffs;
3853  k<Nk;
3854  k++,pa0++,
3855  pa1++,
3856  pb0++)
3857  {
3858  s00 += (*pa0)*(*pb0);
3859  s10 += (*pa1)*(*pb0);
3860  }
3861  *pc0 = s00; *pc1 = s10;
3862  }
3863 }
3864 
3865 #ifdef __cplusplus
3866 } // extern "C"
3867 #endif