mlp_gen.cc
#include <cmath>
#include <cstring>
#include <cstdio>
#include <cstdlib>

#include "mlp_gen.h"
#include "mlp_sigmoide.h"

#ifdef __cplusplus
extern "C" {
#endif

#define NPMAX 100000
#define NLMAX 1000

struct net_ net_ MLP_HIDDEN;
struct learn_ learn_ MLP_HIDDEN;
struct pat_ pat_ MLP_HIDDEN;
struct divers_ divers_ MLP_HIDDEN;
struct stat_ stat_ MLP_HIDDEN;

int MessLang = 0;
int OutputWeights = 100;
int WeightsMemory = 0;
int PatMemory[2] = {0,0};
int BFGSMemory = 0;
int LearnMemory = 0;
int NetMemory = 0;
float MLPfitVersion = (float) 1.40;
dbl LastAlpha = 0;

/* global work areas used by the learning methods below */
dbl ***dir;
dbl *delta, **BFGSH;
dbl *Gamma;
int *ExamplesIndex;

/* The following lines are needed to use the dgels routine from the LAPACK
   library in MLP_ResLin() */

/* Subroutine */ int dgels_(char *trans, int *m, int *n, int *nrhs,
                            double *a, int *lda, double *b, int *ldb,
                            double *work, int *lwork, int *info);

/***********************************************************/
/* MLP_Out                                                 */
/*                                                         */
/* computes the output of the Neural Network               */
/* inputs:  double *rrin = inputs of the MLP               */
/* outputs: double *rrout = outputs of the MLP             */
/*                                                         */
/* Author: J.Schwindling   29-Mar-99                       */
/* Modified: J.Schwindling 16-Jul-99 unrolled loops        */
/* Modified: J.Schwindling 20-Jul-99 fast sigmoid          */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out(type_pat *rrin, dbl *rrout)
{
  int i, il, in, j, m, mp1;
  dbl **deriv1;

/* input layer */

  deriv1 = NET.Deriv1;
  m = NET.Nneur[0]%4;
  if(m==0) goto L10;
  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
  mp1 = m+1;
  for(i=mp1; i<=NET.Nneur[0]; i+=4)
  {
    NET.Outn[0][i-1] = rrin[i-1];
    NET.Outn[0][i] = rrin[i];
    NET.Outn[0][i+1] = rrin[i+1];
    NET.Outn[0][i+2] = rrin[i+2];
  }

/* hidden and output layers */

  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                       NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);

  for(il=2; il<NET.Nlayer; il++)
  {
    MLP_vSigmoideDeriv(NET.Outn[il-1],
                       deriv1[il-1],NET.Nneur[il-1]);
    MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                         NET.Outn[il],NET.Nneur[il],
                         NET.Nneur[il-1]);
  }
  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
  {
    deriv1[NET.Nlayer-1][in] = 1;
  }
}
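
/* Note on the loop structure above: the input copy is unrolled by hand.
   The first m = Nneur[0]%4 components are copied one at a time, then the
   remainder four at a time (indices i-1 ... i+2 for i = m+1, m+5, ...).
   The same prologue/stride-4 pattern is used for the dot products in
   MLP_Out_T and MLP_Test_MM below. */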


/***********************************************************/
/* MLP_Out_T                                               */
/*                                                         */
/* computes the output of the Neural Network when called   */
/* from MLP_Test                                           */
/* inputs:  double *rrin = inputs of the MLP               */
/*                                                         */
/* Author: J.Schwindling   23-Jul-1999                     */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out_T(type_pat *rrin)
{
  int i, il, in, j, ilm1, m, mp1;
  dbl a;

/* input layer */

  m = NET.Nneur[0]%4;
  if(m==0) goto L10;
  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
  mp1 = m+1;
  for(i=mp1; i<=NET.Nneur[0]; i+=4)
  {
    NET.Outn[0][i-1] = rrin[i-1];
    NET.Outn[0][i] = rrin[i];
    NET.Outn[0][i+1] = rrin[i+1];
    NET.Outn[0][i+2] = rrin[i+2];
  }

/* hidden and output layers */

/* for(in=0;in<NET.Nneur[0]; in++) printf("%e %e\n",
     NET.Outn[0][in],NET.Weights[1][0][in]);
   printf("\n"); */
  for(il=1; il<NET.Nlayer; il++)
  {
    ilm1 = il-1;
    m = NET.Nneur[ilm1]%4;
    for(in=0; in<NET.Nneur[il]; in++)
    {
      a = NET.Weights[il][in][0];
      if(m==0) goto L20;
      for(j=1;j<=m;j++) a +=
        NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
L20:
      mp1 = m+1;
      for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
      {
        a +=
          NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
          NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
          NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
          NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
      }
      switch(NET.T_func[il][in])
      {
        case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
          break;
        case 1: NET.Outn[il][in] = a;
          break;
        case 0: NET.Outn[il][in] = 0;
          break;
      }
    }
  }
}


/***********************************************************/
/* MLP_Out2                                                */
/*                                                         */
/* computes the output of the Neural Network               */
/* inputs:  double *rrin = inputs of the MLP               */
/* outputs: double *rrout = outputs of the MLP             */
/*                                                         */
/* Author: J.Schwindling   29-Mar-99                       */
/* Modified: J.Schwindling 16-Jul-99 unrolled loops        */
/* Modified: J.Schwindling 20-Jul-99 fast sigmoid          */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out2(type_pat *rrin)
{
  int il, in, m, mp1;
  int i;
  dbl **rrout, **deriv1;
  dbl *prrout;
  type_pat *prrin;
  int nhid = NET.Nneur[1];
  int nin = NET.Nneur[0];

  rrout = NET.Outn;
  deriv1 = NET.Deriv1;

  m = NET.Nneur[0]%4;
  if(m==0) goto L10;
  if(m==1)
  {
    rrout[0][0] = rrin[1];
    goto L10;
  }
  else if(m==2)
  {
    rrout[0][0] = rrin[1];
    rrout[0][1] = rrin[2];
    goto L10;
  }
  else if(m==3)
  {
    rrout[0][0] = rrin[1];
    rrout[0][1] = rrin[2];
    rrout[0][2] = rrin[3];
    goto L10;
  }
L10:
  mp1 = m+1;
  prrout = &(rrout[0][mp1]);
  prrin = &(rrin[mp1+1]);
  for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
  {
    *(prrout-1) = *(prrin-1);
    *prrout = *prrin;
    *(prrout+1) = *(prrin+1);
    *(prrout+2) = *(prrin+2);
  }

/* input layer */

  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                       NET.Outn[1],nhid,nin);


/* hidden and output layers */

  for(il=2; il<NET.Nlayer; il++)
  {
    MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
    MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                         NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
  }
  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
    deriv1[NET.Nlayer-1][in] = 1;
}


/***********************************************************/
/* MLP_Test_MM                                             */
/*                                                         */
/* computes the MLP error function using matrix-matrix     */
/* multiplications                                         */
/* inputs:  int ifile = file number: 0=learn, 1=test       */
/*          dbl *tmp = a pointer to an array of size       */
/*                     2 x number of neurons in first      */
/*                     hidden layer                        */
/*                                                         */
/* return value (dbl) = error value                        */
/*                                                         */
/* Author: J.Schwindling   25-Jan-2000                     */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Test_MM(int ifile, dbl *tmp)
{
  int ipat;
  int npat = PAT.Npat[ifile];
  int nhid = NET.Nneur[1];
  int nin = NET.Nneur[0];
  int jpat, j, il, ilm1, m, in, mp1;
  dbl err, a, rrans;
  dbl *pweights, *ptmp;

  err = 0;
  for(ipat=0; ipat<npat-1; ipat+=2)
  {
    MLP_MM2rows(tmp, &(PAT.vRin[ifile][ipat*(nin+1)]),
                NET.vWeights[1], 2, nhid, nin+1,
                nin+1, nin+1);

    switch(NET.T_func[1][0])
    {
      case 2:
        ptmp = &(tmp[0]);
        MLP_vSigmoide(ptmp,2*nhid);
        break;

      case 1:
        break;

      case 0:
        for(jpat=0; jpat<2; jpat++)
        {
          for(j=0; j<nhid; j++)
          {
            tmp[j+jpat*nhid] = 0;
          }
        }
        break;
    }

    for(jpat=0; jpat<2; jpat++)
    {
      for(in=0; in<nhid; in++)
      {
        NET.Outn[1][in] = tmp[jpat*nhid+in];
      }
      for(il=2; il<NET.Nlayer; il++)
      {
        ilm1 = il-1;
        m = NET.Nneur[ilm1]%4;
        for(in=0; in<NET.Nneur[il]; in++)
        {
          pweights = &(NET.Weights[il][in][0]);
          a = *pweights;
          pweights++;
          if(m==0) goto L20;
          for(j=1;j<=m;j++,pweights++) a +=
            (*pweights)*NET.Outn[ilm1][j-1];
L20:
          mp1 = m+1;
          for(j=mp1; j<=NET.Nneur[ilm1];
              j+=4, pweights+=4)
          {
            a +=
              *(pweights+3)*NET.Outn[ilm1][j+2]+
              *(pweights+2)*NET.Outn[ilm1][j+1]+
              *(pweights+1)*NET.Outn[ilm1][j]+
              *(pweights  )*NET.Outn[ilm1][j-1];
          }
          switch(NET.T_func[il][in])
          {
            case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
              break;
            case 1: NET.Outn[il][in] = a;
              break;
            case 0: NET.Outn[il][in] = 0;
              break;
          }
        }
        if(il == NET.Nlayer-1)
        {
          for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
          {
            rrans = (dbl) PAT.Rans[ifile][ipat+jpat][in];
            err += (rrans-NET.Outn[NET.Nlayer-1][in])*
                   (rrans-NET.Outn[NET.Nlayer-1][in])*
                   PAT.Pond[ifile][ipat+jpat];
          }
        }
      }
    }
  }

/* case where the number of patterns is odd */
  for(/*'ipat' set above*/; ipat<npat; ipat++)
  {
    MLP_MatrixVector(NET.vWeights[1],
                     &(PAT.vRin[ifile][ipat*(nin+1)]),tmp,
                     nhid,nin+1);

    switch(NET.T_func[1][0])
    {
      case 2:
        ptmp = &(tmp[0]);
        MLP_vSigmoide(ptmp,2*nhid);
        break;

      case 1:
        break;

      case 0:
        for(j=0; j<nhid; j++)
        {
          tmp[j] = 0;
        }
        break;
    }

    for(in=0; in<nhid; in++)
    {
      NET.Outn[1][in] = tmp[in];
    }
    for(il=2; il<NET.Nlayer; il++)
    {
      ilm1 = il-1;
      m = NET.Nneur[ilm1]%4;
      for(in=0; in<NET.Nneur[il]; in++)
      {
        pweights = &(NET.Weights[il][in][0]);
        a = *pweights;
        pweights++;
        if(m==0) goto L25;
        for(j=1;j<=m;j++,pweights++) a +=
          (*pweights)*NET.Outn[ilm1][j-1];
L25:
        mp1 = m+1;
        for(j=mp1; j<=NET.Nneur[ilm1];
            j+=4, pweights+=4)
        {
          a +=
            *(pweights+3)*NET.Outn[ilm1][j+2]+
            *(pweights+2)*NET.Outn[ilm1][j+1]+
            *(pweights+1)*NET.Outn[ilm1][j]+
            *(pweights  )*NET.Outn[ilm1][j-1];
        }
        switch(NET.T_func[il][in])
        {
          case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
            break;
          case 1: NET.Outn[il][in] = a;
            break;
          case 0: NET.Outn[il][in] = 0;
            break;
        }
      }
      if(il == NET.Nlayer-1)
      {
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
        {
          rrans = (dbl) PAT.Rans[ifile][ipat][in];
          err += (rrans-NET.Outn[NET.Nlayer-1][in])*
                 (rrans-NET.Outn[NET.Nlayer-1][in])*
                 PAT.Pond[ifile][ipat];
        }
      }
    }
  }
  return(err);
}


/***********************************************************/
/* MLP_Test                                                */
/*                                                         */
/* computes the MLP error function                         */
/* inputs:  int ifile = file number: 0=learn, 1=test       */
/*          int regul = 0: no regularisation term          */
/*                      1: regularisation term             */
/*                         (for hybrid learning method)    */
/*                                                         */
/* return value (dbl) = error value                        */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Test(int ifile, int regul)
{
  dbl err, rrans;
  int in, jn, ipat, ipati;

  dbl *tmp;

  tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
  if(tmp == nullptr) /* not enough memory */
  {
    printf("not enough memory in MLP_Test\n");
    err = 0;
    for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
    {
      if(ifile==0)
      {
        ipati = ExamplesIndex[ipat];
      }
      else
      {
        ipati = ipat;
      }
      MLP_Out_T(PAT.Rin[ifile][ipati]);
      for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
      {
        rrans = (dbl) PAT.Rans[ifile][ipati][in];
        err += (rrans-NET.Outn[NET.Nlayer-1][in])*
               (rrans-NET.Outn[NET.Nlayer-1][in])*
               PAT.Pond[ifile][ipati];
      }
    }

    if(regul>=1)
    {
      for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
        for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
        {
          err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
                 NET.Weights[NET.Nlayer-1][in][jn];
        }
    }
    free(tmp);
    return(err);
  }
  else /* computation using matrix - matrix multiply */
  {
    err = MLP_Test_MM(ifile, tmp);
    if(regul>=1)
    {
      for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
        for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
        {
          err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
                 NET.Weights[NET.Nlayer-1][in][jn];
        }
    }
    free(tmp);
    return(err);
  }
}
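
/* With regul = 1 the value returned above is the weighted quadratic error
   plus a ridge penalty restricted to the output-layer weights:
     E_reg = sum_p pond_p * sum_in (t_in - o_in)^2
             + Alambda * sum_{in,jn} Weights[Nlayer-1][in][jn]^2
   where Alambda is computed by SetLambda() further down in this file. */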


/***********************************************************/
/* MLP_Stochastic                                          */
/*                                                         */
/* one epoch of MLP stochastic training                    */
/* (optimized for speed)                                   */
/*                                                         */
/* Author: J.Schwindling   08-Jul-99                       */
/* Modified: J.Schwindling 20-Jul-99 remove unused cases   */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Stochastic()
{
  int ipat, ii, inm1;
  dbl err = 0;
  int il, in1, in, itest2;
  dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
  dbl eta, eps;
  dbl a, b, dd, a1, a2, a3, a4;
  dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
  dbl ***weights;

  if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
  weights = NET.Weights;
/* shuffle patterns */
  ShuffleExamples(PAT.Npat[0],ExamplesIndex);

/* reduce learning parameter */
  if(LEARN.Decay<1) EtaDecay();

  eta = -LEARN.eta;
  eps = LEARN.epsilon;

/* loop on the examples */
  for(ipat=0;ipat<PAT.Npat[0];ipat++)
  {
    ii = ExamplesIndex[ipat];
    pond = PAT.Pond[0][ii];

    MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)]));

/* next lines are equivalent to DeDwSum */
    for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
    {
      deriv = NET.Deriv1[NET.Nlayer-1][in];
      a = (dbl) PAT.Rans[0][ii][in];
      b = NET.Outn[NET.Nlayer-1][in]-a;
      err += b*b*pond;
      NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
    }

    for(il=NET.Nlayer-2; il>0; il--)
    {
      dd = NET.Delta[il+1][0];
      for(in=0; in<NET.Nneur[il]-3; in+=4)
      {
        deriv1 = NET.Deriv1[il][in];
        deriv2 = NET.Deriv1[il][in+1];
        deriv3 = NET.Deriv1[il][in+2];
        deriv4 = NET.Deriv1[il][in+3];
        itest2 = (NET.Nneur[il+1]==1);
        a1 = dd*weights[il+1][0][in+1];
        a2 = dd*weights[il+1][0][in+2];
        a3 = dd*weights[il+1][0][in+3];
        a4 = dd*weights[il+1][0][in+4];
        if(itest2) goto L1;
        pdelta = &(NET.Delta[il+1][1]);
        for(in1=1; in1<NET.Nneur[il+1];
            in1++, pdelta++)
        {
          a1 += *pdelta * weights[il+1][in1][in+1];
          a2 += *pdelta * weights[il+1][in1][in+2];
          a3 += *pdelta * weights[il+1][in1][in+3];
          a4 += *pdelta * weights[il+1][in1][in+4];
        }
L1:     NET.Delta[il][in] = a1*deriv1;
        NET.Delta[il][in+1] = a2*deriv2;
        NET.Delta[il][in+2] = a3*deriv3;
        NET.Delta[il][in+3] = a4*deriv4;
      }
      for(/*'in' set above*/; in<NET.Nneur[il]; in++)
      {
        deriv = NET.Deriv1[il][in];
        itest2 = (NET.Nneur[il+1]==1);
        a = dd*weights[il+1][0][in+1];
        if(itest2) goto L2;
        pdelta = &(NET.Delta[il+1][1]);
        for(in1=1; in1<NET.Nneur[il+1];
            in1++, pdelta++)
        {
          a += *pdelta *
               weights[il+1][in1][in+1];
        }
L2:     NET.Delta[il][in] = a*deriv;
      }

    } /* end of loop on layers */


/* update the weights */
    if(eps==0)
    {
      for(il=1; il<NET.Nlayer; il++)
      {
        inm1 = NET.Nneur[il-1];
        for(in=0; in<NET.Nneur[il]-3; in+=4)
        {
          a1 = NET.Delta[il][in];
          a2 = NET.Delta[il][in+1];
          a3 = NET.Delta[il][in+2];
          a4 = NET.Delta[il][in+3];
          pout = &(NET.Outn[il-1][0]);
          weights[il][in][0] += a1;
          weights[il][in+1][0] += a2;
          weights[il][in+2][0] += a3;
          weights[il][in+3][0] += a4;
          weights[il][in][1] += a1* (*pout);
          weights[il][in+1][1] += a2* (*pout);
          weights[il][in+2][1] += a3* (*pout);
          weights[il][in+3][1] += a4* (*pout);
          pout++;
          pw1 = &(weights[il][in][2]);
          pw2 = &(weights[il][in+1][2]);
          pw3 = &(weights[il][in+2][2]);
          pw4 = &(weights[il][in+3][2]);
          for(in1=2; in1<=inm1;
              ++in1, ++pout, ++pw1, ++pw2,
              ++pw3, ++pw4)
          {
            *pw1 += a1 * *pout;
            *pw2 += a2 * *pout;
            *pw3 += a3 * *pout;
            *pw4 += a4 * *pout;
          }
        }
        for(/*'in' set above*/; in<NET.Nneur[il]; in++)
        {
          a1 = NET.Delta[il][in];
          pout = &(NET.Outn[il-1][0]);
          weights[il][in][0] += a1;
          weights[il][in][1] += a1* (*pout);
          pout++;
          pw1 = &(weights[il][in][2]);
          for(in1=2; in1<=inm1;
              ++in1, ++pout, ++pw1)
          {
            *pw1 += a1 * *pout;
          }
        }
      }
    }
    else
    {
      for(il=1; il<NET.Nlayer; il++)
      {
        for(in=0; in<NET.Nneur[il]; in++)
        {

          a = NET.Delta[il][in];
          LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
          NET.Weights[il][in][0] += LEARN.Odw[il][in][0];

          b = a*NET.Outn[il-1][0];
          LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
          NET.Weights[il][in][1] += LEARN.Odw[il][in][1];

          for(in1=2; in1<=NET.Nneur[il-1]; in1++)
          {
            b = a*NET.Outn[il-1][in1-1];
            LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
            NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
          }
        }
      }
    }

  } /* end of loop on examples */
  return(err);
}
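
/* The stochastic update above is, for each presented example,
     Dw(t) = -eta * dE_p/dw + epsilon * Dw(t-1)
   The -eta factor is already folded into NET.Delta (eta = -LEARN.eta), so
   the eps==0 branch simply adds the deltas to the weights, while the
   momentum branch keeps the previous step in LEARN.Odw. */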


/***********************************************************/
/* MLP_Epoch                                               */
/*                                                         */
/* one epoch of MLP training                               */
/* inputs: int iepoch = epoch number                       */
/*         dbl *alpmin = optimal alpha from Line Search    */
/*                                                         */
/* return value (dbl) = error value on learning sample     */
/*                      BEFORE changing the weights        */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/* Modified: J.Schwindling 09-Apr-99                       */
/*                         re-organize routine             */
/*           J.Schwindling 13-Apr-99                       */
/*                         remove Quickprop algorithm      */
/*                         implement Ribiere-Polak conjugate grad. */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Epoch(int iepoch, dbl *alpmin, int *Ntest)
{
  dbl err, ONorm, beta, prod, ddir;
/* int *index;*/
  int Nweights, Nlinear, ipat, ierr;
  int nn;

  err = 0;
  *alpmin = 0.;

  Nweights = NET.Nweights;
  Nlinear = NET.Nneur[NET.Nlayer-2] + 1;

  if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
/* stochastic minimization */
  if(LEARN.Meth==1)
  {

    err = MLP_Stochastic();

  }
  else
  {
    if(iepoch==1 && LEARN.Meth==7)
    {
      SetLambda(10000);
      MLP_ResLin();
      if(NET.Debug>=2) PrintWeights();
    }

/* save previous gradient and reset current one */
    DeDwSaveZero();
    if(LEARN.Meth==16)
    {
      ShuffleExamples(PAT.Npat[0],ExamplesIndex);
      nn = PAT.Npat[0];
      PAT.Npat[0] = nn/10;
      for(ipat=0;ipat<nn;ipat++)
      {
        ierr = MLP_Train(&ExamplesIndex[ipat],&err);
        if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
      }
    }
    else
    {
      for(ipat=0;ipat<PAT.Npat[0];ipat++)
      {
        ierr = MLP_Train(&ipat,&err);
        if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
      }
    }
    DeDwScale(PAT.Npat[0]);
    if(LEARN.Meth==2) StochStep();
    if(LEARN.Meth==3)
    {
      SteepestDir();
      if(LineSearch(alpmin,Ntest,err)==1) StochStep();
    }

/* Conjugate Gradients Ribiere - Polak */
    if(LEARN.Meth==4)
    {
      if((iepoch-1)%LEARN.Nreset==0)
      {
        LEARN.Norm = DeDwNorm(); /* for next epoch */
        SteepestDir();
      }
      else
      {
        ONorm = LEARN.Norm;
        LEARN.Norm = DeDwNorm();
        prod = DeDwProd();
        beta = (LEARN.Norm-prod)/ONorm;
        CGDir(beta);
      }
      if(LineSearch(alpmin,Ntest,err)==1) StochStep();
    }

/* Conjugate Gradients Fletcher - Reeves */
    if(LEARN.Meth==5)
    {
      if((iepoch-1)%LEARN.Nreset==0)
      {
        LEARN.Norm = DeDwNorm(); /* for next epoch */
        SteepestDir();
      }
      else
      {
        ONorm = LEARN.Norm;
        LEARN.Norm = DeDwNorm();
        beta = LEARN.Norm/ONorm;
        CGDir(beta);
      }
      if(LineSearch(alpmin,Ntest,err)==1) StochStep();
    }
    if(LEARN.Meth==6)
    {
      if((iepoch-1)%LEARN.Nreset==0)
      {
        SteepestDir();
        InitBFGSH(Nweights);
      }
      else
      {
        GetGammaDelta();
        ierr = GetBFGSH(Nweights);
        if(ierr)
        {
          SteepestDir();
          InitBFGSH(Nweights);
        }
        else
        {
          BFGSdir(Nweights);
        }
      }
      ddir = DerivDir();
      if(ddir>0)
      {
        SteepestDir();
        InitBFGSH(Nweights);
        ddir = DerivDir();
      }
      if(LineSearch(alpmin,Ntest,err)==1)
      {
        InitBFGSH(Nweights);
        SteepestDir();
        if(LineSearch(alpmin,Ntest,err)==1)
        {
          printf("Line search fail \n");
        }
      }
    }
    if(LEARN.Meth==7)
    {
      if((iepoch-1)%LEARN.Nreset==0)
      {
        SteepestDir();
        InitBFGSH(Nweights-Nlinear);
      }
      else
      {
        if(NET.Debug>=5) printf("Before GetGammaDelta \n");
        GetGammaDelta();
        if(NET.Debug>=5) printf("After GetGammaDelta \n");
        ierr = GetBFGSH(Nweights-Nlinear);
        if(NET.Debug>=5) printf("After GetBFGSH \n");
        if(ierr)
        {
          SteepestDir();
          InitBFGSH(Nweights-Nlinear);
        }
        else
        {
          BFGSdir(Nweights-Nlinear);
        }
        if(NET.Debug>=5) printf("After BFGSdir \n");
      }
      SetLambda(10000);
      if(LineSearchHyb(alpmin,Ntest)==1)
      {
        InitBFGSH(Nweights-Nlinear);
        SteepestDir();
        if(LineSearchHyb(alpmin,Ntest)==1)
        {
          printf("Line search fail \n");
        }
      }
    }
  }

  if(NET.Debug>=5) printf(" End MLP_Epoch\n");
  return(err);
}
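
/* Summary of the LEARN.Meth values dispatched above (names as printed by
   ReadPatterns at the end of this file):
     1 = stochastic minimization
     2 = steepest descent with fixed step
     3 = steepest descent with line search
     4 = Polak-Ribiere conjugate gradients
     5 = Fletcher-Reeves conjugate gradients
     6 = BFGS
     7 = hybrid BFGS-linear (BFGS on hidden weights, linear fit of the
         output layer via MLP_ResLin)
    16 = gradient accumulated over shuffled examples with PAT.Npat[0]
         temporarily reduced to one tenth (see the code above) */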


/***********************************************************/
/* MLP_Train                                               */
/*                                                         */
/* Train Network: compute output, update DeDw              */
/* inputs: int *ipat = pointer to pattern number           */
/* input/output: dbl *err = current error                  */
/*                                                         */
/* return value (int) = error value: 1 wrong pattern number */
/*                                   2 *ipat < 0           */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int MLP_Train(int *ipat, dbl *err)
{
  int in;

/* if(*ipat>=PAT.Npat[0]) return(1);*/
  if(*ipat<0) return(2);

/* MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
  MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
  {
    *err += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
            *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
            PAT.Pond[0][*ipat];
  }
  DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
  return(0);
}


/***********************************************************/
/* StochStepHyb                                            */
/*                                                         */
/* Update the weights according to stochastic minimization */
/* formula (for hybrid methods)                            */
/*                                                         */
/* return value (int) = 0                                  */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int StochStepHyb()
{
  int il, in1, in;
  dbl eta, eps;

  eta = LEARN.eta;
  eps = LEARN.epsilon;
  for(il=NET.Nlayer-2; il>0; il--) {

    for(in=0; in<NET.Nneur[il]; in++) {

      /* compute delta weights */
      for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
        LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
                                 + eps * LEARN.Odw[il][in][in1];
      }

      /* update weights */
      for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
      }
    }
  }
  MLP_ResLin();
  return(0);
}


/***********************************************************/
/* StochStep                                               */
/*                                                         */
/* Update the weights according to stochastic minimization */
/* formula                                                 */
/*                                                         */
/* return value (int) = 0                                  */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int StochStep()
{
  int il, in1, in;
  dbl eta, eps, epseta;

  eta = -LEARN.eta;
  eps = LEARN.epsilon;
  epseta = eps/eta;
  for(il=NET.Nlayer-1; il>0; il--) {
    for(in1=0; in1<=NET.Nneur[il-1]; in1++) {

      /* compute delta weights */
      for(in=0; in<NET.Nneur[il]; in++) {
        LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
                                 + epseta * LEARN.Odw[il][in][in1]);
        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
      }

    }
  }

  return(0);
}
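
/* Since epseta = epsilon/eta, the update above expands to
     Odw = eta*DeDw + epsilon*Odw_old      (with eta = -LEARN.eta)
   i.e. a fixed steepest-descent step plus a momentum term. */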


/***********************************************************/
/* DeDwNorm                                                */
/*                                                         */
/* computes the norm of the gradient                       */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DeDwNorm()
{
  int il,in,jn;
  dbl dd=0;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        dd += LEARN.DeDw[il][in][jn]*
              LEARN.DeDw[il][in][jn];
  return(dd);
}


/***********************************************************/
/* DeDwProd                                                */
/*                                                         */
/* scalar product between new gradient and previous one    */
/* (used in Polak-Ribiere Conjugate Gradient method)       */
/*                                                         */
/* Author: J.Schwindling   26-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DeDwProd()
{
  int il,in,jn;
  dbl dd=0;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        dd += LEARN.DeDw[il][in][jn]*
              LEARN.ODeDw[il][in][jn];
  return(dd);
}


/***********************************************************/
/* DeDwZero                                                */
/*                                                         */
/* resets to 0 the gradient (should be done before DeDwSum)*/
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwZero()
{
  int il, in, jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.DeDw[il][in][jn] = 0;
}


/***********************************************************/
/* DeDwScale                                               */
/*                                                         */
/* divides the gradient by the number of examples          */
/* inputs: int Nexamples = number of examples              */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwScale(int Nexamples)
{
  int il, in, jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
}


/***********************************************************/
/* DeDwSave                                                */
/*                                                         */
/* copies the gradient DeDw into ODeDw                     */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwSave()
{
  int il, in, jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
}


/***********************************************************/
/* DeDwSaveZero                                            */
/*                                                         */
/* copies the gradient DeDw into ODeDw                     */
/* resets DeDw to 0                                        */
/*                                                         */
/* Author: J.Schwindling   23-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwSaveZero()
{
  int il, in, jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
        LEARN.DeDw[il][in][jn] = 0;
      }
}
1109 
1110 /***********************************************************/
1111 /* DeDwSum */
1112 /* */
1113 /* adds to the total gradient the gradient in the current */
1114 /* example */
1115 /* inputs: int Nexamples = number of examples */
1116 /* */
1117 /* Author: J.Schwindling 31-Mar-99 */
1118 /* Modified: B.Mansoulie 23-Apr-99 */
1119 /* faster and still correct way to compute the */
1120 /* sigmoid derivative */
1121 /***********************************************************/
1122 
1123 /* extern "C"Dllexport */int DeDwSum(type_pat *ans, dbl *out, int ipat)
1124 {
1125  int il, in1, in, ii;
1126 /* dbl err[NMAX][4]; */
1127  dbl deriv;
1128  dbl *pout, *pdedw, *pdelta;
1129  dbl a, b;
1130 /* char buf[50];*/
1131 
1132 /* output layer */
1133  b = (dbl) PAT.Pond[0][ipat];
1134  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
1135  {
1136  deriv = NET.Deriv1[NET.Nlayer-1][in];
1137  NET.Delta[NET.Nlayer-1][in] =
1138  (out[in] - (dbl) ans[in])*deriv*b;
1139  }
1140 
1141  for(il=NET.Nlayer-2; il>0; il--)
1142  {
1143 
1144  for(in=0; in<NET.Nneur[il]; in++)
1145  {
1146  deriv = NET.Deriv1[il][in];
1147  a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
1148  pdelta = &(NET.Delta[il+1][1]);
1149  for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++)
1150  {
1151  a += *pdelta * NET.Weights[il+1][in1][in+1];
1152  }
1153  NET.Delta[il][in] = a * deriv;
1154  }
1155  }
1156 
1157  for(il=1; il<NET.Nlayer; il++)
1158  {
1159  ii = NET.Nneur[il-1];
1160  for(in=0; in<NET.Nneur[il]; in++)
1161  {
1162  a = NET.Delta[il][in];
1163  LEARN.DeDw[il][in][0] += a;
1164  LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
1165  pout = &(NET.Outn[il-1][1]);
1166  pdedw = &(LEARN.DeDw[il][in][2]);
1167  for(in1=1; in1<ii; ++in1, ++pout, ++pdedw)
1168  {
1169  (*pdedw) += a * (*pout);
1170  }
1171  }
1172  }
1173 
1174  return(0);
1175 }
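
/* DeDwSum implements one standard backpropagation step:
     output layer:  Delta[in] = (out[in] - ans[in]) * f'(net) * pond
     hidden layers: Delta[il][in] = f'(net) * sum_in1 Delta[il+1][in1] *
                                    Weights[il+1][in1][in+1]
   and accumulates dE/dW[il][in][jn] += Delta[il][in] * Outn[il-1][jn-1],
   with jn = 0 the bias term. */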


/***********************************************************/
/* SetTransFunc                                            */
/*                                                         */
/* to set the transfer function of a neuron                */
/* inputs: int layer = layer number (1 -> Nlayer)          */
/*         int neuron = neuron number (1 -> Nneur)         */
/*         int func = transfer function:                   */
/*                    0: y=0                               */
/*                    1: y=x                               */
/*                    2: y=1/(1+exp(-x))                   */
/*                    3: y=tanh(x)                         */
/*                    4: y=delta*x+1/(1+exp(-x))           */
/*                    5: y=exp(-x**2)                      */
/*                                                         */
/* return value (int) = error value:                       */
/*                      0: no error                        */
/*                      1: layer > NLMAX                   */
/*                      2: neuron > NMAX                   */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int SetTransFunc(int layer, int neuron,
                                          int func)
{
  if(layer>NLMAX) return(1);
/* if(neuron>NMAX) return(2);*/

  NET.T_func[layer-1][neuron-1] = func;

  return(0);
}
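
/* Example (a sketch): force a linear response for the first neuron of the
   output layer and check the return code. Note that the forward routines
   in this file (MLP_Out_T, MLP_Test_MM) only evaluate functions 0, 1 and 2.

     if(SetTransFunc(NET.Nlayer, 1, 1) != 0)
       printf("SetTransFunc: layer number out of range\n");
*/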


/***********************************************************/
/* SetDefaultFuncs                                         */
/*                                                         */
/* - sets the default transfer functions to sigmoid for    */
/*   hidden layers and linear for output layer             */
/* - sets temperatures to 1                                */
/*                                                         */
/*                                                         */
/* Author: J.Schwindling   08-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void SetDefaultFuncs()
{
  int il,in;
  for(il=0; il<NET.Nlayer; il++) {
    for(in=0; in<NET.Nneur[il]; in++) {
      NET.T_func[il][in] = 2;
      if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
    }
  }

}


/***********************************************************/
/* SteepestDir                                             */
/*                                                         */
/* sets the search direction to steepest descent           */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void SteepestDir()
{
  int il,in,jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
}


/***********************************************************/
/* CGDir                                                   */
/*                                                         */
/* sets the search direction to conjugate gradient dir     */
/* inputs: dbl beta : d(t+1) = -g(t+1) + beta d(t)         */
/*         beta should be:                                 */
/*   ||g(t+1)||^2 / ||g(t)||^2 (Fletcher-Reeves)           */
/*   g(t+1) (g(t+1)-g(t)) / ||g(t)||^2 (Polak-Ribiere)     */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void CGDir(dbl beta)
{
  int il,in,jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
                          beta*dir[il][in][jn];
      }
}


/***********************************************************/
/* DerivDir                                                */
/*                                                         */
/* scalar product between gradient and direction           */
/* = derivative along direction                            */
/*                                                         */
/* Author: J.Schwindling   01-Jul-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DerivDir()
{
  int il,in,jn;
  dbl ddir = 0;

  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
      }
  return(ddir);
}


/***********************************************************/
/* GetGammaDelta                                           */
/*                                                         */
/* sets the vectors Gamma (=g(t+1)-g(t))                   */
/* and delta (=w(t+1)-w(t))                                */
/* (for BFGS and Hybrid learning methods)                  */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void GetGammaDelta()
{
  int i=0;
  int il,in,jn;
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        Gamma[i] = LEARN.DeDw[il][in][jn]-
                   LEARN.ODeDw[il][in][jn];
        delta[i] = LEARN.Odw[il][in][jn];
        i++;
      }
}


/***********************************************************/
/* BFGSdir                                                 */
/*                                                         */
/* sets the search direction to BFGS direction from the    */
/* BFGS matrix                                             */
/*                                                         */
/* inputs: int Nweights = number of weights                */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void BFGSdir(int Nweights)
{
  dbl *g, *s;
  int kk=0;
  int il,i,j,in,jn;

  g = (dbl*) malloc(NET.Nweights*sizeof(dbl));
  s = (dbl*) malloc(Nweights*sizeof(dbl));

  for(il=1; kk<Nweights; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        g[kk] = LEARN.DeDw[il][in][jn];
        kk++;
      }
  for(i=0; i<Nweights; i++)
  {
    s[i] = 0;
    for(j=0; j<Nweights; j++)
    {
      s[i] += BFGSH[i][j] * g[j];
    }
  }

  kk = 0;
  for(il=1; kk<Nweights; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        dir[il][in][jn] = -s[kk];
        kk++;
      }
  free(g);
  free(s);
}


/***********************************************************/
/* InitBFGSH                                               */
/*                                                         */
/* initializes BFGS matrix to identity                     */
/*                                                         */
/* inputs: int Nweights = number of weights                */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void InitBFGSH(int Nweights)
{
  int i,j;
  for(i=0; i<Nweights; i++)
    for(j=0; j<Nweights; j++)
    {
      BFGSH[i][j] = 0;
      if(i==j) BFGSH[i][j] = 1;
    }
}


/***********************************************************/
/* GetBFGSH                                                */
/*                                                         */
/* sets the BFGS matrix                                    */
/*                                                         */
/* inputs: int Nweights = number of weights                */
/*                                                         */
/* return value (int) = 0 if no problem                    */
/*                      1 if deltaTgamma = 0 -> switch to  */
/*                        steepest dir                     */
/*                                                         */
/* Author: J.Schwindling   02-Apr-99                       */
/* Modified: J.Schwindling 04-May-99                       */
/*           computations as Nw^2, matrices removed        */
/* Modified: J.Schwindling 11-Jun-99 return value          */
/***********************************************************/

/* extern "C"Dllexport */int GetBFGSH(int Nweights)
{
  typedef double dble;
  dble deltaTgamma=0;
  dble factor=0;
  dble *Hgamma;
  dble *tmp;
  dble a, b;
  int i,j;

  Hgamma = (dble *) malloc(Nweights*sizeof(dble));
  tmp = (dble *) malloc(Nweights*sizeof(dble));

  for(i=0; i<Nweights; i++)
  {
    deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
    a = 0;
    b = 0;
    for(j=0; j<Nweights; j++)
    {
      a += (dble) BFGSH[i][j] * (dble) Gamma[j];
      b += (dble) Gamma[j] * (dble) BFGSH[j][i];
    }
    Hgamma[i] = a;
    tmp[i] = b;
    factor += (dble) Gamma[i]*Hgamma[i];
  }
  if(deltaTgamma == 0)
  {
    free(tmp);
    free(Hgamma);
    return 1;
  }
  a = 1 / deltaTgamma;
  factor = 1 + factor*a;

  for(i=0; i<Nweights; i++)
  {
    b = (dble) delta[i];
    for(j=0; j<Nweights; j++)
      BFGSH[i][j] += (dbl) (factor*b* (dble)
        delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;
  }
  free(Hgamma);
  free(tmp);
  return 0;
}
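
/* The loop above is the O(Nw^2) BFGS inverse-Hessian update
     H' = H + (1 + g^T.H.g / d^T.g) * (d.d^T)/(d^T.g)
            - (H.g.d^T + d.g^T.H)/(d^T.g)
   with d = delta (last weight change) and g = Gamma (last gradient
   change); Hgamma holds H.g and tmp holds g^T.H. */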


/***********************************************************/
/* LineSearch                                              */
/*                                                         */
/* search along the line defined by dir                    */
/*                                                         */
/* outputs: dbl *alpmin = optimal step length              */
/*                                                         */
/* Author: B.Mansoulie    01-Jul-98                        */
/***********************************************************/

/* extern "C"Dllexport */
int LineSearch(dbl *alpmin, int *Ntest, dbl Err0)
{
  dbl ***w0;
  dbl alpha1, alpha2, alpha3;
  dbl err1, err2, err3;
  dbl tau;
  int icount, il, in, jn;

  tau=LEARN.Tau;

/* store weights before line search */

  *Ntest = 0;
  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
  for(il=1; il<NET.Nlayer; il++)
  {
    w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
    for(in=0; in<NET.Nneur[il]; in++)
    {
      w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
                                  sizeof(dbl));
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        w0[il][in][jn] = NET.Weights[il][in][jn];
      }
    }
  }

/* compute error(w0) */

/* err1 = MLP_Test(0,0);
   (*Ntest) ++;*/
  err1 = Err0;

  if(NET.Debug>=4) printf("err depart= %f\n",err1);

  *alpmin = 0;
  alpha1 = 0;
/* alpha2 = 0.05;
   if(LastAlpha != 0) alpha2 = LastAlpha;*/
  alpha2 = LastAlpha;
  if(alpha2 < 0.01) alpha2 = 0.01;
  if(alpha2 > 2.0) alpha2 = 2.0;
  MLP_Line(w0,alpha2);
  err2 = MLP_Test(0,0);
  (*Ntest) ++;
  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);

  alpha3 = alpha2;
  err3 = err2;

/* try to find a triplet (alpha1, alpha2, alpha3) such that
   Error(alpha1)>Error(alpha2)<Error(alpha3) */

  if(err1>err2)
  {
    for(icount=1;icount<=100;icount++)
    {
      alpha3 = alpha3*tau;
      MLP_Line(w0,alpha3);
      err3 = MLP_Test(0,0);
      if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);
      (*Ntest) ++;
      if(err3>err2) break;
      alpha1 = alpha2;
      err1 = err2;
      alpha2 = alpha3;
      err2 = err3;
    }
    if(icount>=100) /* line search fails */
    {
      MLP_Line(w0,0); /* reset weights */
      free(w0);
      return(1);
    }
  }
  else
  {
    for(icount=1;icount<=100;icount++)
    {
      alpha2 = alpha2/tau;
      MLP_Line(w0,alpha2);
      err2 = MLP_Test(0,0);
      if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
      (*Ntest) ++;
      if(err1>err2) break;
      alpha3 = alpha2;
      err3 = err2;
    }
    if(icount>=100) /* line search fails */
    {
      MLP_Line(w0,0); /* reset weights */
      free(w0);
      LastAlpha = 0.05; /* try to be safe */
      return(1);
    }
  }

/* find bottom of parabola */

  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
            -(err2-err1)/(alpha2-alpha1)));
  if(*alpmin>10000) *alpmin=10000;

/* set the weights */
  MLP_Line(w0,*alpmin);
  LastAlpha = *alpmin;

/* store weight changes */
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
                                - w0[il][in][jn];

  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      free(w0[il][in]);
  for(il=1; il<NET.Nlayer; il++)
    free(w0[il]);
  free(w0);

  return(0);
}
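
/* The "bottom of parabola" step is exact quadratic interpolation: for the
   parabola through (alpha1,err1), (alpha2,err2), (alpha3,err3) the minimum
   lies at
     alpmin = 0.5*( alpha1 + alpha3 - (err3-err1) /
              ( (err3-err2)/(alpha3-alpha2) - (err2-err1)/(alpha2-alpha1) ) )
   which is what the code computes before clamping at 10000. */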


/***********************************************************/
/* DecreaseSearch                                          */
/*                                                         */
/* search along the line defined by dir for a point where  */
/* error is decreased (faster than full line search)       */
/*                                                         */
/* outputs: dbl *alpmin = step length                      */
/*                                                         */
/* Author: J.Schwindling   11-May-99                       */
/***********************************************************/

/* extern "C"Dllexport */
int DecreaseSearch(dbl *alpmin, int *Ntest, dbl Err0)
{
  dbl ***w0;
  dbl alpha2;
  dbl err1, err2;
  dbl tau;
  int icount, il, in, jn;

  tau=LEARN.Tau;

/* store weights before line search */

  *Ntest = 0;
  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
  for(il=1; il<NET.Nlayer; il++)
  {
    w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
    for(in=0; in<NET.Nneur[il]; in++)
    {
      w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
                                  sizeof(dbl));
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        w0[il][in][jn] = NET.Weights[il][in][jn];
      }
    }
  }

/* compute error(w0) */

/* err1 = MLP_Test(0,0);
   (*Ntest) ++;*/
  err1 = Err0;

  if(NET.Debug>=4) printf("err depart= %f\n",err1);

  *alpmin = 0;
  alpha2 = 0.05;
  MLP_Line(w0,alpha2);
  err2 = MLP_Test(0,0);
  (*Ntest) ++;

  if(err2<err1)
  {
    *alpmin = alpha2;
  }
  else
  {


    for(icount=1;icount<=100;icount++)
    {
      alpha2 = alpha2/tau;
      MLP_Line(w0,alpha2);
      err2 = MLP_Test(0,0);
      (*Ntest) ++;
      if(err1>err2) break;
    }
    if(icount>=100) /* line search fails */
    {
      MLP_Line(w0,0); /* reset weights */
      free(w0);
      return(1);
    }
    *alpmin = alpha2;
  }

/* set the weights */
  MLP_Line(w0,*alpmin);

/* store weight changes */
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
                                - w0[il][in][jn];

  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      free(w0[il][in]);
  for(il=1; il<NET.Nlayer; il++)
    free(w0[il]);
  free(w0);

  return(0);
}


/* extern "C"Dllexport */int FixedStep(dbl alpha)
{
  dbl ***w0;
  int il, in, jn;

  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
  for(il=1; il<NET.Nlayer; il++)
  {
    w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
    for(in=0; in<NET.Nneur[il]; in++)
    {
      w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
                                  sizeof(dbl));
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        w0[il][in][jn] = NET.Weights[il][in][jn];
      }
    }
  }


/* set the weights */
  MLP_Line(w0,alpha);

/* store weight changes */
  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
                                - w0[il][in][jn];

  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      free(w0[il][in]);
  for(il=1; il<NET.Nlayer; il++)
    free(w0[il]);
  free(w0);

  return(0);
}

/***********************************************************/
/* MLP_Line                                                */
/*                                                         */
/* sets the weights to a point along a line                */
/*                                                         */
/* inputs: dbl ***w0 = initial point                       */
/*         dbl alpha = step length                         */
/*                                                         */
/* Author: B.Mansoulie    01-Jul-98                        */
/***********************************************************/

/* extern "C"Dllexport */
void MLP_Line(dbl ***w0, dbl alpha)
{
  int il,in,jn;

  for(il=1; il<NET.Nlayer; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        NET.Weights[il][in][jn] = w0[il][in][jn]+
                                  alpha*dir[il][in][jn];

}


/***********************************************************/
/* LineSearchHyb                                           */
/*                                                         */
/* search along the line defined by dir                    */
/*                                                         */
/* outputs: dbl *alpmin = optimal step length              */
/*                                                         */
/* Author: B.Mansoulie    01-Jul-98                        */
/***********************************************************/

/* extern "C"Dllexport */
int LineSearchHyb(dbl *alpmin, int *Ntest)
{
  dbl ***w0;
  dbl alpha1, alpha2, alpha3;
  dbl err1, err2, err3;
  dbl tau;
  int icount, il, in, jn;

/* char buf [50];
   sprintf (buf,"entree linesearchhyb\n");
   MessageBoxA (0,buf,"dans FreePatterns",MB_OK);*/

  if(NET.Debug>=4){
    printf(" entry LineSearchHyb \n");
  }
  tau=LEARN.Tau;

/* store weights before line search */

  *Ntest = 0;
  w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
  for(il=1; il<NET.Nlayer-1; il++)
  {
    w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
    for(in=0; in<NET.Nneur[il]; in++)
    {
      w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
                                  sizeof(dbl));
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        w0[il][in][jn] = NET.Weights[il][in][jn];
      }
    }
  }

/* compute error(w0) */
  err1 = MLP_Test(0,1);
  (*Ntest) ++;
  if(NET.Debug>=4) printf("LinesearchHyb err depart= %f\n",err1);

  *alpmin = 0;
  alpha1 = 0;
/* alpha2 = 0.05;
   if(LastAlpha != 0) alpha2 = LastAlpha;*/
  alpha2 = LastAlpha;
  if(alpha2 < 0.01) alpha2 = 0.01;
  if(alpha2 > 2.0) alpha2 = 2.0;
  MLP_LineHyb(w0,alpha2);
  err2 = MLP_Test(0,1);
  (*Ntest) ++;

  alpha3 = alpha2;
  err3 = err2;

/* try to find a triplet (alpha1, alpha2, alpha3) such that
   Error(alpha1)>Error(alpha2)<Error(alpha3) */

  if(err1>err2)
  {
    for(icount=1;icount<=100;icount++)
    {
      alpha3 = alpha3*tau;
      MLP_LineHyb(w0,alpha3);
      err3 = MLP_Test(0,1);
      (*Ntest) ++;
      if(err3>err2) break;
      alpha1 = alpha2;
      err1 = err2;
      alpha2 = alpha3;
      err2 = err3;
    }
    if(icount>=100) /* line search fails */
    {
      MLP_LineHyb(w0,0); /* reset weights */
      free(w0);
      return(1);
    }
  }
  else
  {
    for(icount=1;icount<=100;icount++)
    {
      alpha2 = alpha2/tau;
      MLP_LineHyb(w0,alpha2);
      err2 = MLP_Test(0,1);
      (*Ntest) ++;
      if(err1>err2) break;
      alpha3 = alpha2;
      err3 = err2;
    }
    if(icount>=100) /* line search fails */
    {
      MLP_LineHyb(w0,0); /* reset weights */
      free(w0);
      return(1);
    }
  }

/* find bottom of parabola */

  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
            -(err2-err1)/(alpha2-alpha1)));
  if(*alpmin>10000) *alpmin=10000;

/* set the weights */
  MLP_LineHyb(w0,*alpmin);
  LastAlpha = *alpmin;

/* store weight changes */
  for(il=1; il<NET.Nlayer-1; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
        LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
                                - w0[il][in][jn];

  for(il=1; il<NET.Nlayer-1; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      free(w0[il][in]);
  for(il=1; il<NET.Nlayer-1; il++)
    free(w0[il]);
  free(w0);
  if(NET.Debug>=4){
    printf(" exit LineSearchHyb \n");
  }

  return(0);
}


/***********************************************************/
/* MLP_LineHyb                                             */
/*                                                         */
/* sets the weights to a point along a line                */
/* (for hybrid methods)                                    */
/*                                                         */
/* inputs: dbl ***w0 = initial point                       */
/*         dbl alpha = step length                         */
/*                                                         */
/* Author: B.Mansoulie    01-Jul-98                        */
/***********************************************************/

/* extern "C"Dllexport */
void MLP_LineHyb(dbl ***w0, dbl alpha)
{
  int il,in,jn;
  for(il=1; il<NET.Nlayer-1; il++)
    for(in=0; in<NET.Nneur[il]; in++)
      for(jn=0; jn<=NET.Nneur[il-1]; jn++)
      {
        NET.Weights[il][in][jn] = w0[il][in][jn]+
                                  alpha*dir[il][in][jn];
      }
  MLP_ResLin();
}


/***********************************************************/
/* SetLambda                                               */
/*                                                         */
/* sets the coefficient of the regularisation term for     */
/* hybrid learning method                                  */
/*                                                         */
/* input: double Wmax = typical maximum weight             */
/*                                                         */
/* Author: J.Schwindling   13-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */
void SetLambda(double Wmax)
{
  dbl err;
  err = MLP_Test(0,0);
  LEARN.Alambda =
    LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
}


/***********************************************************/
/* MLP_ResLin                                              */
/*                                                         */
/* resolution of linear system of equations for hybrid     */
/* training method                                         */
/*                                                         */
/*                                                         */
/* Author: B.Mansoulie     end-98                          */
/* Modified: J.Schwindling 29-Apr-99                       */
/*           use dgels from LAPACK                         */
/***********************************************************/

/* extern "C"Dllexport */
void MLP_ResLin()
{
/* dbl rrans[NMAX], rrout[NMAX];*/
/* type_pat rrin[NMAX];*/
  double *HR,*dpat; //,*wlin,*SV;
  double err,lambda,lambda2;
  int Nl,M,Nhr,khr,nrhs,iret,ierr;
  int il, in, inl, ipat;
  /*register dbl a;*/ //a unused
  char Trans = 'N';


/* int rank; */
// double rcond = -1; /* use machine precision */

  lambda2 = LEARN.Alambda;

/* Nl = number of linear weights
   M = number of terms in linear system = number of examples + regularisation */
  Nl = NET.Nneur[NET.Nlayer-2] + 1;
  M = PAT.Npat[0]+Nl;

  int Lwork = 5 * M;
  double *Work = (double*) malloc((int) Lwork*sizeof(double));

/* memory allocation */
  dpat = (double*) malloc((int) M*sizeof(double));
// wlin = (double*) malloc((int) Nl*sizeof(double));
// SV = (double*) malloc((int) Nl*sizeof(double));

  Nhr = M * Nl;
  HR = (double*) malloc((int) Nhr*sizeof(double));
  err = 0.;
  for(ipat=0;ipat<PAT.Npat[0];ipat++)
  {
/* *** Filling dpat and HR *** */
/* for(in=0; in<NET.Nneur[0]; in++)
   {
     rrin[in] = PAT.Rin[0][ipat][in];
   }*/

    MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
/* MLP_Out(rrin,rrout);*/
    /*for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
    {
      a = (dbl) PAT.Rans[0][ipat][in]; //a was not used
    } */
    il = NET.Nlayer-2;
    dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
    khr = ipat;
    HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
    for(in=0;in<NET.Nneur[il];in++)
    {
      khr = M *(in+1) + ipat;
      HR[khr] = NET.Outn[il][in]*
                (dbl) sqrt(PAT.Pond[0][ipat]);
    }
  }
  il = NET.Nlayer-2;
  lambda = sqrt(lambda2);
  for(ipat=0;ipat<=NET.Nneur[il];ipat++)
  {
    dpat[ipat+PAT.Npat[0]] = 0;
    for(in=0;in<=NET.Nneur[il];in++)
    {
      khr = M *in + ipat + PAT.Npat[0];
      HR[khr] = 0;
      if(in==ipat) HR[khr]=lambda;
    }
  }
  if(NET.Debug>=4)
  {
    err = MLP_Test(0,0);
    printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err);
  }
/* */
/* Find the linear weights by solving the linear system */
/* */
  nrhs = 1;
  ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
                &Lwork,&iret);
  if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
  if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);

/* ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
                  &iret);
   if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
   if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/

  il = NET.Nlayer-1;
  for (inl=0; inl<=NET.Nneur[il-1];inl++)
  {
    NET.Weights[il][0][inl] = dpat[inl];
  }
  if(NET.Debug>=4)
  {
    err = MLP_Test(0,0);
    printf("ResLin, apres tlsfor, err= %f\n",err);
  }
  free(Work);
  free(dpat);
// free(wlin);
  free(HR);
// free(SV);
}
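
/* dgels solves the linear least-squares problem  min_w ||HR.w - dpat||_2.
   Each of the first PAT.Npat[0] rows of HR holds a bias term and the last
   hidden layer outputs for one example, scaled by sqrt(pattern weight);
   the Nl extra rows sqrt(Alambda)*I appended above make this a
   ridge-regularised fit of the output-layer weights, consistent with the
   penalty added in MLP_Test. */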

/***********************************************************/
/* EtaDecay                                                */
/*                                                         */
/* decreases the learning parameter eta by the factor      */
/* LEARN.Decay                                             */
/*                                                         */
/* Author: J.Schwindling   14-Apr-99                       */
/***********************************************************/

void EtaDecay()
{
  LEARN.eta *= LEARN.Decay;
}


/***********************************************************/
/* ShuffleExamples                                         */
/*                                                         */
/* Shuffles the learning examples (for stochastic method)  */
/*                                                         */
/* Author: J.Schwindling   14-Apr-1999                     */
/* Modified: J.Schwindling 21-Jul-1999 inline MLP_Rand     */
/***********************************************************/

/* extern "C"Dllexport */
int ShuffleExamples(int n, int *index)
{
  int i,ii,itmp;
  dbl a = (dbl) (n-1);

  for(i=0;i<n;i++)
  {
    ii = (int) MLP_Rand(0.,a);
    itmp = index[ii];
    index[ii] = index[i];
    index[i] = itmp;
  }
  return 0;
}
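
/* Each pass swaps entry i with a uniformly drawn entry, a simple shuffle
   of the index array (slightly biased compared to a Fisher-Yates shuffle,
   which would draw ii in [i, n-1]). Typical use, as in MLP_Stochastic:

     ShuffleExamples(PAT.Npat[0], ExamplesIndex);
*/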


/***********************************************************/
/* MLP_Rand                                                */
/*                                                         */
/* Random Numbers (to initialize weights)                  */
/*                                                         */
/* inputs: dbl mini, dbl maxi = random number between      */
/*         mini and maxi                                   */
/* return value: (double) random number                    */
/*                                                         */
/* Author: J.Schwindling   14-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */
double MLP_Rand(dbl mini, dbl maxi)
{
  return mini+(maxi-mini)*random()/RAND_MAX;
}
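
/* This relies on random() and RAND_MAX covering the same range, which
   holds with glibc (both 2^31-1) but is not guaranteed by POSIX, where
   RAND_MAX only bounds rand(). A sketch of reproducible use:

     srandom(12345);                  seed the generator once
     dbl w = MLP_Rand(-0.5, 0.5);     uniform draw in [-0.5, 0.5]
*/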


/***********************************************************/
/* InitWeights                                             */
/*                                                         */
/* initializes the weights to random numbers between       */
/* -0.5 : 0.5                                              */
/*                                                         */
/* Author: J.Schwindling   14-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */
void InitWeights()
{
  int ilayer,ineur,i;

  for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
    for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
      for(i=0;i<=NET.Nneur[ilayer-1];i++)
        NET.Weights[ilayer][ineur][i]=
          (dbl) MLP_Rand(-0.5, 0.5);
}


/***********************************************************/
/* PrintWeights                                            */
/*                                                         */
/* Print weights on the screen                             */
/*                                                         */
/* Author: J.Schwindling   14-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */
void PrintWeights()
{
  int ilayer,ineur,i;

  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
  {
    if(MessLang==1)
    {
      printf("Couche %d\n",ilayer);
    }
    else
    {
      printf("Layer %d\n",ilayer);
    }
    for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
    {
      if(MessLang==1)
      {
        printf("Neurone %d",ineur);
      }
      else
      {
        printf("Neuron %d",ineur);
      }
      for(i=0; i<=NET.Nneur[ilayer-1]; i++)
      {
        printf(" %f",
               (double) NET.Weights[ilayer][ineur][i]);
      }
      printf("\n");
    }
    printf("\n");
  }
}
2202 
2203 
2204 /***********************************************************/
2205 /* ReadPatterns */
2206 /* */
2207 /* parser for learn.pat or test.pat files */
2208 /* */
2209 /* inputs: char *filename = name of the file to read */
2210 /* int ifile = 0: learning examples */
2211 /* 1: test examples */
2212 /* */
2213 /* outputs: int *inet = 1 if a network is defined */
2214 /* int *ilearn = 1 if a learning method is defined*/
2215 /* int *iexamples = 1 if examples are defined */
2216 /* */
2217 /* return value (int) = 0: no error */
2218 /* = -1: file could not be opened */
2219 /* */
2220 /* Author: J.Schwindling 20-Apr-99 */
2221 /* Modified: J.Schwindling 01-Jun-99 return inet, ilearn */
2222 /* iexamples */
2223 /* J.Schwindling 21-Sep-99 return value = -1 */
2224 /***********************************************************/
2225 
2226 #define CLEN 1024
2227 
2228 /* extern "C"Dllexport */
2229 int ReadPatterns(char *filename, int ifile,
2230  int *inet, int *ilearn, int *iexamples)
2231 {
2232 char s[CLEN], s2[CLEN], cc[6], cc2[6];
2233 char otherfile[CLEN];
2234 double p;
2235 //int line,i,j;
2236 int line,i;
2237 //int l,ll,ipat,nmax,il,in,tf;
2238 int l,ll,ipat,nmax;
2239 int np=0; /* number of examples */
2240 int nin=0; /* number of inputs */
2241 int nout=0; /* number of outputs */
2242 int npon=0;
2243 int ntot, ierr;
2244 //char **ss;
2245 char **ss=nullptr;
2246 FILE *LVQpat;
2247 int nlayer, nneur[NLMAX];
2248 
2249 printf("\nLoading file %s\n",filename);
2250 LVQpat=fopen(filename,"r");
2251 if(LVQpat == nullptr) return -1;
2252 
2253 line=0;
2254 
2255 while(fgets(s,CLEN,LVQpat))
2256  {
2257  if(*s=='N')
2258  {
2259  if(*(s+1)=='N') /* NNEU */
2260  {
2261  printf("Number of neurons %s",s);
2262  *inet = 1;
2263  sscanf(s,"%s %s",cc,s2);
2264  ierr = GetNetStructure(s2,&nlayer,nneur);
2265  if(ierr != 0) return ierr;
2266  ierr = MLP_SetNet(&nlayer,nneur);
2267  if(ierr != 0) return ierr;
2268  }
2269  else
2270  {
2271  sscanf(s,"%s %d",cc,&l);
2272  if(*(cc+1)=='P') /* NPAT */
2273  {
2274  np=l;
2275  printf("Number of patterns %d\n",np);
2276  }
2277  else if(*(cc+1)=='I') /* NINP */
2278  {
2279  nin=l;
2280  PAT.Nin = nin;
2281  printf("Number of inputs %d\n",nin);
2282  }
2283  else if(*(cc+1)=='O' && *(cc+2)=='U') /* NOUT */
2284  {
2285  nout=l;
2286  PAT.Nout = nout;
2287  printf("Number of outputs %d\n",nout);
2288  }
2289  else if(*(cc+1)=='O' && *(cc+2)=='R') /* NORM */
2290  {
2291  DIVERS.Norm=l;
2292  if(l==1) printf("Normalize inputs\n");
2293  }
2294 /* obsolete datacard NLAY */
2295  else if(*(cc+1)=='L')
2296  {
2297  printf("NLAY datacard is no longer needed\n");
2298  }
2299  else if(*(cc+1)=='E') /* NEPO */
2300  {
2301  LEARN.Nepoch=l;
2302  printf("Number of epochs %d\n",l);
2303  }
2304  else if(*(cc+1)=='R') /* NRES */
2305  {
2306  LEARN.Nreset=l;
2307  printf(
2308  "Reset to steepest descent every %d epochs\n",
2309  l);
2310  }
2311  }
2312  }
2313  else if(*s=='L')
2314  {
2315  if(*(s+1)=='P') /* LPAR */
2316  {
2317  sscanf(s,"%s %le",cc,&p);
2318  printf("Learning parameter %f\n",p);
2319  LEARN.eta = (dbl) p;
2320  }
2321  else if(*(s+1)=='M') /* LMET */
2322  {
2323  *ilearn = 1;
2324  sscanf(s,"%s %d",cc,&(LEARN.Meth));
2325  printf("Learning method = ");
2326  switch(LEARN.Meth)
2327  {
2328  case 1: printf("Stochastic Minimization\n");
2329  break;
2330  case 2: printf("Steepest descent with fixed step\n");
2331  break;
2332  case 3: printf("Steepest descent with line search\n"); break;
2333  case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
2334  case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
2335  break;
2336  case 6: printf("BFGS\n");
2337  break;
2338  case 7: printf("Hybrid BFGS-linear\n");
2339  break;
2340  default: printf("Error: unknown method\n"); break;
2341  }
2342 
2343  }
2344  else if(*(s+1)=='T') /* LTAU */
2345  {
2346  sscanf(s,"%s %lf",cc,&p);
2347  printf("Tau %f\n",p);
2348  LEARN.Tau = (dbl) p;
2349  }
2350  else if(*(s+1)=='A') /* LAMB */
2351  {
2352  sscanf(s,"%s %lf",cc,&p);
2353  printf("Lambda %f\n",p);
2354  LEARN.Lambda = (dbl) p;
2355  }
2356  }
2357  else if(*s=='F')
2358  {
2359  if(*(s+1)=='S') /* FSPO */
2360  {
2361  sscanf(s,"%s %le",cc,&p);
2362  printf("Flat spot elimination parameter %f\n",p);
2363  LEARN.delta = (dbl) p;
2364  }
2365  else if(*(s+1)=='I') /* FILE */
2366  {
2367  sscanf(s,"%s %s",cc,otherfile);
2368  ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
2369  if(ierr != 0) return ierr;
2370  }
2371  }
2372  else if(*s=='M') /* momentum */
2373  {
2374  sscanf(s,"%s %le",cc,&p);
2375  printf("Momentum term %f\n",p);
2376  LEARN.epsilon = (dbl) p;
2377  }
2378  else if(*s=='O') /* OUTx */
2379  {
2380  if(*(s+3)=='W') /* OUTW */
2381  {
2382  sscanf(s,"%s %d",cc,&OutputWeights);
2383  if(OutputWeights == 0)
2384  {
2385  printf("Never write file weights.out\n");
2386  }
2387  else if(OutputWeights == -1)
2388  {
2389  printf("Write weights to output file at the end\n");
2390  }
2391  else
2392  {
2393  printf("Write weights to file every %d epochs\n",
2394  OutputWeights);
2395  }
2396  }
2397  else if(*(s+3)=='F') /* OUTF */
2398  {
2399  sscanf(s,"%s %s",cc,cc2);
2400  if(*cc2=='F' || *cc2=='C')
2401  {
2402  DIVERS.Outf = *cc2;
2403  }
2404  else
2405  {
2406  printf(" *** Error while loading file %s at line %s :",
2407  filename,s);
2408  printf(" unknown language\n");
2409  }
2410  }
2411  else
2412  {
2413  printf(" *** Error while loading file %s at line %s\n",
2414  filename,s);
2415  }
2416  }
2417  else if(*s=='R') /* RDWT */
2418  {
2419  sscanf(s,"%s %d",cc,&(NET.Rdwt));
2420  if(NET.Rdwt == 0)
2421  {
2422  printf("Random weights \n");
2423  }
2424  else
2425  {
2426  printf("Read weights from file weights.in\n");
2427  }
2428  }
2429  else if(*s=='S') /* STAT */
2430  {
2431  sscanf(s,"%s %d",cc,&(DIVERS.Stat));
2432  }
2433 /* else if(*s=='T') TFUN
2434  {
2435  sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
2436  SetTransFunc(il,in,tf);
2437  } */
2438  else if(*s=='H') /* HESS */
2439  {
2440  sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
2441  }
2442  else if(*s=='D')
2443  {
2444  if(*(s+1)=='C') /* DCAY */
2445  {
2446  sscanf(s,"%s %le",cc,&p);
2447  LEARN.Decay = p;
2448  printf("Learning parameter decay %f\n",
2449  (double) LEARN.Decay);
2450  }
2451  if(*(s+1)=='B') /* DBIN */
2452  {
2453  sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
2454  printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
2455  }
2456  if(*(s+1)=='E') /* DEBU */
2457  {
2458  sscanf(s,"%s %d",cc,&(NET.Debug));
2459  printf("Debug mode %d\n",NET.Debug);
2460  }
2461  }
2462  else if(*s=='P') /* POND */
2463  {
2464  npon = CountLexemes(s);
2465  if(npon==2)
2466  {
2467  sscanf(s,"%s %d",cc,&(PAT.Iponde));
2468  }
2469  else
2470  {
2471  ss = (char**) malloc((npon+1)*sizeof(char*));
2472  for(i=0;i<=npon;i++)
2473  ss[i]=(char*) malloc(40*sizeof(char));
2474  getnLexemes(npon,s,ss);
2475  sscanf(ss[1],"%d",&(PAT.Iponde));
2476  for(i=2;i<npon;i++)
2477  {
2478  sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
2479  }
2480  }
2481  if(PAT.Iponde==0)
2482  {
2483  npon = 0;
2484  }
2485  else
2486  {
2487  npon = 1;
2488  }
2489  }
2490  else if(*s=='#') /* comments */
2491  {
2492  }
2493  else /* the example itself */
2494  {
2495  if(np==0) return 1;
2496  if(nin==0) return 2;
2497  if(nout==0) return 3;
2498 
2499 
2500 /* store number of examples and allocate memory */
2501  if(line==0)
2502  {
2503  PAT.Npat[ifile] = np;
2504  ierr = AllocPatterns(ifile,np,nin,nout,0);
2505  if(ierr != 0) return ierr;
2506  *iexamples = 1;
2507  }
2508 
2509 /* now read the example */
2510 
2511  line++;
2512  ll = (line-1)%2;
2513  ipat = (line-1)/2;
2514  /* printf("Loading event \t %d\r",ipat);*/
2515 /* if(ipat>NPMAX)
2516  {
2517  printf("Too many examples in file\n");
2518  printf("Loading %d examples\n",NPMAX);
2519  PAT.Npat[ifile] = NPMAX;
2520  break;
2521  }
2522 */
2523 
2524 /* allocate the number of lines */
2525 
2526  if(line==1)
2527  {
2528 
2529  nmax = nin;
2530  if(nout>nin) nmax=nout;
2531  ss = (char**) malloc((nmax+1)*sizeof(char*));
2532  if(ss == nullptr) return -111;
2533  for(i=0;i<=nmax;i++)
2534  {
2535  ss[i]=(char*) malloc(40*sizeof(char));
2536  if(ss[i] == nullptr) return -111;
2537  }
2538  }
2539 
2540  if(ll==0) /* inputs */
2541  {
2542  getnLexemes(nin,s,ss);
2543  for(i=0;i<nin;i++)
2544  {
2545  sscanf(ss[i],"%le",&p);
2546  PAT.Rin[ifile][ipat][i] = (type_pat) p;
2547  }
2548  }
2549  else /* answers */
2550  {
2551  ntot=nout+npon;
2552  getnLexemes(ntot,s,ss);
2553  for(i=0;i<ntot;i++)
2554  {
2555  sscanf(ss[i],"%le",&p);
2556  if(i<nout)
2557  {
2558  PAT.Rans[ifile][ipat][i] = (type_pat) p;
2559  }
2560  else
2561  {
2562  if(PAT.Iponde==1)
2563  {
2564  PAT.Pond[ifile][ipat] =
2565  (type_pat) p;
2566  }
2567  else
2568  {
2569  PAT.Pond[ifile][ipat] =
2570  (type_pat) PAT.Ponds[(int) p -1];
2571  }
2572  }
2573  }
2574  }
2575  }
2576  }
2577  printf("%d examples loaded \n\n",PAT.Npat[ifile]);
2578  fclose(LVQpat);
2579  return 0;
2580 }
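
A hypothetical minimal file accepted by this parser: datacards first, then each example as one line of inputs followed by one line of answers (the two-line alternation implemented via ll = (line-1)%2 above). All numbers are illustrative:

# structure, sizes and learning settings
NNEU 2,3,1
NPAT 2
NINP 2
NOUT 1
LMET 7
LPAR 0.05
NEPO 100
# example 1: inputs, then answer
0.5 1.2
1.0
# example 2
-0.3 0.7
0.0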
2581 
2582 
2583 /* CountLexemes returns the number of lexemes in s separated by blanks.*/
2584 /* extern "C"Dllexport */
2585 int CountLexemes(char *s)
2586 {
2587  char tmp[1024];
2588  int i=0;
2589 
2590  strcpy(tmp,s);
2591  if (strtok(tmp," "))
2592  {
2593  i=1;
2594  while (strtok(nullptr," ")) i++;
2595  }
2596  return i;
2597 }
2598 
2599 /* splits s in substrings ss separated by blanks*/
2600 /* extern "C"Dllexport */
2601 void getnLexemes(int n, char *s, char **ss)
2602 {
2603  char tmp[1024];
2604  int i;
2605  strcpy(tmp,s);
2606  if (n>0)
2607  {
2608  strcpy(ss[0],strtok(tmp," "));
2609  for (i=1;i<n;i++)
2610  strcpy(ss[i],strtok(nullptr," "));
2611  }
2612 }
2613 
2614 /* extern "C"Dllexport */
2615 void getLexemes(char *s,char **ss)
2616 {
2617  char tmp[1024];
2618  int i,n;
2619 
2620  strcpy(tmp,s);
2621  n=CountLexemes(tmp);
2622  if (n>0)
2623  {
2624  strcpy(ss[0],strtok(tmp," "));
2625  for (i=1;i<n;i++)
2626  strcpy(ss[i],strtok(nullptr," "));
2627  }
2628 }
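
A short sketch of the lexeme helpers above; the 40-byte token buffers mirror the allocations ReadPatterns itself makes:

#include <stdlib.h>
#include "mlp_gen.h"

static void token_demo(void)
{
    char line[] = "0.5 1.2 0.7";
    int i, n = CountLexemes(line);              /* n == 3 */
    char **tok = (char **) malloc(n * sizeof(char *));
    if(tok == nullptr) return;
    for(i = 0; i < n; i++) tok[i] = (char *) malloc(40);
    getnLexemes(n, line, tok);                  /* tok[0] = "0.5", ... */
    for(i = 0; i < n; i++) free(tok[i]);
    free(tok);
}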
2629 
2630 
2631 /***********************************************************/
2632 /* LearnFree */
2633 /* */
2634 /* frees memory allocated for learning */
2635 /* */
2636 /* Author: J.Schwindling 28-May-99 */
2637 /***********************************************************/
2638 
2639 /* extern "C"Dllexport */
2640 void LearnFree()
2641 {
2642  int il,in;
2643  if(LearnMemory==0) return;
2644  LearnMemory = 0;
2645  for(il=0; il<NET.Nlayer; il++)
2646  {
2647  for(in=0; in<NET.Nneur[il]; in++)
2648  {
2649  free(dir[il][in]);
2650  }
2651  free(dir[il]);
2652  }
2653  free(dir);
2654  if(BFGSMemory==0) return;
2655  BFGSMemory = 0;
2656  for(il=0; il<NET.Nweights; il++)
2657  {
2658  free(BFGSH[il]);
2659  }
2660  free(BFGSH);
2661  free(Gamma);
2662  free(delta);
2663 
2664 /* if(JacobianMemory == 0) return;
2665  JacobianMemory = 0;
2666  for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
2667  free(JacobianMatrix); */
2668 }
2669 
2670 
2671 /***********************************************************/
2672 /* LearnAlloc */
2673 /* */
2674 /* memory allocation for vectors and matrices used in */
2675 /* conjugate gradients or BFGS like methods */
2676 /* */
2677 /* return value (int) = error code: 0 no error */
2678 /* -111 no memory */
2679 /* */
2680 /* Author: J.Schwindling 20-Apr-99 */
2681 /* Modified: J.Schwindling 31-Jan-2000 error code */
2682 /***********************************************************/
2683 
2684 /* extern "C"Dllexport */
2685 int LearnAlloc()
2686 {
2687  int il,in,i;
2688  int Nweights = 0;
2689 
2690  if(LearnMemory != 0) LearnFree();
2691  LearnMemory = 1;
2692  dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
2693  if(dir == nullptr) return -111;
2694 
2695  for(il=0; il<NET.Nlayer; il++)
2696  {
2697  dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
2698  if(dir[il] == nullptr) return -111;
2699  for(in=0; in<NET.Nneur[il]; in++)
2700  {
2701  if(il==0)
2702  {
2703 /* TODO: understand implications of hard-coded 101 */
2704  dir[0][in] = (dbl *)
2705  malloc(101*sizeof(dbl));
2706  if(dir[0][in] == nullptr) return -111;
2707  }
2708  else
2709  {
2710  dir[il][in] = (dbl *)
2711  malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
2712  if(dir[il][in] == nullptr) return -111;
2713  Nweights += NET.Nneur[il-1]+1;
2714  }
2715  }
2716  }
2717  NET.Nweights = Nweights;
2718 
2719  if(BFGSMemory==0 && LEARN.Meth>= 6)
2720  {
2721  BFGSMemory = 1;
2722  Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
2723  delta = (dbl*) malloc(Nweights*sizeof(dbl));
2724  BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
2725  if(Gamma == nullptr || delta == nullptr || BFGSH == nullptr)
2726  return -111;
2727 
2728  for(i=0; i<Nweights; i++)
2729  {
2730  BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
2731  if(BFGSH[i] == nullptr) return -111;
2732  }
2733  }
2734 
2735 /* if(JacobianMemory==0)
2736  {
2737  JacobianMemory = 1;
2738  printf("JacobianMemory = %d\n",JacobianMemory);
2739  JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
2740  for(i=0; i<PAT.Npat[0]; i++)
2741  JacobianMatrix[i] =
2742  (dbl*) malloc(Nweights*sizeof(dbl));
2743  printf("end memory alloc\n");
2744  }
2745 
2746  if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
2747 
2748  return 0;
2749 }
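
The weight count accumulated above is simply Nweights = Sum over il=1..Nlayer-1 of Nneur[il] * (Nneur[il-1] + 1), i.e. one bias plus one weight per upstream neuron; for a 2-3-1 network that is 3*(2+1) + 1*(3+1) = 13.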
2750 
2751 
2752 /***********************************************************/
2753 /* MLP_PrFFun */
2754 /* */
2755 /* writes the MLP function to file as a fortran function */
2756 /* */
2757 /* inputs : char *filename = name of the output file */
2758 /* */
2759 /* return value (int) = 0: no error */
2760 /* -1: could not open file */
2761 /* */
2762 /* Author: J.Schwindling 20-Apr-99 */
2763 /* Modified: J.Schwindling 05-May-99 */
2764 /* add normalization of inputs */
2765 /* J.Schwindling 30-Nov-99 return code */
2766 /***********************************************************/
2767 
2768 /* extern "C"Dllexport */
2769 int MLP_PrFFun(char *filename)
2770 {
2771  int il,in,jn;
2772  FILE *W;
2773 
2774  W=fopen(filename,"w");
2775  if(W==nullptr) return -1;
2776  fprintf(W," SUBROUTINE RNNFUN(rin,rout)\n");
2777  fprintf(W," DIMENSION RIN(%d)\n",NET.Nneur[0]);
2778  fprintf(W," DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
2779  fprintf(W,"C\n");
2780 
2781  for(in=0; in<NET.Nneur[0]; in++)
2782  {
2783  if(DIVERS.Norm==0)
2784  {
2785  fprintf(W," OUT%d = RIN(%d)\n",in+1,in+1);
2786  }
2787  else
2788  {
2789  fprintf(W," OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
2790  STAT.mean[in],STAT.sigma[in]);
2791  }
2792  }
2793  for(il=1; il<NET.Nlayer-1; il++)
2794  {
2795  fprintf(W,"C\n");
2796  fprintf(W,"C layer %d\n",il+1);
2797  for(in=0; in<NET.Nneur[il]; in++)
2798  {
2799  fprintf(W," RIN%d = %e\n",in+1,
2800  (double) NET.Weights[il][in][0]);
2801  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2802  fprintf(W," > +(%e) * OUT%d\n",
2803  (double) NET.Weights[il][in][jn],jn);
2804  }
2805  fprintf(W,"C\n");
2806  for(in=0; in<NET.Nneur[il]; in++)
2807  {
2808  if(NET.T_func[il][in]==0)
2809  {
2810  fprintf(W," OUT%d = 0\n",in+1);
2811  }
2812  else if(NET.T_func[il][in]==1)
2813  {
2814  fprintf(W," OUT%d = RIN%d\n",in+1,in+1);
2815  }
2816  else if(NET.T_func[il][in]==2)
2817  {
2818  fprintf(W," OUT%d = SIGMOID(RIN%d)\n",
2819  in+1,in+1);
2820  }
2821  }
2822  }
2823  il = NET.Nlayer-1;
2824  fprintf(W,"C\n");
2825  fprintf(W,"C layer %d\n",il+1);
2826  for(in=0; in<NET.Nneur[il]; in++)
2827  {
2828  fprintf(W," RIN%d = %e\n",in+1,
2829  (double) NET.Weights[il][in][0]);
2830  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2831  fprintf(W," > +(%e) * OUT%d\n",
2832  (double) NET.Weights[il][in][jn],jn);
2833  }
2834  fprintf(W,"C\n");
2835  for(in=0; in<NET.Nneur[il]; in++)
2836  {
2837  if(NET.T_func[il][in]==0)
2838  {
2839  fprintf(W," ROUT(%d) = 0\n",in+1);
2840  }
2841  else if(NET.T_func[il][in]==1)
2842  {
2843  fprintf(W," ROUT(%d) = RIN%d\n",in+1,in+1);
2844  }
2845  else if(NET.T_func[il][in]==2)
2846  {
2847  fprintf(W," ROUT(%d) = SIGMOID(RIN%d)\n",
2848  in+1,in+1);
2849  }
2850  }
2851 
2852  fprintf(W,"C\n");
2853  fprintf(W," END\n");
2854  fprintf(W," REAL FUNCTION SIGMOID(X)\n");
2855  fprintf(W," SIGMOID = 1./(1.+EXP(-X))\n");
2856  fprintf(W," END\n");
2857 
2858  fclose(W);
2859  return 0;
2860 }
2861 
2862 
2863 /***********************************************************/
2864 /* MLP_PrCFun */
2865 /* */
2866 /* writes the MLP function to file as a C function */
2867 /* */
2868 /* inputs : char *filename = name of the output file */
2869 /* */
2870 /* return value (int) = 0: no error */
2871 /* -1: could not open file */
2872 /* */
2873 /* Author: J.Schwindling 23-Apr-99 */
2874 /* Modified: J.Schwindling 30-Nov-99 return code */
2875 /***********************************************************/
2876 
2877 /* extern "C"Dllexport */
2878 int MLP_PrCFun(char *filename)
2879 {
2880  int il,in,jn;
2881  FILE *W;
2882 
2883  W=fopen(filename,"w");
2884  if(W==nullptr) return -1;
2885 
2886  fprintf(W,"double sigmoid(double x)\n");
2887  fprintf(W,"{\n");
2888  fprintf(W,"return 1/(1+exp(-x));\n");
2889  fprintf(W,"}\n");
2890  fprintf(W,"void rnnfun(double *rin,double *rout)\n");
2891  fprintf(W,"{\n");
2892  fprintf(W," double out1[%d];\n",NET.Nneur[0]);
2893  fprintf(W," double out2[%d];\n",NET.Nneur[1]);
2894  if(NET.Nlayer>=3) fprintf(W," double out3[%d];\n",NET.Nneur[2]);
2895  if(NET.Nlayer>=4) fprintf(W," double out4[%d];\n",NET.Nneur[3]);
2896  fprintf(W,"\n");
2897 
2898  for(in=0; in<NET.Nneur[0]; in++)
2899  {
2900  if(DIVERS.Norm==0)
2901  {
2902  fprintf(W," out1[%d] = rin[%d];\n",in,in);
2903  }
2904  else
2905  {
2906  fprintf(W," out1[%d] = (rin[%d]-%e)/%e;\n",
2907  in,in,
2908  STAT.mean[in],STAT.sigma[in]);
2909  }
2910  }
2911 
2912  for(il=1; il<=NET.Nlayer-1; il++)
2913  {
2914  fprintf(W,"\n");
2915  fprintf(W,"/* layer %d */\n",il+1);
2916  for(in=0; in<NET.Nneur[il]; in++)
2917  {
2918  fprintf(W," out%d[%d] = %e\n",il+1,in,
2919  (double) NET.Weights[il][in][0]);
2920  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2921  fprintf(W," +(%e) * out%d[%d]\n",
2922  (double) NET.Weights[il][in][jn],il,jn-1);
2923  fprintf(W," ;\n");
2924  }
2925  fprintf(W,"\n");
2926  for(in=0; in<NET.Nneur[il]; in++)
2927  {
2928  if(NET.T_func[il][in]==0)
2929  {
2930  fprintf(W," out%d[%d] = 0;\n",il+1,in);
2931  }
2932  else if(NET.T_func[il][in]==1)
2933  {
2934  }
2935  else if(NET.T_func[il][in]==2)
2936  {
2937  fprintf(W," out%d[%d] = sigmoid(out%d[%d]);\n",
2938  il+1,in,il+1,in);
2939  }
2940  }
2941  }
2942  il = NET.Nlayer-1;
2943  for(in=0; in<NET.Nneur[il]; in++)
2944  {
2945  fprintf(W," rout[%d] = out%d[%d];\n",in,il+1,in);
2946  }
2947  fprintf(W,"}\n");
2948  fclose(W);
2949  return 0;
2950 }
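
For illustration, the file emitted for a hypothetical 2-2-1 network with unnormalized inputs and sigmoid transfer functions throughout would read as follows (all weights invented; the generated code calls exp(), so it must be compiled with <math.h> and -lm, which the emitted file does not include itself):

double sigmoid(double x)
{
return 1/(1+exp(-x));
}
void rnnfun(double *rin,double *rout)
{
 double out1[2];
 double out2[2];
 double out3[1];

 out1[0] = rin[0];
 out1[1] = rin[1];

/* layer 2 */
 out2[0] = 1.000000e-01
 +(2.000000e-01) * out1[0]
 +(-3.000000e-01) * out1[1]
 ;
 out2[1] = -5.000000e-02
 +(4.000000e-01) * out1[0]
 +(1.500000e-01) * out1[1]
 ;

 out2[0] = sigmoid(out2[0]);
 out2[1] = sigmoid(out2[1]);

/* layer 3 */
 out3[0] = 2.500000e-01
 +(7.000000e-01) * out2[0]
 +(-6.000000e-01) * out2[1]
 ;

 out3[0] = sigmoid(out3[0]);
 rout[0] = out3[0];
}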
2951 
2952 
2953 /***********************************************************/
2954 /* SaveWeights */
2955 /* */
2956 /* writes the weights to file */
2957 /* */
2958 /* inputs : char *filename = name of the output file */
2959 /* int iepoch = epoch number */
2960 /* */
2961 /* return value (int): 0 if OK */
2962 /* -1 if file could not be opened */
2963 /* */
2964 /* Author: J.Schwindling 20-Apr-99 */
2965 /* Modified: J.Schwindling 11-Jun-99 */
2966 /* print net structure in header */
2967 /* Modified: J.Schwindling 05-Nov-99 */
2968 /* return error code */
2969 /***********************************************************/
2970 
2971 /* extern "C"Dllexport */
2972 int SaveWeights(char *filename, int iepoch)
2973 {
2974  FILE *W;
2975  int ilayer,ineur,i;
2976 
2977  W=fopen(filename,"w");
2978  if(W==nullptr) return -1;
2979 
2980  fprintf(W,"# network structure ");
2981  for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
2982  {
2983  fprintf(W,"%d ",NET.Nneur[ilayer]);
2984  }
2985 
2986  fprintf(W,"\n %d\n",iepoch);
2987  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2988  {
2989  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2990  {
2991  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2992  {
2993  fprintf(W," %1.15e\n",
2994  (double) NET.Weights[ilayer][ineur][i]);
2995  }
2996  }
2997  }
2998  fclose(W);
2999  return 0;
3000 }
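
The emitted layout, for a hypothetical 2-3-1 network saved at epoch 100: a commented structure header, the epoch number, then 13 weight lines (3*(2+1) + 1*(3+1)), layer by layer, each neuron's bias (index 0) written first. The file would begin like this (values invented):

# network structure 2 3 1 
 100
 1.234567890123457e-01
 -4.567890123456789e-02
 7.890123456789012e-01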
3001 
3002 
3003 /***********************************************************/
3004 /* LoadWeights */
3005 /* */
3006 /* reads the weights from file */
3007 /* */
3008 /* input : char *filename = name of the input file */
3009 /* output : int *iepoch = epoch number */
3010 /* */
3011 /* return value (int): 0 if OK */
3012 /* -1 if file could not be opened */
3013 /* */
3014 /* Author: J.Schwindling 20-Apr-99 */
3015 /* Modified: J.Schwindling 11-Jun-99 */
3016 /* lines starting with # are skipped */
3017 /* Modified: J.Schwindling 05-Nov-99 */
3018 /* return error code */
3019 /***********************************************************/
3020 
3021 /* extern "C"Dllexport */
3022 int LoadWeights(char *filename, int *iepoch)
3023 {
3024  FILE *W;
3025  int ilayer,ineur,i;
3026  double p;
3027  char s[80];
3028 
3029  W=fopen(filename,"r");
3030  if(W==nullptr) return -1;
3031  do
3032  {
3033  if(fgets(s,80,W) == nullptr) break; /* stop on EOF or read error */
3034  }
3035  while(*s == '#');
3036  sscanf(s," %d",iepoch);
3037  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
3038  {
3039  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
3040  {
3041  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
3042  {
3043  fscanf(W," %le",&p);
3044  NET.Weights[ilayer][ineur][i] = (dbl) p;
3045  }
3046  }
3047  }
3048 
3049  fclose(W);
3050  return 0;
3051 }
3052 
3053 
3054 /***********************************************************/
3055 /* AllocPatterns */
3056 /* */
3057 /* allocate memory for the examples */
3058 /* */
3059 /* input : int ifile = file number (0 or 1) */
3060 /* int npat = number of examples */
3061 /* int nin = number of inputs */
3062 /* int nout = number of outputs */
3063 /* int iadd = 0: new examples */
3064 /* 1: add to existing ones */
3065 /* */
3066 /* return value (int) = error code: 0 = no error */
3067 /* 1 = wrong file number */
3068 /* -111 = no memory */
3069 /* */
3070 /* Author: J.Schwindling 21-Apr-99 */
3071 /* Modified: J.Schwindling 26-Apr-99 */
3072 /* - frees memory if already booked and iadd=0 */
3073 /* (should remove need to call mlpfree) */
3074 /* - implement iadd = 1 */
3075 /***********************************************************/
3076 
3077 /* extern "C"Dllexport */int AllocPatterns(int ifile, int npat, int nin, int nout, int iadd)
3078 {
3079  int j;
3080  type_pat *tmp, *tmp3;
3081  type_pat **tmp2;
3082  int ntot;
3083 
3084  if(ifile>1 || ifile<0) return(1);
3085 /* scanf("%d",&j); */
3086  if(ExamplesMemory==0)
3087  {
3088  ExamplesMemory=1;
3089  PAT.Pond = (type_pat **) malloc(2*sizeof(dbl*));
3090  PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
3091  PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
3092  PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
3093  if(PAT.Pond == nullptr || PAT.Rin == nullptr
3094  || PAT.Rans == nullptr || PAT.vRin == nullptr) return -111;
3095  }
3096 
3097 
3098 /* if iadd=0, check that memory not already allocated. Otherwise free it */
3099  if(iadd==0 && PatMemory[ifile]!=0)
3100  {
3101  FreePatterns(ifile);
3102  }
3103 
3104 /* allocate memory and initialize ponderations */
3105  if(iadd==0 || PatMemory[ifile]==0)
3106  {
3107  PatMemory[ifile] = 1;
3108  PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
3109  if(PAT.Pond[ifile] == nullptr) return -111;
3110  for(j=0; j<npat; j++)
3111  PAT.Pond[ifile][j] = 1;
3112 
3113  PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3114  if(PAT.Rin[ifile] == nullptr) return -111;
3115  PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3116  if(PAT.Rans[ifile] == nullptr) return -111;
3117 
3118  PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
3119  sizeof(type_pat));
3120  if(PAT.vRin[ifile] == nullptr) return -111;
3121 
3122  for(j=0; j<npat; j++)
3123  {
3124  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3125  PAT.vRin[ifile][j*(nin+1)] = 1;
3126  }
3127  for(j=0; j<npat; j++)
3128  {
3129  PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
3130  if(PAT.Rans[ifile][j] == nullptr) return -111;
3131  }
3132  PAT.Npat[ifile] = npat;
3133 
3134  if(ifile==0)
3135  {
3136  ExamplesIndex = (int *) malloc(npat*sizeof(int));
3137  if(ExamplesIndex == nullptr) return -111;
3138  for(j=0; j<npat; j++) ExamplesIndex[j] = j;
3139  }
3140  }
3141  else /* add examples */
3142  {
3143  ntot = PAT.Npat[ifile]+npat;
3144 
3145 /* event weighting */
3146  tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
3147  if(tmp == nullptr) return -111;
3148 
3149  for(j=0; j<PAT.Npat[ifile]; j++)
3150  {
3151  tmp[j] = PAT.Pond[ifile][j];
3152  }
3153  for(j=PAT.Npat[ifile];j<ntot;j++)
3154  {
3155  tmp[j] = 1;
3156  }
3157  if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
3158  PAT.Pond[ifile] = tmp;
3159 
3160 /* examples */
3161 /* tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3162  for(j=0; j<PAT.Npat[ifile]; j++)
3163  {
3164  tmp2[j] = PAT.Rin[ifile][j];
3165  }
3166  for(j=PAT.Npat[ifile];j<ntot;j++)
3167  {
3168  tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
3169  }
3170  if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
3171  PAT.Rin[ifile] = tmp2; */
3172 
3173  tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
3174  if(tmp3 == nullptr) return -111;
3175 
3176  for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
3177  {
3178  tmp3[j] = PAT.vRin[ifile][j];
3179  }
3180  if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
3181  PAT.vRin[ifile] = tmp3;
3182  for(j=0; j<ntot; j++)
3183  {
3184  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3185  PAT.vRin[ifile][j*(nin+1)] = 1;
3186  }
3187 
3188  tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3189  if(tmp2 == nullptr) return -111;
3190  for(j=0; j<PAT.Npat[ifile]; j++)
3191  {
3192  tmp2[j] = PAT.Rans[ifile][j];
3193  }
3194  for(j=PAT.Npat[ifile];j<ntot;j++)
3195  {
3196  tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
3197  if(tmp2[j] == nullptr) return -111;
3198  }
3199  if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
3200  PAT.Rans[ifile] = tmp2;
3201  PAT.Npat[ifile] = ntot;
3202  PatMemory[ifile] = 1;
3203 
3204 /* indices */
3205  if(ifile==0)
3206  {
3207  free(ExamplesIndex);
3208  ExamplesIndex = (int *) malloc(ntot*sizeof(int));
3209  if(ExamplesIndex == nullptr) return -111;
3210  for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
3211  }
3212  }
3213 
3214  return 0;
3215 }
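
A minimal sketch, assuming mlp_gen.h: book a fresh learning set (file 0), fill the first example by hand, then release it again:

#include "mlp_gen.h"

static int patterns_demo(void)
{
    int ierr = AllocPatterns(0, 100, 2, 1, 0);  /* 100 examples, 2 in, 1 out */
    if(ierr != 0) return ierr;
    PAT.Rin[0][0][0] = 0.5;       /* inputs of example 0 */
    PAT.Rin[0][0][1] = 1.2;
    PAT.Rans[0][0][0] = 1.0;      /* answer of example 0 */
    return FreePatterns(0);
}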
3216 
3217 
3218 /***********************************************************/
3219 /* FreePatterns */
3220 /* */
3221 /* frees memory for the examples */
3222 /* */
3223 /* input : int ifile = file number (0 or 1) */
3224 /* */
3225 /* return value (int) = error code: 0 = no error */
3226 /* 1 = wrong file number */
3227 /* 2 = no mem allocated */
3228 /* */
3229 /* Author: J.Schwindling 26-Apr-99 */
3230 /***********************************************************/
3231 
3232 /* extern "C"Dllexport */int FreePatterns(int ifile)
3233 {
3234  int i;
3235 
3236  if(ifile>1 || ifile<0) return 1;
3237 /* printf("%d %d \n",ifile,PatMemory[ifile]);*/
3238  if(PatMemory[ifile]==0) return 2;
3239 
3240  free(PAT.Pond[ifile]);
3241  for(i=0; i<PAT.Npat[ifile]; i++)
3242  {
3243 /* free(PAT.Rin[ifile][i]); */
3244  free(PAT.Rans[ifile][i]);
3245  }
3246  free(PAT.Rin[ifile]);
3247  free(PAT.Rans[ifile]);
3248  free(PAT.vRin[ifile]);
3249  PatMemory[ifile] = 0;
3250  PAT.Npat[ifile] = 0;
3251 
3252  return 0;
3253 }
3254 
3255 
3256 /***********************************************************/
3257 /* MLP_StatInputs */
3258 /* */
3259 /* compute statistics about the inputs: mean, RMS, min, max*/
3260 /* */
3261 /* inputs: int Nexamples = number of examples */
3262 /* int Ninputs = number of input quantities */
3263 /* type_pat **inputs = input values */
3264 /* */
3265 /* outputs: dbl *mean = mean value */
3266 /* dbl *sigma = RMS */
3267 /* dbl *minimum = minimum */
3268 /* dbl *maximum = maximum */
3269 /* */
3270 /* return value (int): always = 0 */
3271 /* */
3272 /* Author: J.Schwindling 11-Oct-99 */
3273 /***********************************************************/
3274 
3275 int MLP_StatInputs(int Nexamples, int Ninputs, type_pat **inputs,
3276  dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)
3277 {
3278  dbl *fmean;
3279  int j, ipat, nmax;
3280 
3281 /* first compute a fast rough mean using the first 100 events */
3282  fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
3283  nmax = 100;
3284  if(Nexamples<100) nmax=Nexamples;
3285 
3286  for(j=0;j<Ninputs;j++)
3287  {
3288  fmean[j] = 0;
3289  for(ipat=0;ipat<nmax;ipat++)
3290  {
3291  fmean[j] += (dbl) inputs[ipat][j];
3292  }
3293  fmean[j] = fmean[j]/(dbl) nmax;
3294 
3295 /* now compute real mean and sigma, min and max */
3296  mean[j] = 0;
3297  sigma[j] = 0;
3298  minimum[j] = 99999;
3299  maximum[j] = -99999;
3300  for(ipat=0;ipat<Nexamples;ipat++)
3301  {
3302  mean[j] += (dbl) inputs[ipat][j];
3303  sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
3304  ((dbl) inputs[ipat][j]-fmean[j]);
3305  if((dbl) inputs[ipat][j] > maximum[j])
3306  maximum[j]=(dbl) inputs[ipat][j];
3307  if((dbl) inputs[ipat][j] < minimum[j])
3308  minimum[j]=(dbl) inputs[ipat][j];
3309  }
3310  mean[j] = mean[j]/(dbl) Nexamples;
3311  sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples -
3312  (mean[j]-fmean[j])*
3313  (mean[j]-fmean[j]));
3314  }
3315  free(fmean);
3316  return 0;
3317 }
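
The two-pass scheme above rests on the shifted-variance identity Var(x) = E[(x - f)^2] - (E[x] - f)^2, valid for any pivot f, here the rough mean of the first (up to) 100 events. Choosing f close to the true mean keeps the two terms of the subtraction comparable in size, which is numerically safer than the naive E[x^2] - E[x]^2 form.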
3318 
3319 /***********************************************************/
3320 /* MLP_PrintInputStat */
3321 /* */
3322 /* prints statistics about the inputs: mean, RMS, min, max */
3323 /* */
3324 /* return value (int) = error code: 0 = OK */
3325 /* -111 = could not */
3326 /* allocate memory */
3327 /* */
3328 /* Author: J.Schwindling 11-Oct-99 */
3329 /* Modified: J.Schwindling 31-Jan-2000: return value */
3330 /***********************************************************/
3331 
3332 int MLP_PrintInputStat()
3333 {
3334  int j;
3335  dbl *mean, *sigma, *minimum, *maximum;
3336 
3337 /* allocate memory */
3338  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3339  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3340  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3341  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3342  int returnCode = -111; // to return if any malloc failed
3343 
3344  if(mean && sigma && minimum && maximum) {
3345 
3346  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],mean,sigma,minimum,maximum);
3347 
3348  printf("\t mean \t\t RMS \t\t min \t\t max\n");
3349  for(j=0;j<NET.Nneur[0];j++)
3350  {
3351  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3352  mean[j],sigma[j],minimum[j],maximum[j]);
3353  }
3354  returnCode = 0; // everything went fine
3355  }
3356 
3357  free(mean);
3358  free(sigma);
3359  free(minimum);
3360  free(maximum);
3361  printf("\n");
3362  return returnCode;
3363 }
3364 
3365 
3366 /***********************************************************/
3367 /* NormalizeInputs */
3368 /* */
3369 /* normalize the inputs: I' = (I - <I>) / RMS(I) */
3370 /* */
3371 /* return value (int) = error code: 0 = OK */
3372 /* -111 = could not */
3373 /* allocate memory */
3374 /* */
3375 /* Author: J.Schwindling 04-May-1999 */
3376 /* Modified: J.Schwindling 31-Jan-2000: return value */
3377 /***********************************************************/
3378 
3379 /* extern "C"Dllexport */int NormalizeInputs()
3380 {
3381  int j, ipat;
3382  dbl *mean, *sigma, *minimum, *maximum;
3383 
3384 /* allocate memory */
3385  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3386  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3387  STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3388  STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3389  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3390  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3391  int returnCode = -111; // to return if any malloc failed
3392 
3393  if(mean && sigma && minimum && maximum && STAT.mean && STAT.sigma) {
3394 
3395  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],mean,sigma,minimum,maximum);
3396 
3397  if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
3398  for(j=0;j<NET.Nneur[0];j++)
3399  {
3400  if(NET.Debug>=1)
3401  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3402  mean[j],sigma[j],minimum[j],maximum[j]);
3403 
3404  /* store mean and sigma for output function */
3405  STAT.mean[j] = mean[j];
3406  STAT.sigma[j] = sigma[j];
3407 
3408  /* finally apply the normalization */
3409  for(ipat=0;ipat<PAT.Npat[0];ipat++)
3410  {
3411  PAT.Rin[0][ipat][j] =
3412  (PAT.Rin[0][ipat][j]-(float) mean[j])/
3413  (float) sigma[j];
3414  }
3415  for(ipat=0;ipat<PAT.Npat[1];ipat++)
3416  {
3417  PAT.Rin[1][ipat][j] =
3418  (PAT.Rin[1][ipat][j]-(float) mean[j])/
3419  (float) sigma[j];
3420  }
3421  }
3422  returnCode = 0; // everything went fine
3423  }
3424 
3425  free(mean);
3426  free(sigma);
3427  free(minimum);
3428  free(maximum);
3429  if(NET.Debug>=1) printf("\n");
3430  return returnCode;
3431 }
3432 
3433 
3434 /***********************************************************/
3435 /* AllocNetwork */
3436 /* */
3437 /* memory allocation for weights, etc */
3438 /* */
3439 /* inputs: int Nlayer: number of layers */
3440 /* int *Neurons: number of neurons per layer */
3441 /* */
3442 /* return value (int): error = 0: no error */
3443 /* = -111: could not allocate mem*/
3444 /* */
3445 /* Author: J.Schwindling 28-Sep-99 */
3446 /***********************************************************/
3447 
3448 int AllocNetwork(int Nlayer, int *Neurons)
3449 {
3450  int i, j, k, l;
3451 
3452  if(NetMemory != 0) FreeNetwork();
3453  NetMemory = 1;
3454 
3455  NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
3456  if(NET.Nneur == nullptr) return -111;
3457 
3458  NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
3459  NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
3460  NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3461  NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3462  NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
3463  if(NET.T_func == nullptr || NET.Deriv1 == nullptr
3464  || NET.Inn == nullptr || NET.Outn == nullptr
3465  || NET.Delta == nullptr) return -111;
3466 
3467  for(i=0; i<Nlayer; i++)
3468  {
3469  NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
3470  NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3471  NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3472  NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3473  NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3474  if(NET.T_func[i] == nullptr || NET.Deriv1[i] == nullptr
3475  || NET.Inn[i] == nullptr || NET.Outn[i] == nullptr
3476  || NET.Delta[i] ==nullptr ) return -111;
3477  }
3478 
3479  NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3480  NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
3481  LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3482  LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3483  LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3484  if(NET.Weights == nullptr || NET.vWeights == nullptr
3485  || LEARN.Odw == nullptr || LEARN.ODeDw == nullptr
3486  || LEARN.DeDw == nullptr) return -111;
3487 
3488  for(i=1; i<Nlayer; i++)
3489  {
3490  k = Neurons[i-1]+1;
3491  NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
3492  sizeof(dbl));
3493  NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3494  LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3495  LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3496  LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3497  if(NET.Weights[i] == nullptr || NET.vWeights[i] == nullptr
3498  || LEARN.Odw[i] == nullptr || LEARN.ODeDw[i] == nullptr
3499  || LEARN.DeDw[i] == nullptr) return -111;
3500 
3501  for(j=0; j<Neurons[i]; j++)
3502  {
3503  NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
3504  LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3505  LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3506  LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3507  if(LEARN.Odw[i][j] == nullptr
3508  || LEARN.ODeDw[i][j] == nullptr
3509  || LEARN.DeDw[i][j] == nullptr) return -111;
3510 
3511  for(l=0; l<k; l++)
3512  {
3513  LEARN.Odw[i][j][l] = 0;
3514  LEARN.ODeDw[i][j][l] = 0;
3515  }
3516  }
3517  }
3518  return 0;
3519 }
3520 
3521 
3522 /***********************************************************/
3523 /* FreeNetwork */
3524 /* */
3525 /* frees the memory allocated for the network */
3526 /* */
3527 /* Author: J.Schwindling 06-Oct-99 */
3528 /***********************************************************/
3529 
3530 void FreeNetwork()
3531 {
3532  int i, j;
3533  for(i=1; i<NET.Nlayer; i++)
3534  {
3535  for(j=0; j<NET.Nneur[i]; j++)
3536  {
3537 /* free(NET.Weights[i][j]); */
3538  free(LEARN.Odw[i][j]);
3539  free(LEARN.ODeDw[i][j]);
3540  free(LEARN.DeDw[i][j]);
3541  }
3542  free(NET.vWeights[i]);
3543  free(NET.Weights[i]);
3544  free(LEARN.Odw[i]);
3545  free(LEARN.ODeDw[i]);
3546  free(LEARN.DeDw[i]);
3547  }
3548  free(NET.Weights);
3549  free(LEARN.Odw);
3550  free(LEARN.ODeDw);
3551  free(LEARN.DeDw);
3552 
3553  free(NET.Nneur);
3554 
3555  for(i=0; i<NET.Nlayer; i++)
3556  {
3557  free(NET.T_func[i]);
3558  free(NET.Deriv1[i]);
3559  free(NET.Inn[i]);
3560  free(NET.Outn[i]);
3561  free(NET.Delta[i]);
3562  }
3563  free(NET.T_func);
3564  free(NET.Deriv1);
3565  free(NET.Inn);
3566  free(NET.Outn);
3567  free(NET.Delta);
3568 
3569  NetMemory = 0;
3570 }
3571 
3572 /***********************************************************/
3573 /* GetNetStructure */
3574 /* */
3575 /* given a string like "3,4,1" returns the network */
3576 /* structure */
3577 /* */
3578 /* inputs: char *s: input string */
3579 /* */
3580 /* outputs: int *Nlayer: number of layers */
3581 /* int *Nneur: number of neurons per layer */
3582 /* */
3583 /* return value (int): error = 0: no error */
3584 /* = -1: s is empty */
3585 /* = -2: s is too long */
3586 /* = -3: too many layers */
3587 /* */
3588 /* Author: J.Schwindling 04-Oct-99 */
3589 /***********************************************************/
3590 
3591 int GetNetStructure(char *s, int *Nlayer, int *Nneur)
3592 {
3593  int i=0;
3594  char tmp[1024];
3595 
3596  if(strlen(s)==0) return -1;
3597  if(strlen(s)>=1024) return -2; /* tmp[1024] must also hold the '\0' */
3598 
3599  strcpy(tmp,s);
3600  if (strtok(tmp,","))
3601  {
3602  i=1;
3603  while (strtok(nullptr,",")) i++;
3604  }
3605  *Nlayer = i;
3606  if(i > NLMAX) return -3;
3607 
3608  strcpy(tmp,s);
3609  if (*Nlayer>0)
3610  {
3611  sscanf(strtok(tmp,","),"%d",&(Nneur[0]));
3612  for (i=1;i<*Nlayer;i++)
3613  sscanf(strtok(nullptr,","),"%d",&(Nneur[i]));
3614  }
3615 
3616  return 0;
3617 }
3618 
3619 
3620 /***********************************************************/
3621 /* MLP_SetNet */
3622 /* */
3623 /* to set the structure of a neural network */
3624 /* inputs: int *nl = number of layers */
3625 /* int *nn = number of neurons */
3626 /* */
3627 /* return value (int) = error value: */
3628 /* 0: no error */
3629 /* 1: N layers > NLMAX */
3630 /* 2: N layers < 2 */
3631 /* -111: Error allocating memory */
3632 /* */
3633 /* Author: J.Schwindling 14-Apr-99 */
3634 /* Modified: J.Schwindling 05-Oct-99 allocate memory */
3635 /* Modified: J.Schwindling 29-Nov-99 LearnFree, LearnAlloc */
3636 /***********************************************************/
3637 
3638 int MLP_SetNet(int *nl, int *nn)
3639 {
3640  int il,ierr;
3641 
3642  if((*nl)>NLMAX) return(1);
3643  if((*nl)<2) return(2);
3644 
3645 /* LearnFree(); */
3646 /* allocate memory */
3647  ierr = AllocNetwork(*nl,nn);
3648  if(ierr != 0) return ierr;
3649 
3650 /* set number of layers */
3651  NET.Nlayer = (int) *nl;
3652 
3653 /* set number of neurons */
3654  for(il=0; il<NET.Nlayer; il++) {
3655  NET.Nneur[il] = nn[il];
3656  }
3657 
3658 /* set transfer functions */
3659  SetDefaultFuncs();
3660 /* LearnAlloc(); */
3661 
3662  return(0);
3663 }
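
A minimal sketch combining the two routines above (assumes mlp_gen.h; the structure string is illustrative):

#include "mlp_gen.h"

static int setup_demo(void)
{
    char s[] = "2,3,1";
    int nlayer, nneur[NLMAX];
    int ierr = GetNetStructure(s, &nlayer, nneur);
    if(ierr != 0) return ierr;
    return MLP_SetNet(&nlayer, nneur);  /* allocates the net, sets defaults */
}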
3664 
3665 
3666 /***********************************************************/
3667 /* MLP_MatrixVectorBias */
3668 /* */
3669 /* computes a Matrix-Vector product */
3670 /* r[j] = M[j][0] + Sum_{i=1..m} M[j][i] * v[i-1] */
3671 /* */
3672 /* inputs: dbl *M = matrix (n lines, m+1 columns) */
3673 /* dbl *v = vector (dimension m) */
3674 /* dbl *r = resulting vector (dimension n) */
3675 /* int n */
3676 /* int m */
3677 /* */
3678 /* Author: J.Schwindling 24-Jan-00 */
3679 /***********************************************************/
3680 
3681 void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
3682 {
3683  int i,j;
3684  dbl a1, a2, a3, a4, c, d;
3685  dbl *pM1 = M;
3686  dbl *pM2 = &(M[m+1]);
3687  dbl *pM3 = &(M[2*(m+1)]);
3688  dbl *pM4 = &(M[3*(m+1)]);
3689  dbl *pr = r;
3690  int mp1 = m+1;
3691 
3692  for(i=0; i<n-3;
3693  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3694  pr+=4)
3695  {
3696  a1 = *pM1;
3697  a2 = *pM2;
3698  a3 = *pM3;
3699  a4 = *pM4;
3700  pM1++; pM2++; pM3++; pM4++;
3701  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3702  {
3703  c = v[j];
3704  d = v[j+1];
3705  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3706  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3707  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3708  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3709  }
3710  for(/*j set above*/; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3711  {
3712  c = v[j];
3713  a1 = a1 + *pM1 * c;
3714  a2 = a2 + *pM2 * c;
3715  a3 = a3 + *pM3 * c;
3716  a4 = a4 + *pM4 * c;
3717  }
3718  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3719  }
3720  for(/*i set above*/; i<n; i++)
3721  {
3722  pM1 = &(M[i*(m+1)]);
3723  a1 = *pM1;
3724  pM1++;
3725  for(j=0; j<m; j++, pM1++)
3726  {
3727  a1 = a1 + *pM1 * v[j];
3728  }
3729  r[i] = a1;
3730  }
3731 }
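
For reference, an unoptimized version of the routine above; row j of M stores the bias in column 0 followed by the m weights:

static void MLP_MatrixVectorBias_ref(dbl *M, dbl *v, dbl *r, int n, int m)
{
    int i, j;
    for(j = 0; j < n; j++)
    {
        dbl acc = M[j*(m+1)];                   /* bias term */
        for(i = 0; i < m; i++)
            acc += M[j*(m+1) + 1 + i] * v[i];   /* weighted inputs */
        r[j] = acc;
    }
}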
3732 /***********************************************************/
3733 /* MLP_MatrixVector */
3734 /* */
3735 /* computes a Matrix-Vector product */
3736 /* r[j] = Sum_i M[j][i] v[i] */
3737 /* */
3738 /* inputs: dbl *M = matrix (n lines, m columns) */
3739 /* dbl *v = vector (dimension m) */
3740 /* dbl *r = resulting vector (dimension n) */
3741 /* int n */
3742 /* int m */
3743 /* */
3744 /* Author: J.Schwindling 24-Jan-00 */
3745 /***********************************************************/
3746 
3747 void MLP_MatrixVector(dbl *M, type_pat *v, dbl *r, int n, int m)
3748 {
3749  int i,j;
3750  dbl a1, a2, a3, a4, c, d;
3751  dbl *pM1 = M;
3752  dbl *pM2 = &(M[m]);
3753  dbl *pM3 = &(M[2*m]);
3754  dbl *pM4 = &(M[3*m]);
3755  dbl *pr = r;
3756  int mp1 = m;
3757 
3758  for(i=0; i<n-3;
3759  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3760  pr+=4)
3761  {
3762  a1 = 0;
3763  a2 = 0;
3764  a3 = 0;
3765  a4 = 0;
3766  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3767  {
3768  c = v[j];
3769  d = v[j+1];
3770  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3771  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3772  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3773  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3774  }
3775  for(/*j set above*/; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3776  {
3777  c = v[j];
3778  a1 = a1 + *pM1 * c;
3779  a2 = a2 + *pM2 * c;
3780  a3 = a3 + *pM3 * c;
3781  a4 = a4 + *pM4 * c;
3782  }
3783  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3784  }
3785  for(/*i set above*/; i<n; i++)
3786  {
3787  pM1 = &(M[i*m]);
3788  a1 = 0;
3789  for(j=0; j<m; j++, pM1++)
3790  {
3791  a1 = a1 + *pM1 * v[j];
3792  }
3793  r[i] = a1;
3794  }
3795 }
3796 
3797 
3798 /***********************************************************/
3799 /* MLP_MM2rows */
3800 /* */
3801 /* computes a Matrix-Matrix product, with the first matrix */
3802 /* having 2 rows */
3803 /* */
3804 /* inputs: dbl *c = resulting matrix (2 rows of Nj values): */
3805 /* c[i][j] = Sum_k a[i][k] * b[j][k] */
3806 /* type_pat *a = first matrix (2 rows of Nk values) */
3807 /* dbl *b = second matrix (Nj rows of Nk values) */
3808 /* int Ni (unused) */
3809 /* int Nj, int Nk */
3810 /* int NaOffs = row stride of a */
3811 /* int NbOffs = row stride of b */
3812 /* */
3813 /* Author: J.Schwindling 24-Jan-00 */
3814 /***********************************************************/
3815 
3817  int Ni, int Nj, int Nk, int NaOffs, int NbOffs)
3818 {
3819 //int i,j,k;
3820 int j,k;
3821 dbl s00,s01,s10,s11;
3822 type_pat *pa0,*pa1;
3823 dbl *pb0,*pb1,*pc0,*pc1;
3824 
3825  for (j=0; j<=Nj-2; j+=2)
3826  {
3827  pc0 = c+j;
3828  pc1 = c+j+Nj;
3829  s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;
3830 
3831  for (k=0,pb0=b+k+NbOffs*j,
3832  pb1=b+k+NbOffs*(j+1),
3833  pa0=a+k,
3834  pa1=a+k+NaOffs;
3835  k<Nk;
3836  k++,pa0++,
3837  pa1++,
3838  pb0++,
3839  pb1++)
3840  {
3841  s00 += (*pa0)*(*pb0);
3842  s01 += (*pa0)*(*pb1);
3843  s10 += (*pa1)*(*pb0);
3844  s11 += (*pa1)*(*pb1);
3845  }
3846  *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
3847  }
3848  for (/*j set above*/; j<Nj; j++)
3849  {
3850  pc0 = c+j;
3851  pc1 = c+j+Nj;
3852  s00 = 0.0; s10 = 0.0;
3853  for (k=0,pb0=b+k+NbOffs*j,
3854  pa0=a+k,
3855  pa1=a+k+NaOffs;
3856  k<Nk;
3857  k++,pa0++,
3858  pa1++,
3859  pb0++)
3860  {
3861  s00 += (*pa0)*(*pb0);
3862  s10 += (*pa1)*(*pb0);
3863  }
3864  *pc0 = s00; *pc1 = s10;
3865  }
3866 }
3867 
3868 #ifdef __cplusplus
3869 } // extern "C"
3870 #endif