/data/refman/pasoursint/CMSSW_5_2_9/src/PhysicsTools/MVATrainer/plugins/mlp_gen.cc
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#include "mlp_gen.h"
#include "mlp_sigmoide.h"

#ifdef __cplusplus
extern "C" {
#endif

#define NPMAX 100000
#define NLMAX 1000

struct net_ net_ MLP_HIDDEN;
struct learn_ learn_ MLP_HIDDEN;
struct pat_ pat_ MLP_HIDDEN;
struct divers_ divers_ MLP_HIDDEN;
struct stat_ stat_ MLP_HIDDEN;

int MessLang = 0;
int OutputWeights = 100;
int ExamplesMemory = 0;
int WeightsMemory = 0;
int PatMemory[2] = {0,0};
int BFGSMemory = 0;
int JacobianMemory = 0;
int LearnMemory = 0;
int NetMemory = 0;
float MLPfitVersion = (float) 1.40;
dbl LastAlpha = 0;
int NLineSearchFail = 0;

dbl ***dir;
dbl *delta;
dbl **BFGSH;
dbl *Gamma;
dbl **JacobianMatrix;
int *ExamplesIndex;
dbl **Hessian;

/* The following lines are needed to use the dgels routine from the LAPACK
   library in MLP_ResLin()                                                  */

#include "mlp_lapack.h"
/* Subroutine */ int dgels_(char *trans, integer *m, integer *n, integer *
        nrhs, doublereal *a, integer *lda, doublereal *b, integer *ldb, 
        doublereal *work, integer *lwork, integer *info);

/***********************************************************/
/* MLP_Out                                                 */
/*                                                         */
/* computes the output of the Neural Network               */
/* inputs:     type_pat *rrin = inputs of the MLP          */
/* outputs:    dbl *rrout = outputs of the MLP             */
/*             (not written by the current code: the       */
/*             results are left in NET.Outn[NET.Nlayer-1]) */
/*                                                         */
/* Author: J.Schwindling   29-Mar-99                       */
/* Modified: J.Schwindling 16-Jul-99 unrolled loops        */
/* Modified: J.Schwindling 20-Jul-99 fast sigmoid          */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out(type_pat *rrin, dbl *rrout)
{
//      static int i, il, in, j, ilm1, m, mp1;
        static int i, il, in, j, m, mp1;
        dbl **deriv1;

/* input layer */

        deriv1 = NET.Deriv1;
        m = NET.Nneur[0]%4;
        if(m==0) goto L10;
        for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
        mp1 = m+1;
        for(i=mp1; i<=NET.Nneur[0]; i+=4)
                {
                NET.Outn[0][i-1] = rrin[i-1];
                NET.Outn[0][i] = rrin[i];
                NET.Outn[0][i+1] = rrin[i+1];
                NET.Outn[0][i+2] = rrin[i+2];
                }

/* hidden and output layers */

        MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                        NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);

        for(il=2; il<NET.Nlayer; il++)
                {
                MLP_vSigmoideDeriv(NET.Outn[il-1],
                                deriv1[il-1],NET.Nneur[il-1]);
                MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                                NET.Outn[il],NET.Nneur[il],
                                NET.Nneur[il-1]);
                }
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                {
                deriv1[NET.Nlayer-1][in] = 1;
                }
}
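
/* Usage sketch (added for documentation, not part of the original
   MLPfit source).  It assumes a network and the pattern structures
   have been filled elsewhere in the trainer, and that the test file
   (ifile=1) holds at least one pattern.  Note that MLP_Out() never
   writes through rrout: the response is read from NET.Outn.        */
#ifdef MLP_GEN_EXAMPLES
static void ExampleForwardPass(void)
{
        int in;
        dbl dummy[1];           /* rrout is unused by MLP_Out */

        /* propagate the first test pattern through the network */
        MLP_Out(PAT.Rin[1][0], dummy);
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                printf("output %d = %f\n", in,
                        (double) NET.Outn[NET.Nlayer-1][in]);
}
#endif /* MLP_GEN_EXAMPLES */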


/***********************************************************/
/* MLP_Out_T                                               */
/*                                                         */
/* computes the output of the Neural Network when called   */
/* from MLP_Test                                           */
/* inputs:     type_pat *rrin = inputs of the MLP          */
/*                                                         */
/* Author: J.Schwindling   23-Jul-1999                     */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out_T(type_pat *rrin)
{
        static int i, il, in, j, ilm1, m, mp1;
        register dbl a;

/* input layer */

        m = NET.Nneur[0]%4;
        if(m==0) goto L10;
        for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
        mp1 = m+1;
        for(i=mp1; i<=NET.Nneur[0]; i+=4)
                {
                NET.Outn[0][i-1] = rrin[i-1];
                NET.Outn[0][i] = rrin[i];
                NET.Outn[0][i+1] = rrin[i+1];
                NET.Outn[0][i+2] = rrin[i+2];
                }

/* hidden and output layers */

/*      for(in=0;in<NET.Nneur[0]; in++) printf("%e %e\n",
                NET.Outn[0][in],NET.Weights[1][0][in]);
                printf("\n");  */
        for(il=1; il<NET.Nlayer; il++)
                {
                ilm1 = il-1;
                m = NET.Nneur[ilm1]%4;
                for(in=0; in<NET.Nneur[il]; in++)
                        {
                        a = NET.Weights[il][in][0];
                        if(m==0) goto L20;
                        for(j=1;j<=m;j++) a += 
                                NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
L20:
                        mp1 = m+1;
                        for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
                                {
                                a += 
                                NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
                                NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
                                NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
                                NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
                                }
                        switch(NET.T_func[il][in])
                                {
                                case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                                        break;
                                case 1: NET.Outn[il][in] = a;
                                        break;
                                case 0: NET.Outn[il][in] = 0;
                                        break;
                                }
                        }
                }
}
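
/* Added note: the m = n%4 prologue plus stride-4 main loop used above
   (and in MLP_Out) is ordinary 4-way loop unrolling.  In schematic
   form:

        m = n % 4;
        for(j=0; j<m; j++) body(j);          handle the 0..3 leftovers
        for(i=m+1; i<=n; i+=4)               then 4 elements per pass
                { body(i-1); body(i); body(i+1); body(i+2); }
*/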


/***********************************************************/
/* MLP_Out2                                                */
/*                                                         */
/* computes the output of the Neural Network               */
/* inputs:     type_pat *rrin = inputs of the MLP          */
/*             (rrin[0] is the bias entry of PAT.vRin;     */
/*             the actual inputs start at rrin[1])         */
/* outputs:    results are stored in NET.Outn              */
/*                                                         */
/* Author: J.Schwindling   29-Mar-99                       */
/* Modified: J.Schwindling 16-Jul-99 unrolled loops        */
/* Modified: J.Schwindling 20-Jul-99 fast sigmoid          */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out2(type_pat *rrin)
{
//      static int il, in, m, mp1, i0, ilm1;
        static int il, in, m, mp1;
        register int i;
        dbl **rrout, **deriv1;
        register dbl *prrout;
        type_pat *prrin;
        int nhid = NET.Nneur[1];
        int nin = NET.Nneur[0];

        rrout = NET.Outn;
        deriv1 = NET.Deriv1;

        m = NET.Nneur[0]%4;
        if(m==0) goto L10;
        if(m==1)
                {
                rrout[0][0] = rrin[1];
                goto L10;
                }
        else if(m==2)
                {
                rrout[0][0] = rrin[1];
                rrout[0][1] = rrin[2];
                goto L10;
                }
        else if(m==3)
                {
                rrout[0][0] = rrin[1];
                rrout[0][1] = rrin[2];
                rrout[0][2] = rrin[3];
                goto L10;
                }
L10:
        mp1 = m+1;
        prrout = &(rrout[0][mp1]);
        prrin = &(rrin[mp1+1]);
        for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
                {
                *(prrout-1) = *(prrin-1);
                *prrout = *prrin;
                *(prrout+1)= *(prrin+1);
                *(prrout+2) = *(prrin+2);
                }

/* first hidden layer */

        MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                NET.Outn[1],nhid,nin);

/* further hidden and output layers */

        for(il=2; il<NET.Nlayer; il++)
                {
                MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
                MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                        NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
                }
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                deriv1[NET.Nlayer-1][in] = 1;
}


/***********************************************************/
/* MLP_Test_MM                                             */
/*                                                         */
/* computes the MLP error function using matrix-matrix     */
/* multiplications                                         */
/* inputs:     int ifile = file number: 0=learn, 1=test    */
/*             dbl *tmp = a pointer to an array of size    */
/*                        2 x number of neurons in first   */
/*                        hidden layer                     */
/*                                                         */
/* return value (dbl) = error value                        */
/*                                                         */
/* Author: J.Schwindling   25-Jan-2000                     */
/***********************************************************/

dbl MLP_Test_MM(int ifile, dbl *tmp)
{
        int ipat;
        int npat = PAT.Npat[ifile];
        int nhid = NET.Nneur[1];
        int nin = NET.Nneur[0];
        int jpat, j, il, ilm1, m, in, mp1;
        dbl err, a, rrans;
        dbl *pweights, *ptmp;

        err = 0;
        for(ipat=0; ipat<npat-1; ipat+=2)
                {
                MLP_MM2rows(tmp, &(PAT.vRin[ifile][ipat*(nin+1)]), 
                        NET.vWeights[1], 2, nhid, nin+1, 
                        nin+1, nin+1);

                switch(NET.T_func[1][0])
                        {
                        case 2:
                        ptmp = &(tmp[0]);
                        MLP_vSigmoide(ptmp,2*nhid);
                        break;

                        case 1:
                        break;

                        case 0:
                        for(jpat=0; jpat<2; jpat++)
                                {
                                for(j=0; j<nhid; j++)
                                        {
                                        tmp[j+jpat*nhid] = 0;
                                        }
                                }
                        break;
                        }

                for(jpat=0; jpat<2; jpat++)
                {
                for(in=0; in<nhid; in++) 
                        {
                        NET.Outn[1][in] = tmp[jpat*nhid+in];
                        }
                for(il=2; il<NET.Nlayer; il++)
                        {
                        ilm1 = il-1;
                        m = NET.Nneur[ilm1]%4;
                        for(in=0; in<NET.Nneur[il]; in++)
                                {
                                pweights = &(NET.Weights[il][in][0]);
                                a = *pweights;
                                pweights++;
                                if(m==0) goto L20;
                                for(j=1;j<=m;j++,pweights++) a += 
                                (*pweights)*NET.Outn[ilm1][j-1];
L20:
                                mp1 = m+1;
                                for(j=mp1; j<=NET.Nneur[ilm1]; 
                                        j+=4, pweights+=4)
                                        {
                                        a += 
                                *(pweights+3)*NET.Outn[ilm1][j+2]+
                                *(pweights+2)*NET.Outn[ilm1][j+1]+
                                *(pweights+1)*NET.Outn[ilm1][j]+
                                *(pweights  )*NET.Outn[ilm1][j-1];
                                        }
                                switch(NET.T_func[il][in])
                                {
                                case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                                        break;
                                case 1: NET.Outn[il][in] = a;
                                        break;
                                case 0: NET.Outn[il][in] = 0;
                                        break;
                                }
                                }
                        if(il == NET.Nlayer-1)
                        {
                        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++) 
                        {
                        rrans = (dbl) PAT.Rans[ifile][ipat+jpat][in];
                        err  += (rrans-NET.Outn[NET.Nlayer-1][in])*
                                (rrans-NET.Outn[NET.Nlayer-1][in])*
                                PAT.Pond[ifile][ipat+jpat];
                        }
                        }
                        }
                }
        }

/* odd number of patterns: process the last one alone */
        for(ipat=ipat; ipat<npat; ipat++)
                {
                MLP_MatrixVector(NET.vWeights[1],
                                &(PAT.vRin[ifile][ipat*(nin+1)]),tmp,
                                nhid,nin+1);

                switch(NET.T_func[1][0])
                        {
                        case 2:
                        ptmp = &(tmp[0]);
                        /* only the first nhid entries of tmp are meaningful
                           here; tmp holds 2*nhid values, so this stays in
                           bounds */
                        MLP_vSigmoide(ptmp,2*nhid);
                        break;

                        case 1:
                        break;

                        case 0:
                        for(j=0; j<nhid; j++)
                                {
                                tmp[j] = 0;
                                }
                        break;
                        }

                for(in=0; in<nhid; in++) 
                        {
                        NET.Outn[1][in] = tmp[in];
                        }
                for(il=2; il<NET.Nlayer; il++)
                        {
                        ilm1 = il-1;
                        m = NET.Nneur[ilm1]%4;
                        for(in=0; in<NET.Nneur[il]; in++)
                                {
                                pweights = &(NET.Weights[il][in][0]);
                                a = *pweights;
                                pweights++;
                                if(m==0) goto L25;
                                for(j=1;j<=m;j++,pweights++) a += 
                                (*pweights)*NET.Outn[ilm1][j-1];
L25:
                                mp1 = m+1;
                                for(j=mp1; j<=NET.Nneur[ilm1]; 
                                        j+=4, pweights+=4)
                                        {
                                        a += 
                                *(pweights+3)*NET.Outn[ilm1][j+2]+
                                *(pweights+2)*NET.Outn[ilm1][j+1]+
                                *(pweights+1)*NET.Outn[ilm1][j]+
                                *(pweights  )*NET.Outn[ilm1][j-1];
                                        }
                                switch(NET.T_func[il][in])
                                {
                                case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                                        break;
                                case 1: NET.Outn[il][in] = a;
                                        break;
                                case 0: NET.Outn[il][in] = 0;
                                        break;
                                }
                                }
                        if(il == NET.Nlayer-1)
                        {
                        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++) 
                        {
                        rrans = (dbl) PAT.Rans[ifile][ipat][in];
                        err  += (rrans-NET.Outn[NET.Nlayer-1][in])*
                                (rrans-NET.Outn[NET.Nlayer-1][in])*
                                PAT.Pond[ifile][ipat];
                        }
                        }
                }
        }
        return(err);
}
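
/* Added note: both the paired and the leftover branches above
   accumulate the same weighted quadratic error over the patterns p of
   the requested file and the output neurons o:

        err = sum_p Pond[p] * sum_o ( Rans[p][o] - out[p][o] )^2
*/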


/***********************************************************/
/* MLP_Test                                                */
/*                                                         */
/* computes the MLP error function                         */
/* inputs:     int ifile = file number: 0=learn, 1=test    */
/*             int regul = 0: no regularisation term       */
/*                         1: regularisation term          */
/*                            (for hybrid learning method) */
/*                                                         */
/* return value (dbl) = error value                        */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Test(int ifile,int regul)
{
        dbl err, rrans;
        int in,jn,ipat,ipati;

        dbl *tmp;

        tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl)); 
        if(tmp == 0)    /* not enough memory */
        {
        printf("not enough memory in MLP_Test\n");
        err = 0;
        for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
                {
                if(ifile==0)
                        {
                        ipati = ExamplesIndex[ipat];
                        }
                else
                        {
                        ipati = ipat;
                        }
                MLP_Out_T(PAT.Rin[ifile][ipati]);
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++) 
                        {
                        rrans = (dbl) PAT.Rans[ifile][ipati][in];
                        err  += (rrans-NET.Outn[NET.Nlayer-1][in])*
                                (rrans-NET.Outn[NET.Nlayer-1][in])*
                                PAT.Pond[ifile][ipati];
                        }
                }

        if(regul>=1) 
                {
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
                        {
                        err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
                                NET.Weights[NET.Nlayer-1][in][jn];
                        }
                }
        free(tmp);
        return(err);
        }
        else    /* computation using matrix - matrix multiply */
        {
        err = MLP_Test_MM(ifile, tmp);
        if(regul>=1) 
                {
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
                        {
                        err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
                                NET.Weights[NET.Nlayer-1][in][jn];
                        }
                }
        free(tmp);
        return(err);
        }
}
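
/* Usage sketch (illustrative, not part of the original source):
   compare the learn- and test-sample errors.  With regul=1 the
   returned value also contains the hybrid-method penalty
   LEARN.Alambda * (sum of squared last-layer weights).            */
#ifdef MLP_GEN_EXAMPLES
static void ExampleTestErrors(void)
{
        dbl elearn = MLP_Test(0, 0);    /* learn sample, no penalty */
        dbl etest  = MLP_Test(1, 0);    /* test sample,  no penalty */
        printf("learn: %f  test: %f\n",
                (double) elearn, (double) etest);
}
#endif /* MLP_GEN_EXAMPLES */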


/***********************************************************/
/* MLP_Stochastic                                          */
/*                                                         */
/* one epoch of MLP stochastic training                    */
/* (optimized for speed)                                   */
/*                                                         */
/* Author: J.Schwindling   08-Jul-99                       */
/* Modified: J.Schwindling 20-Jul-99 remove unused cases   */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Stochastic()
{
        int ipat, ii, inm1;
        dbl err = 0;
        int il, in1, in, itest2;
        dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
        dbl eta, eps;
        register dbl a, b, dd, a1, a2, a3, a4;
        dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
        dbl ***weights;

        if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
        weights = NET.Weights;
/* shuffle patterns */
        ShuffleExamples(PAT.Npat[0],ExamplesIndex); 

/* reduce learning parameter */
        if(LEARN.Decay<1) EtaDecay();

        eta = -LEARN.eta;
        eps = LEARN.epsilon;

/* loop on the examples */
        for(ipat=0;ipat<PAT.Npat[0];ipat++)
                {
                ii = ExamplesIndex[ipat];
                pond = PAT.Pond[0][ii];

                MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)])); 

/* next lines are equivalent to DeDwSum */
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++) 
                        {
                        deriv = NET.Deriv1[NET.Nlayer-1][in];
                        a = (dbl) PAT.Rans[0][ii][in];
                        b = NET.Outn[NET.Nlayer-1][in]-a;
                        err  += b*b*pond;
                        NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
                        }

                for(il=NET.Nlayer-2; il>0; il--) 
                        {
                        dd = NET.Delta[il+1][0];
                        for(in=0; in<NET.Nneur[il]-3; in+=4) 
                                {
                                deriv1 = NET.Deriv1[il][in];
                                deriv2 = NET.Deriv1[il][in+1];
                                deriv3 = NET.Deriv1[il][in+2];
                                deriv4 = NET.Deriv1[il][in+3];
                                itest2 = (NET.Nneur[il+1]==1);
                                a1 = dd*weights[il+1][0][in+1];
                                a2 = dd*weights[il+1][0][in+2];
                                a3 = dd*weights[il+1][0][in+3];
                                a4 = dd*weights[il+1][0][in+4];
                                if(itest2) goto L1;
                                pdelta = &(NET.Delta[il+1][1]);
                                for(in1=1; in1<NET.Nneur[il+1];
                                        in1++, pdelta++) 
                                        {
                                        a1 += *pdelta * weights[il+1][in1][in+1];
                                        a2 += *pdelta * weights[il+1][in1][in+2];
                                        a3 += *pdelta * weights[il+1][in1][in+3];
                                        a4 += *pdelta * weights[il+1][in1][in+4];
                                        }
L1:                             NET.Delta[il][in] = a1*deriv1;
                                NET.Delta[il][in+1] = a2*deriv2;
                                NET.Delta[il][in+2] = a3*deriv3;
                                NET.Delta[il][in+3] = a4*deriv4;
                                }
                        /* leftover neurons (Nneur[il] not a multiple of 4) */
                        for(in=in; in<NET.Nneur[il]; in++) 
                                {
                                deriv = NET.Deriv1[il][in];
                                itest2 = (NET.Nneur[il+1]==1);
                                a = dd*weights[il+1][0][in+1];
                                if(itest2) goto L2;
                                pdelta = &(NET.Delta[il+1][1]);
                                for(in1=1; in1<NET.Nneur[il+1];
                                        in1++, pdelta++) 
                                        {
                                        a += *pdelta * 
                                        weights[il+1][in1][in+1];
                                        }
L2:                             NET.Delta[il][in] = a*deriv;
                                }

                        }                       /* end of loop on layers */


/* update the weights */
                if(eps==0)
                        {
                for(il=1; il<NET.Nlayer; il++)
                        {
                        inm1 = NET.Nneur[il-1];
                        for(in=0; in<NET.Nneur[il]-3; in+=4)
                                {
                                a1 = NET.Delta[il][in];
                                a2 = NET.Delta[il][in+1];
                                a3 = NET.Delta[il][in+2];
                                a4 = NET.Delta[il][in+3];
                                pout = &(NET.Outn[il-1][0]);
                                weights[il][in][0] += a1;
                                weights[il][in+1][0] += a2;
                                weights[il][in+2][0] += a3;
                                weights[il][in+3][0] += a4;
                                weights[il][in][1] += a1* (*pout);
                                weights[il][in+1][1] += a2* (*pout);
                                weights[il][in+2][1] += a3* (*pout);
                                weights[il][in+3][1] += a4* (*pout);
                                pout++;
                                pw1 = &(weights[il][in][2]);
                                pw2 = &(weights[il][in+1][2]);
                                pw3 = &(weights[il][in+2][2]);
                                pw4 = &(weights[il][in+3][2]);
                                for(in1=2; in1<=inm1; 
                                        ++in1, ++pout, ++pw1, ++pw2,
                                        ++pw3, ++pw4)
                                        {
                                        *pw1 += a1 * *pout;
                                        *pw2 += a2 * *pout;
                                        *pw3 += a3 * *pout;
                                        *pw4 += a4 * *pout;
                                        }
                                }
                        /* leftover neurons (Nneur[il] not a multiple of 4) */
                        for(in=in; in<NET.Nneur[il]; in++)
                                {
                                a1 = NET.Delta[il][in];
                                pout = &(NET.Outn[il-1][0]);
                                weights[il][in][0] += a1;
                                weights[il][in][1] += a1* (*pout);
                                pout++;
                                pw1 = &(weights[il][in][2]);
                                for(in1=2; in1<=inm1; 
                                        ++in1, ++pout, ++pw1)
                                        {
                                        *pw1 += a1 * *pout;
                                        }
                                }
                        }
                        }
                else
                        {
                for(il=1; il<NET.Nlayer; il++)
                        {
                        for(in=0; in<NET.Nneur[il]; in++)
                                {

                                a = NET.Delta[il][in];
                                LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
                                NET.Weights[il][in][0] += LEARN.Odw[il][in][0];

                                b = a*NET.Outn[il-1][0];
                                LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
                                NET.Weights[il][in][1] += LEARN.Odw[il][in][1];

                                for(in1=2; in1<=NET.Nneur[il-1]; in1++)
                                        {
                                        b = a*NET.Outn[il-1][in1-1];
                                        LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
                                        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
                                        }
                                }
                        }
                        }

                }                       /* end of loop on examples */
        return(err);
}
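
/* Added note: per example, the loops above implement the classic
   backpropagation update with momentum,

        Delta w(t) = -eta * dE/dw + epsilon * Delta w(t-1)

   (NET.Delta already carries the -eta factor).  The eps==0 branch is
   the same update with the LEARN.Odw momentum bookkeeping dropped
   for speed. */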


/***********************************************************/
/* MLP_Epoch                                               */
/*                                                         */
/* one epoch of MLP training                               */
/* inputs:     int iepoch = epoch number                   */
/*             dbl *alpmin = optimal alpha from Line Search*/
/*             int *Ntest = passed through to Line Search  */
/*                                                         */
/* return value (dbl) = error value on learning sample     */
/*                      BEFORE changing the weights        */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/* Modified: J.Schwindling 09-Apr-99                       */
/*                re-organize routine                      */
/*           J.Schwindling 13-Apr-99                       */
/*                remove Quickprop algorithm               */
/*                implement Polak-Ribiere conjugate grad.  */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Epoch(int iepoch, dbl *alpmin, int *Ntest)
{
        dbl err, ONorm, beta, prod, ddir;
/*      int *index;*/
        int Nweights, Nlinear, ipat, ierr;
        int nn;

        err = 0;
        *alpmin = 0.;

        Nweights = NET.Nweights;
        Nlinear = NET.Nneur[NET.Nlayer-2] + 1;

        if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
/* stochastic minimization */
        if(LEARN.Meth==1) 
                {

                err = MLP_Stochastic();

                }
        else
                {
                if(iepoch==1 && LEARN.Meth==7)
                        {
                        SetLambda(10000);
                        MLP_ResLin();
                        if(NET.Debug>=2) PrintWeights();
                        }

/* save previous gradient and reset current one */
                DeDwSaveZero();
                if(LEARN.Meth==16) 
                        {
                        ShuffleExamples(PAT.Npat[0],ExamplesIndex);
                        nn = PAT.Npat[0];
                        PAT.Npat[0] = nn/10;
                        for(ipat=0;ipat<nn;ipat++)
                                {
                                ierr = MLP_Train(&ExamplesIndex[ipat],&err);
                                if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
                                }
                        }
                else
                        {
                        for(ipat=0;ipat<PAT.Npat[0];ipat++)
                                {
                                ierr = MLP_Train(&ipat,&err);
                                if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
                                }
                        }
                DeDwScale(PAT.Npat[0]);
                if(LEARN.Meth==2) StochStep();
                if(LEARN.Meth==3) 
                        {
                        SteepestDir();
                        if(LineSearch(alpmin,Ntest,err)==1) StochStep();
                        }

/* Conjugate Gradients Polak - Ribiere */
                if(LEARN.Meth==4) 
                        {
                        if((iepoch-1)%LEARN.Nreset==0) 
                                {
                                LEARN.Norm = DeDwNorm(); /* for next epoch */
                                SteepestDir();
                                }
                        else
                                {
                                ONorm = LEARN.Norm;
                                LEARN.Norm = DeDwNorm();
                                prod = DeDwProd();
                                beta = (LEARN.Norm-prod)/ONorm;
                                CGDir(beta);
                                }
                        if(LineSearch(alpmin,Ntest,err)==1) StochStep();
                        }

/* Conjugate Gradients Fletcher - Reeves */
                if(LEARN.Meth==5) 
                        {
                        if((iepoch-1)%LEARN.Nreset==0) 
                                {
                                LEARN.Norm = DeDwNorm(); /* for next epoch */
                                SteepestDir();
                                }
                        else
                                {
                                ONorm = LEARN.Norm;
                                LEARN.Norm = DeDwNorm();
                                beta = LEARN.Norm/ONorm;
                                CGDir(beta);
                                }
                        if(LineSearch(alpmin,Ntest,err)==1) StochStep();
                        }
                if(LEARN.Meth==6)
                        {
                        if((iepoch-1)%LEARN.Nreset==0)
                                {
                                SteepestDir();
                                InitBFGSH(Nweights);
                                }
                        else
                                {
                                GetGammaDelta();
                                ierr = GetBFGSH(Nweights);
                                if(ierr)
                                        {
                                        SteepestDir();
                                        InitBFGSH(Nweights);
                                        }
                                else
                                        {
                                        BFGSdir(Nweights);
                                        }
                                }
                        ddir = DerivDir();
                        if(ddir>0)
                                {
                                SteepestDir();
                                InitBFGSH(Nweights);
                                ddir = DerivDir();
                                }
                        if(LineSearch(alpmin,Ntest,err)==1) 
                                {
                                InitBFGSH(Nweights);
                                SteepestDir();
                                if(LineSearch(alpmin,Ntest,err)==1) 
                                        {
                                        printf("Line search fail \n");
                                        }
                                }
                        }
                if(LEARN.Meth==7)
                        {
                        if((iepoch-1)%LEARN.Nreset==0)
                                {
                                SteepestDir();
                                InitBFGSH(Nweights-Nlinear);
                                }
                        else
                                {
                        if(NET.Debug>=5) printf("Before GetGammaDelta \n");
                                GetGammaDelta();
                        if(NET.Debug>=5) printf("After GetGammaDelta \n");
                                ierr = GetBFGSH(Nweights-Nlinear);
                        if(NET.Debug>=5) printf("After GetBFGSH \n");
                                if(ierr)
                                        {
                                        SteepestDir();
                                        InitBFGSH(Nweights-Nlinear);
                                        }
                                else
                                        {
                                        BFGSdir(Nweights-Nlinear);
                                        }
                        if(NET.Debug>=5) printf("After BFGSdir \n");
                                }
                        SetLambda(10000);
                        if(LineSearchHyb(alpmin,Ntest)==1)
                                {
                                InitBFGSH(Nweights-Nlinear);
                                SteepestDir();
                                if(LineSearchHyb(alpmin,Ntest)==1) 
                                        {
                                        printf("Line search fail \n");
                                        }
                                }
                        }
                }

        if(NET.Debug>=5) printf(" End MLP_Epoch\n");
        return(err);
}
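
/* Usage sketch (illustrative only; the real driver lives elsewhere in
   the trainer).  LEARN.Meth selects the branch dispatched above:
   1 stochastic, 2 fixed stochastic step on the averaged gradient,
   3 steepest descent + line search, 4 Polak-Ribiere CG,
   5 Fletcher-Reeves CG, 6 BFGS, 7 hybrid BFGS + linear fit; 16
   evaluates the gradient on a 1/10 subsample of the shuffled
   examples. */
#ifdef MLP_GEN_EXAMPLES
static void ExampleEpochLoop(int Nepoch)
{
        int iepoch, Ntest = 0;
        dbl alpmin, err;

        for(iepoch=1; iepoch<=Nepoch; iepoch++)
                {
                err = MLP_Epoch(iepoch, &alpmin, &Ntest);
                printf("epoch %d: err = %f, alpha = %f\n",
                        iepoch, (double) err, (double) alpmin);
                }
}
#endif /* MLP_GEN_EXAMPLES */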


/***********************************************************/
/* MLP_Train                                               */
/*                                                         */
/* Train Network: compute output, update DeDw              */
/* inputs:     int *ipat = pointer to pattern number       */
/* input/output:    dbl *err = current error               */
/*                                                         */
/* return value (int) = error value: 1 wrong pattern number*/
/*                                   2 *ipat < 0           */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int MLP_Train(int *ipat, dbl *err)
{
        int in;

/*      if(*ipat>=PAT.Npat[0]) return(1);*/
        if(*ipat<0) return(2);

/*      MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
        MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++) 
                {
                *err  += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
                        *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
                        PAT.Pond[0][*ipat];
                }
        DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
        return(0); 
}


/***********************************************************/
/* StochStepHyb                                            */
/*                                                         */
/* Update the weights according to stochastic minimization */
/* formula (for hybrid methods)                            */
/*                                                         */
/* return value (int) = 0                                  */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int StochStepHyb()
{
        int il, in1, in;
        dbl eta, eps;

        eta = LEARN.eta;
        eps = LEARN.epsilon;
    for(il=NET.Nlayer-2; il>0; il--) {

        for(in=0; in<NET.Nneur[il]; in++) {

                /* compute delta weights */
                for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
                        LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
                         + eps * LEARN.Odw[il][in][in1];
                }

                /* update weights */
                for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
                        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
                }
        }
        }
    MLP_ResLin();
    return(0); 
}


/***********************************************************/
/* StochStep                                               */
/*                                                         */
/* Update the weights according to stochastic minimization */
/* formula                                                 */
/*                                                         */
/* return value (int) = 0                                  */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int StochStep()
{
        int il, in1, in;
        dbl eta, eps, epseta;

        eta = -LEARN.eta;
        eps = LEARN.epsilon;
        epseta = eps/eta;
    for(il=NET.Nlayer-1; il>0; il--) {
                for(in1=0; in1<=NET.Nneur[il-1]; in1++) {

                /* compute and apply delta weights */
        for(in=0; in<NET.Nneur[il]; in++) {
                        LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
                         + epseta * LEARN.Odw[il][in][in1]);
                        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
                }

        }
        }

    return(0); 
}
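
/* Added note: with eta = -LEARN.eta and epseta = epsilon/eta, the
   update above expands to

        Odw = eta*(DeDw + epseta*Odw) = -LEARN.eta*DeDw + epsilon*Odw

   i.e. exactly the gradient-plus-momentum step written out explicitly
   in StochStepHyb(). */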


/***********************************************************/
/* DeDwNorm                                                */
/*                                                         */
/* computes the squared norm of the gradient               */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DeDwNorm()
{
        int il,in,jn;
        dbl dd=0;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                dd += LEARN.DeDw[il][in][jn]*
                                        LEARN.DeDw[il][in][jn];
        return(dd);
}


/***********************************************************/
/* DeDwProd                                                */
/*                                                         */
/* scalar product between new gradient and previous one    */
/* (used in the Polak-Ribiere Conjugate Gradient method)   */
/*                                                         */
/* Author: J.Schwindling   26-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DeDwProd()
{
        int il,in,jn;
        dbl dd=0;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                dd += LEARN.DeDw[il][in][jn]*
                                        LEARN.ODeDw[il][in][jn];
        return(dd);
}
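
/* Added note: MLP_Epoch builds the Polak-Ribiere coefficient from the
   two routines above as

        beta = (||g(t+1)||^2 - g(t+1).g(t)) / ||g(t)||^2
             =  g(t+1).(g(t+1)-g(t))        / ||g(t)||^2

   with DeDwNorm() giving the squared norms and DeDwProd() the cross
   product. */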


/***********************************************************/
/* DeDwZero                                                */
/*                                                         */
/* resets to 0 the gradient (should be done before DeDwSum)*/
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwZero()
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                LEARN.DeDw[il][in][jn] = 0;
}


/***********************************************************/
/* DeDwScale                                               */
/*                                                         */
/* divides the gradient by the number of examples          */
/* inputs:     int Nexamples = number of examples          */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwScale(int Nexamples)
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
}


/***********************************************************/
/* DeDwSave                                                */
/*                                                         */
/* copies the gradient DeDw into ODeDw                     */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwSave()
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
}


/***********************************************************/
/* DeDwSaveZero                                            */
/*                                                         */
/* copies the gradient DeDw into ODeDw                     */
/* resets DeDw to 0                                        */
/*                                                         */
/* Author: J.Schwindling   23-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwSaveZero()
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                {
                                LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
                                LEARN.DeDw[il][in][jn] = 0;
                                }
}


/***********************************************************/
/* DeDwSum                                                 */
/*                                                         */
/* adds to the total gradient the gradient in the current  */
/* example                                                 */
/* inputs:     type_pat *ans = desired answers             */
/*             dbl *out = network outputs                  */
/*             int ipat = pattern number                   */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/* Modified: B.Mansoulie   23-Apr-99                       */
/*           faster and still correct way to compute the   */
/*           sigmoid derivative                            */
/***********************************************************/

/* extern "C"Dllexport */int DeDwSum(type_pat *ans, dbl *out, int ipat)
{
        int il, in1, in, ii;
/*      dbl err[NMAX][4]; */
        dbl deriv;
        dbl *pout, *pdedw, *pdelta;
        register dbl a, b;
/*      char buf[50];*/

/* output layer */
        b =  (dbl) PAT.Pond[0][ipat];
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++) 
        {
                deriv = NET.Deriv1[NET.Nlayer-1][in];
                NET.Delta[NET.Nlayer-1][in] = 
                        (out[in] - (dbl) ans[in])*deriv*b;
        }

    for(il=NET.Nlayer-2; il>0; il--) 
        {

        for(in=0; in<NET.Nneur[il]; in++) 
        {
                deriv = NET.Deriv1[il][in];
                a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
                pdelta = &(NET.Delta[il+1][1]);
                for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++) 
                {
                        a += *pdelta * NET.Weights[il+1][in1][in+1];
                }
                NET.Delta[il][in] = a * deriv;
        }
        }

        for(il=1; il<NET.Nlayer; il++) 
                {
                ii = NET.Nneur[il-1];
                for(in=0; in<NET.Nneur[il]; in++) 
                        {
                        a = NET.Delta[il][in];
                        LEARN.DeDw[il][in][0] += a;
                        LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
                        pout = &(NET.Outn[il-1][1]);
                        pdedw = &(LEARN.DeDw[il][in][2]);
                        for(in1=1; in1<ii; ++in1, ++pout, ++pdedw) 
                                {
                                (*pdedw) += a * (*pout);
                                }
                        }
                }

    return(0); 
}
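
/* Added note: DeDwSum is one pattern's worth of backpropagation.  With
   f' = NET.Deriv1 (filled during the forward pass) it computes

        delta[out]          = (out - ans) * f' * Pond[ipat]
        delta[hidden]       = f' * sum_k delta[k] * w[k]
        dE/dw[il][in][jn]  += delta[il][in] * Outn[il-1][jn-1]   (jn >= 1)
        dE/dw[il][in][0]   += delta[il][in]                      (bias)
*/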
01179 
01180 
01181 /***********************************************************/
01182 /* SetTransFunc                                            */
01183 /*                                                         */
01184 /* to set the transfert function of a neuron               */
01185 /* inputs:     int layer = layer number (1 -> Nlayer)      */
01186 /*             int neuron = neuron number (1 -> Nneur)     */
01187 /*             int func = transfert function:              */
01188 /*                              0: y=0                     */   
01189 /*                              1: y=x                     */   
01190 /*                              2: y=1/(1+exp(-x))         */   
01191 /*                              3: y=tanh(x)               */   
01192 /*                              4: y=delta*x+1/(1+exp(-x)) */   
01193 /*                              5: y=exp(-x**2)            */   
01194 /*                                                         */
01195 /* return value (int) = error value:                       */
01196 /*                              0: no error                */ 
01197 /*                              1: layer > NLMAX           */ 
01198 /*                              2: neuron > NMAX           */ 
01199 /*                                                         */
01200 /* Author: J.Schwindling   02-Apr-99                       */
01201 /***********************************************************/   
01202 
01203 /* extern "C"Dllexport */int SetTransFunc(int layer, int neuron, 
01204                                           int func)
01205 {    
01206     if(layer>NLMAX) return(1);
01207 /*    if(neuron>NMAX) return(2);*/
01208          
01209     NET.T_func[layer-1][neuron-1] = func;
01210 
01211     return(0); 
01212 }                 
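/* Illustrative usage sketch (assumes a network has already been defined,
   e.g. via MLP_SetNet): force tanh units in the first hidden layer.
        int in;
        for(in=1; in<=NET.Nneur[1]; in++)
                if(SetTransFunc(2, in, 3) != 0)
                        printf("SetTransFunc: invalid layer\n");
*/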
01213 
01214 
01215 /***********************************************************/
01216 /* SetDefaultFuncs                                         */
01217 /*                                                         */
01218 /* - sets the default transfer functions to sigmoid for    */
01219 /* hidden layers and linear for output layer               */
01220 /* - sets temperatures to 1                                */
01221 /*                                                         */
01222 /*                                                         */
01223 /* Author: J.Schwindling   08-Apr-99                       */
01224 /***********************************************************/   
01225 
01226 void SetDefaultFuncs()
01227 {
01228     int il,in;
01229     for(il=0; il<NET.Nlayer; il++) {
01230        for(in=0; in<NET.Nneur[il]; in++) {
01231           NET.T_func[il][in] = 2; 
01232           if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
01233          }
01234       }
01235 
01236 }
01237 
01238 
01239 /***********************************************************/
01240 /* SteepestDir                                             */
01241 /*                                                         */
01242 /* sets the search direction to steepest descent           */
01243 /*                                                         */
01244 /* Author: J.Schwindling   02-Apr-99                       */
01245 /***********************************************************/   
01246 
01247 /* extern "C"Dllexport */void SteepestDir()
01248 {
01249         int il,in,jn;
01250         for(il=1; il<NET.Nlayer; il++)
01251                 for(in=0; in<NET.Nneur[il]; in++)
01252                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01253                                 dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
01254 }
01255 
01256 
01257 /***********************************************************/
01258 /* CGDir                                                   */
01259 /*                                                         */
01260 /* sets the search direction to conjugate gradient dir     */
01261 /* inputs:     dbl beta : d(t+1) = -g(t+1) + beta d(t)     */
01262 /*                        beta should be:                  */
01263 /*    ||g(t+1)||^2 / ||g(t)||^2 (Fletcher-Reeves)          */
01264 /*    g(t+1) (g(t+1)-g(t)) / ||g(t)||^2 (Polak-Ribiere)    */
01265 /*                                                         */
01266 /* Author: J.Schwindling   02-Apr-99                       */
01267 /***********************************************************/   
01268 
01269 /* extern "C"Dllexport */void CGDir(dbl beta)
01270 {
01271         int il,in,jn;
01272         for(il=1; il<NET.Nlayer; il++)
01273                 for(in=0; in<NET.Nneur[il]; in++)
01274                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01275                                 {
01276                                 dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
01277                                         beta*dir[il][in][jn];
01278                                 }
01279 }
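/* Illustrative sketch (not the training loop itself): obtain beta in the
   Polak-Ribiere form from the current and previous gradients, which are
   kept in LEARN.DeDw and LEARN.ODeDw, then set the direction:
        dbl num = 0, den = 0;
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
            for(in=0; in<NET.Nneur[il]; in++)
                for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                        {
                        num += LEARN.DeDw[il][in][jn] *
                              (LEARN.DeDw[il][in][jn]-LEARN.ODeDw[il][in][jn]);
                        den += LEARN.ODeDw[il][in][jn]*LEARN.ODeDw[il][in][jn];
                        }
        if(den > 0) CGDir(num/den); else SteepestDir();
*/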
01280 
01281 
01282 /***********************************************************/
01283 /* DerivDir                                                */
01284 /*                                                         */
01285 /* scalar product between gradient and direction           */
01286 /*     = derivative along direction                        */
01287 /*                                                         */
01288 /* Author: J.Schwindling   01-Jul-99                       */
01289 /***********************************************************/   
01290 
01291 /* extern "C"Dllexport */dbl DerivDir()
01292 {
01293         int il,in,jn;
01294         dbl ddir = 0;
01295         
01296         for(il=1; il<NET.Nlayer; il++)
01297                 for(in=0; in<NET.Nneur[il]; in++)
01298                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01299                                 {
01300                                 ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
01301                                 }
01302         return(ddir);                   
01303 }
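/* Since DerivDir returns the scalar product g.dir, a negative return value
   means that dir is a descent direction for the error.                    */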
01304 
01305 
01306 /***********************************************************/
01307 /* GetGammaDelta                                           */
01308 /*                                                         */
01309 /* sets the vectors Gamma (=g(t+1)-g(t))                   */
01310 /*              and delta (=w(t+1)-w(t))                   */
01311 /* (for BFGS and Hybrid learning methods)                  */
01312 /*                                                         */
01313 /* Author: J.Schwindling   02-Apr-99                       */
01314 /***********************************************************/   
01315 
01316 /* extern "C"Dllexport */void GetGammaDelta()
01317 {
01318         int i=0;
01319         int il,in,jn;
01320         for(il=1; il<NET.Nlayer; il++)
01321                 for(in=0; in<NET.Nneur[il]; in++)
01322                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01323                                 {
01324                                 Gamma[i] = LEARN.DeDw[il][in][jn]-
01325                                         LEARN.ODeDw[il][in][jn];
01326                                 delta[i] = LEARN.Odw[il][in][jn];
01327                                 i++;
01328                                 }
01329 }
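/* Note that delta is read from LEARN.Odw, which the line-search routines
   fill with the last weight change Weights - w0, i.e. w(t+1)-w(t).        */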
01330 
01331 
01332 /***********************************************************/
01333 /* BFGSdir                                                 */
01334 /*                                                         */
01335 /* sets the search direction to BFGS direction from the    */
01336 /*                              BFGS matrix                */
01337 /*                                                         */
01338 /* inputs:     int Nweights = number of weights            */
01339 /*                                                         */
01340 /* Author: J.Schwindling   02-Apr-99                       */
01341 /***********************************************************/   
01342 
01343 /* extern "C"Dllexport */void BFGSdir(int Nweights)
01344 {
01345         dbl *g, *s;
01346         int kk=0;
01347         int il,i,j,in,jn;
01348         
01349         g = (dbl*) malloc(NET.Nweights*sizeof(dbl));
01350         s = (dbl*) malloc(Nweights*sizeof(dbl));
01351         
01352         for(il=1; kk<Nweights; il++)
01353                 for(in=0; in<NET.Nneur[il]; in++)
01354                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01355                                 {
01356                                 g[kk] = LEARN.DeDw[il][in][jn];
01357                                 kk++;
01358                                 }
01359         for(i=0; i<Nweights; i++)
01360                 {
01361                 s[i] = 0;
01362                 for(j=0; j<Nweights; j++)
01363                         {
01364                         s[i] += BFGSH[i][j] * g[j];
01365                         }
01366                 }
01367         
01368         kk = 0;
01369         for(il=1; kk<Nweights; il++)
01370                 for(in=0; in<NET.Nneur[il]; in++)
01371                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01372                                 {
01373                                 dir[il][in][jn] = -s[kk];
01374                                 kk++;
01375                                 }
01376         free(g);
01377         free(s);
01378 }
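/* In vector form the loops above compute dir = -BFGSH * g: the
   quasi-Newton direction obtained by applying the current approximation
   BFGSH of the inverse Hessian to the flattened gradient vector.          */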
01379 
01380 
01381 /***********************************************************/
01382 /* InitBFGSH                                               */
01383 /*                                                         */
01384 /* initializes BFGS matrix to identity                     */
01385 /*                                                         */
01386 /* inputs:     int Nweights = number of weights            */
01387 /*                                                         */
01388 /* Author: J.Schwindling   02-Apr-99                       */
01389 /***********************************************************/   
01390 
01391 /* extern "C"Dllexport */void InitBFGSH(int Nweights)
01392 {
01393         int i,j;
01394         for(i=0; i<Nweights; i++)
01395                 for(j=0; j<Nweights; j++)
01396                         {
01397                         BFGSH[i][j] = 0;
01398                         if(i==j) BFGSH[i][j] = 1;
01399                         }
01400 }
01401 
01402 
01403 /***********************************************************/
01404 /* GetBFGSH                                                */
01405 /*                                                         */
01406 /* sets the BFGS matrix                                    */
01407 /*                                                         */
01408 /* inputs:     int Nweights = number of weights            */
01409 /*                                                         */
01410 /* return value (int) = 0 if no problem                    */
01411 /*                      1 if deltaTgamma = 0 -> switch to  */ 
01412 /*                              steepest dir               */
01413 /*                                                         */
01414 /* Author: J.Schwindling   02-Apr-99                       */
01415 /* Modified: J.Schwindling 04-May-99                       */
01416 /*           computations scale as Nw^2, matrices removed  */
01417 /* Modified: J.Schwindling 11-Jun-99                       */
01418 /*           return value                                  */
01419 /***********************************************************/   
01420 
01421 /* extern "C"Dllexport */int GetBFGSH(int Nweights)
01422 {
01423         typedef double dble;
01424         dble deltaTgamma=0;
01425         dble factor=0; 
01426         dble *Hgamma;
01427         dble *tmp;
01428         register dble a, b;
01429         int i,j;
01430         
01431         Hgamma = (dble *) malloc(Nweights*sizeof(dble));
01432         tmp = (dble *) malloc(Nweights*sizeof(dble));
01433         
01434         for(i=0; i<Nweights; i++)
01435                 {
01436                 deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
01437                 a = 0;
01438                 b = 0;
01439                 for(j=0; j<Nweights; j++)
01440                         {
01441                         a += (dble) BFGSH[i][j] * (dble) Gamma[j];
01442                         b += (dble) Gamma[j] * (dble) BFGSH[j][i];
01443                         }
01444                 Hgamma[i] = a;
01445                 tmp[i] = b;     
01446                 factor += (dble) Gamma[i]*Hgamma[i];
01447                 }
01448         if(deltaTgamma == 0) { free(Hgamma); free(tmp); return 1; }
01449         a = 1 / deltaTgamma;    
01450         factor = 1 + factor*a;
01451         
01452         for(i=0; i<Nweights; i++)
01453                 {
01454                 b = (dble) delta[i];
01455                 for(j=0; j<Nweights; j++)
01456                         BFGSH[i][j] += (dbl) (factor*b* (dble) 
01457                         delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;        
01458                 }       
01459         free(Hgamma);
01460         free(tmp);
01461         return 0;
01462 }
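/* For reference, with gamma = g(t+1)-g(t) and delta = w(t+1)-w(t), the
   loops above apply the standard BFGS update of the inverse Hessian
   approximation H:
       H <- H + [ (1 + gamma^T H gamma / delta^T gamma) * delta delta^T
              - delta (gamma^T H) - (H gamma) delta^T ] / (delta^T gamma)
   where Hgamma = H gamma, tmp = gamma^T H, and factor holds
   1 + gamma^T H gamma / delta^T gamma.                                    */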
01463 
01464 
01465 /***********************************************************/
01466 /* LineSearch                                              */
01467 /*                                                         */
01468 /* search along the line defined by dir                    */
01469 /*                                                         */
01470 /* outputs:     dbl *alpmin = optimal step length          */
01471 /*                                                         */
01472 /* Author: B.Mansoulie     01-Jul-98                       */
01473 /***********************************************************/   
01474 
01475 /* extern "C"Dllexport */
01476 int LineSearch(dbl *alpmin, int *Ntest, dbl Err0)
01477 {
01478         dbl ***w0;
01479         dbl alpha1, alpha2, alpha3;
01480         dbl err1, err2, err3;
01481         dbl tau;
01482         int icount, il, in, jn;
01483         
01484         tau=LEARN.Tau;
01485 
01486 /* store weights before line search */  
01487 
01488         *Ntest = 0;
01489         w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
01490         for(il=1; il<NET.Nlayer; il++)
01491                 {
01492                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01493                 for(in=0; in<NET.Nneur[il]; in++)
01494                         {
01495                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01496                                 sizeof(dbl));
01497                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01498                                 {
01499                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01500                                 }
01501                         }
01502                 }
01503         
01504 /* compute error(w0) */
01505 
01506 /*      err1 = MLP_Test(0,0); 
01507         (*Ntest) ++;*/
01508         err1 = Err0;
01509         
01510         if(NET.Debug>=4) printf("err at start= %f\n",err1);   
01511         
01512         *alpmin = 0;
01513         alpha1 = 0;
01514 /*      alpha2 = 0.05;
01515         if(LastAlpha != 0) alpha2 = LastAlpha;*/
01516         alpha2 = LastAlpha;
01517         if(alpha2 < 0.01) alpha2 = 0.01;
01518         if(alpha2 > 2.0) alpha2 = 2.0;
01519         MLP_Line(w0,alpha2);
01520         err2 = MLP_Test(0,0);
01521         (*Ntest) ++;
01522         if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);   
01523         
01524         alpha3 = alpha2;
01525         err3 = err2;
01526         
01527 /* try to find a triplet (alpha1, alpha2, alpha3) such that 
01528    Error(alpha1)>Error(alpha2)<Error(alpha3)                 */
01529    
01530         if(err1>err2) 
01531                 {
01532                 for(icount=1;icount<=100;icount++)
01533                         {
01534                         alpha3 = alpha3*tau;
01535                         MLP_Line(w0,alpha3);
01536                         err3 =MLP_Test(0,0);
01537         if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);   
01538                         (*Ntest) ++;
01539                         if(err3>err2) break;
01540                         alpha1 = alpha2;
01541                         err1 = err2;
01542                         alpha2 = alpha3;
01543                         err2 = err3;
01544                         }
01545                 if(icount>=100)                 /* line search fails */
01546                         {
01547                         MLP_Line(w0,0);         /* reset weights */
01548                         free(w0);
01549                         return(1);
01550                         }
01551                 }
01552         else
01553                 {
01554                 for(icount=1;icount<=100;icount++)
01555                         {
01556                         alpha2 = alpha2/tau;
01557                         MLP_Line(w0,alpha2);
01558                         err2 = MLP_Test(0,0);
01559         if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);   
01560                         (*Ntest) ++;
01561                         if(err1>err2) break;
01562                         alpha3 = alpha2;
01563                         err3 = err2;
01564                         }
01565                 if(icount>=100)                 /* line search fails */
01566                         {
01567                         MLP_Line(w0,0);         /* reset weights */
01568                         free(w0);
01569                         LastAlpha = 0.05;       /* try to be safe */
01570                         return(1);
01571                         }
01572                 }
01573 
01574 /* find bottom of parabola */
01575         
01576         *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
01577                 -(err2-err1)/(alpha2-alpha1)));
01578         if(*alpmin>10000) *alpmin=10000;
01579 
01580 /* set the weights */
01581         MLP_Line(w0,*alpmin);
01582         LastAlpha = *alpmin;
01583         
01584 /* store weight changes */
01585         for(il=1; il<NET.Nlayer; il++)
01586                 for(in=0; in<NET.Nneur[il]; in++)
01587                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01588                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01589                                 - w0[il][in][jn];
01590 
01591         for(il=1; il<NET.Nlayer; il++)
01592                 for(in=0; in<NET.Nneur[il]; in++)
01593                         free(w0[il][in]);                       
01594         for(il=1; il<NET.Nlayer; il++)
01595                 free(w0[il]);   
01596         free(w0);
01597         
01598         return(0);
01599 }
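/* The "find bottom of parabola" step is exact quadratic interpolation
   through the bracketing triplet: for any parabola passing through
   (alpha1,err1), (alpha2,err2), (alpha3,err3), the minimum lies at
       alpmin = 0.5 * ( alpha1 + alpha3
                - (err3-err1) / ( (err3-err2)/(alpha3-alpha2)
                                - (err2-err1)/(alpha2-alpha1) ) )
   which is the expression used above.                                     */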
01600 
01601 
01602 /***********************************************************/
01603 /* DecreaseSearch                                          */
01604 /*                                                         */
01605 /* search along the line defined by dir for a point where  */
01606 /* error is decreased (faster than full line search)       */
01607 /*                                                         */
01608 /* outputs:     dbl *alpmin = step length                  */
01609 /*                                                         */
01610 /* Author: J.Schwindling     11-May-99                     */
01611 /***********************************************************/   
01612 
01613 /* extern "C"Dllexport */
01614 int DecreaseSearch(dbl *alpmin, int *Ntest, dbl Err0)
01615 {
01616         dbl ***w0;
01617         dbl alpha2;
01618         dbl err1, err2;
01619         dbl tau;
01620         int icount, il, in, jn;
01621         
01622         tau=LEARN.Tau;
01623 
01624 /* store weights before line search */  
01625 
01626         *Ntest = 0;
01627         w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
01628         for(il=1; il<NET.Nlayer; il++)
01629                 {
01630                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01631                 for(in=0; in<NET.Nneur[il]; in++)
01632                         {
01633                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01634                                 sizeof(dbl));
01635                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01636                                 {
01637                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01638                                 }
01639                         }
01640                 }
01641         
01642 /* compute error(w0) */
01643 
01644 /*      err1 = MLP_Test(0,0); 
01645         (*Ntest) ++;*/
01646         err1 = Err0;
01647         
01648         if(NET.Debug>=4) printf("err at start= %f\n",err1);   
01649         
01650         *alpmin = 0;
01651         alpha2 = 0.05;
01652         MLP_Line(w0,alpha2);
01653         err2 = MLP_Test(0,0);
01654         (*Ntest) ++;
01655         
01656         if(err2<err1) 
01657                 {
01658                 *alpmin = alpha2;
01659                 }
01660         else
01661                 {       
01662         
01663    
01664                 for(icount=1;icount<=100;icount++)
01665                         {
01666                         alpha2 = alpha2/tau;
01667                         MLP_Line(w0,alpha2);
01668                         err2 = MLP_Test(0,0);
01669                         (*Ntest) ++;
01670                         if(err1>err2) break;
01671                         }
01672                 if(icount>=100)                 /* line search fails */
01673                         {
01674                         MLP_Line(w0,0);         /* reset weights */
01675                         free(w0);
01676                         return(1);
01677                         }
01678                 *alpmin = alpha2;       
01679                 }
01680 
01681 /* set the weights */
01682         MLP_Line(w0,*alpmin);
01683         
01684 /* store weight changes */
01685         for(il=1; il<NET.Nlayer; il++)
01686                 for(in=0; in<NET.Nneur[il]; in++)
01687                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01688                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01689                                 - w0[il][in][jn];
01690 
01691         for(il=1; il<NET.Nlayer; il++)
01692                 for(in=0; in<NET.Nneur[il]; in++)
01693                         free(w0[il][in]);                       
01694         for(il=1; il<NET.Nlayer; il++)
01695                 free(w0[il]);   
01696         free(w0);
01697         
01698         return(0);
01699 }
01700 
01701 
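/***********************************************************/
/* FixedStep                                               */
/*                                                         */
/* moves the weights by a fixed step along direction dir   */
/* (no search) and stores the weight changes in LEARN.Odw  */
/*                                                         */
/* inputs:     dbl alpha = step length                     */
/***********************************************************/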
01702 /* extern "C"Dllexport */int FixedStep(dbl alpha)
01703 {
01704         dbl ***w0;
01705         int il, in, jn;
01706         
01707         w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
01708         for(il=1; il<NET.Nlayer; il++)
01709                 {
01710                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01711                 for(in=0; in<NET.Nneur[il]; in++)
01712                         {
01713                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01714                                 sizeof(dbl));
01715                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01716                                 {
01717                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01718                                 }
01719                         }
01720                 }
01721         
01722 
01723 /* set the weights */
01724         MLP_Line(w0,alpha);
01725         
01726 /* store weight changes */
01727         for(il=1; il<NET.Nlayer; il++)
01728                 for(in=0; in<NET.Nneur[il]; in++)
01729                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01730                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01731                                 - w0[il][in][jn];
01732 
01733         for(il=1; il<NET.Nlayer; il++)
01734                 for(in=0; in<NET.Nneur[il]; in++)
01735                         free(w0[il][in]);                       
01736         for(il=1; il<NET.Nlayer; il++)
01737                 free(w0[il]);   
01738         free(w0);
01739         
01740         return(0);
01741 }
01742 
01743 /***********************************************************/
01744 /* MLP_Line                                                */
01745 /*                                                         */
01746 /* sets the weights to a point along a line                */
01747 /*                                                         */
01748 /* inputs:     dbl ***w0 = initial point                   */
01749 /*             dbl alpha = step length                     */ 
01750 /*                                                         */
01751 /* Author: B.Mansoulie     01-Jul-98                       */
01752 /***********************************************************/   
01753 
01754 /* extern "C"Dllexport */
01755 void MLP_Line(dbl ***w0, dbl alpha)
01756 {
01757         register int il,in,jn;
01758         
01759         for(il=1; il<NET.Nlayer; il++)
01760                 for(in=0; in<NET.Nneur[il]; in++)
01761                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01762                                 NET.Weights[il][in][jn] = w0[il][in][jn]+
01763                                 alpha*dir[il][in][jn];
01764                                 
01765 }
01766 
01767 
01768 /***********************************************************/
01769 /* LineSearchHyb                                           */
01770 /*                                                         */
01771 /* search along the line defined by dir                    */
01772 /*                                                         */
01773 /* outputs:     dbl *alpmin = optimal step length          */
01774 /*                                                         */
01775 /* Author: B.Mansoulie     01-Jul-98                       */
01776 /***********************************************************/   
01777 
01778 /* extern "C"Dllexport */
01779 int LineSearchHyb(dbl *alpmin, int *Ntest)
01780 {
01781         dbl ***w0;
01782         dbl alpha1, alpha2, alpha3;
01783         dbl err1, err2, err3;
01784         dbl tau;
01785         int icount, il, in, jn;
01786 
01787 /*      char buf [50];
01788   sprintf (buf,"entering linesearchhyb\n");
01789   MessageBoxA (0,buf,"in FreePatterns",MB_OK);*/
01790         
01791         if(NET.Debug>=4){
01792                 printf(" entry LineSearchHyb \n");
01793                 }
01794         tau=LEARN.Tau;
01795 
01796 /* store weights before line search */  
01797 
01798         *Ntest = 0;
01799         w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
01800         for(il=1; il<NET.Nlayer-1; il++)
01801                 {
01802                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01803                 for(in=0; in<NET.Nneur[il]; in++)
01804                         {
01805                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01806                                 sizeof(dbl));
01807                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01808                                 {
01809                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01810                                 }
01811                         }
01812                 }
01813         
01814 /* compute error(w0) */
01815         err1 = MLP_Test(0,1);
01816         (*Ntest) ++; 
01817         if(NET.Debug>=4) printf("LineSearchHyb err at start= %f\n",err1);   
01818         
01819         *alpmin = 0;
01820         alpha1 = 0;
01821 /*      alpha2 = 0.05;
01822         if(LastAlpha != 0) alpha2 = LastAlpha;*/
01823         alpha2 = LastAlpha;
01824         if(alpha2 < 0.01) alpha2 = 0.01;
01825         if(alpha2 > 2.0) alpha2 = 2.0;
01826         MLP_LineHyb(w0,alpha2);
01827         err2 = MLP_Test(0,1);
01828         (*Ntest) ++; 
01829         
01830         alpha3 = alpha2;
01831         err3 = err2;
01832         
01833 /* try to find a triplet (alpha1, alpha2, alpha3) such that 
01834    Error(alpha1)>Error(alpha2)<Error(alpha3)                 */
01835    
01836         if(err1>err2) 
01837                 {
01838                 for(icount=1;icount<=100;icount++)
01839                         {
01840                         alpha3 = alpha3*tau;
01841                         MLP_LineHyb(w0,alpha3);
01842                         err3 = MLP_Test(0,1);
01843                         (*Ntest) ++; 
01844                         if(err3>err2) break;
01845                         alpha1 = alpha2;
01846                         err1 = err2;
01847                         alpha2 = alpha3;
01848                         err2 = err3;
01849                         }
01850                 if(icount>=100)                 /* line search fails */
01851                         {
01852                         MLP_LineHyb(w0,0);      /* reset weights */
01853                         free(w0);
01854                         return(1);
01855                         }
01856                 }
01857         else
01858                 {
01859                 for(icount=1;icount<=100;icount++)
01860                         {
01861                         alpha2 = alpha2/tau;
01862                         MLP_LineHyb(w0,alpha2);
01863                         err2 = MLP_Test(0,1);
01864                         (*Ntest) ++; 
01865                         if(err1>err2) break;
01866                         alpha3 = alpha2;
01867                         err3 = err2;
01868                         }
01869                 if(icount>=100)                 /* line search fails */
01870                         {
01871                         MLP_LineHyb(w0,0);      /* reset weights */
01872                         free(w0);
01873                         return(1);
01874                         }
01875                 }
01876 
01877 /* find bottom of parabola */
01878         
01879         *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
01880                 -(err2-err1)/(alpha2-alpha1)));
01881         if(*alpmin>10000) *alpmin=10000;
01882 
01883 /* set the weights */
01884         MLP_LineHyb(w0,*alpmin);
01885         LastAlpha = *alpmin;
01886         
01887 /* store weight changes */
01888         for(il=1; il<NET.Nlayer-1; il++)
01889                 for(in=0; in<NET.Nneur[il]; in++)
01890                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01891                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01892                                 - w0[il][in][jn];
01893 
01894         for(il=1; il<NET.Nlayer-1; il++)
01895                 for(in=0; in<NET.Nneur[il]; in++)
01896                         free(w0[il][in]);                       
01897         for(il=1; il<NET.Nlayer-1; il++)
01898                 free(w0[il]);   
01899         free(w0);
01900         if(NET.Debug>=4){
01901                 printf(" exit LineSearchHyb \n");
01902                 }
01903 
01904         return(0);
01905 }
01906 
01907 
01908 /***********************************************************/
01909 /* MLP_LineHyb                                             */
01910 /*                                                         */
01911 /* sets the weights to a point along a line                */
01912 /*                     (for hybrid methods)                */
01913 /*                                                         */
01914 /* inputs:     dbl ***w0 = initial point                   */
01915 /*             dbl alpha = step length                     */ 
01916 /*                                                         */
01917 /* Author: B.Mansoulie     01-Jul-98                       */
01918 /***********************************************************/   
01919 
01920 /* extern "C"Dllexport */
01921 void MLP_LineHyb(dbl ***w0, dbl alpha)
01922 {
01923         int il,in,jn;
01924         for(il=1; il<NET.Nlayer-1; il++)
01925                 for(in=0; in<NET.Nneur[il]; in++)
01926                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01927                         {
01928                                 NET.Weights[il][in][jn] = w0[il][in][jn]+
01929                                 alpha*dir[il][in][jn];
01930                         }
01931         MLP_ResLin();
01932 }
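/* Unlike MLP_Line, only the hidden-layer weights are moved here; the
   weights of the linear output layer are then recomputed exactly by the
   call to MLP_ResLin (hybrid method).                                     */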
01933 
01934 
01935 /***********************************************************/
01936 /* SetLambda                                               */
01937 /*                                                         */
01938 /* sets the coefficient of the regularisation term for     */
01939 /* hybrid learning method                                  */
01940 /*                                                         */
01941 /* input:     double Wmax = typical maximum weight         */
01942 /*                                                         */
01943 /* Author: J.Schwindling   13-Apr-99                       */
01944 /***********************************************************/   
01945 
01946 /* extern "C"Dllexport */
01947 void SetLambda(double Wmax)
01948 {
01949         dbl err;
01950         err = MLP_Test(0,0);
01951         LEARN.Alambda =
01952         LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
01953 }
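/* With this choice the regularisation term Alambda * sum w^2 stays of the
   order of Lambda times the current error when the Nneur+1 linear weights
   are all of size Wmax.                                                   */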
01954 
01955         
01956 /***********************************************************/
01957 /* MLP_ResLin                                              */
01958 /*                                                         */
01959 /* resolution of linear system of equations for hybrid     */
01960 /* training method                                         */
01961 /*                                                         */
01962 /*                                                         */
01963 /* Author: B.Mansoulie        end-98                       */
01964 /* Modified: J.Schwindling 29-APR-99                       */
01965 /*                         use dgels from LAPACK           */  
01966 /***********************************************************/   
01967 
01968 /* extern "C"Dllexport */
01969 void MLP_ResLin()
01970 {
01971 /*      dbl rrans[NMAX], rrout[NMAX];*/
01972 /*      type_pat rrin[NMAX];*/
01973         doublereal *HR,*dpat; //,*wlin,*SV;
01974         double err,lambda,lambda2;
01975         integer Nl,M,Nhr,khr,nrhs,iret,ierr;
01976         int   il, in, inl, ipat;
01977         /*register dbl a;*/ //a unused
01978         char Trans = 'N';
01979 
01980         
01981 /*      integer rank; */
01982 //      doublereal rcond = -1;  /* use machine precision */
01983         
01984         lambda2 = LEARN.Alambda;
01985         
01986 /* Nl = number of linear weights
01987    M = number of terms in linear system = number of examples + regularisation*/
01988         Nl = NET.Nneur[NET.Nlayer-2] + 1;
01989         M = PAT.Npat[0]+Nl;
01990 
01991         integer Lwork = 5 * M;
01992         doublereal *Work = (doublereal*) malloc((int) Lwork*sizeof(doublereal));
01993         
01994 /* memory allocation */
01995         dpat = (doublereal*) malloc((int) M*sizeof(doublereal));
01996 //      wlin = (doublereal*) malloc((int) Nl*sizeof(doublereal));
01997 //      SV = (doublereal*) malloc((int) Nl*sizeof(doublereal));
01998         
01999         Nhr = M * Nl;
02000         HR = (doublereal*) malloc((int) Nhr*sizeof(doublereal));
02001         err = 0.;
02002         for(ipat=0;ipat<PAT.Npat[0];ipat++)
02003                 {
02004 /* *** Filling dpat and HR *** */
02005 /*              for(in=0; in<NET.Nneur[0]; in++)
02006                         {
02007                         rrin[in] = PAT.Rin[0][ipat][in];
02008                         }*/
02009 
02010                 MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
02011 /*              MLP_Out(rrin,rrout);*/
02012                 /*for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
02013                         { 
02014                           a = (dbl) PAT.Rans[0][ipat][in]; //a was not used
02015                         } */
02016                 il = NET.Nlayer-2;
02017                 dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
02018                 khr = ipat;
02019                 HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
02020                 for(in=0;in<NET.Nneur[il];in++)
02021                         {
02022                         khr =  M *(in+1) + ipat;
02023                         HR[khr] = NET.Outn[il][in]* 
02024                                 (dbl) sqrt(PAT.Pond[0][ipat]);
02025                         }
02026                 }
02027         il = NET.Nlayer-2;
02028         lambda = sqrt(lambda2);
02029         for(ipat=0;ipat<=NET.Nneur[il];ipat++)
02030                 {
02031                 dpat[ipat+PAT.Npat[0]] = 0;
02032                 for(in=0;in<=NET.Nneur[il];in++)
02033                         {
02034                         khr =  M *in + ipat + PAT.Npat[0];
02035                         HR[khr] = 0;
02036                         if(in==ipat) HR[khr]=lambda;
02037                         }
02038                 }
02039         if(NET.Debug>=4) 
02040                 {
02041                 err = MLP_Test(0,0);
02042                 printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err); 
02043                 }                
02044 /*                                                                */
02045 /*      Find the linear weights by solving the linear system     */
02046 /*                                                                */
02047         nrhs = 1;
02048         ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
02049                         &Lwork,&iret);
02050         if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
02051         if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);
02052         
02053 /*      ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
02054                 &iret);
02055         if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
02056         if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/
02057         
02058         il = NET.Nlayer-1;
02059         for (inl=0; inl<=NET.Nneur[il-1];inl++)
02060                 {
02061                 NET.Weights[il][0][inl] = dpat[inl];
02062                 }
02063         if(NET.Debug>=4) 
02064                 {
02065                 err = MLP_Test(0,0);
02066                 printf("ResLin, after tlsfor, err= %f\n",err); 
02067                 }                
02068         free(Work);
02069         free(dpat);
02070 //      free(wlin);
02071         free(HR);
02072 //      free(SV);
02073 }
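/* In matrix form MLP_ResLin solves the regularised least-squares problem
       min_w || HR w - dpat ||^2
   where row ipat of HR is sqrt(Pond)*(1, out_1 ... out_Nl-1) built from
   the last hidden layer, dpat[ipat] = sqrt(Pond)*answer, and the Nl extra
   rows lambda*Identity (with zero right-hand side) implement the weight
   regularisation; dgels solves the system by QR factorisation.            */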
02074 
02075 /***********************************************************/
02076 /* EtaDecay                                                */
02077 /*                                                         */
02078 /* decreases the learning parameter eta by the factor      */
02079 /* LEARN.Decay                                             */
02080 /*                                                         */
02081 /* Author: J.Schwindling   14-Apr-99                       */
02082 /***********************************************************/   
02083 
02084 void EtaDecay()
02085 {
02086         LEARN.eta *= LEARN.Decay;
02087 }
02088 
02089        
02090 /***********************************************************/
02091 /* ShuffleExamples                                         */
02092 /*                                                         */
02093 /* Shuffles the learning examples (for stochastic method)  */
02094 /*                                                         */
02095 /* Author: J.Schwindling   14-Apr-1999                     */
02096 /* Modified: J.Schwindling 21-Jul-1999  inline MLP_Rand    */
02097 /***********************************************************/   
02098 
02099 /* extern "C"Dllexport */
02100 int ShuffleExamples(int n, int *index)
02101 {
02102         int i,ii,itmp;
02103         dbl a = (dbl) (n-1);
02104         
02105         for(i=0;i<n;i++)
02106                 {
02107                 ii = (int) MLP_Rand(0.,a);
02108                 itmp = index[ii];
02109                 index[ii] = index[i];
02110                 index[i] = itmp;
02111                 }
02112         return 0;
02113 }
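/* Illustrative usage (hypothetical): reshuffle the learning sample before
   each stochastic epoch, using the global index array:
        ShuffleExamples(PAT.Npat[0], ExamplesIndex);
*/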
02114 
02115 
02116 /***********************************************************/
02117 /* MLP_Rand                                                */
02118 /*                                                         */
02119 /* Random Numbers (to initialize weights)                  */
02120 /*                                                         */
02121 /* inputs :     dbl mini, dbl maxi = the random number is  */
02122 /*                                 between mini and maxi   */
02123 /* return value: (double) random number                    */
02124 /*                                                         */
02125 /* Author: J.Schwindling   14-Apr-99                       */
02126 /***********************************************************/   
02127 
02128 /* extern "C"Dllexport */
02129 double MLP_Rand(dbl mini, dbl maxi)
02130 {
02131 return mini+(maxi-mini)*random()/RAND_MAX;
02132 }
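/* Caveat: random() returns a long in [0, 2^31-1] whereas RAND_MAX formally
   bounds rand(); the two ranges coincide on glibc, but on a platform where
   they differ the result could fall outside [mini,maxi].                  */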
02133 
02134 
02135 /***********************************************************/
02136 /* InitWeights                                             */
02137 /*                                                         */
02138 /* initializes the weights to random numbers between       */
02139 /* -0.5 and 0.5                                            */
02140 /*                                                         */
02141 /* Author: J.Schwindling   14-Apr-99                       */
02142 /***********************************************************/   
02143 
02144 /* extern "C"Dllexport */
02145 void InitWeights()
02146 {
02147         int ilayer,ineur,i;
02148         
02149         for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
02150                 for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
02151                         for(i=0;i<=NET.Nneur[ilayer-1];i++)
02152                                 NET.Weights[ilayer][ineur][i]=
02153                                         (dbl) MLP_Rand(-0.5, 0.5);
02154 }
02155 
02156 
02157 /***********************************************************/
02158 /* PrintWeights                                            */
02159 /*                                                         */
02160 /* Print weights on the screen                             */
02161 /*                                                         */
02162 /* Author: J.Schwindling   14-Apr-99                       */
02163 /***********************************************************/   
02164 
02165 /* extern "C"Dllexport */
02166 void PrintWeights()
02167 {
02168         int ilayer,ineur,i;
02169 
02170         for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
02171                 {
02172                 if(MessLang==1) 
02173                         {
02174                         printf("Couche %d\n",ilayer);
02175                         }
02176                 else
02177                         {
02178                         printf("Layer %d\n",ilayer);
02179                         }
02180                 for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
02181                         {
02182                         if(MessLang==1) 
02183                                 {
02184                                 printf("Neurone %d",ineur);
02185                                 }
02186                         else
02187                                 {
02188                                 printf("Neuron %d",ineur);
02189                                 }
02190                         for(i=0; i<=NET.Nneur[ilayer-1]; i++)
02191                                 {
02192                                 printf(" %f",
02193                                         (double) NET.Weights[ilayer][ineur][i]);
02194                                 }
02195                         printf("\n");
02196                         }
02197                 printf("\n");
02198                 }
02199 }
02200  
02201 
02202 /***********************************************************/
02203 /* ReadPatterns                                            */
02204 /*                                                         */
02205 /* parser for learn.pat or test.pat files                  */
02206 /*                                                         */
02207 /* inputs: char *filename = name of the file to read       */
02208 /*         int ifile = 0: learning examples                */
02209 /*                     1: test examples                    */
02210 /*                                                         */
02211 /* outputs: int *inet = 1 if a network is defined          */
02212 /*          int *ilearn = 1 if a learning method is defined*/
02213 /*          int *iexamples = 1 if examples are defined     */ 
02214 /*                                                         */
02215 /* return value (int) = 0:   no error                      */
02216 /*                    = -1:  file could not be opened      */
02217 /*                                                         */
02218 /* Author: J.Schwindling   20-Apr-99                       */
02219 /* Modified: J.Schwindling 01-Jun-99 return inet, ilearn   */
02220 /*                                          iexamples      */ 
02221 /*           J.Schwindling 21-Sep-99 return value = -1     */
02222 /***********************************************************/   
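/* A minimal pattern file accepted by this parser could look as follows
   (illustrative only; the NNEU layer string is whatever GetNetStructure
   understands, and data lines alternate inputs then answers):

        # toy sample: 2 inputs, 1 output, 2 patterns
        NINP 2
        NOUT 1
        NPAT 2
        LMET 7
        0.0  1.0
        1.0
        1.0  1.0
        0.0
*/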
02223 
02224 #define CLEN 1024
02225 
02226 /* extern "C"Dllexport */
02227 int ReadPatterns(char *filename, int ifile,
02228                  int *inet, int *ilearn, int *iexamples)
02229 {
02230 char s[CLEN], s2[CLEN], cc[6], cc2[6];
02231 char otherfile[CLEN];
02232 double p;
02233 //int line,i,j;
02234 int line,i;
02235 //int l,ll,ipat,nmax,il,in,tf;
02236 int l,ll,ipat,nmax;
02237 int np=0;       /* number of examples */
02238 int nin=0;      /* number of inputs */
02239 int nout=0;     /* number of outputs */
02240 int npon=0;
02241 int ntot, ierr;
02242 //char **ss;
02243 char **ss=0;
02244 FILE *LVQpat;
02245 int nlayer, nneur[NLMAX];
02246 
02247 printf("\nLoading file %s\n",filename);
02248 LVQpat=fopen(filename,"r");
02249 if(LVQpat == 0) return -1;
02250 
02251 line=0;
02252 
02253 while(fgets(s,CLEN,LVQpat))
02254         {
02255         if(*s=='N')                                 
02256                 {
02257                 if(*(s+1)=='N')                 /* NNEU */
02258                         {
02259                         printf("Number of neurons %s",s);
02260                         *inet = 1;
02261                         sscanf(s,"%s %s",cc,s2);
02262                         ierr = GetNetStructure(s2,&nlayer,nneur);
02263                         if(ierr != 0) return ierr;
02264                         ierr = MLP_SetNet(&nlayer,nneur);
02265                         if(ierr != 0) return ierr;
02266                         }
02267                 else
02268                         {       
02269                         sscanf(s,"%s %d",cc,&l);
02270                         if(*(cc+1)=='P')        /* NPAT */
02271                                 {
02272                                 np=l;
02273                                 printf("Number of patterns %d\n",np);
02274                                 }
02275                         else if(*(cc+1)=='I')   /* NINP */
02276                                 {
02277                                 nin=l;
02278                                 PAT.Nin = nin;
02279                                 printf("Number of inputs %d\n",nin);
02280                                 }
02281                         else if(*(cc+1)=='O' && *(cc+2)=='U')   /* NOUT */
02282                                 {
02283                                 nout=l;
02284                                 PAT.Nout = nout;
02285                                 printf("Number of outputs %d\n",nout);
02286                                 }
02287                         else if(*(cc+1)=='O' && *(cc+2)=='R')   /* NORM */
02288                                 {
02289                                 DIVERS.Norm=l;
02290                                 if(l==1) printf("Normalize inputs\n");
02291                                 }
02292 /* obsolete datacard     NLAY */                        
02293                         else if(*(cc+1)=='L')   
02294                                 {
02295                                 printf("NLAY datacard is no longer needed\n");
02296                                 }                               
02297                         else if(*(cc+1)=='E')   /* NEPO */ 
02298                                 {
02299                                 LEARN.Nepoch=l;
02300                                 printf("Number of epochs %d\n",l);
02301                                 }
02302                         else if(*(cc+1)=='R')   /* NRES */
02303                                 {
02304                                 LEARN.Nreset=l;
02305                                 printf(
02306                                 "Reset to steepest descent every %d epochs\n",
02307                                 l);
02308                                 }
02309                         }
02310                 }
02311         else if(*s=='L')                
02312                 {
02313                 if(*(s+1)=='P')                 /* LPAR */
02314                         {
02315                         sscanf(s,"%s %le",cc,&p);
02316                         printf("Learning parameter %f\n",p);
02317                         LEARN.eta = (dbl) p;
02318                         }
02319                 else if(*(s+1)=='M')            /* LMET */
02320                         {
02321                         *ilearn = 1;
02322                         sscanf(s,"%s %d",cc,&(LEARN.Meth));
02323                         printf("Learning method = ");
02324                         switch(LEARN.Meth)
02325                         {
02326                 case 1: printf("Stochastic Minimization\n");
02327                         break;
02328                 case 2: printf("Steepest descent with fixed step\n");
02329                         break;
02330                 case 3: printf("Steepest descent with line search\n"); break;
02331                 case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
02332                 case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
02333                         break;
02334                 case 6: printf("BFGS\n");
02335                         break;
02336                 case 7: printf("Hybrid BFGS-linear\n");
02337                         break;
02338                 default: printf("Error: unknown method\n"); break;
02339                 }
02340 
02341                         }
02342                 else if(*(s+1)=='T')    /* LTAU */
02343                         {
02344                         sscanf(s,"%s %lf",cc,&p);
02345                         printf("Tau %f\n",p);
02346                         LEARN.Tau = (dbl) p;
02347                         }
02348                 else if(*(s+1)=='A')    /* LAMB */
02349                         {
02350                         sscanf(s,"%s %lf",cc,&p);
02351                         printf("Lambda %f\n",p);
02352                         LEARN.Lambda = (dbl) p;
02353                         }
02354                 }
02355         else if(*s=='F')                
02356                 {
02357                 if(*(s+1)=='S')         /* FSPO */
02358                         {
02359                         sscanf(s,"%s %le",cc,&p);
02360                         printf("Flat spot elimination parameter %f\n",p);
02361                         LEARN.delta = (dbl) p;
02362                         }
02363                 else if(*(s+1)=='I')    /* FILE */
02364                         {
02365                         sscanf(s,"%s %s",cc,otherfile);
02366                         ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
02367                         if(ierr != 0) return ierr;
02368                         }       
02369                 }
02370         else if(*s=='M')                /* momentum */
02371                 {
02372                 sscanf(s,"%s %le",cc,&p);
02373                 printf("Momentum term %f\n",p);
02374                 LEARN.epsilon = (dbl) p;
02375                 }               
02376         else if(*s=='O')                /* OUTx */
02377                 {
02378                 if(*(s+3)=='W')         /* OUTW */
02379                         {
02380                         sscanf(s,"%s %d",cc,&OutputWeights);
02381                         if(OutputWeights == 0)
02382                                 {
02383                                 printf("Never write file weights.out\n");
02384                                 }
02385                         else if(OutputWeights == -1)
02386                                 {
02387                                 printf("Write weights to output file at the end\n");
02388                                 }
02389                         else 
02390                                 {
02391                                 printf("Write weights to file every %d epochs\n",
02392                                         OutputWeights);
02393                                 }
02394                         }
02395                 else if(*(s+3)=='F')    /* OUTF */
02396                         {
02397                         sscanf(s,"%s %s",cc,cc2);
02398                         if(*cc2=='F' || *cc2=='C')
02399                                 {
02400                                 DIVERS.Outf = *cc2;
02401                                 }
02402                         else
02403                                 {
02404                         printf(" *** Error while loading file %s at line %s :",
02405                                 filename,s);
02406                         printf(" unknown language\n");
02407                                 }       
02408                         }
02409                 else
02410                         {
02411                         printf(" *** Error while loading file %s at line %s\n",
02412                                 filename,s);
02413                         }                                       
02414                 }               
02415         else if(*s=='R')                /* RDWT */
02416                 {
02417                 sscanf(s,"%s %d",cc,&(NET.Rdwt));
02418                 if(NET.Rdwt == 0)
02419                         {
02420                         printf("Random weights \n");
02421                         }
02422                 else
02423                         {
02424                         printf("Read weights from file weights.in\n");
02425                         }       
02426                 }               
02427         else if(*s=='S')                /* STAT */
02428                 {
02429                 sscanf(s,"%s %d",cc,&(DIVERS.Stat));
02430                 }
02431 /*      else if(*s=='T')                 TFUN 
02432                 {
02433                 sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
02434                 SetTransFunc(il,in,tf);
02435                 } */    
02436         else if(*s=='H')                /* HESS */
02437                 {
02438                 sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
02439                 }               
02440         else if(*s=='D')                
02441                 {
02442                 if(*(s+1)=='C')         /* DCAY */
02443                         {       
02444                         sscanf(s,"%s %le",cc,&p);
02445                         LEARN.Decay = p;
02446                         printf("Learning parameter decay %f\n",
02447                                                 (double) LEARN.Decay);
02448                         }
02449                 if(*(s+1)=='B')         /* DBIN */      
02450                         {       
02451                         sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
02452                         printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
02453                         }
02454                 if(*(s+1)=='E')         /* DEBU */      
02455                         {       
02456                         sscanf(s,"%s %d",cc,&(NET.Debug));
02457                         printf("Debug mode %d\n",NET.Debug);
02458                         }
02459                 }               
02460         else if(*s=='P')                /* POND */
02461                 {
02462                 npon = CountLexemes(s);
02463                 if(npon==2) 
02464                         {
02465                         sscanf(s,"%s %d",cc,&(PAT.Iponde));
02466                         }
02467                 else
02468                         {
02469                         ss = (char**) malloc((npon+1)*sizeof(char*)); 
02470                         for(i=0;i<=npon;i++)
02471                                 ss[i]=(char*) malloc(40*sizeof(char));
02472                         getnLexemes(npon,s,ss);
02473                         sscanf(ss[1],"%d",&(PAT.Iponde));
02474                         for(i=2;i<npon;i++)
02475                             {   
02476                             sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
02477                             }
02478                         }
02479                 if(PAT.Iponde==0) 
02480                         {
02481                         npon = 0;
02482                         }
02483                 else
02484                         {
02485                         npon = 1;
02486                         }
02487                 }               
02488         else if(*s=='#')                            /* comments */
02489                 {
02490                 }
02491         else                                        /* example itself */
02492                 {
02493                 if(np==0) return 1;
02494                 if(nin==0) return 2;
02495                 if(nout==0) return 3;
02496                 
02497 
02498 /* store number of examples and allocate memory */
02499                 if(line==0)
02500                         {
02501                         PAT.Npat[ifile] = np;
02502                         ierr = AllocPatterns(ifile,np,nin,nout,0);
02503                         if(ierr != 0) return ierr;
02504                         *iexamples = 1;
02505                         }
02506 
02507 /* now get example */
02508                                 
02509                 line++;
02510                 ll = (line-1)%2;
02511                 ipat = (line-1)/2;
02512                 /*              printf("Loading event \t %d\r",ipat);*/
02513 /*              if(ipat>NPMAX) 
02514                         {
02515                         printf("Too many examples in file\n");
02516                         printf("Loading %d examples\n",NPMAX);
02517                         PAT.Npat[ifile] = NPMAX;
02518                         break;
02519                         }
02520 */                      
02521                 
02522 /* allocate the number of lines */
02523                 
02524                 if(line==1) 
02525                         {
02526                         
02527                         nmax = nin;
02528                         if(nout>nin) nmax=nout;
02529                         ss = (char**) malloc((nmax+1)*sizeof(char*));
02530                         if(ss == 0) return -111; 
02531                         for(i=0;i<=nmax;i++)
02532                                 {
02533                                 ss[i]=(char*) malloc(40*sizeof(char));
02534                                 if(ss[i] == 0) return -111;
02535                                 }
02536                         }
02537                         
02538                 if(ll==0)    /* inputs */
02539                         {
02540                         getnLexemes(nin,s,ss);
02541                         for(i=0;i<nin;i++)
02542                                 {
02543                                 sscanf(ss[i],"%le",&p);
02544                                 PAT.Rin[ifile][ipat][i] = (type_pat) p;
02545                                 }
02546                         }
02547                 else         /* answers */
02548                         {
02549                         ntot=nout+npon;
02550                         getnLexemes(ntot,s,ss);
02551                         for(i=0;i<ntot;i++)
02552                                 {
02553                                 sscanf(ss[i],"%le",&p);
02554                                 if(i<nout)
02555                                         {
02556                                         PAT.Rans[ifile][ipat][i] = (type_pat) p;
02557                                         }
02558                                 else
02559                                         {
02560                                         if(PAT.Iponde==1)
02561                                                 {
02562                                                 PAT.Pond[ifile][ipat] = 
02563                                                         (type_pat) p;
02564                                                 }
02565                                         else
02566                                                 {
02567                                                 PAT.Pond[ifile][ipat] = 
02568                                                 (type_pat) PAT.Ponds[(int) p -1];
02569                                                 }
02570                                         }
02571                                 }
02572                         }                               
02573                 }
02574         }
02575         printf("%d examples loaded    \n\n",PAT.Npat[ifile]);
02576         fclose(LVQpat);
02577         return 0;
02578 }
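/* For orientation, a minimal pattern file accepted by ReadPatterns (a
   sketch only: the example-count and size declarations are parsed
   earlier in this function, so their exact keyword spellings are
   assumed here).  After the declarations, each example occupies two
   lines: first the nin inputs, then the nout answers, plus one extra
   weight value per example when POND requests per-event weighting:

       # two inputs, one output, two examples
       NIN 2
       NOUT 1
       NPAT 2
       0.5 1.2
       1.
       -0.3 0.7
       0.
*/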
02579 
02580 
02581 /* CountLexemes returns the number of lexemes in s separated by blanks.*/
02582 /* extern "C"Dllexport */
02583 int CountLexemes(char *s)
02584 {
02585   char tmp[1024];
02586   int i=0;
02587   
02588   strcpy(tmp,s);
02589   if (strtok(tmp," "))
02590     {
02591       i=1;
02592       while (strtok(NULL," ")) i++;
02593     }
02594   return i;
02595 }
02596 
02597 /* splits s in substrings ss separated by blanks*/
02598 /* extern "C"Dllexport */
02599 void getnLexemes(int n, char *s, char **ss)
02600 {
02601   char tmp[1024];
02602   int i;     
02603   strcpy(tmp,s);
02604   if (n>0)
02605     {
02606       strcpy(ss[0],strtok(tmp," "));
02607       for (i=1;i<n;i++)
02608         strcpy(ss[i],strtok(NULL," "));
02609     }
02610 }
02611 
02612 /* extern "C"Dllexport */
02613 void getLexemes(char *s,char **ss)
02614 {
02615   char tmp[1024];
02616   int i,n;   
02617     
02618   strcpy(tmp,s);
02619   n=CountLexemes(tmp);
02620   if (n>0)
02621     {
02622       strcpy(ss[0],strtok(tmp," "));
02623       for (i=1;i<n;i++)
02624         strcpy(ss[i],strtok(NULL," "));
02625     }
02626 }
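/* Minimal usage sketch for the three lexeme helpers above.  They all
   copy the input into a fixed 1024-byte buffer, so lines must stay
   shorter than that; 40 characters per token matches what
   ReadPatterns allocates:

       char line[] = "POND 1 2.5";
       int i, n = CountLexemes(line);                   n is 3
       char **tok = (char**) malloc(n*sizeof(char*));
       for(i=0; i<n; i++) tok[i] = (char*) malloc(40*sizeof(char));
       getnLexemes(n,line,tok);                         tok[0] is "POND"
*/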
02627 
02628 
02629 /***********************************************************/
02630 /* LearnFree                                               */
02631 /*                                                         */
02632 /* frees memory allocated for learning                     */
02633 /*                                                         */
02634 /* Author: J.Schwindling   28-May-99                       */
02635 /***********************************************************/   
02636 
02637 /* extern "C"Dllexport */
02638 void LearnFree()
02639 {
02640         int il,in; 
02641         if(LearnMemory==0) return;
02642         LearnMemory = 0;
02643         for(il=0; il<NET.Nlayer; il++)
02644                 {
02645                 for(in=0; in<NET.Nneur[il]; in++)
02646                         {
02647                         free(dir[il][in]);
02648                         }
02649                 free(dir[il]);
02650                 }
02651         free(dir);
02652         if(BFGSMemory==0) return;
02653         BFGSMemory = 0;
02654         for(il=0; il<NET.Nweights; il++)
02655                 {
02656                 free(BFGSH[il]);
02657                 }               
02658         free(BFGSH);
02659         free(Gamma);
02660         free(delta);
02661         
02662 /*      if(JacobianMemory == 0) return;
02663         JacobianMemory = 0;
02664         for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
02665         free(JacobianMatrix);   */
02666 }
02667 
02668 
02669 /***********************************************************/
02670 /* LearnAlloc                                              */
02671 /*                                                         */
02672 /* memory allocation for vectors and matrices used in      */
02673 /* conjugate gradients or BFGS like methods                */
02674 /*                                                         */
02675 /* return value (int) = error code: 0 no error             */
02676 /*                                  -111 no memory         */
02677 /*                                                         */
02678 /* Author: J.Schwindling   20-Apr-99                       */
02679 /* Modified: J.Schwindling 31-Jan-2000  error code         */
02680 /***********************************************************/   
02681 
02682 /* extern "C"Dllexport */
02683 int LearnAlloc()
02684 {
02685         int il,in,i; 
02686         int Nweights = 0;
02687         
02688         if(LearnMemory != 0) LearnFree();
02689         LearnMemory = 1;
02690         dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
02691         if(dir == 0) return -111;
02692         
02693         for(il=0; il<NET.Nlayer; il++)
02694                 {
02695                 dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
02696                 if(dir[il] == 0) return -111;
02697                 for(in=0; in<NET.Nneur[il]; in++)
02698                         {
02699                         if(il==0) 
02700                                 {
02701 /* TODO: understand implications of hard-coded 101 */                           
02702                                 dir[0][in] = (dbl *)
02703                                 malloc(101*sizeof(dbl));
02704                                 if(dir[0][in] == 0) return -111;
02705                                 }
02706                         else
02707                                 {
02708                                 dir[il][in] = (dbl *) 
02709                                 malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
02710                                 if(dir[il][in] == 0) return -111;
02711                                 Nweights += NET.Nneur[il-1]+1;
02712                                 }
02713                         }
02714                 }
02715         NET.Nweights = Nweights;
02716                 
02717         if(BFGSMemory==0 && LEARN.Meth>= 6) 
02718                 {
02719                 BFGSMemory = 1;
02720                 Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
02721                 delta = (dbl*) malloc(Nweights*sizeof(dbl));
02722                 BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
02723                 if(Gamma == 0 || delta == 0 || BFGSH == 0)
02724                    return -111;
02725                    
02726                 for(i=0; i<Nweights; i++)
02727                         {
02728                         BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
02729                         if(BFGSH[i] == 0) return -111;
02730                         }
02731                 }
02732                 
02733 /*      if(JacobianMemory==0)
02734                 {
02735                 JacobianMemory = 1;
02736                 printf("JacobianMemory = %d\n",JacobianMemory);
02737                 JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
02738                 for(i=0; i<PAT.Npat[0]; i++)
02739                         JacobianMatrix[i] = 
02740                                 (dbl*) malloc(Nweights*sizeof(dbl));
02741                 printf("end memory alloc\n");
02742                 }       
02743         
02744         if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
02745         
02746         return 0;
02747 }
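/* Call-order sketch (an assumption, based on the LearnFree/LearnAlloc
   calls commented out in MLP_SetNet further below): define the network
   first, then book the learning workspace before training starts:

       int nl = 3, nn[3] = {2,4,1};
       int err = MLP_SetNet(&nl,nn);
       if(err == 0) err = LearnAlloc();
       if(err == -111) ... out of memory ...

   For LEARN.Meth >= 6 the dominant cost is the BFGSH matrix, which
   needs Nweights*Nweights doubles.                                   */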
02748 
02749 
02750 /***********************************************************/
02751 /* MLP_PrFFun                                              */
02752 /*                                                         */
02753 /* writes the MLP function to file as a Fortran function   */
02754 /*                                                         */
02755 /* inputs :     char *filename = name of the output file   */
02756 /*                                                         */
02757 /* return value (int) = 0: no error                        */
02758 /*                     -1: could not open file             */ 
02759 /*                                                         */
02760 /* Author: J.Schwindling   20-Apr-99                       */
02761 /* Modified: J.Schwindling 05-May-99                       */
02762 /*              add normalization of inputs                */
02763 /*           J.Schwindling 30-Nov-99 return code           */
02764 /***********************************************************/   
02765 
02766 /* extern "C"Dllexport */
02767 int MLP_PrFFun(char *filename)
02768 {
02769         int il,in,jn;
02770         FILE *W;
02771 
02772         W=fopen(filename,"w");
02773         if(W==0) return -1;
02774         fprintf(W,"      SUBROUTINE RNNFUN(rin,rout)\n");
02775         fprintf(W,"      DIMENSION RIN(%d)\n",NET.Nneur[0]);
02776         fprintf(W,"      DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
02777         fprintf(W,"C\n");
02778         
02779         for(in=0; in<NET.Nneur[0]; in++)
02780                 {
02781                 if(DIVERS.Norm==0)
02782                         { 
02783                         fprintf(W,"      OUT%d = RIN(%d)\n",in+1,in+1);
02784                         }
02785                 else
02786                         {
02787                         fprintf(W,"      OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
02788                                         STAT.mean[in],STAT.sigma[in]);
02789                         }       
02790                 }
02791         for(il=1; il<NET.Nlayer-1; il++)
02792                 {
02793                 fprintf(W,"C\n");
02794                 fprintf(W,"C     layer %d\n",il+1);
02795                 for(in=0; in<NET.Nneur[il]; in++)
02796                         {
02797                         fprintf(W,"      RIN%d = %e\n",in+1,
02798                                         (double) NET.Weights[il][in][0]);
02799                         for(jn=1;jn<=NET.Nneur[il-1]; jn++)
02800                                 fprintf(W,"     > +(%e) * OUT%d\n",
02801                                         (double) NET.Weights[il][in][jn],jn);
02802                         }
02803                 fprintf(W,"C\n");
02804                 for(in=0; in<NET.Nneur[il]; in++)
02805                         {
02806                         if(NET.T_func[il][in]==0) 
02807                                 {
02808                                 fprintf(W,"      OUT%d = 0\n",in+1);
02809                                 }
02810                         else if(NET.T_func[il][in]==1)
02811                                 {
02812                                 fprintf(W,"      OUT%d = RIN%d\n",in+1,in+1);
02813                                 }
02814                         else if(NET.T_func[il][in]==2)
02815                                 {
02816                                 fprintf(W,"      OUT%d = SIGMOID(RIN%d)\n",
02817                                         in+1,in+1);
02818                                 }
02819                         }
02820                 }
02821         il = NET.Nlayer-1;
02822         fprintf(W,"C\n");
02823         fprintf(W,"C     layer %d\n",il+1);
02824         for(in=0; in<NET.Nneur[il]; in++)
02825                 {
02826                 fprintf(W,"      RIN%d = %e\n",in+1,
02827                                 (double) NET.Weights[il][in][0]);
02828                 for(jn=1;jn<=NET.Nneur[il-1]; jn++)
02829                         fprintf(W,"     > +(%e) * OUT%d\n",
02830                                 (double) NET.Weights[il][in][jn],jn);
02831                 }
02832         fprintf(W,"C\n");
02833         for(in=0; in<NET.Nneur[il]; in++)
02834                 {
02835                 if(NET.T_func[il][in]==0) 
02836                         {
02837                         fprintf(W,"      ROUT(%d) = 0\n",in+1);
02838                         }
02839                 else if(NET.T_func[il][in]==1)
02840                         {
02841                         fprintf(W,"      ROUT(%d) = RIN%d\n",in+1,in+1);
02842                         }
02843                 else if(NET.T_func[il][in]==2)
02844                         {
02845                         fprintf(W,"      ROUT(%d) = SIGMOID(RIN%d)\n",
02846                                 in+1,in+1);
02847                         }
02848                 }
02849                 
02850         fprintf(W,"C\n");
02851         fprintf(W,"      END\n");
02852         fprintf(W,"      REAL FUNCTION SIGMOID(X)\n");
02853         fprintf(W,"      SIGMOID = 1./(1.+EXP(-X))\n");
02854         fprintf(W,"      END\n");
02855         
02856         fclose(W);
02857         return 0;
02858 }
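/* Illustrative output of MLP_PrFFun for a 2-2-1 network with sigmoid
   transfer functions and no input normalization (the weight values
   shown are placeholders, not real weights):

             SUBROUTINE RNNFUN(rin,rout)
             DIMENSION RIN(2)
             DIMENSION ROUT(1)
       C
             OUT1 = RIN(1)
             OUT2 = RIN(2)
       C
       C     layer 2
             RIN1 = 1.000000e-01
            > +(2.000000e-01) * OUT1
            > +(3.000000e-01) * OUT2
             RIN2 = ...
       C
             OUT1 = SIGMOID(RIN1)
             OUT2 = SIGMOID(RIN2)
       C
       C     layer 3
             RIN1 = ...
            > +(...) * OUT1
            > +(...) * OUT2
       C
             ROUT(1) = SIGMOID(RIN1)
       C
             END
             REAL FUNCTION SIGMOID(X)
             SIGMOID = 1./(1.+EXP(-X))
             END
*/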
02859 
02860 
02861 /***********************************************************/
02862 /* MLP_PrCFun                                              */
02863 /*                                                         */
02864 /* writes the MLP function to file as a C function         */
02865 /*                                                         */
02866 /* inputs :     char *filename = name of the output file   */
02867 /*                                                         */
02868 /* return value (int) = 0: no error                        */
02869 /*                      -1: could not open file            */ 
02870 /*                                                         */
02871 /* Author: J.Schwindling   23-Apr-99                       */
02872 /* Modified: J.Schwindling 30-Nov-99 return code           */
02873 /***********************************************************/   
02874 
02875 /* extern "C"Dllexport */
02876 int MLP_PrCFun(char *filename)
02877 {
02878         int il,in,jn;
02879         FILE *W;
02880 
02881         W=fopen(filename,"w");
02882         if(W==0) return -1;
02883         
02884         fprintf(W,"double sigmoid(double x)\n");
02885         fprintf(W,"{\n");
02886         fprintf(W,"return 1/(1+exp(-x));\n");
02887         fprintf(W,"}\n");
02888         fprintf(W,"void rnnfun(double *rin,double *rout)\n");
02889         fprintf(W,"{\n");
02890         fprintf(W,"      double out1[%d];\n",NET.Nneur[0]);
02891         fprintf(W,"      double out2[%d];\n",NET.Nneur[1]);
02892         if(NET.Nlayer>=3) fprintf(W,"      double out3[%d];\n",NET.Nneur[2]);
02893         if(NET.Nlayer>=4) fprintf(W,"      double out4[%d];\n",NET.Nneur[3]);
02894         fprintf(W,"\n");
02895         
02896         for(in=0; in<NET.Nneur[0]; in++)
02897                 {
02898                 if(DIVERS.Norm==0)
02899                         { 
02900                         fprintf(W,"      out1[%d] = rin[%d];\n",in,in);
02901                         }
02902                 else
02903                         {
02904                         fprintf(W,"      out1[%d] = (rin[%d]-%e)/%e;\n",
02905                                         in,in,
02906                                         STAT.mean[in],STAT.sigma[in]);
02907                         }       
02908                 }
02909                 
02910         for(il=1; il<=NET.Nlayer-1; il++)
02911                 {
02912                 fprintf(W,"\n");
02913                 fprintf(W,"/*     layer %d */\n",il+1);
02914                 for(in=0; in<NET.Nneur[il]; in++)
02915                         {
02916                         fprintf(W,"      out%d[%d] = %e\n",il+1,in,
02917                                         (double) NET.Weights[il][in][0]);
02918                         for(jn=1;jn<=NET.Nneur[il-1]; jn++)
02919                                 fprintf(W,"      +(%e) * out%d[%d]\n",
02920                                         (double) NET.Weights[il][in][jn],il,jn-1);
02921                         fprintf(W,"      ;\n");
02922                         }
02923                 fprintf(W,"\n");
02924                 for(in=0; in<NET.Nneur[il]; in++)
02925                         {
02926                         if(NET.T_func[il][in]==0) 
02927                                 {
02928                                 fprintf(W,"      out%d[%d] = 0;\n",il+1,in);
02929                                 }
02930                         else if(NET.T_func[il][in]==1)
02931                                 {
02932                                 }
02933                         else if(NET.T_func[il][in]==2)
02934                                 {
02935                                 fprintf(W,"      out%d[%d] = sigmoid(out%d[%d]);\n",
02936                                         il+1,in,il+1,in);
02937                                 }
02938                         }
02939                 }
02940         il = NET.Nlayer-1;
02941         for(in=0; in<NET.Nneur[il]; in++)
02942                 {
02943                 fprintf(W,"      rout[%d] = out%d[%d];\n",in,il+1,in);
02944                 }
02945         fprintf(W,"}\n");       
02946         fclose(W);
02947         return 0;
02948 }
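/* The C flavour generated by MLP_PrCFun for the same 2-2-1 network
   (again with placeholder weights).  Note that the emitted file calls
   exp(), so it must be compiled with math.h available and -lm:

       double sigmoid(double x)
       {
       return 1/(1+exp(-x));
       }
       void rnnfun(double *rin,double *rout)
       {
             double out1[2];
             double out2[2];
             double out3[1];

             out1[0] = rin[0];
             out1[1] = rin[1];

             out2[0] = 1.000000e-01
             +(2.000000e-01) * out1[0]
             +(3.000000e-01) * out1[1]
             ;
             out2[1] = ...
             ;

             out2[0] = sigmoid(out2[0]);
             out2[1] = sigmoid(out2[1]);
       ... layer 3 follows the same pattern ...
             rout[0] = out3[0];
       }
*/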
02949 
02950 
02951 /***********************************************************/
02952 /* SaveWeights                                             */
02953 /*                                                         */
02954 /* writes the weights to file                              */
02955 /*                                                         */
02956 /* inputs :     char *filename = name of the output file   */
02957 /*              int iepoch = epoch number                  */
02958 /*                                                         */
02959 /* return value (int): 0 if OK                             */
02960 /*                     -1 if file could not be opened      */    
02961 /*                                                         */
02962 /* Author: J.Schwindling   20-Apr-99                       */
02963 /* Modified: J.Schwindling 11-Jun-99                       */
02964 /*                         print net structure in header   */
02965 /* Modified: J.Schwindling 05-Nov-99                       */
02966 /*                         return error code               */
02967 /***********************************************************/   
02968 
02969 /* extern "C"Dllexport */
02970 int SaveWeights(char *filename, int iepoch)
02971 {
02972         FILE *W;
02973         int ilayer,ineur,i;
02974 
02975         W=fopen(filename,"w");
02976         if(W==0) return -1;
02977         
02978         fprintf(W,"# network structure ");
02979         for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
02980                 {
02981                 fprintf(W,"%d ",NET.Nneur[ilayer]);
02982                 }
02983                 
02984         fprintf(W,"\n %d\n",iepoch);
02985         for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
02986                 {
02987                 for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
02988                         {
02989                         for(i=0; i<=NET.Nneur[ilayer-1]; i++)
02990                                 {
02991                                 fprintf(W," %1.15e\n",
02992                                 (double) NET.Weights[ilayer][ineur][i]);
02993                                 }
02994                         }
02995                 }
02996         fclose(W);
02997         return 0;
02998 }
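/* Resulting file for a 2-2-1 network saved at epoch 100 (placeholder
   values); for each neuron the bias (index 0) comes first, then the
   weights from each neuron of the previous layer, layer by layer.
   LoadWeights below reads back exactly this ordering:

       # network structure 2 2 1
        100
        1.234567890123456e-01
        ...

   i.e. 6 weight lines for the hidden layer plus 3 for the output
   layer, 9 in all.                                                   */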
02999         
03000 
03001 /***********************************************************/
03002 /* LoadWeights                                             */
03003 /*                                                         */
03004 /* reads the weights from file                             */
03005 /*                                                         */
03006 /* input :      char *filename = name of the input file    */
03007 /* output :     int *iepoch = epoch number                 */
03008 /*                                                         */
03009 /* return value (int): 0 if OK                             */
03010 /*                     -1 if file could not be opened      */    
03011 /*                                                         */
03012 /* Author: J.Schwindling   20-Apr-99                       */
03013 /* Modified: J.Schwindling 11-Jun-99                       */
03014 /*           lines starting with # are skipped             */
03015 /* Modified: J.Schwindling 05-Nov-99                       */
03016 /*                         return error code               */
03017 /***********************************************************/   
03018 
03019 /* extern "C"Dllexport */
03020 int LoadWeights(char *filename, int *iepoch)
03021 {
03022         FILE *W;
03023         int ilayer,ineur,i;
03024         double p;
03025         char s[80];
03026 
03027         W=fopen(filename,"r");
03028         if(W==0) return -1;
03029         do
03030                 {
03031                 if(fgets(s,80,W) == NULL) { fclose(W); return -1; }  /* EOF guard */
03032                 }
03033         while(*s == '#');       
03034         sscanf(s," %d",iepoch);
03035         for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
03036                 {
03037                 for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
03038                         {
03039                         for(i=0; i<=NET.Nneur[ilayer-1]; i++)
03040                                 {
03041                                 fscanf(W," %le",&p);
03042                                 NET.Weights[ilayer][ineur][i] = (dbl) p;
03043                                 }
03044                         }
03045                 }
03046                 
03047         fclose(W);
03048         return 0;
03049 }
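/* Round-trip sketch for the two routines above (the file name is just
   an example):

       int iepoch;
       if(SaveWeights("weights.out",100) == -1) ... could not open ...
       if(LoadWeights("weights.out",&iepoch) == -1) ... could not open ...

   after which iepoch == 100 and NET.Weights holds the saved values.  */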
03050 
03051 
03052 /***********************************************************/
03053 /* AllocPatterns                                           */
03054 /*                                                         */
03055 /* allocate memory for the examples                        */
03056 /*                                                         */
03057 /* input :      int ifile = file number (0 or 1)           */
03058 /*              int npat = number of examples              */
03059 /*              int nin  = number of inputs                */
03060 /*              int nout = number of outputs               */
03061 /*              int iadd = 0: new examples                 */
03062 /*                         1: add to existing ones         */   
03063 /*                                                         */
03064 /* return value (int) = error code: 0 = no error           */
03065 /*                                  1 = wrong file number  */
03066 /*                                 -111 = no memory        */
03067 /*                                                         */
03068 /* Author: J.Schwindling   21-Apr-99                       */
03069 /* Modified: J.Schwindling 26-Apr-99                       */
03070 /*            - frees memory if already booked and iadd=0  */
03071 /*              (should remove need to call mlpfree)       */ 
03072 /*            - implement iadd = 1                         */  
03073 /***********************************************************/   
03074 
03075 /* extern "C"Dllexport */int AllocPatterns(int ifile, int npat, int nin, int nout, int iadd)
03076 {
03077         int j;
03078         type_pat *tmp, *tmp3;
03079         type_pat **tmp2;
03080         int ntot;
03081         
03082         if(ifile>1 || ifile<0) return(1);
03083 /*      scanf("%d",&j); */
03084         if(ExamplesMemory==0) 
03085                 {
03086                 ExamplesMemory=1;
03087                 PAT.Pond = (type_pat **) malloc(2*sizeof(dbl*));
03088                 PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
03089                 PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
03090                 PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
03091                 if(PAT.Pond == 0 || PAT.Rin == 0
03092                    || PAT.Rans == 0 || PAT.vRin == 0) return -111; 
03093                 } 
03094         
03095 
03096 /* if iadd=0, check that memory not already allocated. Otherwise free it */
03097         if(iadd==0 && PatMemory[ifile]!=0)
03098                 {
03099                  FreePatterns(ifile);
03100                 }
03101 
03102 /* allocate memory and initialize ponderations */
03103         if(iadd==0 || PatMemory[ifile]==0)
03104         {
03105         PatMemory[ifile] = 1;           
03106         PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
03107         if(PAT.Pond[ifile] == 0) return -111;
03108         for(j=0; j<npat; j++)
03109                 PAT.Pond[ifile][j] = 1;
03110                         
03111         PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
03112         if(PAT.Rin[ifile] == 0) return -111;
03113         PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
03114         if(PAT.Rans[ifile] == 0) return -111;
03115 
03116         PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
03117                                                 sizeof(type_pat));
03118         if(PAT.vRin[ifile] == 0) return -111;
03119                                                 
03120         for(j=0; j<npat; j++)
03121                 {
03122                 PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
03123                 PAT.vRin[ifile][j*(nin+1)] = 1;
03124                 }
03125         for(j=0; j<npat; j++)
03126                 {
03127                 PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
03128                 if(PAT.Rans[ifile][j] == 0) return -111;
03129                 }
03130         PAT.Npat[ifile] = npat;
03131         
03132         if(ifile==0)
03133                 {       
03134                 ExamplesIndex = (int *) malloc(npat*sizeof(int));
03135                 if(ExamplesIndex == 0) return -111;
03136                 for(j=0; j<npat; j++) ExamplesIndex[j] = j;
03137                 }
03138         }
03139         else            /* add examples */
03140         {
03141         ntot = PAT.Npat[ifile]+npat;
03142         
03143 /* event weighting */   
03144         tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
03145         if(tmp == 0) return -111;
03146         
03147         for(j=0; j<PAT.Npat[ifile]; j++) 
03148                 {
03149                 tmp[j] = PAT.Pond[ifile][j];
03150                 }
03151         for(j=PAT.Npat[ifile];j<ntot;j++)
03152                 {
03153                 tmp[j] = 1;
03154                 }
03155         if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
03156         PAT.Pond[ifile] = tmp;  
03157         
03158 /* examples */
03159 /*      tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));            
03160         for(j=0; j<PAT.Npat[ifile]; j++) 
03161                 {
03162                 tmp2[j] = PAT.Rin[ifile][j];
03163                 }
03164         for(j=PAT.Npat[ifile];j<ntot;j++)
03165                 {
03166                 tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
03167                 }
03168         if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
03169         PAT.Rin[ifile] = tmp2;  */
03170         
03171         tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
03172         if(tmp3 == 0) return -111;
03173         
03174         for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
03175                 {
03176                 tmp3[j] = PAT.vRin[ifile][j];
03177                 }
03178         if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
03179         PAT.vRin[ifile] = tmp3;
03180         for(j=0; j<ntot; j++)
03181                 {
03182                 PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
03183                 PAT.vRin[ifile][j*(nin+1)] = 1;
03184                 }
03185                 
03186         tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
03187         if(tmp2 == 0) return -111;              
03188         for(j=0; j<PAT.Npat[ifile]; j++) 
03189                 {
03190                 tmp2[j] = PAT.Rans[ifile][j];
03191                 }
03192         for(j=PAT.Npat[ifile];j<ntot;j++)
03193                 {
03194                 tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
03195                 if(tmp2[j] == 0) return -111;
03196                 }
03197         if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
03198         PAT.Rans[ifile] = tmp2; 
03199         PAT.Npat[ifile] = ntot;
03200         PatMemory[ifile] = 1;           
03201         
03202 /* indices */   
03203         if(ifile==0)
03204                 {
03205                 free(ExamplesIndex);    
03206                 ExamplesIndex = (int *) malloc(ntot*sizeof(int));
03207                 if(ExamplesIndex == 0) return -111;
03208                 for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
03209                 }
03210         }
03211                 
03212         return 0;
03213 } 
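/* Layout sketch of the pattern storage booked above: vRin[ifile] is a
   single flat block of npat*(nin+1) values, each pattern row starting
   with a constant 1 (the bias input), and Rin[ifile][j] pointing just
   past that 1:

       vRin[ifile]:  [ 1  x0,0 .. x0,nin-1 | 1  x1,0 .. x1,nin-1 | ... ]

   with PAT.Rin[ifile][j] == &PAT.vRin[ifile][j*(nin+1)+1], so that
   PAT.Rin[ifile][j][i] and PAT.vRin[ifile][j*(nin+1)+1+i] name the
   same value.  Rans, in contrast, is allocated row by row.           */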
03214 
03215 
03216 /***********************************************************/
03217 /* FreePatterns                                            */
03218 /*                                                         */
03219 /* frees memory for the examples                           */
03220 /*                                                         */
03221 /* input :      int ifile = file number (0 or 1)           */
03222 /*                                                         */
03223 /* return value (int) = error code: 0 = no error           */
03224 /*                                  1 = wrong file number  */
03225 /*                                  2 = no mem allocated   */
03226 /*                                                         */
03227 /* Author: J.Schwindling   26-Apr-99                       */
03228 /***********************************************************/   
03229 
03230 /* extern "C"Dllexport */int FreePatterns(int ifile)
03231 {
03232         int i;
03233 
03234         if(ifile>1 || ifile<0) return 1;
03235 /*      printf("%d %d \n",ifile,PatMemory[ifile]);*/
03236         if(PatMemory[ifile]==0) return 2;
03237         
03238         free(PAT.Pond[ifile]);
03239         for(i=0; i<PAT.Npat[ifile]; i++)
03240                 {
03241 /*              free(PAT.Rin[ifile][i]); */
03242                 free(PAT.Rans[ifile][i]); 
03243                 }
03244         free(PAT.Rin[ifile]);
03245         free(PAT.Rans[ifile]);
03246         free(PAT.vRin[ifile]);
03247         PatMemory[ifile] = 0;
03248         PAT.Npat[ifile] = 0;
03249         
03250         return 0;
03251 }               
03252 
03253 
03254 /***********************************************************/
03255 /* MLP_StatInputs                                          */
03256 /*                                                         */
03257 /* compute statistics about the inputs: mean, RMS, min, max */
03258 /*                                                         */
03259 /* inputs:      int Nexamples = number of examples         */
03260 /*              int Ninputs = number of quantities         */
03261 /*              type_pat **inputs = input values           */
03262 /*                                                         */
03263 /* outputs:     dbl *mean = mean value                     */
03264 /*              dbl *sigma = RMS                           */
03265 /*              dbl *minimum = minimum                     */
03266 /*              dbl *maximum = maximum                     */
03267 /*                                                         */
03268 /* return value (int):  always = 0                         */  
03269 /*                                                         */
03270 /* Author: J.Schwindling   11-Oct-99                       */
03271 /***********************************************************/   
03272 
03273 int MLP_StatInputs(int Nexamples, int Ninputs, type_pat **inputs, 
03274                 dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)      
03275 {
03276         dbl *fmean;
03277         int j, ipat, nmax;
03278         
03279 /* first compute a fast rough mean using the first 100 events */
03280         fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
03281         nmax = 100;
03282         if(Nexamples<100) nmax=Nexamples;
03283         
03284         for(j=0;j<Ninputs;j++)
03285                 {
03286                 fmean[j] = 0;
03287                 for(ipat=0;ipat<nmax;ipat++)
03288                         {
03289                         fmean[j] += (dbl) inputs[ipat][j];
03290                         }
03291                 fmean[j] = fmean[j]/(dbl) nmax;
03292                 
03293 /* now compute real mean and sigma, min and max */              
03294                 mean[j] = 0;
03295                 sigma[j] = 0;
03296                 minimum[j] = 99999;
03297                 maximum[j] = -99999;
03298                 for(ipat=0;ipat<Nexamples;ipat++)
03299                         {
03300                         mean[j] += (dbl) inputs[ipat][j];
03301                         sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
03302                                     ((dbl) inputs[ipat][j]-fmean[j]);
03303                         if((dbl) inputs[ipat][j] > maximum[j]) 
03304                                 maximum[j]=(dbl) inputs[ipat][j];           
03305                         if((dbl) inputs[ipat][j] < minimum[j]) 
03306                                 minimum[j]=(dbl) inputs[ipat][j];           
03307                         }
03308                 mean[j] = mean[j]/(dbl) Nexamples;
03309                 sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples - 
03310                                 (mean[j]-fmean[j])*
03311                                 (mean[j]-fmean[j]));    
03312                 }
03313         free(fmean);
03314         return 0;
03315 }
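/* The sigma above relies on the shifted-data variance identity: for
   any constant c (here fmean, the rough mean of the first events),

       Var(x) = E[(x-c)^2] - (E[x]-c)^2

   which is what sqrt(sigma/N - (mean-fmean)^2) evaluates, and which is
   better conditioned numerically than the naive E[x^2] - E[x]^2.
   Quick check with x = {1,2,3,4} and c = 2: E[(x-c)^2] = 6/4 = 1.5,
   (E[x]-c)^2 = 0.25, so Var = 1.25, the population variance.         */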
03316 
03317 /***********************************************************/
03318 /* MLP_PrintInputStat                                      */
03319 /*                                                         */
03320 /* prints statistics about the inputs: mean, RMS, min, max */
03321 /*                                                         */
03322 /* return value (int) = error code: 0 = OK                 */
03323 /*                                  -111 = could not       */
03324 /*                                         allocate memory */
03325 /*                                                         */
03326 /* Author: J.Schwindling   11-Oct-99                       */
03327 /* Modified: J.Schwindling 31-Jan-2000: return value       */
03328 /***********************************************************/   
03329 
03330 int MLP_PrintInputStat()
03331 {
03332         int j;
03333         dbl *mean, *sigma, *minimum, *maximum;
03334 
03335 /* allocate memory */
03336         mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03337         sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03338         minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03339         maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03340 
03341         if(mean == 0 || sigma == 0 || minimum == 0
03342            || maximum == 0) return -111;
03343 
03344         MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
03345                         mean,sigma,minimum,maximum);
03346 
03347         printf("\t mean \t\t RMS \t\t min \t\t max\n");
03348         for(j=0;j<NET.Nneur[0];j++)
03349                 {
03350                 printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
03351                                         mean[j],sigma[j],minimum[j],maximum[j]);
03352                 }
03353         
03354         free(mean);
03355         free(sigma);
03356         free(minimum);
03357         free(maximum);  
03358         printf("\n");
03359         return 0;
03360 }
03361 
03362 
03363 /***********************************************************/
03364 /* NormalizeInputs                                         */
03365 /*                                                         */
03366 /* normalize the inputs: I' = (I - <I>) / RMS(I)           */
03367 /*                                                         */
03368 /* return value (int) = error code: 0 = OK                 */
03369 /*                                  -111 = could not       */
03370 /*                                         allocate memory */
03371 /*                                                         */
03372 /* Author: J.Schwindling   04-May-1999                     */
03373 /* Modified: J.Schwindling 31-Jan-2000: return value       */
03374 /***********************************************************/   
03375 
03376 /* extern "C"Dllexport */int NormalizeInputs()
03377 {
03378         int j, ipat;
03379         dbl *mean, *sigma, *minimum, *maximum;
03380 
03381 /* allocate memory */
03382         mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03383         sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03384         STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03385         STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03386         minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03387         maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03388         
03389         if(mean == 0 || sigma == 0 || minimum == 0
03390            || maximum == 0 || STAT.mean == 0 ||
03391            STAT.sigma == 0) return -111;
03392 
03393         MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
03394                         mean,sigma,minimum,maximum);
03395 
03396         if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
03397         for(j=0;j<NET.Nneur[0];j++)
03398                 {
03399                 if(NET.Debug>=1)
03400                         printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
03401                                         mean[j],sigma[j],minimum[j],maximum[j]);
03402                 
03403 /* store mean and sigma for output function */          
03404                 STAT.mean[j] = mean[j];                 
03405                 STAT.sigma[j] = sigma[j];
03406                                         
03407 /* finally apply the normalization */
03408                 for(ipat=0;ipat<PAT.Npat[0];ipat++)
03409                         {
03410                         PAT.Rin[0][ipat][j] =
03411                         (PAT.Rin[0][ipat][j]-(float) mean[j])/
03412                         (float) sigma[j];
03413                         }       
03414                 for(ipat=0;ipat<PAT.Npat[1];ipat++)
03415                         {
03416                         PAT.Rin[1][ipat][j] =
03417                         (PAT.Rin[1][ipat][j]-(float) mean[j])/
03418                         (float) sigma[j];
03419                         }       
03420                 }
03421         
03422         free(mean);
03423         free(sigma);
03424         free(minimum);
03425         free(maximum);  
03426         if(NET.Debug>=1) printf("\n");
03427         return 0;
03428 }
03429 
03430 
03431 /***********************************************************/
03432 /* AllocNetwork                                            */
03433 /*                                                         */
03434 /* memory allocation for weights, etc                      */
03435 /*                                                         */
03436 /* inputs:      int Nlayer: number of layers               */
03437 /*              int *Neurons: number of neurons per layer  */
03438 /*                                                         */
03439 /* return value (int): error = 0: no error                 */
03440 /*                           = -111: could not allocate mem*/ 
03441 /*                                                         */
03442 /* Author: J.Schwindling   28-Sep-99                       */
03443 /***********************************************************/   
03444 
03445 int AllocNetwork(int Nlayer, int *Neurons)
03446 {
03447         int i, j, k, l;
03448         
03449         if(NetMemory != 0) FreeNetwork();
03450         NetMemory = 1;
03451         
03452         NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
03453         if(NET.Nneur == 0) return -111;
03454         
03455         NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
03456         NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
03457         NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
03458         NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
03459         NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
03460         if(NET.T_func == 0 || NET.Deriv1 == 0
03461                 || NET.Inn == 0 || NET.Outn == 0
03462                 || NET.Delta == 0) return -111;
03463         
03464         for(i=0; i<Nlayer; i++)
03465                 {
03466                 NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
03467                 NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03468                 NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03469                 NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03470                 NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03471                 if(NET.T_func[i] == 0 || NET.Deriv1[i] == 0 
03472                         || NET.Inn[i] == 0 || NET.Outn[i] == 0
03473                         || NET.Delta[i] ==0 ) return -111;
03474                 }
03475                 
03476         NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03477         NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
03478         LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03479         LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03480         LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03481         if(NET.Weights == 0 || NET.vWeights == 0 
03482           || LEARN.Odw == 0 || LEARN.ODeDw == 0
03483           || LEARN.DeDw == 0)  return -111;
03484           
03485         for(i=1; i<Nlayer; i++)
03486                 {
03487                 k = Neurons[i-1]+1;
03488                 NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
03489                                                 sizeof(dbl));
03490                 NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03491                 LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03492                 LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03493                 LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03494                 if(NET.Weights[i] == 0 || NET.vWeights[i] == 0 
03495                   || LEARN.Odw[i] == 0 || LEARN.ODeDw[i] == 0
03496                   || LEARN.DeDw[i] == 0)  return -111;
03497                   
03498                 for(j=0; j<Neurons[i]; j++)
03499                         {
03500                         NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
03501                         LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
03502                         LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
03503                         LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
03504                         if(LEARN.Odw[i][j] == 0 
03505                           || LEARN.ODeDw[i][j] == 0
03506                           || LEARN.DeDw[i][j] == 0)  return -111;
03507                         
03508                         for(l=0; l<k; l++)
03509                                 {
03510                                 LEARN.Odw[i][j][l] = 0;
03511                                 LEARN.ODeDw[i][j][l] = 0;
03512                                 }
03513                         }
03514                 }
03515         return 0;       
03516 }
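/* Storage sketch for the weights allocated above: layer i owns one
   flat array vWeights[i] of Neurons[i]*k values, k = Neurons[i-1]+1
   (bias in column 0), and Weights[i][j] merely aliases row j:

       NET.Weights[i][j][l] == NET.vWeights[i][j*k + l]

   The flat view is what the vectorized routines (MLP_MatrixVectorBias
   etc.) iterate over; the 2-D view keeps per-neuron code readable.   */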
03517 
03518 
03519 /***********************************************************/
03520 /* FreeNetwork                                             */
03521 /*                                                         */
03522 /* frees the memory allocated for the network              */
03523 /*                                                         */
03524 /* Author: J.Schwindling   06-Oct-99                       */
03525 /***********************************************************/   
03526 
03527 void FreeNetwork()
03528 {
03529         int i, j;
03530         for(i=1; i<NET.Nlayer; i++)
03531                 {
03532                 for(j=0; j<NET.Nneur[i]; j++)
03533                         {
03534 /*                      free(NET.Weights[i][j]); */
03535                         free(LEARN.Odw[i][j]);
03536                         free(LEARN.ODeDw[i][j]);
03537                         free(LEARN.DeDw[i][j]);
03538                         }
03539                 free(NET.vWeights[i]);
03540                 free(NET.Weights[i]);
03541                 free(LEARN.Odw[i]);
03542                 free(LEARN.ODeDw[i]);
03543                 free(LEARN.DeDw[i]);
03544                 }
03545         free(NET.Weights);
03546         free(LEARN.Odw);
03547         free(LEARN.ODeDw);
03548         free(LEARN.DeDw);       
03549         
03550         free(NET.Nneur);
03551         
03552         for(i=0; i<NET.Nlayer; i++)
03553                 {
03554                 free(NET.T_func[i]);
03555                 free(NET.Deriv1[i]);
03556                 free(NET.Inn[i]);
03557                 free(NET.Outn[i]);
03558                 free(NET.Delta[i]);
03559                 }
03560         free(NET.T_func);
03561         free(NET.Deriv1);
03562         free(NET.Inn);
03563         free(NET.Outn);
03564         free(NET.Delta);
03565         
03566         NetMemory = 0;  
03567 }
03568 
03569 /***********************************************************/
03570 /* GetNetStructure                                         */
03571 /*                                                         */
03572 /* given a string like "3,4,1" returns the network         */
03573 /* structure                                               */
03574 /*                                                         */
03575 /* inputs:      char *s: input string                      */
03576 /*                                                         */
03577 /* outputs:     int *Nlayer: number of layers              */
03578 /*              int *Nneur: number of neurons per layer    */
03579 /*                                                         */
03580 /* return value (int): error = 0: no error                 */
03581 /*                           = -1: s is empty              */ 
03582 /*                           = -2: s is too long           */
03583 /*                           = -3: too many layers         */
03584 /*                                                         */
03585 /* Author: J.Schwindling   04-Oct-99                       */
03586 /***********************************************************/   
03587 
03588 int GetNetStructure(char *s, int *Nlayer, int *Nneur)
03589 {
03590         int i=0;
03591         char tmp[1024];
03592 
03593         if(strlen(s)==0) return -1;
03594         if(strlen(s)>=1024) return -2;  /* tmp[] holds at most 1023 chars + '\0' */
03595 
03596         strcpy(tmp,s);
03597         if (strtok(tmp,","))
03598                 {
03599                 i=1;
03600                 while (strtok(NULL,",")) i++;
03601                 }
03602         *Nlayer = i;
03603         if(i > NLMAX) return -3;
03604 
03605         strcpy(tmp,s);
03606         if (*Nlayer>0)
03607                 {
03608                 sscanf(strtok(tmp,","),"%d",&(Nneur[0]));
03609                 for (i=1;i<*Nlayer;i++)
03610                         sscanf(strtok(NULL,","),"%d",&(Nneur[i]));
03611                 }
03612 
03613         return 0;
03614 }
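/* Usage sketch for GetNetStructure:

       char s[] = "3,4,1";
       int nl, nn[NLMAX];
       if(GetNetStructure(s,&nl,nn) == 0)
               {
               nl is now 3 and nn[0..2] = {3,4,1}
               }
*/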
03615 
03616 
03617 /***********************************************************/
03618 /* MLP_SetNet                                              */
03619 /*                                                         */
03620 /* to set the structure of a neural network                */
03621 /* inputs:     int *nl = number of layers                  */
03622 /*             int *nn = number of neurons                 */
03623 /*                                                         */
03624 /* return value (int) = error value:                       */
03625 /*                              0: no error                */ 
03626 /*                              1: N layers > NLMAX        */ 
03627 /*                              2: N layers < 2            */ 
03628 /*                           -111: Error allocating memory */ 
03629 /*                                                         */
03630 /* Author: J.Schwindling   14-Apr-99                       */
03631 /* Modified: J.Schwindling 05-Oct-99 allocate memory       */
03632 /* Modified: J.Schwindling 29-Nov-99 LearnFree, LearnAlloc */
03633 /***********************************************************/   
03634                                                 
03635 int MLP_SetNet(int *nl, int *nn)
03636 {
03637     int il,ierr;
03638 
03639     if((*nl)>NLMAX) return(1);
03640     if((*nl)<2) return(2);
03641     
03642 /*    LearnFree(); */
03643 /* allocate memory */      
03644     ierr = AllocNetwork(*nl,nn);
03645     if(ierr != 0) return ierr;
03646       
03647 /* set number of layers */               
03648     NET.Nlayer = (int) *nl;
03649 
03650 /* set number of neurons */               
03651     for(il=0; il<NET.Nlayer; il++) {
03652        NET.Nneur[il] = nn[il];
03653       }
03654 
03655 /* set transfer functions */      
03656     SetDefaultFuncs();
03657 /*    LearnAlloc(); */
03658       
03659     return(0); 
03660 }
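/* Putting the two routines together (a sketch): parse the structure
   string, then let MLP_SetNet allocate the network and install the
   default transfer functions:

       char s[] = "3,4,1";
       int nl, nn[NLMAX], err;
       err = GetNetStructure(s,&nl,nn);
       if(err == 0) err = MLP_SetNet(&nl,nn);
       err: 0 = ready, 1/2 = bad layer count, -111 = out of memory
*/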
03661 
03662 
03663 /***********************************************************/
03664 /* MLP_MatrixVectorBias                                    */
03665 /*                                                         */
03666 /* computes a Matrix-Vector product                        */
03667 /* r[j] = M[j][0] + Sum_i M[j][i] v[i]                     */ 
03668 /*                                                         */
03669 /* inputs:     dbl *M = matrix (n lines, m+1 columns)      */
03670 /*             dbl *v = vector (dimension m)               */
03671 /*             dbl *r = resulting vector (dimension n)     */
03672 /*             int n                                       */
03673 /*             int m                                       */ 
03674 /*                                                         */
03675 /* Author: J.Schwindling   24-Jan-00                       */
03676 /***********************************************************/
03677    
03678 void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
03679 {
03680         int i,j;
03681         register dbl a1, a2, a3, a4, c, d;
03682         dbl *pM1 = M;
03683         dbl *pM2 = &(M[m+1]);
03684         dbl *pM3 = &(M[2*(m+1)]);
03685         dbl *pM4 = &(M[3*(m+1)]);
03686         dbl *pr = r;
03687         int mp1 = m+1;
03688         
03689         for(i=0; i<n-3; 
03690                 i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1, 
03691                 pr+=4)
03692                 {
03693                 a1 = *pM1;
03694                 a2 = *pM2;
03695                 a3 = *pM3;
03696                 a4 = *pM4;
03697                 pM1++; pM2++; pM3++; pM4++;
03698                 for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2) 
03699                         {
03700                         c = v[j];
03701                         d = v[j+1];
03702                         a1 = a1 + *pM1 * c + *(pM1+1) * d;
03703                         a2 = a2 + *pM2 * c + *(pM2+1) * d; 
03704                         a3 = a3 + *pM3 * c + *(pM3+1) * d;
03705                         a4 = a4 + *pM4 * c + *(pM4+1) * d; 
03706                         }
03707                 for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
03708                         {
03709                         c = v[j];
03710                         a1 = a1 + *pM1 * c;
03711                         a2 = a2 + *pM2 * c; 
03712                         a3 = a3 + *pM3 * c;
03713                         a4 = a4 + *pM4 * c; 
03714                         }       
03715                 *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
03716                 }
03717         for(i=i; i<n; i++)
03718                 {
03719                 pM1 = &(M[i*(m+1)]);
03720                 a1 = *pM1;
03721                 pM1++;
03722                 for(j=0; j<m; j++, pM1++)
03723                         {
03724                         a1 = a1 + *pM1 * v[j];
03725                         }
03726                 r[i] = a1;      
03727                 }       
03728 }
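/* The 4-row / 2-column unrolling above computes exactly the formula in
   the header comment; a naive reference version (the _ref name is
   illustrative only, not part of the library):

       void MLP_MatrixVectorBias_ref(dbl *M, dbl *v, dbl *r, int n, int m)
       {
               int i,j;
               for(i=0; i<n; i++)
                       {
                       r[i] = M[i*(m+1)];
                       for(j=0; j<m; j++)
                               r[i] += M[i*(m+1)+1+j] * v[j];
                       }
       }
*/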
03729 /***********************************************************/
03730 /* MLP_MatrixVector                                        */
03731 /*                                                         */
03732 /* computes a Matrix-Vector product                        */
03733 /* r[j] = Sum_i M[j][i] v[i]                               */ 
03734 /*                                                         */
03735 /* inputs:     dbl *M = matrix (n lines, m columns)        */
03736 /*             type_pat *v = vector (dimension m)          */
03737 /*             dbl *r = resulting vector (dimension n)     */
03738 /*             int n                                       */
03739 /*             int m                                       */ 
03740 /*                                                         */
03741 /* Author: J.Schwindling   24-Jan-00                       */
03742 /***********************************************************/
03743    
void MLP_MatrixVector(dbl *M, type_pat *v, dbl *r, int n, int m)
{
        int i,j;
        register dbl a1, a2, a3, a4, c, d;
        dbl *pM1 = M;                   /* pointers to 4 consecutive rows of M */
        dbl *pM2 = &(M[m]);
        dbl *pM3 = &(M[2*m]);
        dbl *pM4 = &(M[3*m]);
        dbl *pr = r;
        int mp1 = m;                    /* row stride: no bias column here;    */
                                        /* name kept from MLP_MatrixVectorBias */

/* main loop: compute 4 rows of the product at a time */
        for(i=0; i<n-3; 
                i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1, 
                pr+=4)
                {
                a1 = 0;                 /* no bias: start from 0 */
                a2 = 0;
                a3 = 0;
                a4 = 0;
                /* inner loop unrolled by 2 over the vector elements */
                for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2) 
                        {
                        c = v[j];
                        d = v[j+1];
                        a1 = a1 + *pM1 * c + *(pM1+1) * d;
                        a2 = a2 + *pM2 * c + *(pM2+1) * d; 
                        a3 = a3 + *pM3 * c + *(pM3+1) * d;
                        a4 = a4 + *pM4 * c + *(pM4+1) * d; 
                        }
                /* last vector element when m is odd */
                for(; j<m; j++, pM1++, pM2++, pM3++, pM4++)
                        {
                        c = v[j];
                        a1 = a1 + *pM1 * c;
                        a2 = a2 + *pM2 * c; 
                        a3 = a3 + *pM3 * c;
                        a4 = a4 + *pM4 * c; 
                        }
                *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
                }
/* cleanup loop: the remaining (at most 3) rows, one at a time */
        for(; i<n; i++)
                {
                pM1 = &(M[i*m]);
                a1 = 0;
                for(j=0; j<m; j++, pM1++)
                        {
                        a1 = a1 + *pM1 * v[j];
                        }
                r[i] = a1;
                }
}
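
/* Usage sketch (hypothetical helper, not in the original file):
   multiply a 2 x 3 matrix by a 3-vector and compare with the formula
   r[j] = Sum_i M[j][i] v[i] by hand.                                */
static void MLP_MatrixVector_demo(void)
{
        dbl M[6] = {1, 2, 3,
                    4, 5, 6};           /* 2 rows, 3 columns, row-major */
        type_pat v[3] = {1, 0, 2};
        dbl r[2];

        MLP_MatrixVector(M, v, r, 2, 3);
        /* expected: r[0] = 1*1 + 2*0 + 3*2 = 7
                     r[1] = 4*1 + 5*0 + 6*2 = 16 */
        printf("r = %f %f\n", (double) r[0], (double) r[1]);
}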


/***********************************************************/
/* MLP_MM2rows                                             */
/*                                                         */
/* computes a Matrix-Matrix product, with the first matrix */
/* having 2 rows: c[i][j] = Sum_k a[i][k] * b[j][k]        */
/*                                                         */
/* inputs:     dbl *c = resulting matrix (2 rows, Nj cols) */
/*             type_pat *a = first matrix (2 rows, Nk cols)*/
/*             dbl *b = second matrix (Nj rows, Nk cols)   */
/*             int Ni  (unused: the row count of a is      */
/*                      fixed to 2)                        */
/*             int Nj                                      */
/*             int Nk                                      */
/*             int NaOffs = row stride of a                */
/*             int NbOffs = row stride of b                */
/*                                                         */
/* Author: J.Schwindling   24-Jan-00                       */
/***********************************************************/

void MLP_MM2rows(dbl* c, type_pat* a, dbl* b,
             int Ni, int Nj, int Nk, int NaOffs, int NbOffs)
{
int j,k;
dbl s00,s01,s10,s11;
type_pat *pa0,*pa1;
dbl *pb0,*pb1,*pc0,*pc1;

  /* main loop: compute 2 columns of c at a time, i.e. the dot */
  /* products of both rows of a with rows j and j+1 of b       */
  for (j=0; j<=Nj-2; j+=2)
   {
    pc0 = c+j;
    pc1 = c+j+Nj;
    s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;

    for (k=0,pb0=b+k+NbOffs*j,
             pb1=b+k+NbOffs*(j+1),
             pa0=a+k,
             pa1=a+k+NaOffs;
         k<Nk;
         k++,pa0++,
             pa1++,
             pb0++,
             pb1++)
     {
      s00 += (*pa0)*(*pb0);
      s01 += (*pa0)*(*pb1);
      s10 += (*pa1)*(*pb0);
      s11 += (*pa1)*(*pb1);
     }
    *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
   }
  /* cleanup: last column of c when Nj is odd */
  for (; j<Nj; j++)
   {
    pc0 = c+j;
    pc1 = c+j+Nj;
    s00 = 0.0; s10 = 0.0;
    for (k=0,pb0=b+k+NbOffs*j,
             pa0=a+k,
             pa1=a+k+NaOffs;
         k<Nk;
         k++,pa0++,
             pa1++,
             pb0++)
     {
      s00 += (*pa0)*(*pb0);
      s10 += (*pa1)*(*pb0);
     }
    *pc0 = s00; *pc1 = s10;
   }
}
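
/* Naive reference for MLP_MM2rows (a sketch added for clarity; the
   "_naive" helper is hypothetical and not part of the original
   MLPfit code). Ni is dropped since the row count of a is fixed.   */
static void MLP_MM2rows_naive(dbl* c, type_pat* a, dbl* b,
                              int Nj, int Nk, int NaOffs, int NbOffs)
{
        int i, j, k;
        for(i=0; i<2; i++)
                for(j=0; j<Nj; j++)
                        {
                        dbl s = 0.0;
                        for(k=0; k<Nk; k++)
                                s += a[i*NaOffs+k] * b[j*NbOffs+k];
                        c[i*Nj+j] = s;    /* c has row stride Nj */
                        }
}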

#ifdef __cplusplus
} // extern "C"
#endif