
/data/doxygen/doxygen-1.7.3/gen/CMSSW_4_2_8/src/PhysicsTools/MVATrainer/plugins/mlp_gen.cc

#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#include "mlp_gen.h"
#include "mlp_sigmoide.h"

#ifdef __cplusplus
extern "C" {
#endif

#define NPMAX 100000
#define NLMAX 1000

struct net_ net_ MLP_HIDDEN;
struct learn_ learn_ MLP_HIDDEN;
struct pat_ pat_ MLP_HIDDEN;
struct divers_ divers_ MLP_HIDDEN;
struct stat_ stat_ MLP_HIDDEN;

int MessLang = 0;
int OutputWeights = 100;
int ExamplesMemory = 0;
int WeightsMemory = 0;
int PatMemory[2] = {0,0};
int BFGSMemory = 0;
int JacobianMemory = 0;
int LearnMemory = 0;
int NetMemory = 0;
float MLPfitVersion = (float) 1.40;
dbl LastAlpha = 0;
int NLineSearchFail = 0;

dbl ***dir;             /* search direction (set by SteepestDir/CGDir/BFGSdir) */
dbl *delta;             /* weight-change vector used by the BFGS machinery */
dbl **BFGSH;            /* BFGS approximation of the inverse Hessian */
dbl *Gamma;             /* gradient-change vector used by the BFGS machinery */
dbl **JacobianMatrix;
int *ExamplesIndex;     /* (shuffled) indices of the learning examples */
dbl **Hessian;

/* The following lines are needed to use the dgels routine from the LAPACK
   library in MLP_ResLin()                                                  */

#include "mlp_lapack.h"
/* Subroutine */ int dgels_(char *trans, integer *m, integer *n, integer *
        nrhs, doublereal *a, integer *lda, doublereal *b, integer *ldb,
        doublereal *work, integer *lwork, integer *info);
/***********************************************************/
/* MLP_Out                                                 */
/*                                                         */
/* computes the output of the Neural Network               */
/* inputs:     double *rrin = inputs of the MLP            */
/* outputs:    double *rrout = outputs of the MLP          */
/*                                                         */
/* Author: J.Schwindling   29-Mar-99                       */
/* Modified: J.Schwindling 16-Jul-99 unrolled loops        */
/* Modified: J.Schwindling 20-Jul-99 fast sigmoid          */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out(type_pat *rrin, dbl *rrout)
{
//      static int i, il, in, j, ilm1, m, mp1;
        static int i, il, in, j, m, mp1;
        dbl **deriv1;

/* input layer */

        deriv1 = NET.Deriv1;
        m = NET.Nneur[0]%4;
        if(m==0) goto L10;
        for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
        mp1 = m+1;
        for(i=mp1; i<=NET.Nneur[0]; i+=4)
                {
                NET.Outn[0][i-1] = rrin[i-1];
                NET.Outn[0][i] = rrin[i];
                NET.Outn[0][i+1] = rrin[i+1];
                NET.Outn[0][i+2] = rrin[i+2];
                }

/* hidden and output layers */

        MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                        NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);

        for(il=2; il<NET.Nlayer; il++)
                {
                MLP_vSigmoideDeriv(NET.Outn[il-1],
                                deriv1[il-1],NET.Nneur[il-1]);
                MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                                NET.Outn[il],NET.Nneur[il],
                                NET.Nneur[il-1]);
                }
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                {
                deriv1[NET.Nlayer-1][in] = 1;
                }
}
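
/* Usage sketch (illustrative addition, not part of the original file): the
   forward pass leaves the outputs in NET.Outn[NET.Nlayer-1]; note that the
   code above never writes through rrout.  Network and pattern setup are
   assumed to have been done elsewhere by the usual MLPfit calls. */
#if 0
static void example_MLP_Out(type_pat *one_pattern)
{
        int in;
        dbl unused;
        MLP_Out(one_pattern, &unused);
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                printf("output %d = %f\n", in,
                        (double) NET.Outn[NET.Nlayer-1][in]);
}
#endif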


/***********************************************************/
/* MLP_Out_T                                               */
/*                                                         */
/* computes the output of the Neural Network when called   */
/* from MLP_Test                                           */
/* inputs:     double *rrin = inputs of the MLP            */
/*                                                         */
/* Author: J.Schwindling   23-Jul-1999                     */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out_T(type_pat *rrin)
{
        static int i, il, in, j, ilm1, m, mp1;
        register dbl a;

/* input layer */

        m = NET.Nneur[0]%4;
        if(m==0) goto L10;
        for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
        mp1 = m+1;
        for(i=mp1; i<=NET.Nneur[0]; i+=4)
                {
                NET.Outn[0][i-1] = rrin[i-1];
                NET.Outn[0][i] = rrin[i];
                NET.Outn[0][i+1] = rrin[i+1];
                NET.Outn[0][i+2] = rrin[i+2];
                }

/* hidden and output layers */

/*      for(in=0;in<NET.Nneur[0]; in++) printf("%e %e\n",
                NET.Outn[0][in],NET.Weights[1][0][in]);
                printf("\n");  */
        for(il=1; il<NET.Nlayer; il++)
                {
                ilm1 = il-1;
                m = NET.Nneur[ilm1]%4;
                for(in=0; in<NET.Nneur[il]; in++)
                        {
                        a = NET.Weights[il][in][0];
                        if(m==0) goto L20;
                        for(j=1;j<=m;j++) a +=
                                NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
L20:
                        mp1 = m+1;
                        for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
                                {
                                a +=
                                NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
                                NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
                                NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
                                NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
                                }
                        switch(NET.T_func[il][in])
                                {
                                case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                                        break;
                                case 1: NET.Outn[il][in] = a;
                                        break;
                                case 0: NET.Outn[il][in] = 0;
                                        break;
                                }
                        }
                }
}


/***********************************************************/
/* MLP_Out2                                                */
/*                                                         */
/* computes the output of the Neural Network               */
/* inputs:     double *rrin = inputs of the MLP            */
/* outputs:    double *rrout = outputs of the MLP          */
/*                                                         */
/* Author: J.Schwindling   29-Mar-99                       */
/* Modified: J.Schwindling 16-Jul-99 unrolled loops        */
/* Modified: J.Schwindling 20-Jul-99 fast sigmoid          */
/***********************************************************/

/* extern "C"Dllexport */void MLP_Out2(type_pat *rrin)
{
//      static int il, in, m, mp1, i0, ilm1;
        static int il, in, m, mp1;
        register int i;
        dbl **rrout, **deriv1;
        register dbl *prrout;
        type_pat *prrin;
        int nhid = NET.Nneur[1];
        int nin = NET.Nneur[0];

        rrout = NET.Outn;
        deriv1 = NET.Deriv1;

        m = NET.Nneur[0]%4;
        if(m==0) goto L10;
        if(m==1)
                {
                rrout[0][0] = rrin[1];
                goto L10;
                }
        else if(m==2)
                {
                rrout[0][0] = rrin[1];
                rrout[0][1] = rrin[2];
                goto L10;
                }
        else if(m==3)
                {
                rrout[0][0] = rrin[1];
                rrout[0][1] = rrin[2];
                rrout[0][2] = rrin[3];
                goto L10;
                }
L10:
        mp1 = m+1;
        prrout = &(rrout[0][mp1]);
        prrin = &(rrin[mp1+1]);
        for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
                {
                *(prrout-1) = *(prrin-1);
                *prrout = *prrin;
                *(prrout+1)= *(prrin+1);
                *(prrout+2) = *(prrin+2);
                }

/* first hidden layer */

        MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                NET.Outn[1],nhid,nin);


/* hidden and output layers */

        for(il=2; il<NET.Nlayer; il++)
                {
                MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
                MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                        NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
                }
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                deriv1[NET.Nlayer-1][in] = 1;
}
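
/* Layout note with a sketch (illustrative, not part of the original file):
   MLP_Out2 expects the packed format of PAT.vRin, where each pattern takes
   NET.Nneur[0]+1 slots and slot 0 is presumably the constant bias entry,
   so the true inputs start at rrin[1] (hence the rrin[1], rrin[2], ...
   indexing above). */
#if 0
static void example_MLP_Out2(int ipat)
{
        /* forward pass on pattern ipat of the learning sample (file 0) */
        MLP_Out2(&(PAT.vRin[0][ipat*(NET.Nneur[0]+1)]));
}
#endif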


/***********************************************************/
/* MLP_Test_MM                                             */
/*                                                         */
/* computes the MLP error function using matrix-matrix     */
/* multiplications                                         */
/* inputs:     int ifile = file number: 0=learn, 1=test    */
/*             dbl *tmp = a pointer to an array of size    */
/*                        2 x number of neurons in first   */
/*                        hidden layer                     */
/*                                                         */
/* return value (dbl) = error value                        */
/*                                                         */
/* Author: J.Schwindling   25-Jan-2000                     */
/***********************************************************/

dbl MLP_Test_MM(int ifile, dbl *tmp)
{
        int ipat;
        int npat = PAT.Npat[ifile];
        int nhid = NET.Nneur[1];
        int nin = NET.Nneur[0];
        int jpat, j, il, ilm1, m, in, mp1;
        dbl err, a, rrans;
        dbl *pweights, *ptmp;

        err = 0;
        for(ipat=0; ipat<npat-1; ipat+=2)
                {
                MLP_MM2rows(tmp, &(PAT.vRin[ifile][ipat*(nin+1)]),
                        NET.vWeights[1], 2, nhid, nin+1,
                        nin+1, nin+1);

                switch(NET.T_func[1][0])
                        {
                        case 2:
                        ptmp = &(tmp[0]);
                        MLP_vSigmoide(ptmp,2*nhid);
                        break;

                        case 1:
                        break;

                        case 0:
                        for(jpat=0; jpat<2; jpat++)
                                {
                                for(j=0; j<nhid; j++)
                                        {
                                        tmp[j+jpat*nhid] = 0;
                                        }
                                }
                        break;
                        }

                for(jpat=0; jpat<2; jpat++)
                {
                for(in=0; in<nhid; in++)
                        {
                        NET.Outn[1][in] = tmp[jpat*nhid+in];
                        }
                for(il=2; il<NET.Nlayer; il++)
                        {
                        ilm1 = il-1;
                        m = NET.Nneur[ilm1]%4;
                        for(in=0; in<NET.Nneur[il]; in++)
                                {
                                pweights = &(NET.Weights[il][in][0]);
                                a = *pweights;
                                pweights++;
                                if(m==0) goto L20;
                                for(j=1;j<=m;j++,pweights++) a +=
                                (*pweights)*NET.Outn[ilm1][j-1];
L20:
                                mp1 = m+1;
                                for(j=mp1; j<=NET.Nneur[ilm1];
                                        j+=4, pweights+=4)
                                        {
                                        a +=
                                *(pweights+3)*NET.Outn[ilm1][j+2]+
                                *(pweights+2)*NET.Outn[ilm1][j+1]+
                                *(pweights+1)*NET.Outn[ilm1][j]+
                                *(pweights  )*NET.Outn[ilm1][j-1];
                                        }
                                switch(NET.T_func[il][in])
                                {
                                case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                                        break;
                                case 1: NET.Outn[il][in] = a;
                                        break;
                                case 0: NET.Outn[il][in] = 0;
                                        break;
                                }
                                }
                        if(il == NET.Nlayer-1)
                        {
                        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        {
                        rrans = (dbl) PAT.Rans[ifile][ipat+jpat][in];
                        err  += (rrans-NET.Outn[NET.Nlayer-1][in])*
                                (rrans-NET.Outn[NET.Nlayer-1][in])*
                                PAT.Pond[ifile][ipat+jpat];
                        }
                        }
                        }
                }
        }

/* case of an odd number of patterns */
        for(ipat=ipat; ipat<npat; ipat++)
                {
                MLP_MatrixVector(NET.vWeights[1],
                                &(PAT.vRin[ifile][ipat*(nin+1)]),tmp,
                                nhid,nin+1);

                switch(NET.T_func[1][0])
                        {
                        case 2:
                        ptmp = &(tmp[0]);
                        MLP_vSigmoide(ptmp,nhid);  /* one pattern here, so nhid
                                                      entries (not 2*nhid)    */
                        break;

                        case 1:
                        break;

                        case 0:
                        for(j=0; j<nhid; j++)
                                {
                                tmp[j] = 0;
                                }
                        break;
                        }

                for(in=0; in<nhid; in++)
                        {
                        NET.Outn[1][in] = tmp[in];
                        }
                for(il=2; il<NET.Nlayer; il++)
                        {
                        ilm1 = il-1;
                        m = NET.Nneur[ilm1]%4;
                        for(in=0; in<NET.Nneur[il]; in++)
                                {
                                pweights = &(NET.Weights[il][in][0]);
                                a = *pweights;
                                pweights++;
                                if(m==0) goto L25;
                                for(j=1;j<=m;j++,pweights++) a +=
                                (*pweights)*NET.Outn[ilm1][j-1];
L25:
                                mp1 = m+1;
                                for(j=mp1; j<=NET.Nneur[ilm1];
                                        j+=4, pweights+=4)
                                        {
                                        a +=
                                *(pweights+3)*NET.Outn[ilm1][j+2]+
                                *(pweights+2)*NET.Outn[ilm1][j+1]+
                                *(pweights+1)*NET.Outn[ilm1][j]+
                                *(pweights  )*NET.Outn[ilm1][j-1];
                                        }
                                switch(NET.T_func[il][in])
                                {
                                case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                                        break;
                                case 1: NET.Outn[il][in] = a;
                                        break;
                                case 0: NET.Outn[il][in] = 0;
                                        break;
                                }
                                }
                        if(il == NET.Nlayer-1)
                        {
                        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        {
                        rrans = (dbl) PAT.Rans[ifile][ipat][in];
                        err  += (rrans-NET.Outn[NET.Nlayer-1][in])*
                                (rrans-NET.Outn[NET.Nlayer-1][in])*
                                PAT.Pond[ifile][ipat];
                        }
                        }
                }
        }
        return(err);
}
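
/* Caller-contract sketch (illustrative, not part of the original file):
   tmp must hold two patterns' worth of first-hidden-layer activations,
   exactly as allocated in MLP_Test below. */
#if 0
static dbl example_MLP_Test_MM(int ifile)
{
        dbl err = -1;
        dbl *tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
        if(tmp != 0)
                {
                err = MLP_Test_MM(ifile, tmp);
                free(tmp);
                }
        return err;
}
#endif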


/***********************************************************/
/* MLP_Test                                                */
/*                                                         */
/* computes the MLP error function                         */
/* inputs:     int ifile = file number: 0=learn, 1=test    */
/*             int regul = 0: no regularisation term       */
/*                         1: regularisation term          */
/*                            (for hybrid learning method) */
/*                                                         */
/* return value (dbl) = error value                        */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Test(int ifile,int regul)
{
        dbl err, rrans;
        int in,jn,ipat,ipati;

        dbl *tmp;

        tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
        if(tmp == 0)    /* not enough memory: fall back to the slower
                           pattern-by-pattern computation */
        {
        printf("not enough memory in MLP_Test\n");
        err = 0;
        for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
                {
                if(ifile==0)
                        {
                        ipati = ExamplesIndex[ipat];
                        }
                else
                        {
                        ipati = ipat;
                        }
                MLP_Out_T(PAT.Rin[ifile][ipati]);
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        {
                        rrans = (dbl) PAT.Rans[ifile][ipati][in];
                        err  += (rrans-NET.Outn[NET.Nlayer-1][in])*
                                (rrans-NET.Outn[NET.Nlayer-1][in])*
                                PAT.Pond[ifile][ipati];
                        }
                }

        if(regul>=1)
                {
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
                        {
                        err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
                                NET.Weights[NET.Nlayer-1][in][jn];
                        }
                }
        free(tmp);
        return(err);
        }
        else    /* computation using matrix - matrix multiply */
        {
        err = MLP_Test_MM(ifile, tmp);
        if(regul>=1)
                {
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
                        {
                        err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
                                NET.Weights[NET.Nlayer-1][in][jn];
                        }
                }
        free(tmp);
        return(err);
        }
}
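
/* Usage sketch (illustrative, not part of the original file): monitoring
   the error on the test sample between epochs, without the regularisation
   term. */
#if 0
static dbl example_test_error(void)
{
        return MLP_Test(1, 0);  /* ifile=1: test sample, regul=0 */
}
#endif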


/***********************************************************/
/* MLP_Stochastic                                          */
/*                                                         */
/* one epoch of MLP stochastic training                    */
/* (optimized for speed)                                   */
/*                                                         */
/* Author: J.Schwindling   08-Jul-99                       */
/* Modified: J.Schwindling 20-Jul-99 remove unused cases   */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Stochastic()
{
        int ipat, ii, inm1;
        dbl err = 0;
        int il, in1, in, itest2;
        dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
        dbl eta, eps, epseta;
        register dbl a, b, dd, a1, a2, a3, a4;
        dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
        dbl ***weights;

        if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
        weights = NET.Weights;
/* shuffle patterns */
        ShuffleExamples(PAT.Npat[0],ExamplesIndex);

/* reduce learning parameter */
        if(LEARN.Decay<1) EtaDecay();

        eta = -LEARN.eta;
        eps = LEARN.epsilon;
        epseta = eps/eta;

/* loop on the examples */
        for(ipat=0;ipat<PAT.Npat[0];ipat++)
                {
                ii = ExamplesIndex[ipat];
                pond = PAT.Pond[0][ii];

                MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)]));

/* next lines are equivalent to DeDwSum */
                for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                        {
                        deriv = NET.Deriv1[NET.Nlayer-1][in];
                        a = (dbl) PAT.Rans[0][ii][in];
                        b = NET.Outn[NET.Nlayer-1][in]-a;
                        err  += b*b*pond;
                        NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
                        }

                for(il=NET.Nlayer-2; il>0; il--)
                        {
                        dd = NET.Delta[il+1][0];
                        for(in=0; in<NET.Nneur[il]-3; in+=4)
                                {
                                deriv1 = NET.Deriv1[il][in];
                                deriv2 = NET.Deriv1[il][in+1];
                                deriv3 = NET.Deriv1[il][in+2];
                                deriv4 = NET.Deriv1[il][in+3];
                                itest2 = (NET.Nneur[il+1]==1);
                                a1 = dd*weights[il+1][0][in+1];
                                a2 = dd*weights[il+1][0][in+2];
                                a3 = dd*weights[il+1][0][in+3];
                                a4 = dd*weights[il+1][0][in+4];
                                if(itest2) goto L1;
                                pdelta = &(NET.Delta[il+1][1]);
                                for(in1=1; in1<NET.Nneur[il+1];
                                        in1++, pdelta++)
                                        {
                                        a1 += *pdelta * weights[il+1][in1][in+1];
                                        a2 += *pdelta * weights[il+1][in1][in+2];
                                        a3 += *pdelta * weights[il+1][in1][in+3];
                                        a4 += *pdelta * weights[il+1][in1][in+4];
                                        }
L1:                             NET.Delta[il][in] = a1*deriv1;
                                NET.Delta[il][in+1] = a2*deriv2;
                                NET.Delta[il][in+2] = a3*deriv3;
                                NET.Delta[il][in+3] = a4*deriv4;
                                }
                        for(in=in; in<NET.Nneur[il]; in++)
                                {
                                deriv = NET.Deriv1[il][in];
                                itest2 = (NET.Nneur[il+1]==1);
                                a = dd*weights[il+1][0][in+1];
                                if(itest2) goto L2;
                                pdelta = &(NET.Delta[il+1][1]);
                                for(in1=1; in1<NET.Nneur[il+1];
                                        in1++, pdelta++)
                                        {
                                        a += *pdelta *
                                        weights[il+1][in1][in+1];
                                        }
L2:                             NET.Delta[il][in] = a*deriv;
                                }

                        }                       /* end of loop on layers */


/* update the weights */
                if(eps==0)
                        {
                for(il=1; il<NET.Nlayer; il++)
                        {
                        inm1 = NET.Nneur[il-1];
                        for(in=0; in<NET.Nneur[il]-3; in+=4)
                                {
                                a1 = NET.Delta[il][in];
                                a2 = NET.Delta[il][in+1];
                                a3 = NET.Delta[il][in+2];
                                a4 = NET.Delta[il][in+3];
                                pout = &(NET.Outn[il-1][0]);
                                weights[il][in][0] += a1;
                                weights[il][in+1][0] += a2;
                                weights[il][in+2][0] += a3;
                                weights[il][in+3][0] += a4;
                                weights[il][in][1] += a1* (*pout);
                                weights[il][in+1][1] += a2* (*pout);
                                weights[il][in+2][1] += a3* (*pout);
                                weights[il][in+3][1] += a4* (*pout);
                                pout++;
                                pw1 = &(weights[il][in][2]);
                                pw2 = &(weights[il][in+1][2]);
                                pw3 = &(weights[il][in+2][2]);
                                pw4 = &(weights[il][in+3][2]);
                                for(in1=2; in1<=inm1;
                                        ++in1, ++pout, ++pw1, ++pw2,
                                        ++pw3, ++pw4)
                                        {
                                        *pw1 += a1 * *pout;
                                        *pw2 += a2 * *pout;
                                        *pw3 += a3 * *pout;
                                        *pw4 += a4 * *pout;
                                        }
                                }
                        for(in=in; in<NET.Nneur[il]; in++)
                                {
                                a1 = NET.Delta[il][in];
                                pout = &(NET.Outn[il-1][0]);
                                weights[il][in][0] += a1;
                                weights[il][in][1] += a1* (*pout);
                                pout++;
                                pw1 = &(weights[il][in][2]);
                                for(in1=2; in1<=inm1;
                                        ++in1, ++pout, ++pw1)
                                        {
                                        *pw1 += a1 * *pout;
                                        }
                                }
                        }
                        }
                else
                        {
                for(il=1; il<NET.Nlayer; il++)
                        {
                        for(in=0; in<NET.Nneur[il]; in++)
                                {

                                a = NET.Delta[il][in];
                                LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
                                NET.Weights[il][in][0] += LEARN.Odw[il][in][0];

                                b = a*NET.Outn[il-1][0];
                                LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
                                NET.Weights[il][in][1] += LEARN.Odw[il][in][1];

                                for(in1=2; in1<=NET.Nneur[il-1]; in1++)
                                        {
                                        b = a*NET.Outn[il-1][in1-1];
                                        LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
                                        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
                                        }
                                }
                        }
                        }

                }                       /* end of loop on examples */
        return(err);
}
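
/* Update-rule recap (added note): with the sign convention eta = -LEARN.eta
   used above, each weight receives
        Dw(t) = -LEARN.eta * delta * out + LEARN.epsilon * Dw(t-1)
   i.e. plain stochastic gradient descent plus a momentum term; the eps==0
   branch is the momentum-free fast path with four-way unrolled loops. */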


/***********************************************************/
/* MLP_Epoch                                               */
/*                                                         */
/* one epoch of MLP training                               */
/* inputs:     int iepoch = epoch number                   */
/*             dbl *alpmin = optimal alpha from Line Search*/
/*                                                         */
/* return value (dbl) = error value on learning sample     */
/*                      BEFORE changing the weights        */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/* Modified: J.Schwindling 09-Apr-99                       */
/*                re-organize routine                      */
/*           J.Schwindling 13-Apr-99                       */
/*                remove Quickprop algorithm               */
/*                implement Polak-Ribiere conjugate grad.  */
/***********************************************************/

/* extern "C"Dllexport */dbl MLP_Epoch(int iepoch, dbl *alpmin, int *Ntest)
{
        dbl err, Norm, ONorm, beta, prod, ddir;
/*      int *index;*/
        int Nweights, Nlinear, ipat, ierr;
        int nn;

        err = 0;
        Norm = 0;
        *alpmin = 0.;

        Nweights = NET.Nweights;
        Nlinear = NET.Nneur[NET.Nlayer-2] + 1;

        if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
/* stochastic minimization */
        if(LEARN.Meth==1)
                {

                err = MLP_Stochastic();

                }
        else
                {
                if(iepoch==1 && LEARN.Meth==7)
                        {
                        SetLambda(10000);
                        MLP_ResLin();
                        if(NET.Debug>=2) PrintWeights();
                        }

/* save previous gradient and reset current one */
                DeDwSaveZero();
                if(LEARN.Meth==16)
                        {
                        ShuffleExamples(PAT.Npat[0],ExamplesIndex);
                        nn = PAT.Npat[0];
                        PAT.Npat[0] = nn/10;
                        for(ipat=0;ipat<nn;ipat++)
                                {
                                ierr = MLP_Train(&ExamplesIndex[ipat],&err);
                                if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
                                }
                        }
                else
                        {
                        for(ipat=0;ipat<PAT.Npat[0];ipat++)
                                {
                                ierr = MLP_Train(&ipat,&err);
                                if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
                                }
                        }
                DeDwScale(PAT.Npat[0]);
                if(LEARN.Meth==2) StochStep();
                if(LEARN.Meth==3)
                        {
                        SteepestDir();
                        if(LineSearch(alpmin,Ntest,err)==1) StochStep();
                        }

/* Conjugate Gradients Polak - Ribiere */
                if(LEARN.Meth==4)
                        {
                        if((iepoch-1)%LEARN.Nreset==0)
                                {
                                LEARN.Norm = DeDwNorm(); /* for next epoch */
                                SteepestDir();
                                }
                        else
                                {
                                ONorm = LEARN.Norm;
                                LEARN.Norm = DeDwNorm();
                                prod = DeDwProd();
                                beta = (LEARN.Norm-prod)/ONorm;
                                CGDir(beta);
                                }
                        if(LineSearch(alpmin,Ntest,err)==1) StochStep();
                        }

/* Conjugate Gradients Fletcher - Reeves */
                if(LEARN.Meth==5)
                        {
                        if((iepoch-1)%LEARN.Nreset==0)
                                {
                                LEARN.Norm = DeDwNorm(); /* for next epoch */
                                SteepestDir();
                                }
                        else
                                {
                                ONorm = LEARN.Norm;
                                LEARN.Norm = DeDwNorm();
                                beta = LEARN.Norm/ONorm;
                                CGDir(beta);
                                }
                        if(LineSearch(alpmin,Ntest,err)==1) StochStep();
                        }
                if(LEARN.Meth==6)
                        {
                        if((iepoch-1)%LEARN.Nreset==0)
                                {
                                SteepestDir();
                                InitBFGSH(Nweights);
                                }
                        else
                                {
                                GetGammaDelta();
                                ierr = GetBFGSH(Nweights);
                                if(ierr)
                                        {
                                        SteepestDir();
                                        InitBFGSH(Nweights);
                                        }
                                else
                                        {
                                        BFGSdir(Nweights);
                                        }
                                }
                        ddir = DerivDir();
                        if(ddir>0)
                                {
                                SteepestDir();
                                InitBFGSH(Nweights);
                                ddir = DerivDir();
                                }
                        if(LineSearch(alpmin,Ntest,err)==1)
                                {
                                InitBFGSH(Nweights);
                                SteepestDir();
                                if(LineSearch(alpmin,Ntest,err)==1)
                                        {
                                        printf("Line search fail \n");
                                        }
                                }
                        }
                if(LEARN.Meth==7)
                        {
                        if((iepoch-1)%LEARN.Nreset==0)
                                {
                                SteepestDir();
                                InitBFGSH(Nweights-Nlinear);
                                }
                        else
                                {
                        if(NET.Debug>=5) printf("Before GetGammaDelta \n");
                                GetGammaDelta();
                        if(NET.Debug>=5) printf("After GetGammaDelta \n");
                                ierr = GetBFGSH(Nweights-Nlinear);
                        if(NET.Debug>=5) printf("After GetBFGSH \n");
                                if(ierr)
                                        {
                                        SteepestDir();
                                        InitBFGSH(Nweights-Nlinear);
                                        }
                                else
                                        {
                                        BFGSdir(Nweights-Nlinear);
                                        }
                        if(NET.Debug>=5) printf("After BFGSdir \n");
                                }
                        SetLambda(10000);
                        if(LineSearchHyb(alpmin,Ntest)==1)
                                {
                                InitBFGSH(Nweights-Nlinear);
                                SteepestDir();
                                if(LineSearchHyb(alpmin,Ntest)==1)
                                        {
                                        printf("Line search fail \n");
                                        }
                                }
                        }
                }

        if(NET.Debug>=5) printf(" End MLP_Epoch\n");
        return(err);
}
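
/* Driver sketch (illustrative, not part of the original file): a minimal
   training loop calling MLP_Epoch; convergence tests and error handling
   are omitted. */
#if 0
static void example_training_loop(int Nepoch)
{
        int iepoch, Ntest;
        dbl err, alpmin;
        for(iepoch=1; iepoch<=Nepoch; iepoch++)
                {
                err = MLP_Epoch(iepoch, &alpmin, &Ntest);
                printf("epoch %d: learn error = %f, alpmin = %f\n",
                        iepoch, (double) err, (double) alpmin);
                }
}
#endif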


/***********************************************************/
/* MLP_Train                                               */
/*                                                         */
/* Train Network: compute output, update DeDw              */
/* inputs:     int *ipat = pointer to pattern number       */
/* input/output:    dbl *err = current error               */
/*                                                         */
/* return value (int) = error value: 1 wrong pattern number*/
/*                                   2 *ipat < 0           */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int MLP_Train(int *ipat, dbl *err)
{
        int in;

/*      if(*ipat>=PAT.Npat[0]) return(1);*/
        if(*ipat<0) return(2);

/*      MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
        MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
                {
                *err  += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
                        *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
                        PAT.Pond[0][*ipat];
                }
        DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
        return(0);
}


/***********************************************************/
/* StochStepHyb                                            */
/*                                                         */
/* Update the weights according to stochastic minimization */
/* formula (for hybrid methods)                            */
/*                                                         */
/* return value (int) = 0                                  */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int StochStepHyb()
{
        int il, in1, in;
        dbl eta, eps;

        eta = LEARN.eta;
        eps = LEARN.epsilon;
        for(il=NET.Nlayer-2; il>0; il--) {

        for(in=0; in<NET.Nneur[il]; in++) {

                /* compute delta weights */
                for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
                        LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
                         + eps * LEARN.Odw[il][in][in1];
                }

                /* update weights */
                for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
                        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
                }
        }
        }
        MLP_ResLin();
        return(0);
}


/***********************************************************/
/* StochStep                                               */
/*                                                         */
/* Update the weights according to stochastic minimization */
/* formula                                                 */
/*                                                         */
/* return value (int) = 0                                  */
/*                                                         */
/* Author: J.Schwindling   09-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */int StochStep()
{
        int il, in1, in;
        dbl eta, eps, epseta;

        eta = -LEARN.eta;
        eps = LEARN.epsilon;
        epseta = eps/eta;
        for(il=NET.Nlayer-1; il>0; il--) {
                for(in1=0; in1<=NET.Nneur[il-1]; in1++) {

                /* compute delta weights and update weights */
                for(in=0; in<NET.Nneur[il]; in++) {
                        LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
                         + epseta * LEARN.Odw[il][in][in1]);
                        NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
                }

        }
        }

        return(0);
}
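
/* Formula recap (added note): with eta = -LEARN.eta and epseta = eps/eta,
   the update above expands to
        Odw = -LEARN.eta * DeDw + LEARN.epsilon * Odw_old
        w  += Odw
   i.e. the same gradient-plus-momentum rule as StochStepHyb, merely
   factored differently. */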


/***********************************************************/
/* DeDwNorm                                                */
/*                                                         */
/* computes the norm of the gradient                       */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DeDwNorm()
{
        int il,in,jn;
        dbl dd=0;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                dd += LEARN.DeDw[il][in][jn]*
                                        LEARN.DeDw[il][in][jn];
        return(dd);
}
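
/* Added note: DeDwNorm returns the squared Euclidean norm
        ||dE/dw||^2 = sum over all weights of (dE/dw)^2
   (no square root is taken); the conjugate-gradient beta formulas in
   MLP_Epoch are written for this squared norm. */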


/***********************************************************/
/* DeDwProd                                                */
/*                                                         */
/* scalar product between new gradient and previous one    */
/* (used in Polak-Ribiere Conjugate Gradient method)       */
/*                                                         */
/* Author: J.Schwindling   26-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */dbl DeDwProd()
{
        int il,in,jn;
        dbl dd=0;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                dd += LEARN.DeDw[il][in][jn]*
                                        LEARN.ODeDw[il][in][jn];
        return(dd);
}


/***********************************************************/
/* DeDwZero                                                */
/*                                                         */
/* resets to 0 the gradient (should be done before DeDwSum)*/
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwZero()
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                LEARN.DeDw[il][in][jn] = 0;
}


/***********************************************************/
/* DeDwScale                                               */
/*                                                         */
/* divides the gradient by the number of examples          */
/* inputs:     int Nexamples = number of examples          */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwScale(int Nexamples)
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
}


/***********************************************************/
/* DeDwSave                                                */
/*                                                         */
/* copies the gradient DeDw into ODeDw                     */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwSave()
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
}


/***********************************************************/
/* DeDwSaveZero                                            */
/*                                                         */
/* copies the gradient DeDw into ODeDw                     */
/* resets DeDw to 0                                        */
/*                                                         */
/* Author: J.Schwindling   23-Apr-99                       */
/***********************************************************/

/* extern "C"Dllexport */void DeDwSaveZero()
{
        int il, in, jn;
        for(il=1; il<NET.Nlayer; il++)
                for(in=0; in<NET.Nneur[il]; in++)
                        for(jn=0; jn<=NET.Nneur[il-1]; jn++)
                                {
                                LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
                                LEARN.DeDw[il][in][jn] = 0;
                                }
}


/***********************************************************/
/* DeDwSum                                                 */
/*                                                         */
/* adds to the total gradient the gradient in the current  */
/* example                                                 */
/* inputs:     type_pat *ans = desired answers             */
/*             dbl *out = network outputs                  */
/*             int ipat = pattern number                   */
/*                                                         */
/* Author: J.Schwindling   31-Mar-99                       */
/* Modified: B.Mansoulie   23-Apr-99                       */
/*           faster and still correct way to compute the   */
/*           sigmoid derivative                            */
/***********************************************************/

/* extern "C"Dllexport */int DeDwSum(type_pat *ans, dbl *out, int ipat)
{
        int il, in1, in, ii;
/*      dbl err[NMAX][4]; */
        dbl deriv;
        dbl *pout, *pdedw, *pdelta;
        register dbl a, b;
/*      char buf[50];*/

/* output layer */
        b =  (dbl) PAT.Pond[0][ipat];
        for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
        {
                deriv = NET.Deriv1[NET.Nlayer-1][in];
                NET.Delta[NET.Nlayer-1][in] =
                        (out[in] - (dbl) ans[in])*deriv*b;
        }

        for(il=NET.Nlayer-2; il>0; il--)
        {

        for(in=0; in<NET.Nneur[il]; in++)
        {
                deriv = NET.Deriv1[il][in];
                a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
                pdelta = &(NET.Delta[il+1][1]);
                for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++)
                {
                        a += *pdelta * NET.Weights[il+1][in1][in+1];
                }
                NET.Delta[il][in] = a * deriv;
        }
        }

        for(il=1; il<NET.Nlayer; il++)
                {
                ii = NET.Nneur[il-1];
                for(in=0; in<NET.Nneur[il]; in++)
                        {
                        a = NET.Delta[il][in];
                        LEARN.DeDw[il][in][0] += a;
                        LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
                        pout = &(NET.Outn[il-1][1]);
                        pdedw = &(LEARN.DeDw[il][in][2]);
                        for(in1=1; in1<ii; ++in1, ++pout, ++pdedw)
                                {
                                (*pdedw) += a * (*pout);
                                }
                        }
                }

        return(0);
}
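
/* Backpropagation recap (added note): for one example of weight pond, the
   three loops above implement
        delta_out   = (out - ans) * f'(net) * pond
        delta_il,in = f'(net) * sum_k delta_(il+1),k * w_(il+1),k,in
        dE/dw_(il,in,j) += delta_(il,in) * out_(il-1,j-1)   with j=0 the bias
   which is the standard chain rule for the weighted quadratic error. */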
01181 
01182 
01183 /***********************************************************/
01184 /* SetTransFunc                                            */
01185 /*                                                         */
01186 /* to set the transfert function of a neuron               */
01187 /* inputs:     int layer = layer number (1 -> Nlayer)      */
01188 /*             int neuron = neuron number (1 -> Nneur)     */
01189 /*             int func = transfert function:              */
01190 /*                              0: y=0                     */   
01191 /*                              1: y=x                     */   
01192 /*                              2: y=1/(1+exp(-x))         */   
01193 /*                              3: y=tanh(x)               */   
01194 /*                              4: y=delta*x+1/(1+exp(-x)) */   
01195 /*                              5: y=exp(-x**2)            */   
01196 /*                                                         */
01197 /* return value (int) = error value:                       */
01198 /*                              0: no error                */ 
01199 /*                              1: layer > NLMAX           */ 
01200 /*                              2: neuron > NMAX (disabled)*/ 
01201 /*                                                         */
01202 /* Author: J.Schwindling   02-Apr-99                       */
01203 /***********************************************************/   
01204 
01205 /* extern "C"Dllexport */int SetTransFunc(int layer, int neuron, 
01206                                           int func)
01207 {    
01208     if(layer>NLMAX) return(1);
01209 /*    if(neuron>NMAX) return(2);*/
01210          
01211     NET.T_func[layer-1][neuron-1] = func;
01212 
01213     return(0); 
01214 }                 
01215 
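/* Illustration (standalone sketch, not part of the original file): the
   six transfer-function codes listed in the SetTransFunc header above.
   The helper name trans_func is hypothetical.                           */

#include <math.h>
#include <stdio.h>

static double trans_func(int func, double x, double delta)
{
        switch(func)
                {
                case 0: return 0.0;                         /* y = 0     */
                case 1: return x;                           /* linear    */
                case 2: return 1.0/(1.0+exp(-x));           /* sigmoid   */
                case 3: return tanh(x);                     /* tanh      */
                case 4: return delta*x + 1.0/(1.0+exp(-x)); /* lin+sig   */
                case 5: return exp(-x*x);                   /* gaussian  */
                default: return 0.0;
                }
}

int main(void)
{
        int f;
        for(f=0; f<=5; f++)
                printf("func %d at x=0.5: %f\n", f, trans_func(f, 0.5, 0.1));
        return 0;
}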
01216 
01217 /***********************************************************/
01218 /* SetDefaultFuncs                                         */
01219 /*                                                         */
01220 /* - sets the default transfer functions to sigmoid for    */
01221 /* hidden layers and linear for output layer               */
01222 /* - sets temperatures to 1                                */
01223 /*                                                         */
01224 /*                                                         */
01225 /* Author: J.Schwindling   08-Apr-99                       */
01226 /***********************************************************/   
01227 
01228 void SetDefaultFuncs()
01229 {
01230     int il,in;
01231     for(il=0; il<NET.Nlayer; il++) {
01232        for(in=0; in<NET.Nneur[il]; in++) {
01233           NET.T_func[il][in] = 2; 
01234           if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
01235          }
01236       }
01237 
01238 }
01239 
01240 
01241 /***********************************************************/
01242 /* SteepestDir                                             */
01243 /*                                                         */
01244 /* sets the search direction to steepest descent           */
01245 /*                                                         */
01246 /* Author: J.Schwindling   02-Apr-99                       */
01247 /***********************************************************/   
01248 
01249 /* extern "C"Dllexport */void SteepestDir()
01250 {
01251         int il,in,jn;
01252         for(il=1; il<NET.Nlayer; il++)
01253                 for(in=0; in<NET.Nneur[il]; in++)
01254                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01255                                 dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
01256 }
01257 
01258 
01259 /***********************************************************/
01260 /* CGDir                                                   */
01261 /*                                                         */
01262 /* sets the search direction to conjugate gradient dir     */
01263 /* inputs:     dbl beta : d(t+1) = -g(t+1) + beta d(t)     */
01264 /*                        beta should be:                  */
01265 /*    ||g(t+1)||^2 / ||g(t)||^2 (Fletcher-Reeves)          */
01266 /*    g(t+1) (g(t+1)-g(t)) / ||g(t)||^2 (Polak-Ribiere)    */
01267 /*                                                         */
01268 /* Author: J.Schwindling   02-Apr-99                       */
01269 /***********************************************************/   
01270 
01271 /* extern "C"Dllexport */void CGDir(dbl beta)
01272 {
01273         int il,in,jn;
01274         for(il=1; il<NET.Nlayer; il++)
01275                 for(in=0; in<NET.Nneur[il]; in++)
01276                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01277                                 {
01278                                 dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
01279                                         beta*dir[il][in][jn];
01280                                 }
01281 }
01282 
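/* Illustration (standalone sketch, not part of the original file): the
   two beta formulas quoted in the CGDir header, evaluated on
   hypothetical 3-component gradients g(t) and g(t+1).                   */

#include <stdio.h>

int main(void)
{
        double g_old[3] = {0.4, -0.2, 0.1};     /* g(t)                  */
        double g_new[3] = {0.3, -0.1, 0.05};    /* g(t+1)                */
        double num_fr = 0, num_pr = 0, den = 0;
        int i;

        for(i=0; i<3; i++)
                {
                num_fr += g_new[i]*g_new[i];            /* ||g(t+1)||^2  */
                num_pr += g_new[i]*(g_new[i]-g_old[i]);
                den    += g_old[i]*g_old[i];            /* ||g(t)||^2    */
                }
        printf("Fletcher-Reeves beta=%f  Polak-Ribiere beta=%f\n",
               num_fr/den, num_pr/den);
        return 0;
}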
01283 
01284 /***********************************************************/
01285 /* DerivDir                                                */
01286 /*                                                         */
01287 /* scalar product between gradient and direction           */
01288 /*     = derivative along direction                        */
01289 /*                                                         */
01290 /* Author: J.Schwindling   01-Jul-99                       */
01291 /***********************************************************/   
01292 
01293 /* extern "C"Dllexport */dbl DerivDir()
01294 {
01295         int il,in,jn;
01296         dbl ddir = 0;
01297         
01298         for(il=1; il<NET.Nlayer; il++)
01299                 for(in=0; in<NET.Nneur[il]; in++)
01300                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01301                                 {
01302                                 ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
01303                                 }
01304         return(ddir);                   
01305 }
01306 
01307 
01308 /***********************************************************/
01309 /* GetGammaDelta                                           */
01310 /*                                                         */
01311 /* sets the vectors Gamma (=g(t+1)-g(t))                   */
01312 /*              and delta (=w(t+1)-w(t))                   */
01313 /* (for BFGS and Hybrid learning methods)                  */
01314 /*                                                         */
01315 /* Author: J.Schwindling   02-Apr-99                       */
01316 /***********************************************************/   
01317 
01318 /* extern "C"Dllexport */void GetGammaDelta()
01319 {
01320         int i=0;
01321         int il,in,jn;
01322         for(il=1; il<NET.Nlayer; il++)
01323                 for(in=0; in<NET.Nneur[il]; in++)
01324                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01325                                 {
01326                                 Gamma[i] = LEARN.DeDw[il][in][jn]-
01327                                         LEARN.ODeDw[il][in][jn];
01328                                 delta[i] = LEARN.Odw[il][in][jn];
01329                                 i++;
01330                                 }
01331 }
01332 
01333 
01334 /***********************************************************/
01335 /* BFGSdir                                                 */
01336 /*                                                         */
01337 /* sets the search direction to BFGS direction from the    */
01338 /*                              BFGS matrix                */
01339 /*                                                         */
01340 /* inputs:     int Nweights = number of weights            */
01341 /*                                                         */
01342 /* Author: J.Schwindling   02-Apr-99                       */
01343 /***********************************************************/   
01344 
01345 /* extern "C"Dllexport */void BFGSdir(int Nweights)
01346 {
01347         dbl *g, *s;
01348         int kk=0;
01349         int il,i,j,in,jn;
01350         
01351         g = (dbl*) malloc(Nweights*sizeof(dbl));
01352         s = (dbl*) malloc(Nweights*sizeof(dbl));
01353         
01354         for(il=1; kk<Nweights; il++)
01355                 for(in=0; in<NET.Nneur[il]; in++)
01356                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01357                                 {
01358                                 g[kk] = LEARN.DeDw[il][in][jn];
01359                                 kk++;
01360                                 }
01361         for(i=0; i<Nweights; i++)
01362                 {
01363                 s[i] = 0;
01364                 for(j=0; j<Nweights; j++)
01365                         {
01366                         s[i] += BFGSH[i][j] * g[j];
01367                         }
01368                 }
01369         
01370         kk = 0;
01371         for(il=1; kk<Nweights; il++)
01372                 for(in=0; in<NET.Nneur[il]; in++)
01373                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01374                                 {
01375                                 dir[il][in][jn] = -s[kk];
01376                                 kk++;
01377                                 }
01378         free(g);
01379         free(s);
01380 }
01381 
01382 
01383 /***********************************************************/
01384 /* InitBFGSH                                               */
01385 /*                                                         */
01386 /* initializes BFGS matrix to identity                     */
01387 /*                                                         */
01388 /* inputs:     int Nweights = number of weights            */
01389 /*                                                         */
01390 /* Author: J.Schwindling   02-Apr-99                       */
01391 /***********************************************************/   
01392 
01393 /* extern "C"Dllexport */void InitBFGSH(int Nweights)
01394 {
01395         int i,j;
01396         for(i=0; i<Nweights; i++)
01397                 for(j=0; j<Nweights; j++)
01398                         {
01399                         BFGSH[i][j] = 0;
01400                         if(i==j) BFGSH[i][j] = 1;
01401                         }
01402 }
01403 
01404 
01405 /***********************************************************/
01406 /* GetBFGSH                                                */
01407 /*                                                         */
01408 /* sets the BFGS matrix                                    */
01409 /*                                                         */
01410 /* inputs:     int Nweights = number of weights            */
01411 /*                                                         */
01412 /* return value (int) = 0 if no problem                    */
01413 /*                      1 if deltaTgamma == 0 -> switch to */ 
01414 /*                              steepest dir               */
01415 /*                                                         */
01416 /* Author: J.Schwindling   02-Apr-99                       */
01417 /* Modified: J.Schwindling 04-May-99                       */
01418 /*           computations now O(Nw^2), matrices removed    */
01419 /* Modified: J.Schwindling 11-Jun-99                       */
01420 /*           return value                                  */
01421 /***********************************************************/   
01422 
01423 /* extern "C"Dllexport */int GetBFGSH(int Nweights)
01424 {
01425         typedef double dble;
01426         dble deltaTgamma=0;
01427         dble factor=0; 
01428         dble *Hgamma;
01429         dble *tmp;
01430         register dble a, b;
01431         int i,j;
01432         
01433         Hgamma = (dble *) malloc(Nweights*sizeof(dble));
01434         tmp = (dble *) malloc(Nweights*sizeof(dble));
01435         
01436         for(i=0; i<Nweights; i++)
01437                 {
01438                 deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
01439                 a = 0;
01440                 b = 0;
01441                 for(j=0; j<Nweights; j++)
01442                         {
01443                         a += (dble) BFGSH[i][j] * (dble) Gamma[j];
01444                         b += (dble) Gamma[j] * (dble) BFGSH[j][i];
01445                         }
01446                 Hgamma[i] = a;
01447                 tmp[i] = b;     
01448                 factor += (dble) Gamma[i]*Hgamma[i];
01449                 }
01450         if(deltaTgamma == 0) { free(Hgamma); free(tmp); return 1; }
01451         a = 1 / deltaTgamma;    
01452         factor = 1 + factor*a;
01453         
01454         for(i=0; i<Nweights; i++)
01455                 {
01456                 b = (dble) delta[i];
01457                 for(j=0; j<Nweights; j++)
01458                         BFGSH[i][j] += (dbl) (factor*b* (dble) 
01459                         delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;        
01460                 }       
01461         free(Hgamma);
01462         free(tmp);
01463         return 0;
01464 }
01465 
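/* Illustration (standalone sketch, not part of the original file): the
   rank-2 inverse-Hessian update computed by GetBFGSH, written out as
   H += (factor*d*dT - (H*g*dT + d*gT*H)) / dTg, factor = 1 + gT*H*g/dTg,
   with d = delta (weight change) and g = Gamma (gradient change).
   Values are hypothetical and H starts as the identity.                 */

#include <stdio.h>

int main(void)
{
        double H[2][2] = {{1,0},{0,1}};
        double d[2] = {1.0, 0.0}, g[2] = {0.5, 0.2};
        double Hg[2], gH[2], dTg = 0, gHg = 0, factor;
        int i, j;

        for(i=0; i<2; i++)
                {
                dTg += d[i]*g[i];
                Hg[i] = gH[i] = 0;
                for(j=0; j<2; j++)
                        {
                        Hg[i] += H[i][j]*g[j];          /* H*g           */
                        gH[i] += g[j]*H[j][i];          /* gT*H          */
                        }
                }
        for(i=0; i<2; i++) gHg += g[i]*Hg[i];
        factor = 1 + gHg/dTg;
        for(i=0; i<2; i++)
                for(j=0; j<2; j++)
                        H[i][j] += (factor*d[i]*d[j]
                                  - (gH[j]*d[i] + Hg[i]*d[j])) / dTg;
        printf("H = [ %f %f ; %f %f ]\n", H[0][0], H[0][1], H[1][0], H[1][1]);
        return 0;
}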
01466 
01467 /***********************************************************/
01468 /* LineSearch                                              */
01469 /*                                                         */
01470 /* search along the line defined by dir                    */
01471 /*                                                         */
01472 /* outputs:     dbl *alpmin = optimal step length          */
01473 /*                                                         */
01474 /* Author: B.Mansoulie     01-Jul-98                       */
01475 /***********************************************************/   
01476 
01477 /* extern "C"Dllexport */
01478 int LineSearch(dbl *alpmin, int *Ntest, dbl Err0)
01479 {
01480         dbl ***w0;
01481         dbl alpha1, alpha2, alpha3;
01482         dbl err1, err2, err3;
01483         dbl tau;
01484         int icount, il, in, jn;
01485         
01486         tau=LEARN.Tau;
01487 
01488 /* store weights before line search */  
01489 
01490         *Ntest = 0;
01491         w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
01492         for(il=1; il<NET.Nlayer; il++)
01493                 {
01494                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01495                 for(in=0; in<NET.Nneur[il]; in++)
01496                         {
01497                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01498                                 sizeof(dbl));
01499                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01500                                 {
01501                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01502                                 }
01503                         }
01504                 }
01505         
01506 /* compute error(w0) */
01507 
01508 /*      err1 = MLP_Test(0,0); 
01509         (*Ntest) ++;*/
01510         err1 = Err0;
01511         
01512         if(NET.Debug>=4) printf("starting err= %f\n",err1);   
01513         
01514         *alpmin = 0;
01515         alpha1 = 0;
01516 /*      alpha2 = 0.05;
01517         if(LastAlpha != 0) alpha2 = LastAlpha;*/
01518         alpha2 = LastAlpha;
01519         if(alpha2 < 0.01) alpha2 = 0.01;
01520         if(alpha2 > 2.0) alpha2 = 2.0;
01521         MLP_Line(w0,alpha2);
01522         err2 = MLP_Test(0,0);
01523         (*Ntest) ++;
01524         if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);   
01525         
01526         alpha3 = alpha2;
01527         err3 = err2;
01528         
01529 /* try to find a triplet (alpha1, alpha2, alpha3) such that 
01530    Error(alpha1)>Error(alpha2)<Error(alpha3)                 */
01531    
01532         if(err1>err2) 
01533                 {
01534                 for(icount=1;icount<=100;icount++)
01535                         {
01536                         alpha3 = alpha3*tau;
01537                         MLP_Line(w0,alpha3);
01538                         err3 =MLP_Test(0,0);
01539         if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);   
01540                         (*Ntest) ++;
01541                         if(err3>err2) break;
01542                         alpha1 = alpha2;
01543                         err1 = err2;
01544                         alpha2 = alpha3;
01545                         err2 = err3;
01546                         }
01547                 if(icount>=100)                 /* line search fails */
01548                         {
01549                         MLP_Line(w0,0);         /* reset weights */
01550                         for(il=1; il<NET.Nlayer; il++) { for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]); } free(w0);
01551                         return(1);
01552                         }
01553                 }
01554         else
01555                 {
01556                 for(icount=1;icount<=100;icount++)
01557                         {
01558                         alpha2 = alpha2/tau;
01559                         MLP_Line(w0,alpha2);
01560                         err2 = MLP_Test(0,0);
01561         if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);   
01562                         (*Ntest) ++;
01563                         if(err1>err2) break;
01564                         alpha3 = alpha2;
01565                         err3 = err2;
01566                         }
01567                 if(icount>=100)                 /* line search fails */
01568                         {
01569                         MLP_Line(w0,0);         /* reset weights */
01570                         for(il=1; il<NET.Nlayer; il++) { for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]); } free(w0);
01571                         LastAlpha = 0.05;       /* try to be safe */
01572                         return(1);
01573                         }
01574                 }
01575 
01576 /* find bottom of parabola */
01577         
01578         *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
01579                 -(err2-err1)/(alpha2-alpha1)));
01580         if(*alpmin>10000) *alpmin=10000;
01581 
01582 /* set the weights */
01583         MLP_Line(w0,*alpmin);
01584         LastAlpha = *alpmin;
01585         
01586 /* store weight changes */
01587         for(il=1; il<NET.Nlayer; il++)
01588                 for(in=0; in<NET.Nneur[il]; in++)
01589                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01590                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01591                                 - w0[il][in][jn];
01592 
01593         for(il=1; il<NET.Nlayer; il++)
01594                 for(in=0; in<NET.Nneur[il]; in++)
01595                         free(w0[il][in]);                       
01596         for(il=1; il<NET.Nlayer; il++)
01597                 free(w0[il]);   
01598         free(w0);
01599         
01600         return(0);
01601 }
01602 
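/* Illustration (standalone sketch, not part of the original file): the
   parabola-bottom formula used above to pick *alpmin from a bracketing
   triplet, checked on points taken from err(alpha) = (alpha-2)^2.       */

#include <stdio.h>

int main(void)
{
        double a1 = 0, e1 = 4, a2 = 1, e2 = 1, a3 = 3, e3 = 1;
        double alpmin = 0.5*(a1+a3-(e3-e1)/((e3-e2)/(a3-a2)
                      - (e2-e1)/(a2-a1)));
        printf("alpmin = %f (expected 2)\n", alpmin);
        return 0;
}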
01603 
01604 /***********************************************************/
01605 /* DecreaseSearch                                          */
01606 /*                                                         */
01607 /* search along the line defined by dir for a point where  */
01608 /* error is decreased (faster than full line search)       */
01609 /*                                                         */
01610 /* outputs:     dbl *alpmin = step length                  */
01611 /*                                                         */
01612 /* Author: J.Schwindling     11-May-99                     */
01613 /***********************************************************/   
01614 
01615 /* extern "C"Dllexport */
01616 int DecreaseSearch(dbl *alpmin, int *Ntest, dbl Err0)
01617 {
01618         dbl ***w0;
01619         dbl alpha1, alpha2, alpha3;
01620         dbl err1, err2, err3;
01621         dbl tau;
01622         int icount, il, in, jn;
01623         
01624         tau=LEARN.Tau;
01625 
01626 /* store weights before line search */  
01627 
01628         *Ntest = 0;
01629         w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
01630         for(il=1; il<NET.Nlayer; il++)
01631                 {
01632                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01633                 for(in=0; in<NET.Nneur[il]; in++)
01634                         {
01635                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01636                                 sizeof(dbl));
01637                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01638                                 {
01639                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01640                                 }
01641                         }
01642                 }
01643         
01644 /* compute error(w0) */
01645 
01646 /*      err1 = MLP_Test(0,0); 
01647         (*Ntest) ++;*/
01648         err1 = Err0;
01649         
01650         if(NET.Debug>=4) printf("starting err= %f\n",err1);   
01651         
01652         *alpmin = 0;
01653         alpha1 = 0;
01654         alpha2 = 0.05;
01655         MLP_Line(w0,alpha2);
01656         err2 = MLP_Test(0,0);
01657         (*Ntest) ++;
01658         
01659         if(err2<err1) 
01660                 {
01661                 *alpmin = alpha2;
01662                 }
01663         else
01664                 {       
01665                 alpha3 = alpha2;
01666                 err3 = err2;
01667         
01668    
01669                 for(icount=1;icount<=100;icount++)
01670                         {
01671                         alpha2 = alpha2/tau;
01672                         MLP_Line(w0,alpha2);
01673                         err2 = MLP_Test(0,0);
01674                         (*Ntest) ++;
01675                         if(err1>err2) break;
01676                         alpha3 = alpha2;
01677                         err3 = err2;
01678                         }
01679                 if(icount>=100)                 /* line search fails */
01680                         {
01681                         MLP_Line(w0,0);         /* reset weights */
01682                         for(il=1; il<NET.Nlayer; il++) { for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]); } free(w0);
01683                         return(1);
01684                         }
01685                 *alpmin = alpha2;       
01686                 }
01687 
01688 /* set the weights */
01689         MLP_Line(w0,*alpmin);
01690         
01691 /* store weight changes */
01692         for(il=1; il<NET.Nlayer; il++)
01693                 for(in=0; in<NET.Nneur[il]; in++)
01694                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01695                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01696                                 - w0[il][in][jn];
01697 
01698         for(il=1; il<NET.Nlayer; il++)
01699                 for(in=0; in<NET.Nneur[il]; in++)
01700                         free(w0[il][in]);                       
01701         for(il=1; il<NET.Nlayer; il++)
01702                 free(w0[il]);   
01703         free(w0);
01704         
01705         return(0);
01706 }
01707 
01708 
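/***********************************************************/
/* FixedStep                                               */
/*                                                         */
/* moves the weights a fixed step along direction dir      */
/* (header added by the editor for consistency with the    */
/* other routines in this file)                            */
/*                                                         */
/* inputs:     dbl alpha = step length                     */
/***********************************************************/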
01709 /* extern "C"Dllexport */int FixedStep(dbl alpha)
01710 {
01711         dbl ***w0;
01712         int il, in, jn;
01713         
01714         w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
01715         for(il=1; il<NET.Nlayer; il++)
01716                 {
01717                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01718                 for(in=0; in<NET.Nneur[il]; in++)
01719                         {
01720                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01721                                 sizeof(dbl));
01722                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01723                                 {
01724                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01725                                 }
01726                         }
01727                 }
01728         
01729 
01730 /* set the weights */
01731         MLP_Line(w0,alpha);
01732         
01733 /* store weight changes */
01734         for(il=1; il<NET.Nlayer; il++)
01735                 for(in=0; in<NET.Nneur[il]; in++)
01736                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01737                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01738                                 - w0[il][in][jn];
01739 
01740         for(il=1; il<NET.Nlayer; il++)
01741                 for(in=0; in<NET.Nneur[il]; in++)
01742                         free(w0[il][in]);                       
01743         for(il=1; il<NET.Nlayer; il++)
01744                 free(w0[il]);   
01745         free(w0);
01746         
01747         return(0);
01748 }
01749 
01750 /***********************************************************/
01751 /* MLP_Line                                                */
01752 /*                                                         */
01753 /* sets the weights to a point along a line                */
01754 /*                                                         */
01755 /* inputs:     dbl ***w0 = initial point                   */
01756 /*             dbl alpha = step length                     */ 
01757 /*                                                         */
01758 /* Author: B.Mansoulie     01-Jul-98                       */
01759 /***********************************************************/   
01760 
01761 /* extern "C"Dllexport */
01762 void MLP_Line(dbl ***w0, dbl alpha)
01763 {
01764         register int il,in,jn;
01765         
01766         for(il=1; il<NET.Nlayer; il++)
01767                 for(in=0; in<NET.Nneur[il]; in++)
01768                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01769                                 NET.Weights[il][in][jn] = w0[il][in][jn]+
01770                                 alpha*dir[il][in][jn];
01771                                 
01772 }
01773 
01774 
01775 /***********************************************************/
01776 /* LineSearchHyb                                           */
01777 /*                                                         */
01778 /* search along the line defined by dir                    */
01779 /*                                                         */
01780 /* outputs:     dbl *alpmin = optimal step length          */
01781 /*                                                         */
01782 /* Author: B.Mansoulie     01-Jul-98                       */
01783 /***********************************************************/   
01784 
01785 /* extern "C"Dllexport */
01786 int LineSearchHyb(dbl *alpmin, int *Ntest)
01787 {
01788         dbl ***w0;
01789         dbl alpha1, alpha2, alpha3;
01790         dbl err1, err2, err3;
01791         dbl tau;
01792         int icount, il, in, jn;
01793 
01794 /*      char buf [50];
01795   sprintf (buf,"entree linesearchhyb\n");
01796   MessageBoxA (0,buf,"dans FreePatterns",MB_OK);*/
01797         
01798         if(NET.Debug>=4){
01799                 printf(" entry LineSearchHyb \n");
01800                 }
01801         tau=LEARN.Tau;
01802 
01803 /* store weights before line search */  
01804 
01805         *Ntest = 0;
01806         w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
01807         for(il=1; il<NET.Nlayer-1; il++)
01808                 {
01809                 w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
01810                 for(in=0; in<NET.Nneur[il]; in++)
01811                         {
01812                         w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
01813                                 sizeof(dbl));
01814                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01815                                 {
01816                                 w0[il][in][jn] = NET.Weights[il][in][jn];
01817                                 }
01818                         }
01819                 }
01820         
01821 /* compute error(w0) */
01822         err1 = MLP_Test(0,1);
01823         (*Ntest) ++; 
01824         if(NET.Debug>=4) printf("LineSearchHyb starting err= %f\n",err1);   
01825         
01826         *alpmin = 0;
01827         alpha1 = 0;
01828 /*      alpha2 = 0.05;
01829         if(LastAlpha != 0) alpha2 = LastAlpha;*/
01830         alpha2 = LastAlpha;
01831         if(alpha2 < 0.01) alpha2 = 0.01;
01832         if(alpha2 > 2.0) alpha2 = 2.0;
01833         MLP_LineHyb(w0,alpha2);
01834         err2 = MLP_Test(0,1);
01835         (*Ntest) ++; 
01836         
01837         alpha3 = alpha2;
01838         err3 = err2;
01839         
01840 /* try to find a triplet (alpha1, alpha2, alpha3) such that 
01841    Error(alpha1)>Error(alpha2)<Error(alpha3)                 */
01842    
01843         if(err1>err2) 
01844                 {
01845                 for(icount=1;icount<=100;icount++)
01846                         {
01847                         alpha3 = alpha3*tau;
01848                         MLP_LineHyb(w0,alpha3);
01849                         err3 = MLP_Test(0,1);
01850                         (*Ntest) ++; 
01851                         if(err3>err2) break;
01852                         alpha1 = alpha2;
01853                         err1 = err2;
01854                         alpha2 = alpha3;
01855                         err2 = err3;
01856                         }
01857                 if(icount>=100)                 /* line search fails */
01858                         {
01859                         MLP_LineHyb(w0,0);      /* reset weights */
01860                         for(il=1; il<NET.Nlayer-1; il++) { for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]); } free(w0);
01861                         return(1);
01862                         }
01863                 }
01864         else
01865                 {
01866                 for(icount=1;icount<=100;icount++)
01867                         {
01868                         alpha2 = alpha2/tau;
01869                         MLP_LineHyb(w0,alpha2);
01870                         err2 = MLP_Test(0,1);
01871                         (*Ntest) ++; 
01872                         if(err1>err2) break;
01873                         alpha3 = alpha2;
01874                         err3 = err2;
01875                         }
01876                 if(icount>=100)                 /* line search fails */
01877                         {
01878                         MLP_LineHyb(w0,0);      /* reset weights */
01879                         for(il=1; il<NET.Nlayer-1; il++) { for(in=0; in<NET.Nneur[il]; in++) free(w0[il][in]); free(w0[il]); } free(w0);
01880                         return(1);
01881                         }
01882                 }
01883 
01884 /* find bottom of parabola */
01885         
01886         *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
01887                 -(err2-err1)/(alpha2-alpha1)));
01888         if(*alpmin>10000) *alpmin=10000;
01889 
01890 /* set the weights */
01891         MLP_LineHyb(w0,*alpmin);
01892         LastAlpha = *alpmin;
01893         
01894 /* store weight changes */
01895         for(il=1; il<NET.Nlayer-1; il++)
01896                 for(in=0; in<NET.Nneur[il]; in++)
01897                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01898                                 LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
01899                                 - w0[il][in][jn];
01900 
01901         for(il=1; il<NET.Nlayer-1; il++)
01902                 for(in=0; in<NET.Nneur[il]; in++)
01903                         free(w0[il][in]);                       
01904         for(il=1; il<NET.Nlayer-1; il++)
01905                 free(w0[il]);   
01906         free(w0);
01907         if(NET.Debug>=4){
01908                 printf(" exit LineSearchHyb \n");
01909                 }
01910 
01911         return(0);
01912 }
01913 
01914 
01915 /***********************************************************/
01916 /* MLP_LineHyb                                             */
01917 /*                                                         */
01918 /* sets the weights to a point along a line                */
01919 /*                     (for hybrid methods)                */
01920 /*                                                         */
01921 /* inputs:     dbl ***w0 = initial point                   */
01922 /*             dbl alpha = step length                     */ 
01923 /*                                                         */
01924 /* Author: B.Mansoulie     01-Jul-98                       */
01925 /***********************************************************/   
01926 
01927 /* extern "C"Dllexport */
01928 void MLP_LineHyb(dbl ***w0, dbl alpha)
01929 {
01930         int il,in,jn;
01931         for(il=1; il<NET.Nlayer-1; il++)
01932                 for(in=0; in<NET.Nneur[il]; in++)
01933                         for(jn=0; jn<=NET.Nneur[il-1]; jn++)
01934                         {
01935                                 NET.Weights[il][in][jn] = w0[il][in][jn]+
01936                                 alpha*dir[il][in][jn];
01937                         }
01938         MLP_ResLin();
01939 }
01940 
01941 
01942 /***********************************************************/
01943 /* SetLambda                                               */
01944 /*                                                         */
01945 /* sets the coefficient of the regularisation term for     */
01946 /* hybrid learning method                                  */
01947 /*                                                         */
01948 /* input:     double Wmax = typical maximum weight         */
01949 /*                                                         */
01950 /* Author: J.Schwindling   13-Apr-99                       */
01951 /***********************************************************/   
01952 
01953 /* extern "C"Dllexport */
01954 void SetLambda(double Wmax)
01955 {
01956         dbl err;
01957         err = MLP_Test(0,0);
01958         LEARN.Alambda =
01959         LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
01960 }
01961 
01962         
01963 /***********************************************************/
01964 /* MLP_ResLin                                              */
01965 /*                                                         */
01966 /* resolution of linear system of equations for hybrid     */
01967 /* training method                                         */
01968 /*                                                         */
01969 /*                                                         */
01970 /* Author: B.Mansoulie        end-98                       */
01971 /* Modified: J.Schwindling 29-APR-99                       */
01972 /*                         use dgels from LAPACK           */  
01973 /***********************************************************/   
01974 
01975 /* extern "C"Dllexport */
01976 void MLP_ResLin()
01977 {
01978 /*      dbl rrans[NMAX], rrout[NMAX];*/
01979 /*      type_pat rrin[NMAX];*/
01980         doublereal *HR,*dpat; //,*wlin,*SV;
01981         double err,lambda,lambda2;
01982         integer Nl,M,Nhr,khr,nrhs,iret,ierr;
01983         int   il, in, inl, ipat;
01984         register dbl a;
01985         char Trans = 'N';
01986 
01987         
01988 /*      integer rank; */
01989 //      doublereal rcond = -1;  /* use machine precision */
01990         
01991         lambda2 = LEARN.Alambda;
01992         
01993 /* Nl = number of linear weights
01994    M = number of terms in linear system = number of examples + regularisation*/
01995         Nl = NET.Nneur[NET.Nlayer-2] + 1;
01996         M = PAT.Npat[0]+Nl;
01997 
01998         integer Lwork = 5 * M;
01999         doublereal *Work = (doublereal*) malloc((int) Lwork*sizeof(doublereal));
02000         
02001 /* memory allocation */
02002         dpat = (doublereal*) malloc((int) M*sizeof(doublereal));
02003 //      wlin = (doublereal*) malloc((int) Nl*sizeof(doublereal));
02004 //      SV = (doublereal*) malloc((int) Nl*sizeof(doublereal));
02005         
02006         Nhr = M * Nl;
02007         HR = (doublereal*) malloc((int) Nhr*sizeof(doublereal));
02008         err = 0.;
02009         for(ipat=0;ipat<PAT.Npat[0];ipat++)
02010                 {
02011 /* *** Filling dpat and HR *** */
02012 /*              for(in=0; in<NET.Nneur[0]; in++)
02013                         {
02014                         rrin[in] = PAT.Rin[0][ipat][in];
02015                         }*/
02016 
02017                 MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
02018 /*              MLP_Out(rrin,rrout);*/
02019                 for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
02020                         { 
02021                         a = (dbl) PAT.Rans[0][ipat][in];
02022                         }
02023                 il = NET.Nlayer-2;
02024                 dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
02025                 khr = ipat;
02026                 HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
02027                 for(in=0;in<NET.Nneur[il];in++)
02028                         {
02029                         khr =  M *(in+1) + ipat;
02030                         HR[khr] = NET.Outn[il][in]* 
02031                                 (dbl) sqrt(PAT.Pond[0][ipat]);
02032                         }
02033                 }
02034         il = NET.Nlayer-2;
02035         lambda = sqrt(lambda2);
02036         for(ipat=0;ipat<=NET.Nneur[il];ipat++)
02037                 {
02038                 dpat[ipat+PAT.Npat[0]] = 0;
02039                 for(in=0;in<=NET.Nneur[il];in++)
02040                         {
02041                         khr =  M *in + ipat + PAT.Npat[0];
02042                         HR[khr] = 0;
02043                         if(in==ipat) HR[khr]=lambda;
02044                         }
02045                 }
02046         if(NET.Debug>=4) 
02047                 {
02048                 err = MLP_Test(0,0);
02049                 printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err); 
02050                 }                
02051 /*                                                                */
02052 /*      Find the linear weights by solving the linear system     */
02053 /*                                                                */
02054         nrhs = 1;
02055         ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
02056                         &Lwork,&iret);
02057         if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
02058         if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);
02059         
02060 /*      ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
02061                 &iret);
02062         if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
02063         if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/
02064         
02065         il = NET.Nlayer-1;
02066         for (inl=0; inl<=NET.Nneur[il-1];inl++)
02067                 {
02068                 NET.Weights[il][0][inl] = dpat[inl];
02069                 }
02070         if(NET.Debug>=4) 
02071                 {
02072                 err = MLP_Test(0,0);
02073                 printf("ResLin, after dgels, err= %f\n",err); 
02074                 }                
02075         free(Work);
02076         free(dpat);
02077 //      free(wlin);
02078         free(HR);
02079 //      free(SV);
02080 }
02081 
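/* Illustration (standalone sketch, not part of the original file): the
   dgels call pattern used by MLP_ResLin, on a tiny 3x2 least-squares
   problem. It assumes a Fortran LAPACK providing the conventional dgels_
   symbol with 32-bit integers (link with -llapack); matrices are stored
   column-major and the solution overwrites the first n entries of b.    */

#include <stdio.h>

int dgels_(char *trans, int *m, int *n, int *nrhs, double *a, int *lda,
           double *b, int *ldb, double *work, int *lwork, int *info);

int main(void)
{
        /* minimise ||A x - b|| for A (3x2), stored column by column    */
        double A[6] = {1, 1, 1,   0, 1, 2};
        double b[3] = {1, 2, 4};
        double work[32];
        int m = 3, n = 2, nrhs = 1, lwork = 32, info;
        char trans = 'N';

        dgels_(&trans, &m, &n, &nrhs, A, &m, b, &m, work, &lwork, &info);
        printf("info=%d  x = (%f, %f)\n", info, b[0], b[1]);
        return 0;
}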
02082 /***********************************************************/
02083 /* EtaDecay                                                */
02084 /*                                                         */
02085 /* decreases the learning parameter eta by the factor      */
02086 /* LEARN.Decay                                             */
02087 /*                                                         */
02088 /* Author: J.Schwindling   14-Apr-99                       */
02089 /***********************************************************/   
02090 
02091 void EtaDecay()
02092 {
02093         LEARN.eta *= LEARN.Decay;
02094 }
02095 
02096        
02097 /***********************************************************/
02098 /* ShuffleExamples                                         */
02099 /*                                                         */
02100 /* Shuffles the learning examples (for stochastic method)  */
02101 /*                                                         */
02102 /* Author: J.Schwindling   14-Apr-1999                     */
02103 /* Modified: J.Schwindling 21-Jul-1999  inline MLP_Rand    */
02104 /***********************************************************/   
02105 
02106 /* extern "C"Dllexport */
02107 int ShuffleExamples(int n, int *index)
02108 {
02109         int i,ii,itmp;
02110         dbl a = (dbl) (n-1);
02111         
02112         for(i=0;i<n;i++)
02113                 {
02114                 ii = (int) MLP_Rand(0.,a);
02115                 itmp = index[ii];
02116                 index[ii] = index[i];
02117                 index[i] = itmp;
02118                 }
02119         return 0;
02120 }
02121 
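/* Note: ShuffleExamples above swaps every position with an index drawn
   from the full range, which is known to yield a slightly non-uniform
   permutation. A standard Fisher-Yates shuffle (standalone sketch,
   hypothetical names, modulo bias ignored) draws from the not-yet-fixed
   range instead:                                                        */

#include <stdlib.h>

void fisher_yates(int n, int *index)
{
        int i, ii, itmp;
        for(i=n-1; i>0; i--)
                {
                ii = (int) (random() % (i+1));  /* uniform in 0..i      */
                itmp = index[ii];
                index[ii] = index[i];
                index[i] = itmp;
                }
}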
02122 
02123 /***********************************************************/
02124 /* MLP_Rand                                                */
02125 /*                                                         */
02126 /* Random Numbers (to initialize weights)                  */
02127 /*                                                         */
02128 /* inputs :     dbl mini, dbl maxi = random number between */
02129 /*                                 mini and maxi           */
02130 /* return value: (double) random number                    */
02131 /*                                                         */
02132 /* Author: J.Schwindling   14-Apr-99                       */
02133 /***********************************************************/   
02134 
02135 /* extern "C"Dllexport */
02136 double MLP_Rand(dbl mini, dbl maxi)
02137 {
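/* note (editor): this assumes RAND_MAX matches the range of random(),
   which holds with glibc; POSIX only defines RAND_MAX for rand()        */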
02138 return mini+(maxi-mini)*random()/RAND_MAX;
02139 }
02140 
02141 
02142 /***********************************************************/
02143 /* InitWeights                                             */
02144 /*                                                         */
02145 /* initializes the weights to random numbers between       */
02146 /* -0.5 : 0.5                                              */
02147 /*                                                         */
02148 /* Author: J.Schwindling   14-Apr-99                       */
02149 /***********************************************************/   
02150 
02151 /* extern "C"Dllexport */
02152 void InitWeights()
02153 {
02154         int ilayer,ineur,i;
02155         
02156         for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
02157                 for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
02158                         for(i=0;i<=NET.Nneur[ilayer-1];i++)
02159                                 NET.Weights[ilayer][ineur][i]=
02160                                         (dbl) MLP_Rand(-0.5, 0.5);
02161 }
02162 
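/* Note: random() is deterministic unless seeded (e.g. via srandom());
   no seeding appears in this listing, so repeated runs start from the
   same initial weights.                                                 */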
02163 
02164 /***********************************************************/
02165 /* PrintWeights                                            */
02166 /*                                                         */
02167 /* Print weights on the screen                             */
02168 /*                                                         */
02169 /* Author: J.Schwindling   14-Apr-99                       */
02170 /***********************************************************/   
02171 
02172 /* extern "C"Dllexport */
02173 void PrintWeights()
02174 {
02175         int ilayer,ineur,i;
02176 
02177         for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
02178                 {
02179                 if(MessLang==1) 
02180                         {
02181                         printf("Couche %d\n",ilayer);
02182                         }
02183                 else
02184                         {
02185                         printf("Layer %d\n",ilayer);
02186                         }
02187                 for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
02188                         {
02189                         if(MessLang==1) 
02190                                 {
02191                                 printf("Neurone %d",ineur);
02192                                 }
02193                         else
02194                                 {
02195                                 printf("Neuron %d",ineur);
02196                                 }
02197                         for(i=0; i<=NET.Nneur[ilayer-1]; i++)
02198                                 {
02199                                 printf(" %f",
02200                                         (double) NET.Weights[ilayer][ineur][i]);
02201                                 }
02202                         printf("\n");
02203                         }
02204                 printf("\n");
02205                 }
02206 }
02207  
02208 
02209 /***********************************************************/
02210 /* ReadPatterns                                            */
02211 /*                                                         */
02212 /* parser for learn.pat or test.pat files                  */
02213 /*                                                         */
02214 /* inputs: char *filename = name of the file to read       */
02215 /*         int ifile = 0: learning examples                */
02216 /*                     1: test examples                    */
02217 /*                                                         */
02218 /* outputs: int *inet = 1 if a network is defined          */
02219 /*          int *ilearn = 1 if a learning method is defined*/
02220 /*          int *iexamples = 1 if examples are defined     */ 
02221 /*                                                         */
02222 /* return value (int) = 0:   no error                      */
02223 /*                    = -1:  file could not be opened      */
02224 /*                                                         */
02225 /* Author: J.Schwindling   20-Apr-99                       */
02226 /* Modified: J.Schwindling 01-Jun-99 return inet, ilearn   */
02227 /*                                          iexamples      */ 
02228 /*           J.Schwindling 21-Sep-99 return value = -1     */
02229 /***********************************************************/   
02230 
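/* A hypothetical minimal pattern file for this parser, based on the
   datacards handled below (the argument format of NNEU is defined by
   GetNetStructure, not shown here; comma-separated is assumed):

       # two examples, two inputs, one output
       NNEU 2,4,1
       NPAT 2
       NINP 2
       NOUT 1
       LMET 7
       NEPO 100
       0.5   1.2
       1
       0.1  -0.3
       0

   Each example occupies two lines: the inputs, then the desired
   answers (plus an optional weight when POND is used).                  */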
02231 #define CLEN 1024
02232 
02233 /* extern "C"Dllexport */
02234 int ReadPatterns(char *filename, int ifile,
02235                  int *inet, int *ilearn, int *iexamples)
02236 {
02237 char s[CLEN], s2[CLEN], cc[6], cc2[6];
02238 char otherfile[CLEN];
02239 double p;
02240 //int line,i,j;
02241 int line,i;
02242 //int l,ll,ipat,nmax,il,in,tf;
02243 int l,ll,ipat,nmax;
02244 int np=0;       /* number of examples */
02245 int nin=0;      /* number of inputs */
02246 int nout=0;     /* number of outputs */
02247 int npon=0;
02248 int ntot, ierr;
02249 //char **ss;
02250 char **ss=0;
02251 FILE *LVQpat;
02252 int nlayer, nneur[NLMAX];
02253 
02254 printf("\nLoading file %s\n",filename);
02255 LVQpat=fopen(filename,"r");
02256 if(LVQpat == 0) return -1;
02257 
02258 line=0;
02259 
02260 while(fgets(s,CLEN,LVQpat))
02261         {
02262         if(*s=='N')                                 
02263                 {
02264                 if(*(s+1)=='N')                 /* NNEU */
02265                         {
02266                         printf("Number of neurons %s",s);
02267                         *inet = 1;
02268                         sscanf(s,"%s %s",cc,s2);
02269                         ierr = GetNetStructure(s2,&nlayer,nneur);
02270                         if(ierr != 0) return ierr;
02271                         ierr = MLP_SetNet(&nlayer,nneur);
02272                         if(ierr != 0) return ierr;
02273                         }
02274                 else
02275                         {       
02276                         sscanf(s,"%s %d",cc,&l);
02277                         if(*(cc+1)=='P')        /* NPAT */
02278                                 {
02279                                 np=l;
02280                                 printf("Number of patterns %d\n",np);
02281                                 }
02282                         else if(*(cc+1)=='I')   /* NINP */
02283                                 {
02284                                 nin=l;
02285                                 PAT.Nin = nin;
02286                                 printf("Number of inputs %d\n",nin);
02287                                 }
02288                         else if(*(cc+1)=='O' && *(cc+2)=='U')   /* NOUT */
02289                                 {
02290                                 nout=l;
02291                                 PAT.Nout = nout;
02292                                 printf("Number of outputs %d\n",nout);
02293                                 }
02294                         else if(*(cc+1)=='O' && *(cc+2)=='R')   /* NORM */
02295                                 {
02296                                 DIVERS.Norm=l;
02297                                 if(l==1) printf("Normalize inputs\n");
02298                                 }
02299 /* obsolete datacard     NLAY */                        
02300                         else if(*(cc+1)=='L')   
02301                                 {
02302                                 printf("NLAY datacard is no longer needed\n");
02303                                 }                               
02304                         else if(*(cc+1)=='E')   /* NEPO */ 
02305                                 {
02306                                 LEARN.Nepoch=l;
02307                                 printf("Number of epochs %d\n",l);
02308                                 }
02309                         else if(*(cc+1)=='R')   /* NRES */
02310                                 {
02311                                 LEARN.Nreset=l;
02312                                 printf(
02313                                 "Reset to steepest descent every %d epochs\n",
02314                                 l);
02315                                 }
02316                         }
02317                 }
02318         else if(*s=='L')                
02319                 {
02320                 if(*(s+1)=='P')                 /* LPAR */
02321                         {
02322                         sscanf(s,"%s %le",cc,&p);
02323                         printf("Learning parameter %f\n",p);
02324                         LEARN.eta = (dbl) p;
02325                         }
02326                 else if(*(s+1)=='M')            /* LMET */
02327                         {
02328                         *ilearn = 1;
02329                         sscanf(s,"%s %d",cc,&(LEARN.Meth));
02330                         printf("Learning method = ");
02331                         switch(LEARN.Meth)
02332                         {
02333                 case 1: printf("Stochastic Minimization\n");
02334                         break;
02335                 case 2: printf("Steepest descent with fixed step\n");
02336                         break;
02337                 case 3: printf("Steepest descent with line search\n"); break;
02338                 case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
02339                 case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
02340                         break;
02341                 case 6: printf("BFGS\n");
02342                         break;
02343                 case 7: printf("Hybrid BFGS-linear\n");
02344                         break;
02345                 default: printf("Error: unknown method\n"); break;
02346                 }
02347 
02348                         }
02349                 else if(*(s+1)=='T')    /* LTAU */
02350                         {
02351                         sscanf(s,"%s %lf",cc,&p);
02352                         printf("Tau %f\n",p);
02353                         LEARN.Tau = (dbl) p;
02354                         }
02355                 else if(*(s+1)=='A')    /* LAMB */
02356                         {
02357                         sscanf(s,"%s %lf",cc,&p);
02358                         printf("Lambda %f\n",p);
02359                         LEARN.Lambda = (dbl) p;
02360                         }
02361                 }
02362         else if(*s=='F')                
02363                 {
02364                 if(*(s+1)=='S')         /* FSPO */
02365                         {
02366                         sscanf(s,"%s %le",cc,&p);
02367                         printf("Flat spot elimination parameter %f\n",p);
02368                         LEARN.delta = (dbl) p;
02369                         }
02370                 else if(*(s+1)=='I')    /* FILE */
02371                         {
02372                         sscanf(s,"%s %s",cc,otherfile);
02373                         ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
02374                         if(ierr != 0) return ierr;
02375                         }       
02376                 }
02377         else if(*s=='M')                /* momentum */
02378                 {
02379                 sscanf(s,"%s %le",cc,&p);
02380                 printf("Momentum term %f\n",p);
02381                 LEARN.epsilon = (dbl) p;
02382                 }               
02383         else if(*s=='O')                /* OUTx */
02384                 {
02385                 if(*(s+3)=='W')         /* OUTW */
02386                         {
02387                         sscanf(s,"%s %d",cc,&OutputWeights);
02388                         if(OutputWeights == 0)
02389                                 {
02390                                 printf("Never write file weights.out\n");
02391                                 }
02392                         else if(OutputWeights == -1)
02393                                 {
02394                                 printf("Write weights to output file at the end\n");
02395                                 }
02396                         else 
02397                                 {
02398                                 printf("Write weights to file every %d epochs\n",
02399                                         OutputWeights);
02400                                 }
02401                         }
02402                 else if(*(s+3)=='F')    /* OUTF */
02403                         {
02404                         sscanf(s,"%s %s",cc,cc2);
02405                         if(*cc2=='F' || *cc2=='C')
02406                                 {
02407                                 DIVERS.Outf = *cc2;
02408                                 }
02409                         else
02410                                 {
02411                         printf(" *** Error while loading file %s at line %s :",
02412                                 filename,s);
02413                         printf(" unknown language\n");
02414                                 }       
02415                         }
02416                 else
02417                         {
02418                         printf(" *** Error while loading file %s at line %s\n",
02419                                 filename,s);
02420                         }                                       
02421                 }               
02422         else if(*s=='R')                /* RDWT */
02423                 {
02424                 sscanf(s,"%s %d",cc,&(NET.Rdwt));
02425                 if(NET.Rdwt == 0)
02426                         {
02427                         printf("Random weights \n");
02428                         }
02429                 else
02430                         {
02431                         printf("Read weights from file weights.in\n");
02432                         }       
02433                 }               
02434         else if(*s=='S')                /* STAT */
02435                 {
02436                 sscanf(s,"%s %d",cc,&(DIVERS.Stat));
02437                 }
02438 /*      else if(*s=='T')                 TFUN 
02439                 {
02440                 sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
02441                 SetTransFunc(il,in,tf);
02442                 } */    
02443         else if(*s=='H')                /* HESS */
02444                 {
02445                 sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
02446                 }               
02447         else if(*s=='D')                
02448                 {
02449                 if(*(s+1)=='C')         /* DCAY */
02450                         {       
02451                         sscanf(s,"%s %le",cc,&p);
02452                         LEARN.Decay = p;
02453                         printf("Learning parameter decay %f\n",
02454                                                 (double) LEARN.Decay);
02455                         }
02456                 if(*(s+1)=='B')         /* DBIN */      
02457                         {       
02458                         sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
02459                         printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
02460                         }
02461                 if(*(s+1)=='E')         /* DEBU */      
02462                         {       
02463                         sscanf(s,"%s %d",cc,&(NET.Debug));
02464                         printf("Debug mode %d\n",NET.Debug);
02465                         }
02466                 }               
02467         else if(*s=='P')                /* POND */
02468                 {
02469                 npon = CountLexemes(s);
02470                 if(npon==2) 
02471                         {
02472                         sscanf(s,"%s %d",cc,&(PAT.Iponde));
02473                         }
02474                 else
02475                         {
02476                         ss = (char**) malloc((npon+1)*sizeof(char*)); 
02477                         for(i=0;i<=npon;i++)
02478                                 ss[i]=(char*) malloc(40*sizeof(char));
02479                         getnLexemes(npon,s,ss);
02480                         sscanf(ss[1],"%d",&(PAT.Iponde));
02481                         for(i=2;i<npon;i++)
02482                             {   
02483                             sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
02484                             }
02485                         }
02486                 if(PAT.Iponde==0) 
02487                         {
02488                         npon = 0;
02489                         }
02490                 else
02491                         {
02492                         npon = 1;
02493                         }
02494                 }               
02495         else if(*s=='#')                            /* comments */
02496                 {
02497                 }
02498         else                                        /* example itself */
02499                 {
02500                 if(np==0) return 1;
02501                 if(nin==0) return 2;
02502                 if(nout==0) return 3;
02503                 
02504 
02505 /* store number of examples and allocate memory */
02506                 if(line==0)
02507                         {
02508                         PAT.Npat[ifile] = np;
02509                         ierr = AllocPatterns(ifile,np,nin,nout,0);
02510                         if(ierr != 0) return ierr;
02511                         *iexamples = 1;
02512                         }
02513 
02514 /* now get example */
02515                                 
02516                 line++;
02517                 ll = (line-1)%2;
02518                 ipat = (line-1)/2;
02519                 /*              printf("Loading event \t %d\r",ipat);*/
02520 /*              if(ipat>NPMAX) 
02521                         {
02522                         printf("Too many examples in file\n");
02523                         printf("Loading %d examples\n",NPMAX);
02524                         PAT.Npat[ifile] = NPMAX;
02525                         break;
02526                         }
02527 */                      
02528                 
02529 /* allocate the number of lines */
02530                 
02531                 if(line==1) 
02532                         {
02533                         
02534                         nmax = nin;
02535                         if(nout>nin) nmax=nout;
02536                         ss = (char**) malloc((nmax+1)*sizeof(char*));
02537                         if(ss == 0) return -111; 
02538                         for(i=0;i<=nmax;i++)
02539                                 {
02540                                 ss[i]=(char*) malloc(40*sizeof(char));
02541                                 if(ss[i] == 0) return -111;
02542                                 }
02543                         }
02544                         
02545                 if(ll==0)    /* inputs */
02546                         {
02547                         getnLexemes(nin,s,ss);
02548                         for(i=0;i<nin;i++)
02549                                 {
02550                                 sscanf(ss[i],"%le",&p);
02551                                 PAT.Rin[ifile][ipat][i] = (type_pat) p;
02552                                 }
02553                         }
02554                 else         /* answers */
02555                         {
02556                         ntot=nout+npon;
02557                         getnLexemes(ntot,s,ss);
02558                         for(i=0;i<ntot;i++)
02559                                 {
02560                                 sscanf(ss[i],"%le",&p);
02561                                 if(i<nout)
02562                                         {
02563                                         PAT.Rans[ifile][ipat][i] = (type_pat) p;
02564                                         }
02565                                 else
02566                                         {
02567                                         if(PAT.Iponde==1)
02568                                                 {
02569                                                 PAT.Pond[ifile][ipat] = 
02570                                                         (type_pat) p;
02571                                                 }
02572                                         else
02573                                                 {
02574                                                 PAT.Pond[ifile][ipat] = 
02575                                                 (type_pat) PAT.Ponds[(int) p -1];
02576                                                 }
02577                                         }
02578                                 }
02579                         }                               
02580                 }
02581         }
02582         printf("%d examples loaded    \n\n",PAT.Npat[ifile]);
02583         fclose(LVQpat);
02584         return 0;
02585 }
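
/* [Editor's note] A minimal sketch of the pattern-file layout accepted by
   the parser above, inferred from this function (the keywords that set the
   example counts np/nin/nout are handled earlier in ReadPatterns and are
   not repeated here; all values are illustrative):

       # lines starting with '#' are comments
       POND 1                   weighting mode (PAT.Iponde)
       0.5  1.2                 inputs of example 1   (nin values)
       1.0                      answers of example 1  (nout values, plus a
                                weight when ponderation requires one)
       0.7  0.3                 inputs of example 2
       0.0                      answers of example 2

   Input and answer lines alternate: ll = (line-1)%2 selects inputs (0) or
   answers (1), and ipat = (line-1)/2 is the example index.               */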
02586 
02587 
02588 /* CountLexemes returns the number of lexemes in s separated by blanks.*/
02589 /* extern "C"Dllexport */
02590 int CountLexemes(char *s)
02591 {
02592   char tmp[1024];
02593   int i=0;
02594   
02595   strcpy(tmp,s);
02596   if (strtok(tmp," "))
02597     {
02598       i=1;
02599       while (strtok(NULL," ")) i++;
02600     }
02601   return i;
02602 }
02603 
02604 /* splits s into substrings ss separated by blanks */
02605 /* extern "C"Dllexport */
02606 void getnLexemes(int n, char *s, char **ss)
02607 {
02608   char tmp[1024];
02609   int i;     
02610   strcpy(tmp,s);
02611   if (n>0)
02612     {
02613       strcpy(ss[0],strtok(tmp," "));
02614       for (i=1;i<n;i++)
02615         strcpy(ss[i],strtok(NULL," "));
02616     }
02617 }
02618 
02619 /* extern "C"Dllexport */
02620 void getLexemes(char *s,char **ss)
02621 {
02622   char tmp[1024];
02623   int i,n;   
02624     
02625   strcpy(tmp,s);
02626   n=CountLexemes(tmp);
02627   if (n>0)
02628     {
02629       strcpy(ss[0],strtok(tmp," "));
02630       for (i=1;i<n;i++)
02631         strcpy(ss[i],strtok(NULL," "));
02632     }
02633 }
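
/* [Editor's sketch] Typical use of the three lexeme helpers above, mirroring
   the parsing code in ReadPatterns. demo_lexemes is hypothetical; the
   40-byte buffers match what the parser allocates per lexeme, and the
   stdio/stdlib includes at the top of this file suffice.                 */
void demo_lexemes(void)
{
        char line[] = "POND 1 2.0 3.0";
        int i, n;
        char **ss;

        n = CountLexemes(line);                 /* n = 4                  */
        ss = (char**) malloc(n*sizeof(char*));
        for(i=0; i<n; i++) ss[i] = (char*) malloc(40*sizeof(char));
        getnLexemes(n,line,ss);                 /* "POND","1","2.0","3.0" */
        for(i=0; i<n; i++) printf("lexeme %d: %s\n",i,ss[i]);
        for(i=0; i<n; i++) free(ss[i]);
        free(ss);
}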
02634 
02635 
02636 /***********************************************************/
02637 /* LearnFree                                               */
02638 /*                                                         */
02639 /* frees memory allocated for learning                     */
02640 /*                                                         */
02641 /* Author: J.Schwindling   28-May-99                       */
02642 /***********************************************************/   
02643 
02644 /* extern "C"Dllexport */
02645 void LearnFree()
02646 {
02647         int il,in; 
02648         if(LearnMemory==0) return;
02649         LearnMemory = 0;
02650         for(il=0; il<NET.Nlayer; il++)
02651                 {
02652                 for(in=0; in<NET.Nneur[il]; in++)
02653                         {
02654                         free(dir[il][in]);
02655                         }
02656                 free(dir[il]);
02657                 }
02658         free(dir);
02659         if(BFGSMemory==0) return;
02660         BFGSMemory = 0;
02661         for(il=0; il<NET.Nweights; il++)
02662                 {
02663                 free(BFGSH[il]);
02664                 }               
02665         free(BFGSH);
02666         free(Gamma);
02667         free(delta);
02668         
02669 /*      if(JacobianMemory == 0) return;
02670         JacobianMemory = 0;
02671         for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
02672         free(JacobianMatrix);   */
02673 }
02674 
02675 
02676 /***********************************************************/
02677 /* LearnAlloc                                              */
02678 /*                                                         */
02679 /* memory allocation for vectors and matrices used in      */
02680 /* conjugate gradients or BFGS like methods                */
02681 /*                                                         */
02682 /* return value (int) = error code: 0 no error             */
02683 /*                                  -111 no memory         */
02684 /*                                                         */
02685 /* Author: J.Schwindling   20-Apr-99                       */
02686 /* Modified: J.Schwindling 31-Jan-2000  error code         */
02687 /***********************************************************/   
02688 
02689 /* extern "C"Dllexport */
02690 int LearnAlloc()
02691 {
02692         int il,in,i; 
02693         int Nweights = 0;
02694         
02695         if(LearnMemory != 0) LearnFree();
02696         LearnMemory = 1;
02697         dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
02698         if(dir == 0) return -111;
02699         
02700         for(il=0; il<NET.Nlayer; il++)
02701                 {
02702                 dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
02703                 if(dir[il] == 0) return -111;
02704                 for(in=0; in<NET.Nneur[il]; in++)
02705                         {
02706                         if(il==0) 
02707                                 {
02708 /* TODO: understand implications of hard-coded 101 */                           
02709                                 dir[0][in] = (dbl *)
02710                                 malloc(101*sizeof(dbl));
02711                                 if(dir[0][in] == 0) return -111;
02712                                 }
02713                         else
02714                                 {
02715                                 dir[il][in] = (dbl *) 
02716                                 malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
02717                                 if(dir[il][in] == 0) return -111;
02718                                 Nweights += NET.Nneur[il-1]+1;
02719                                 }
02720                         }
02721                 }
02722         NET.Nweights = Nweights;
02723                 
02724         if(BFGSMemory==0 && LEARN.Meth>= 6) 
02725                 {
02726                 BFGSMemory = 1;
02727                 Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
02728                 delta = (dbl*) malloc(Nweights*sizeof(dbl));
02729                 BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
02730                 if(Gamma == 0 || delta == 0 || BFGSH == 0)
02731                    return -111;
02732                    
02733                 for(i=0; i<Nweights; i++)
02734                         {
02735                         BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
02736                         if(BFGSH[i] == 0) return -111;
02737                         }
02738                 }
02739                 
02740 /*      if(JacobianMemory==0)
02741                 {
02742                 JacobianMemory = 1;
02743                 printf("JacobianMemory = %d\n",JacobianMemory);
02744                 JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
02745                 for(i=0; i<PAT.Npat[0]; i++)
02746                         JacobianMatrix[i] = 
02747                                 (dbl*) malloc(Nweights*sizeof(dbl));
02748                 printf("end memory alloc\n");
02749                 }       
02750         
02751         if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
02752         
02753         return 0;
02754 }
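
/* [Editor's note] The weight count accumulated in LearnAlloc is simply

       Nweights = Sum_{il=1}^{Nlayer-1} Nneur[il] * (Nneur[il-1] + 1)

   (one bias plus one weight per neuron of the previous layer). A minimal
   stand-alone check, shown with a hypothetical 2-3-1 structure:          */
static int count_weights(int nlayer, const int *nneur)
{
        int il, nw = 0;
        for(il=1; il<nlayer; il++)
                nw += nneur[il] * (nneur[il-1] + 1);
        return nw;      /* {2,3,1} gives 3*(2+1) + 1*(3+1) = 13           */
}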
02755 
02756 
02757 /***********************************************************/
02758 /* MLP_PrFFun                                              */
02759 /*                                                         */
02760 /* writes the MLP function to file as a fortran function   */
02761 /*                                                         */
02762 /* inputs :     char *filename = name of the output file   */
02763 /*                                                         */
02764 /* return value (int) = 0: no error                        */
02765 /*                     -1: could not open file             */ 
02766 /*                                                         */
02767 /* Author: J.Schwindling   20-Apr-99                       */
02768 /* Modified: J.Schwindling 05-May-99                       */
02769 /*              add normalization of inputs                */
02770 /*           J.Schwindling 30-Nov-99 return code           */
02771 /***********************************************************/   
02772 
02773 /* extern "C"Dllexport */
02774 int MLP_PrFFun(char *filename)
02775 {
02776         int il,in,jn;
02777         FILE *W;
02778 
02779         W=fopen(filename,"w");
02780         if(W==0) return -1;
02781         fprintf(W,"      SUBROUTINE RNNFUN(rin,rout)\n");
02782         fprintf(W,"      DIMENSION RIN(%d)\n",NET.Nneur[0]);
02783         fprintf(W,"      DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
02784         fprintf(W,"C\n");
02785         
02786         for(in=0; in<NET.Nneur[0]; in++)
02787                 {
02788                 if(DIVERS.Norm==0)
02789                         { 
02790                         fprintf(W,"      OUT%d = RIN(%d)\n",in+1,in+1);
02791                         }
02792                 else
02793                         {
02794                         fprintf(W,"      OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
02795                                         STAT.mean[in],STAT.sigma[in]);
02796                         }       
02797                 }
02798         for(il=1; il<NET.Nlayer-1; il++)
02799                 {
02800                 fprintf(W,"C\n");
02801                 fprintf(W,"C     layer %d\n",il+1);
02802                 for(in=0; in<NET.Nneur[il]; in++)
02803                         {
02804                         fprintf(W,"      RIN%d = %e\n",in+1,
02805                                         (double) NET.Weights[il][in][0]);
02806                         for(jn=1;jn<=NET.Nneur[il-1]; jn++)
02807                                 fprintf(W,"     > +(%e) * OUT%d\n",
02808                                         (double) NET.Weights[il][in][jn],jn);
02809                         }
02810                 fprintf(W,"C\n");
02811                 for(in=0; in<NET.Nneur[il]; in++)
02812                         {
02813                         if(NET.T_func[il][in]==0) 
02814                                 {
02815                                 fprintf(W,"      OUT%d = 0\n",in+1);
02816                                 }
02817                         else if(NET.T_func[il][in]==1)
02818                                 {
02819                                 fprintf(W,"      OUT%d = RIN%d\n",in+1,in+1);
02820                                 }
02821                         else if(NET.T_func[il][in]==2)
02822                                 {
02823                                 fprintf(W,"      OUT%d = SIGMOID(RIN%d)\n",
02824                                         in+1,in+1);
02825                                 }
02826                         }
02827                 }
02828         il = NET.Nlayer-1;
02829         fprintf(W,"C\n");
02830         fprintf(W,"C     layer %d\n",il+1);
02831         for(in=0; in<NET.Nneur[il]; in++)
02832                 {
02833                 fprintf(W,"      RIN%d = %e\n",in+1,
02834                                 (double) NET.Weights[il][in][0]);
02835                 for(jn=1;jn<=NET.Nneur[il-1]; jn++)
02836                         fprintf(W,"     > +(%e) * OUT%d\n",
02837                                 (double) NET.Weights[il][in][jn],jn);
02838                 }
02839         fprintf(W,"C\n");
02840         for(in=0; in<NET.Nneur[il]; in++)
02841                 {
02842                 if(NET.T_func[il][in]==0) 
02843                         {
02844                         fprintf(W,"      ROUT(%d) = 0\n",in+1);
02845                         }
02846                 else if(NET.T_func[il][in]==1)
02847                         {
02848                         fprintf(W,"      ROUT(%d) = RIN%d\n",in+1,in+1);
02849                         }
02850                 else if(NET.T_func[il][in]==2)
02851                         {
02852                         fprintf(W,"      ROUT(%d) = SIGMOID(RIN%d)\n",
02853                                 in+1,in+1);
02854                         }
02855                 }
02856                 
02857         fprintf(W,"C\n");
02858         fprintf(W,"      END\n");
02859         fprintf(W,"      REAL FUNCTION SIGMOID(X)\n");
02860         fprintf(W,"      SIGMOID = 1./(1.+EXP(-X))\n");
02861         fprintf(W,"      END\n");
02862         
02863         fclose(W);
02864         return 0;
02865 }
02866 
02867 
02868 /***********************************************************/
02869 /* MLP_PrCFun                                              */
02870 /*                                                         */
02871 /* writes the MLP function to file as a C function         */
02872 /*                                                         */
02873 /* inputs :     char *filename = name of the output file   */
02874 /*                                                         */
02875 /* return value (int) = 0: no error                        */
02876 /*                      -1: could not open file            */ 
02877 /*                                                         */
02878 /* Author: J.Schwindling   23-Apr-99                       */
02879 /* Modified: J.Schwindling 30-Nov-99 return code           */
02880 /***********************************************************/   
02881 
02882 /* extern "C"Dllexport */
02883 int MLP_PrCFun(char *filename)
02884 {
02885         int il,in,jn;
02886         FILE *W;
02887 
02888         W=fopen(filename,"w");
02889         if(W==0) return -1;
02890         
02891         fprintf(W,"double sigmoid(double x)\n");
02892         fprintf(W,"{\n");
02893         fprintf(W,"return 1/(1+exp(-x));\n");
02894         fprintf(W,"}\n");
02895         fprintf(W,"void rnnfun(double *rin,double *rout)\n");
02896         fprintf(W,"{\n");
02897         fprintf(W,"      double out1[%d];\n",NET.Nneur[0]);
02898         fprintf(W,"      double out2[%d];\n",NET.Nneur[1]);
02899         if(NET.Nlayer>=3) fprintf(W,"      double out3[%d];\n",NET.Nneur[2]);
02900         if(NET.Nlayer>=4) fprintf(W,"      double out4[%d];\n",NET.Nneur[3]); /* only nets up to 4 layers get declarations here */
02901         fprintf(W,"\n");
02902         
02903         for(in=0; in<NET.Nneur[0]; in++)
02904                 {
02905                 if(DIVERS.Norm==0)
02906                         { 
02907                         fprintf(W,"      out1[%d] = rin[%d];\n",in,in);
02908                         }
02909                 else
02910                         {
02911                         fprintf(W,"      out1[%d] = (rin[%d]-%e)/%e;\n",
02912                                         in,in,
02913                                         STAT.mean[in],STAT.sigma[in]);
02914                         }       
02915                 }
02916                 
02917         for(il=1; il<=NET.Nlayer-1; il++)
02918                 {
02919                 fprintf(W,"\n");
02920                 fprintf(W,"/*     layer %d */\n",il+1);
02921                 for(in=0; in<NET.Nneur[il]; in++)
02922                         {
02923                         fprintf(W,"      out%d[%d] = %e\n",il+1,in,
02924                                         (double) NET.Weights[il][in][0]);
02925                         for(jn=1;jn<=NET.Nneur[il-1]; jn++)
02926                                 fprintf(W,"      +(%e) * out%d[%d]\n",
02927                                         (double) NET.Weights[il][in][jn],il,jn-1);
02928                         fprintf(W,"      ;\n");
02929                         }
02930                 fprintf(W,"\n");
02931                 for(in=0; in<NET.Nneur[il]; in++)
02932                         {
02933                         if(NET.T_func[il][in]==0) 
02934                                 {
02935                                 fprintf(W,"      out%d[%d] = 0;\n",il+1,in);
02936                                 }
02937                         else if(NET.T_func[il][in]==1)
02938                                 {
02939                                 }
02940                         else if(NET.T_func[il][in]==2)
02941                                 {
02942                                 fprintf(W,"      out%d[%d] = sigmoid(out%d[%d]);\n",
02943                                         il+1,in,il+1,in);
02944                                 }
02945                         }
02946                 }
02947         il = NET.Nlayer-1;
02948         for(in=0; in<NET.Nneur[il]; in++)
02949                 {
02950                 fprintf(W,"      rout[%d] = out%d[%d];\n",in,il+1,in);
02951                 }
02952         fprintf(W,"}\n");       
02953         fclose(W);
02954         return 0;
02955 }
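
/* [Editor's note] For a hypothetical 2-2-1 network with DIVERS.Norm=0 and
   sigmoid transfer functions everywhere, the file generated above would
   read as follows (weight values elided as w..., and the per-layer comment
   lines omitted here so as not to nest comment markers):

       double sigmoid(double x)
       {
       return 1/(1+exp(-x));
       }
       void rnnfun(double *rin,double *rout)
       {
             double out1[2];
             double out2[2];
             double out3[1];

             out1[0] = rin[0];
             out1[1] = rin[1];

             out2[0] = w...
             +(w...) * out1[0]
             +(w...) * out1[1]
             ;
             out2[1] = ...
             out2[0] = sigmoid(out2[0]);
             out2[1] = sigmoid(out2[1]);

             out3[0] = ...
             out3[0] = sigmoid(out3[0]);
             rout[0] = out3[0];
       }                                                                  */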
02956 
02957 
02958 /***********************************************************/
02959 /* SaveWeights                                             */
02960 /*                                                         */
02961 /* writes the weights to file                              */
02962 /*                                                         */
02963 /* inputs :     char *filename = name of the output file   */
02964 /*              int iepoch = epoch number                  */
02965 /*                                                         */
02966 /* return value (int): 0 if OK                             */
02967 /*                     -1 if file could not be opened      */    
02968 /*                                                         */
02969 /* Author: J.Schwindling   20-Apr-99                       */
02970 /* Modified: J.Schwindling 11-Jun-99                       */
02971 /*                         print net structure in header   */
02972 /* Modified: J.Schwindling 05-Nov-99                       */
02973 /*                         return error code               */
02974 /***********************************************************/   
02975 
02976 /* extern "C"Dllexport */
02977 int SaveWeights(char *filename, int iepoch)
02978 {
02979         FILE *W;
02980         int ilayer,ineur,i;
02981 
02982         W=fopen(filename,"w");
02983         if(W==0) return -1;
02984         
02985         fprintf(W,"# network structure ");
02986         for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
02987                 {
02988                 fprintf(W,"%d ",NET.Nneur[ilayer]);
02989                 }
02990                 
02991         fprintf(W,"\n %d\n",iepoch);
02992         for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
02993                 {
02994                 for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
02995                         {
02996                         for(i=0; i<=NET.Nneur[ilayer-1]; i++)
02997                                 {
02998                                 fprintf(W," %1.15e\n",
02999                                 (double) NET.Weights[ilayer][ineur][i]);
03000                                 }
03001                         }
03002                 }
03003         fclose(W);
03004         return 0;
03005 }
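
/* [Editor's note] The file written above is plain text: one header line with
   the network structure, the epoch number, then one weight per line (layer
   by layer, neuron by neuron, bias first at index 0). For a hypothetical
   2-2-1 net saved at epoch 100:

       # network structure 2 2 1 
        100
        1.234567890123456e-01
        ...     (2 neurons x 3 values, then 1 neuron x 3 values: 9 lines) */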
03006         
03007 
03008 /***********************************************************/
03009 /* LoadWeights                                             */
03010 /*                                                         */
03011 /* reads the weights from file                             */
03012 /*                                                         */
03013 /* input :      char *filename = name of the input file    */
03014 /* output :     int *iepoch = epoch number                 */
03015 /*                                                         */
03016 /* return value (int): 0 if OK                             */
03017 /*                     -1 if file could not be opened      */    
03018 /*                                                         */
03019 /* Author: J.Schwindling   20-Apr-99                       */
03020 /* Modified: J.Schwindling 11-Jun-99                       */
03021 /*           lines starting with # are skipped             */
03022 /* Modified: J.Schwindling 05-Nov-99                       */
03023 /*                         return error code               */
03024 /***********************************************************/   
03025 
03026 /* extern "C"Dllexport */
03027 int LoadWeights(char *filename, int *iepoch)
03028 {
03029         FILE *W;
03030         int ilayer,ineur,i;
03031         double p;
03032         char s[80];
03033 
03034         W=fopen(filename,"r");
03035         if(W==0) return -1;
03036         do
03037                 {
03038                 fgets(s,80,W);
03039                 }
03040         while(*s == '#');       
03041         sscanf(s," %d",iepoch);
03042         for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
03043                 {
03044                 for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
03045                         {
03046                         for(i=0; i<=NET.Nneur[ilayer-1]; i++)
03047                                 {
03048                                 fscanf(W," %le",&p);
03049                                 NET.Weights[ilayer][ineur][i] = (dbl) p;
03050                                 }
03051                         }
03052                 }
03053 
03054         fclose(W);
03055         return 0;
03056 }
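
/* [Editor's sketch] A minimal save/load round trip, assuming the network has
   already been set up (e.g. via MLP_SetNet); demo_weights_io and the file
   name are hypothetical.                                                 */
void demo_weights_io(void)
{
        char fname[] = "weights.out";
        int iepoch = 0;

        if(SaveWeights(fname,100) != 0) printf("could not write %s\n",fname);
        if(LoadWeights(fname,&iepoch) != 0) printf("could not read %s\n",fname);
        /* iepoch is now 100 and NET.Weights holds the values just saved  */
}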
03057 
03058 
03059 /***********************************************************/
03060 /* AllocPatterns                                           */
03061 /*                                                         */
03062 /* allocate memory for the examples                        */
03063 /*                                                         */
03064 /* input :      int ifile = file number (0 or 1)           */
03065 /*              int npat = number of examples              */
03066 /*              int nin  = number of inputs                */
03067 /*              int nout = number of outputs               */
03068 /*              int iadd = 0: new examples                 */
03069 /*                         1: add to existing ones         */   
03070 /*                                                         */
03071 /* return value (int) = error code: 0 = no error           */
03072 /*                                  1 = wrong file number  */
03073 /*                                 -111 = no memory        */
03074 /*                                                         */
03075 /* Author: J.Schwindling   21-Apr-99                       */
03076 /* Modified: J.Schwindling 26-Apr-99                       */
03077 /*            - frees memory if already booked and iadd=0  */
03078 /*              (should remove need to call mlpfree)       */ 
03079 /*            - implement iadd = 1                         */  
03080 /***********************************************************/   
03081 
03082 /* extern "C"Dllexport */int AllocPatterns(int ifile, int npat, int nin, int nout, int iadd)
03083 {
03084         int j, ierr;
03085         type_pat *tmp, *tmp3;
03086         type_pat **tmp2;
03087         int ntot;
03088         
03089         if(ifile>1 || ifile<0) return(1);
03090 /*      scanf("%d",&j); */
03091         if(ExamplesMemory==0) 
03092                 {
03093                 ExamplesMemory=1;
03094                 PAT.Pond = (type_pat **) malloc(2*sizeof(type_pat*));
03095                 PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
03096                 PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
03097                 PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
03098                 if(PAT.Pond == 0 || PAT.Rin == 0
03099                    || PAT.Rans == 0 || PAT.vRin == 0) return -111; 
03100                 } 
03101         
03102 
03103 /* if iadd=0, check that memory not already allocated. Otherwise free it */
03104         if(iadd==0 && PatMemory[ifile]!=0)
03105                 {
03106                 ierr = FreePatterns(ifile);
03107                 }
03108 
03109 /* allocate memory and initialize ponderations */
03110         if(iadd==0 || PatMemory[ifile]==0)
03111         {
03112         PatMemory[ifile] = 1;           
03113         PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
03114         if(PAT.Pond[ifile] == 0) return -111;
03115         for(j=0; j<npat; j++)
03116                 PAT.Pond[ifile][j] = 1;
03117                         
03118         PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
03119         if(PAT.Rin[ifile] == 0) return -111;
03120         PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
03121         if(PAT.Rans[ifile] == 0) return -111;
03122 
03123         PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
03124                                                 sizeof(type_pat));
03125         if(PAT.vRin[ifile] == 0) return -111;
03126                                                 
03127         for(j=0; j<npat; j++)
03128                 {
03129                 PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
03130                 PAT.vRin[ifile][j*(nin+1)] = 1;
03131                 }
03132         for(j=0; j<npat; j++)
03133                 {
03134                 PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
03135                 if(PAT.Rans[ifile][j] == 0) return -111;
03136                 }
03137         PAT.Npat[ifile] = npat;
03138         
03139         if(ifile==0)
03140                 {       
03141                 ExamplesIndex = (int *) malloc(npat*sizeof(int));
03142                 if(ExamplesIndex == 0) return -111;
03143                 for(j=0; j<npat; j++) ExamplesIndex[j] = j;
03144                 }
03145         }
03146         else            /* add examples */
03147         {
03148         ntot = PAT.Npat[ifile]+npat;
03149         
03150 /* event weighting */   
03151         tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
03152         if(tmp == 0) return -111;
03153         
03154         for(j=0; j<PAT.Npat[ifile]; j++) 
03155                 {
03156                 tmp[j] = PAT.Pond[ifile][j];
03157                 }
03158         for(j=PAT.Npat[ifile];j<ntot;j++)
03159                 {
03160                 tmp[j] = 1;
03161                 }
03162         if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
03163         PAT.Pond[ifile] = tmp;  
03164         
03165 /* examples */
03166 /*      tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));            
03167         for(j=0; j<PAT.Npat[ifile]; j++) 
03168                 {
03169                 tmp2[j] = PAT.Rin[ifile][j];
03170                 }
03171         for(j=PAT.Npat[ifile];j<ntot;j++)
03172                 {
03173                 tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
03174                 }
03175         if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
03176         PAT.Rin[ifile] = tmp2;  */
03177         
03178         tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
03179         if(tmp3 == 0) return -111;
03180         
03181         for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
03182                 {
03183                 tmp3[j] = PAT.vRin[ifile][j];
03184                 }
03185         if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
03186         PAT.vRin[ifile] = tmp3;

        /* grow the Rin pointer array to ntot entries before filling it
           below; its previous size was only PAT.Npat[ifile]              */
        tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
        if(tmp2 == 0) return -111;
        if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
        PAT.Rin[ifile] = tmp2;

03187         for(j=0; j<ntot; j++)
03188                 {
03189                 PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
03190                 PAT.vRin[ifile][j*(nin+1)] = 1;
03191                 }
03192                 
03193         tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
03194         if(tmp2 == 0) return -111;              
03195         for(j=0; j<PAT.Npat[ifile]; j++) 
03196                 {
03197                 tmp2[j] = PAT.Rans[ifile][j];
03198                 }
03199         for(j=PAT.Npat[ifile];j<ntot;j++)
03200                 {
03201                 tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
03202                 if(tmp2[j] == 0) return -111;
03203                 }
03204         if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
03205         PAT.Rans[ifile] = tmp2; 
03206         PAT.Npat[ifile] = ntot;
03207         PatMemory[ifile] = 1;           
03208         
03209 /* indices */   
03210         if(ifile==0)
03211                 {
03212                 free(ExamplesIndex);    
03213                 ExamplesIndex = (int *) malloc(ntot*sizeof(int));
03214                 if(ExamplesIndex == 0) return -111;
03215                 for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
03216                 }
03217         }
03218                 
03219         return 0;
03220 } 
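
/* [Editor's note] Layout of the flat input buffer set up above: vRin[ifile]
   holds npat blocks of (nin+1) values; slot 0 of each block is fixed to 1
   (the bias input) and Rin[ifile][j] points just past it, so that
   Rin[ifile][j][i] is input i of example j:

       vRin[ifile]:   1  x0_0 .. x0_{nin-1}   1  x1_0 .. x1_{nin-1}  ...
                         ^-- Rin[ifile][0]       ^-- Rin[ifile][1]

   The constant 1 in slot 0 lets a whole block act as an input vector with
   its bias term already in place, while Rin keeps per-example indexing.  */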
03221 
03222 
03223 /***********************************************************/
03224 /* FreePatterns                                            */
03225 /*                                                         */
03226 /* frees memory for the examples                           */
03227 /*                                                         */
03228 /* input :      int ifile = file number (0 or 1)           */
03229 /*                                                         */
03230 /* return value (int) = error code: 0 = no error           */
03231 /*                                  1 = wrong file number  */
03232 /*                                  2 = no mem allocated   */
03233 /*                                                         */
03234 /* Author: J.Schwindling   26-Apr-99                       */
03235 /***********************************************************/   
03236 
03237 /* extern "C"Dllexport */int FreePatterns(int ifile)
03238 {
03239         int i;
03240 
03241         if(ifile>1 || ifile<0) return 1;
03242 /*      printf("%d %d \n",ifile,PatMemory[ifile]);*/
03243         if(PatMemory[ifile]==0) return 2;
03244         
03245         free(PAT.Pond[ifile]);
03246         for(i=0; i<PAT.Npat[ifile]; i++)
03247                 {
03248 /*              free(PAT.Rin[ifile][i]); */
03249                 free(PAT.Rans[ifile][i]); 
03250                 }
03251         free(PAT.Rin[ifile]);
03252         free(PAT.Rans[ifile]);
03253         free(PAT.vRin[ifile]);
03254         PatMemory[ifile] = 0;
03255         PAT.Npat[ifile] = 0;
03256         
03257         return 0;
03258 }               
03259 
03260 
03261 /***********************************************************/
03262 /* MLP_StatInputs                                          */
03263 /*                                                         */
03264 /* compute statistics about the inputs: mean, RMS, min, max*/
03265 /*                                                         */
03266 /* inputs:      int Nexamples = number of examples         */
03267 /*              int Ninputs = number of quantities         */
03268 /*              type_pat **inputs = input values           */
03269 /*                                                         */
03270 /* outputs:     dbl *mean = mean value                     */
03271 /*              dbl *sigma = RMS                           */
03272 /*              dbl *minimum = minimum                     */
03273 /*              dbl *maximum = maximum                     */
03274 /*                                                         */
03275 /* return value (int):  always = 0                         */  
03276 /*                                                         */
03277 /* Author: J.Schwindling   11-Oct-99                       */
03278 /***********************************************************/   
03279 
03280 int MLP_StatInputs(int Nexamples, int Ninputs, type_pat **inputs, 
03281                 dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)      
03282 {
03283         dbl *fmean;
03284         int j, ipat, nmax;
03285         
03286 /* first compute a fast rough mean using the first 100 events */
03287         fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
03288         nmax = 100;
03289         if(Nexamples<100) nmax=Nexamples;
03290         
03291         for(j=0;j<Ninputs;j++)
03292                 {
03293                 fmean[j] = 0;
03294                 for(ipat=0;ipat<nmax;ipat++)
03295                         {
03296                         fmean[j] += (dbl) inputs[ipat][j];
03297                         }
03298                 fmean[j] = fmean[j]/(dbl) nmax;
03299                 
03300 /* now compute real mean and sigma, min and max */              
03301                 mean[j] = 0;
03302                 sigma[j] = 0;
03303                 minimum[j] = 99999;
03304                 maximum[j] = -99999;
03305                 for(ipat=0;ipat<Nexamples;ipat++)
03306                         {
03307                         mean[j] += (dbl) inputs[ipat][j];
03308                         sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
03309                                     ((dbl) inputs[ipat][j]-fmean[j]);
03310                         if((dbl) inputs[ipat][j] > maximum[j]) 
03311                                 maximum[j]=(dbl) inputs[ipat][j];           
03312                         if((dbl) inputs[ipat][j] < minimum[j]) 
03313                                 minimum[j]=(dbl) inputs[ipat][j];           
03314                         }
03315                 mean[j] = mean[j]/(dbl) Nexamples;
03316                 sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples - 
03317                                 (mean[j]-fmean[j])*
03318                                 (mean[j]-fmean[j]));    
03319                 }
03320         free(fmean);
03321         return 0;
03322 }
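
/* [Editor's note] The sigma computed above uses the shifted-data identity

       Var(x) = E[(x - f)^2] - (E[x] - f)^2

   where f is the rough mean of the first (up to) 100 events. This is what
   sqrt(sigma[j]/Nexamples - (mean[j]-fmean[j])^2) evaluates; centering on f
   keeps the accumulated squares small, avoiding most of the cancellation
   error of the naive E[x^2] - E[x]^2 form.                               */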
03323 
03324 /***********************************************************/
03325 /* MLP_PrintInputStat                                      */
03326 /*                                                         */
03327 /* prints statistics about the inputs: mean, RMS, min, max */
03328 /*                                                         */
03329 /* return value (int) = error code: 0 = OK                 */
03330 /*                                  -111 = could not       */
03331 /*                                         allocate memory */
03332 /*                                                         */
03333 /* Author: J.Schwindling   11-Oct-99                       */
03334 /* Modified: J.Schwindling 31-Jan-2000: return value       */
03335 /***********************************************************/   
03336 
03337 int MLP_PrintInputStat()
03338 {
03339         int j;
03340         dbl *mean, *sigma, *minimum, *maximum;
03341 
03342 /* allocate memory */
03343         mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03344         sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03345         minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03346         maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03347 
03348         if(mean == 0 || sigma == 0 || minimum == 0
03349            || maximum == 0) return -111;
03350 
03351         MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
03352                         mean,sigma,minimum,maximum);
03353 
03354         printf("\t mean \t\t RMS \t\t min \t\t max\n");
03355         for(j=0;j<NET.Nneur[0];j++)
03356                 {
03357                 printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
03358                                         mean[j],sigma[j],minimum[j],maximum[j]);
03359                 }
03360         
03361         free(mean);
03362         free(sigma);
03363         free(minimum);
03364         free(maximum);  
03365         printf("\n");
03366         return 0;
03367 }
03368 
03369 
03370 /***********************************************************/
03371 /* NormalizeInputs                                         */
03372 /*                                                         */
03373 /* normalize the inputs: I' = (I - <I>) / RMS(I)           */
03374 /*                                                         */
03375 /* return value (int) = error code: 0 = OK                 */
03376 /*                                  -111 = could not       */
03377 /*                                         allocate memory */
03378 /*                                                         */
03379 /* Author: J.Schwindling   04-May-1999                     */
03380 /* Modified: J.Schwindling 31-Jan-2000: return value       */
03381 /***********************************************************/   
03382 
03383 /* extern "C"Dllexport */int NormalizeInputs()
03384 {
03385         int j, ipat;
03386         dbl *mean, *sigma, *minimum, *maximum;
03387 
03388 /* allocate memory */
03389         mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03390         sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03391         STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03392         STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03393         minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03394         maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
03395         
03396         if(mean == 0 || sigma == 0 || minimum == 0
03397            || maximum == 0 || STAT.mean == 0 ||
03398            STAT.sigma == 0) return -111;
03399 
03400         MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
03401                         mean,sigma,minimum,maximum);
03402 
03403         if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
03404         for(j=0;j<NET.Nneur[0];j++)
03405                 {
03406                 if(NET.Debug>=1)
03407                         printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
03408                                         mean[j],sigma[j],minimum[j],maximum[j]);
03409                 
03410 /* store mean and sigma for output function */          
03411                 STAT.mean[j] = mean[j];                 
03412                 STAT.sigma[j] = sigma[j];
03413                                         
03414 /* finally apply the normalization */
03415                 for(ipat=0;ipat<PAT.Npat[0];ipat++)
03416                         {
03417                         PAT.Rin[0][ipat][j] =
03418                         (PAT.Rin[0][ipat][j]-(float) mean[j])/
03419                         (float) sigma[j];
03420                         }       
03421                 for(ipat=0;ipat<PAT.Npat[1];ipat++)
03422                         {
03423                         PAT.Rin[1][ipat][j] =
03424                         (PAT.Rin[1][ipat][j]-(float) mean[j])/
03425                         (float) sigma[j];
03426                         }       
03427                 }
03428         
03429         free(mean);
03430         free(sigma);
03431         free(minimum);
03432         free(maximum);  
03433         if(NET.Debug>=1) printf("\n");
03434         return 0;
03435 }
03436 
03437 
03438 /***********************************************************/
03439 /* AllocNetwork                                            */
03440 /*                                                         */
03441 /* memory allocation for weights, etc                      */
03442 /*                                                         */
03443 /* inputs:      int Nlayer: number of layers               */
03444 /*              int *Neurons: number of neurons per layer  */
03445 /*                                                         */
03446 /* return value (int): error = 0: no error                 */
03447 /*                           = -111: could not allocate mem*/ 
03448 /*                                                         */
03449 /* Author: J.Schwindling   28-Sep-99                       */
03450 /***********************************************************/   
03451 
03452 int AllocNetwork(int Nlayer, int *Neurons)
03453 {
03454         int i, j, k, l;
03455         
03456         if(NetMemory != 0) FreeNetwork();
03457         NetMemory = 1;
03458         
03459         NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
03460         if(NET.Nneur == 0) return -111;
03461         
03462         NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
03463         NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
03464         NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
03465         NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
03466         NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
03467         if(NET.T_func == 0 || NET.Deriv1 == 0
03468                 || NET.Inn == 0 || NET.Outn == 0
03469                 || NET.Delta == 0) return -111;
03470         
03471         for(i=0; i<Nlayer; i++)
03472                 {
03473                 NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
03474                 NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03475                 NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03476                 NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03477                 NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
03478                 if(NET.T_func[i] == 0 || NET.Deriv1[i] == 0 
03479                         || NET.Inn[i] == 0 || NET.Outn[i] == 0
03480                         || NET.Delta[i] ==0 ) return -111;
03481                 }
03482                 
03483         NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03484         NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
03485         LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03486         LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03487         LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
03488         if(NET.Weights == 0 || NET.vWeights == 0 
03489           || LEARN.Odw == 0 || LEARN.ODeDw == 0
03490           || LEARN.DeDw == 0)  return -111;
03491           
03492         for(i=1; i<Nlayer; i++)
03493                 {
03494                 k = Neurons[i-1]+1;
03495                 NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
03496                                                 sizeof(dbl));
03497                 NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03498                 LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03499                 LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03500                 LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
03501                 if(NET.Weights[i] == 0 || NET.vWeights[i] == 0 
03502                   || LEARN.Odw[i] == 0 || LEARN.ODeDw[i] == 0
03503                   || LEARN.DeDw[i] == 0)  return -111;
03504                   
03505                 for(j=0; j<Neurons[i]; j++)
03506                         {
03507                         NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
03508                         LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
03509                         LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
03510                         LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
03511                         if(LEARN.Odw[i][j] == 0 
03512                           || LEARN.ODeDw[i][j] == 0
03513                           || LEARN.DeDw[i][j] == 0)  return -111;
03514                         
03515                         for(l=0; l<k; l++)
03516                                 {
03517                                 LEARN.Odw[i][j][l] = 0;
03518                                 LEARN.ODeDw[i][j][l] = 0;
03519                                 }
03520                         }
03521                 }
03522         return 0;       
03523 }
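
/* [Editor's note] AllocNetwork exposes each layer's weights in two ways over
   the same memory: vWeights[i] is one flat block of Nneur[i]*k values
   (k = Nneur[i-1]+1), and Weights[i][j] = &vWeights[i][j*k] is a row
   pointer into it, so Weights[i][j][0] is the bias of neuron j and
   Weights[i][j][l], l>=1, its weight from neuron l-1 of the layer below.
   The flat vWeights view is what MLP_MatrixVectorBias (below) consumes.  */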
03524 
03525 
03526 /***********************************************************/
03527 /* FreeNetwork                                             */
03528 /*                                                         */
03529 /* frees the memory allocated for the network              */
03530 /*                                                         */
03531 /* Author: J.Schwindling   06-Oct-99                       */
03532 /***********************************************************/   
03533 
03534 void FreeNetwork()
03535 {
03536         int i, j;
03537         for(i=1; i<NET.Nlayer; i++)
03538                 {
03539                 for(j=0; j<NET.Nneur[i]; j++)
03540                         {
03541 /*                      free(NET.Weights[i][j]); */
03542                         free(LEARN.Odw[i][j]);
03543                         free(LEARN.ODeDw[i][j]);
03544                         free(LEARN.DeDw[i][j]);
03545                         }
03546                 free(NET.vWeights[i]);
03547                 free(NET.Weights[i]);
03548                 free(LEARN.Odw[i]);
03549                 free(LEARN.ODeDw[i]);
03550                 free(LEARN.DeDw[i]);
03551                 }
03552         free(NET.Weights);
03553         free(LEARN.Odw);
03554         free(LEARN.ODeDw);
03555         free(LEARN.DeDw);       
03556         
03557         free(NET.Nneur);
03558         
03559         for(i=0; i<NET.Nlayer; i++)
03560                 {
03561                 free(NET.T_func[i]);
03562                 free(NET.Deriv1[i]);
03563                 free(NET.Inn[i]);
03564                 free(NET.Outn[i]);
03565                 free(NET.Delta[i]);
03566                 }
03567         free(NET.T_func);
03568         free(NET.Deriv1);
03569         free(NET.Inn);
03570         free(NET.Outn);
03571         free(NET.Delta);
03572         
03573         NetMemory = 0;  
03574 }
03575 
03576 /***********************************************************/
03577 /* GetNetStructure                                         */
03578 /*                                                         */
03579 /* given a string like "3,4,1" returns the network         */
03580 /* structure                                               */
03581 /*                                                         */
03582 /* inputs:      char *s: input string                      */
03583 /*                                                         */
03584 /* outputs:     int *Nlayer: number of layers              */
03585 /*              int *Nneur: number of neurons per layer    */
03586 /*                                                         */
03587 /* return value (int): error = 0: no error                 */
03588 /*                           = -1: s is empty              */ 
03589 /*                           = -2: s is too long           */
03590 /*                           = -3: too many layers         */
03591 /*                                                         */
03592 /* Author: J.Schwindling   04-Oct-99                       */
03593 /***********************************************************/   
03594 
03595 int GetNetStructure(char *s, int *Nlayer, int *Nneur)
03596 {
03597         int i=0;
03598         char tmp[1024];
03599 
03600         if(strlen(s)==0) return -1;
03601         if(strlen(s)>=1024) return -2;  /* tmp holds at most 1023 chars + NUL */
03602 
03603         strcpy(tmp,s);
03604         if (strtok(tmp,","))
03605                 {
03606                 i=1;
03607                 while (strtok(NULL,",")) i++;
03608                 }
03609         *Nlayer = i;
03610         if(i > NLMAX) return -3;
03611 
03612         strcpy(tmp,s);
03613         if (*Nlayer>0)
03614                 {
03615                 sscanf(strtok(tmp,","),"%d",&(Nneur[0]));
03616                 for (i=1;i<*Nlayer;i++)
03617                         sscanf(strtok(NULL,","),"%d",&(Nneur[i]));
03618                 }
03619 
03620         return 0;
03621 }
03622 
03623 
03624 /***********************************************************/
03625 /* MLP_SetNet                                              */
03626 /*                                                         */
03627 /* to set the structure of a neural network                */
03628 /* inputs:     int *nl = number of layers                  */
03629 /*             int *nn = number of neurons                 */
03630 /*                                                         */
03631 /* return value (int) = error value:                       */
03632 /*                              0: no error                */ 
03633 /*                              1: N layers > NLMAX        */ 
03634 /*                              2: N layers < 2            */ 
03635 /*                           -111: Error allocating memory */ 
03636 /*                                                         */
03637 /* Author: J.Schwindling   14-Apr-99                       */
03638 /* Modified: J.Schwindling 05-Oct-99 allocate memory       */
03639 /* Modified: J.Schwindling 29-Nov-99 LearnFree, LearnAlloc */
03640 /***********************************************************/   
03641                                                 
03642 int MLP_SetNet(int *nl, int *nn)
03643 {
03644     int il,ierr;
03645 
03646     if((*nl)>NLMAX) return(1);
03647     if((*nl)<2) return(2);
03648     
03649 /*    LearnFree(); */
03650 /* allocate memory */      
03651     ierr = AllocNetwork(*nl,nn);
03652     if(ierr != 0) return ierr;
03653       
03654 /* set number of layers */               
03655     NET.Nlayer = (int) *nl;
03656 
03657 /* set number of neurons */               
03658     for(il=0; il<NET.Nlayer; il++) {
03659        NET.Nneur[il] = nn[il];
03660       }
03661 
03662 /* set transfer functions */      
03663     SetDefaultFuncs();
03664 /*    LearnAlloc(); */
03665       
03666     return(0); 
03667 }
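
/* [Editor's sketch] Putting GetNetStructure and MLP_SetNet together; the
   "2,3,1" structure string and demo_setnet are hypothetical.             */
void demo_setnet(void)
{
        char structure[] = "2,3,1";
        int nlayer, nneur[NLMAX];

        if(GetNetStructure(structure,&nlayer,nneur) != 0) return;
        if(MLP_SetNet(&nlayer,nneur) != 0) return;  /* allocates the net  */
        printf("%d layers, %d inputs, %d outputs\n",
                NET.Nlayer,NET.Nneur[0],NET.Nneur[NET.Nlayer-1]);
}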
03668 
03669 
03670 /***********************************************************/
03671 /* MLP_MatrixVectorBias                                    */
03672 /*                                                         */
03673 /* computes a Matrix-Vector product                        */
03674 /* r[j] = M[j][0] + Sum_i M[j][i] v[i]                     */ 
03675 /*                                                         */
03676 /* inputs:     dbl *M = matrix (n lines, m+1 columns)      */
03677 /*             dbl *v = vector (dimension m)               */
03678 /*             dbl *r = resulting vector (dimension n)     */
03679 /*             int n                                       */
03680 /*             int m                                       */ 
03681 /*                                                         */
03682 /* Author: J.Schwindling   24-Jan-00                       */
03683 /***********************************************************/
03684    
03685 void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
03686 {
03687         int i,j;
03688         register dbl a1, a2, a3, a4, c, d;
03689         dbl *pM1 = M;
03690         dbl *pM2 = &(M[m+1]);
03691         dbl *pM3 = &(M[2*(m+1)]);
03692         dbl *pM4 = &(M[3*(m+1)]);
03693         dbl *pr = r;
03694         int mp1 = m+1;
03695         
03696         for(i=0; i<n-3; 
03697                 i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1, 
03698                 pr+=4)
03699                 {
03700                 a1 = *pM1;
03701                 a2 = *pM2;
03702                 a3 = *pM3;
03703                 a4 = *pM4;
03704                 pM1++; pM2++; pM3++; pM4++;
03705                 for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2) 
03706                         {
03707                         c = v[j];
03708                         d = v[j+1];
03709                         a1 = a1 + *pM1 * c + *(pM1+1) * d;
03710                         a2 = a2 + *pM2 * c + *(pM2+1) * d; 
03711                         a3 = a3 + *pM3 * c + *(pM3+1) * d;
03712                         a4 = a4 + *pM4 * c + *(pM4+1) * d; 
03713                         }
03714                 for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
03715                         {
03716                         c = v[j];
03717                         a1 = a1 + *pM1 * c;
03718                         a2 = a2 + *pM2 * c; 
03719                         a3 = a3 + *pM3 * c;
03720                         a4 = a4 + *pM4 * c; 
03721                         }       
03722                 *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
03723                 }
03724         for(i=i; i<n; i++)
03725                 {
03726                 pM1 = &(M[i*(m+1)]);
03727                 a1 = *pM1;
03728                 pM1++;
03729                 for(j=0; j<m; j++, pM1++)
03730                         {
03731                         a1 = a1 + *pM1 * v[j];
03732                         }
03733                 r[i] = a1;      
03734                 }       
03735 }
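
/* [Editor's sketch] A plain reference version of the product computed above,
   for clarity; MLP_MatrixVectorBias_ref is hypothetical. The routine above
   is equivalent but unrolled: four output rows and two input columns per
   iteration.                                                             */
void MLP_MatrixVectorBias_ref(dbl *M, dbl *v, dbl *r, int n, int m)
{
        int i, j;
        dbl a;

        for(i=0; i<n; i++)
                {
                a = M[i*(m+1)];                 /* bias term in column 0  */
                for(j=0; j<m; j++)
                        a += M[i*(m+1)+1+j] * v[j];
                r[i] = a;
                }
}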
03736 /***********************************************************/
03737 /* MLP_MatrixVector                                        */
03738 /*                                                         */
03739 /* computes a Matrix-Vector product                        */
03740 /* r[j] = Sum_i M[j][i] v[i]                               */ 
03741 /*                                                         */
03742 /*             dbl *M = matrix (n lines, m columns)        */
03743 /*             dbl *v = vector (dimension m)               */
03744 /*             dbl *r = resulting vector (dimension n)     */
03745 /*             int n                                       */
03746 /*             int m                                       */ 
03747 /*                                                         */
03748 /* Author: J.Schwindling   24-Jan-00                       */
03749 /***********************************************************/
03750    
03751 void MLP_MatrixVector(dbl *M, type_pat *v, dbl *r, int n, int m)
03752 {
03753         int i,j;
03754         register dbl a1, a2, a3, a4, c, d;
03755         dbl *pM1 = M;
03756         dbl *pM2 = &(M[m]);
03757         dbl *pM3 = &(M[2*m]);
03758         dbl *pM4 = &(M[3*m]);
03759         dbl *pr = r;
03760         int mp1 = m;    /* line stride of M: no bias column here */
03761         
03762         for(i=0; i<n-3; 
03763                 i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1, 
03764                 pr+=4)
03765                 {
03766                 a1 = 0;
03767                 a2 = 0;
03768                 a3 = 0;
03769                 a4 = 0;
03770                 for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2) 
03771                         {
03772                         c = v[j];
03773                         d = v[j+1];
03774                         a1 = a1 + *pM1 * c + *(pM1+1) * d;
03775                         a2 = a2 + *pM2 * c + *(pM2+1) * d; 
03776                         a3 = a3 + *pM3 * c + *(pM3+1) * d;
03777                         a4 = a4 + *pM4 * c + *(pM4+1) * d; 
03778                         }
03779                 for(; j<m; j++, pM1++, pM2++, pM3++, pM4++) /* last column if m is odd */
03780                         {
03781                         c = v[j];
03782                         a1 = a1 + *pM1 * c;
03783                         a2 = a2 + *pM2 * c; 
03784                         a3 = a3 + *pM3 * c;
03785                         a4 = a4 + *pM4 * c; 
03786                         }       
03787                 *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
03788                 }
03789         for(; i<n; i++) /* remaining lines when n is not a multiple of 4 */
03790                 {
03791                 pM1 = &(M[i*m]);
03792                 a1 = 0;
03793                 for(j=0; j<m; j++, pM1++)
03794                         {
03795                         a1 = a1 + *pM1 * v[j];
03796                         }
03797                 r[i] = a1;      
03798                 }       
03799 }
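
/* A small usage sketch: on a 2x3 matrix stored line by line (with no
   bias column here), MLP_MatrixVector should reproduce a plain double
   loop.  check_MatrixVector is a hypothetical helper, not part of the
   library; the values are small integers, so the comparison is exact. */
static int check_MatrixVector(void)
{
        dbl M[6] = { 1, 2, 3,
                     4, 5, 6 };
        type_pat v[3] = { 1, 0, -1 };
        dbl r[2];
        MLP_MatrixVector(M, v, r, 2, 3);
        /* expected: r[0] = 1*1 + 2*0 + 3*(-1) = -2,
                     r[1] = 4*1 + 5*0 + 6*(-1) = -2 */
        return (r[0] == -2) && (r[1] == -2);
}
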
03800 
03801 
03802 /***********************************************************/
03803 /* MLP_MM2rows                                             */
03804 /*                                                         */
03805 /* computes a Matrix-Matrix product with the first matrix  */
03806 /* having 2 lines: c[i][j] = Sum_k a[i][k] * b[j][k]       */
03807 /* for i = 0,1 (i.e. c = a times b transposed)             */
03808 /* inputs:     type_pat *a = first matrix (2 lines of Nk   */
03809 /*                           values, line stride NaOffs)   */
03810 /*             dbl *b = second matrix (Nj lines of Nk      */
03811 /*                      values, line stride NbOffs)        */
03812 /*             int Ni = unused (a always has 2 lines)      */
03813 /*             int Nj, int Nk = dimensions as above        */
03814 /*             int NaOffs, int NbOffs = line strides       */
03815 /* output:     dbl *c = resulting matrix (2 lines * Nj)    */
03816 /*                                                         */
03817 /* Author: J.Schwindling   24-Jan-00                       */
03818 /***********************************************************/
03819    
03820 void MLP_MM2rows(dbl* c, type_pat* a, dbl* b,
03821              int Ni, int Nj, int Nk, int NaOffs, int NbOffs)
03822 {
03823 //int i,j,k;
03824 int j,k;
03825 dbl s00,s01,s10,s11;
03826 type_pat *pa0,*pa1;
03827 dbl *pb0,*pb1,*pc0,*pc1;
03828 
03829   for (j=0; j<=Nj-2; j+=2)
03830    {
03831     pc0 = c+j;
03832     pc1 = c+j+Nj;
03833     s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;
03834 
03835     for (k=0,pb0=b+k+NbOffs*j,
03836              pb1=b+k+NbOffs*(j+1),
03837              pa0=a+k,
03838              pa1=a+k+NaOffs;
03839          k<Nk;
03840          k++,pa0++,
03841              pa1++,
03842              pb0++,
03843              pb1++)
03844      {
03845       s00 += (*pa0)*(*pb0);
03846       s01 += (*pa0)*(*pb1);
03847       s10 += (*pa1)*(*pb0);
03848       s11 += (*pa1)*(*pb1);
03849      }
03850     *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
03851    }
03852   for (; j<Nj; j++)  /* last line of b when Nj is odd */
03853    {
03854     pc0 = c+j;
03855     pc1 = c+j+Nj;
03856     s00 = 0.0; s10 = 0.0;
03857     for (k=0,pb0=b+k+NbOffs*j,
03858              pa0=a+k,
03859              pa1=a+k+NaOffs;
03860          k<Nk;
03861          k++,pa0++,
03862              pa1++,
03863              pb0++)
03864      {
03865       s00 += (*pa0)*(*pb0);
03866       s10 += (*pa1)*(*pb0);
03867      }
03868     *pc0 = s00; *pc1 = s10;
03869    }
03870 }
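
/* A minimal reference sketch of what MLP_MM2rows computes (the name
   naive_MM2rows is ours, not part of the library): the 2 lines of a,
   spaced NaOffs apart, are folded against the Nj lines of b, spaced
   NbOffs apart, summing over the common index k; c has line stride Nj. */
static void naive_MM2rows(dbl *c, type_pat *a, dbl *b,
                          int Nj, int Nk, int NaOffs, int NbOffs)
{
        int i, j, k;
        for(i=0; i<2; i++)
                for(j=0; j<Nj; j++)
                        {
                        dbl s = 0;
                        for(k=0; k<Nk; k++)
                                s += a[i*NaOffs + k] * b[j*NbOffs + k];
                        c[i*Nj + j] = s;
                        }
}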
03871 
03872 #ifdef __cplusplus
03873 } // extern "C"
03874 #endif