mlp_gen.cc File Reference
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include "mlp_gen.h"
#include "mlp_sigmoide.h"


Macros

#define CLEN   1024
 
#define NLMAX   1000
 
#define NPMAX   100000
 

Functions

int AllocNetwork (int Nlayer, int *Neurons)
 
int AllocPatterns (int ifile, int npat, int nin, int nout, int iadd)
 
void BFGSdir (int Nweights)
 
void CGDir (dbl beta)
 
int CountLexemes (char *s)
 
int DecreaseSearch (dbl *alpmin, int *Ntest, dbl Err0)
 
dbl DeDwNorm ()
 
dbl DeDwProd ()
 
void DeDwSave ()
 
void DeDwSaveZero ()
 
void DeDwScale (int Nexamples)
 
int DeDwSum (type_pat *ans, dbl *out, int ipat)
 
void DeDwZero ()
 
dbl DerivDir ()
 
int dgels_ (char *trans, int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *work, int *lwork, int *info)
 
void EtaDecay ()
 
int FixedStep (dbl alpha)
 
void FreeNetwork ()
 
int FreePatterns (int ifile)
 
int GetBFGSH (int Nweights)
 
void GetGammaDelta ()
 
void getLexemes (char *s, char **ss)
 
int GetNetStructure (char *s, int *Nlayer, int *Nneur)
 
void getnLexemes (int n, char *s, char **ss)
 
void InitBFGSH (int Nweights)
 
void InitWeights ()
 
int LearnAlloc ()
 
void LearnFree ()
 
int LineSearch (dbl *alpmin, int *Ntest, dbl Err0)
 
int LineSearchHyb (dbl *alpmin, int *Ntest)
 
int LoadWeights (char *filename, int *iepoch)
 
dbl MLP_Epoch (int iepoch, dbl *alpmin, int *Ntest)
 
void MLP_Line (dbl ***w0, dbl alpha)
 
void MLP_LineHyb (dbl ***w0, dbl alpha)
 
void MLP_MatrixVector (dbl *M, type_pat *v, dbl *r, int n, int m)
 
void MLP_MatrixVectorBias (dbl *M, dbl *v, dbl *r, int n, int m)
 
void MLP_MM2rows (dbl *c, type_pat *a, dbl *b, int Ni, int Nj, int Nk, int NaOffs, int NbOffs)
 
void MLP_Out (type_pat *rrin, dbl *rrout)
 
void MLP_Out2 (type_pat *rrin)
 
void MLP_Out_T (type_pat *rrin)
 
int MLP_PrCFun (char *filename)
 
int MLP_PrFFun (char *filename)
 
int MLP_PrintInputStat ()
 
double MLP_Rand (dbl mini, dbl maxi)
 
void MLP_ResLin ()
 
int MLP_SetNet (int *nl, int *nn)
 
int MLP_StatInputs (int Nexamples, int Ninputs, type_pat **inputs, dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)
 
dbl MLP_Stochastic ()
 
dbl MLP_Test (int ifile, int regul)
 
dbl MLP_Test_MM (int ifile, dbl *tmp)
 
int MLP_Train (int *ipat, dbl *err)
 
int NormalizeInputs ()
 
void PrintWeights ()
 
int ReadPatterns (char *filename, int ifile, int *inet, int *ilearn, int *iexamples)
 
int SaveWeights (char *filename, int iepoch)
 
void SetDefaultFuncs ()
 
void SetLambda (double Wmax)
 
int SetTransFunc (int layer, int neuron, int func)
 
int ShuffleExamples (int n, int *index)
 
void SteepestDir ()
 
int StochStep ()
 
int StochStepHyb ()
 

Variables

dbl ** BFGSH
 
int BFGSMemory = 0
 
dbl * delta
 
dbl *** dir
 
int * ExamplesIndex
 
int ExamplesMemory = 0
 
dbl * Gamma
 
dbl ** Hessian
 
dbl ** JacobianMatrix
 
int JacobianMemory = 0
 
dbl LastAlpha = 0
 
int LearnMemory = 0
 
int MessLang = 0
 
struct net_ net_ MLP_HIDDEN
 
float MLPfitVersion = (float) 1.40
 
int NetMemory = 0
 
int NLineSearchFail = 0
 
int OutputWeights = 100
 
int PatMemory [2] = {0,0}
 
int WeightsMemory = 0
 

Macro Definition Documentation

#define CLEN   1024

Definition at line 2226 of file mlp_gen.cc.

Referenced by ReadPatterns().

#define NLMAX   1000

Definition at line 14 of file mlp_gen.cc.

Referenced by GetNetStructure(), MLP_SetNet(), ReadPatterns(), and SetTransFunc().

#define NPMAX   100000

Definition at line 13 of file mlp_gen.cc.

Function Documentation

int AllocNetwork ( int  Nlayer,
int *  Neurons 
)

Definition at line 3449 of file mlp_gen.cc.

References FreeNetwork(), LEARN, and NET.

Referenced by MLP_SetNet().

3450 {
3451  int i, j, k, l;
3452 
3453  if(NetMemory != 0) FreeNetwork();
3454  NetMemory = 1;
3455 
3456  NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
3457  if(NET.Nneur == 0) return -111;
3458 
3459  NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
3460  NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
3461  NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3462  NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3463  NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
3464  if(NET.T_func == 0 || NET.Deriv1 == 0
3465  || NET.Inn == 0 || NET.Outn == 0
3466  || NET.Delta == 0) return -111;
3467 
3468  for(i=0; i<Nlayer; i++)
3469  {
3470  NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
3471  NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3472  NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3473  NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3474  NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3475  if(NET.T_func[i] == 0 || NET.Deriv1[i] == 0
3476  || NET.Inn[i] == 0 || NET.Outn[i] == 0
3477  || NET.Delta[i] ==0 ) return -111;
3478  }
3479 
3480  NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3481  NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
3482  LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3483  LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3484  LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3485  if(NET.Weights == 0 || NET.vWeights == 0
3486  || LEARN.Odw == 0 || LEARN.ODeDw == 0
3487  || LEARN.DeDw == 0) return -111;
3488 
3489  for(i=1; i<Nlayer; i++)
3490  {
3491  k = Neurons[i-1]+1;
3492  NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
3493  sizeof(dbl));
3494  NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3495  LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3496  LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3497  LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3498  if(NET.Weights[i] == 0 || NET.vWeights[i] == 0
3499  || LEARN.Odw[i] == 0 || LEARN.ODeDw[i] == 0
3500  || LEARN.DeDw[i] == 0) return -111;
3501 
3502  for(j=0; j<Neurons[i]; j++)
3503  {
3504  NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
3505  LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3506  LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3507  LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3508  if(LEARN.Odw[i][j] == 0
3509  || LEARN.ODeDw[i][j] == 0
3510  || LEARN.DeDw[i][j] == 0) return -111;
3511 
3512  for(l=0; l<k; l++)
3513  {
3514  LEARN.Odw[i][j][l] = 0;
3515  LEARN.ODeDw[i][j][l] = 0;
3516  }
3517  }
3518  }
3519  return 0;
3520 }
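
A minimal allocation sketch (sizes and error handling are illustrative; in practice the function is reached through MLP_SetNet()):

    int neurons[3] = {2, 10, 1};   /* 2 inputs, 10 hidden neurons, 1 output */
    if(AllocNetwork(3, neurons) != 0)
        printf("network allocation failed\n");
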
int AllocPatterns ( int  ifile,
int  npat,
int  nin,
int  nout,
int  iadd 
)

Definition at line 3077 of file mlp_gen.cc.

References FreePatterns() and PAT.

Referenced by PhysicsTools::MLP::init(), and ReadPatterns().

3078 {
3079  int j;
3080  type_pat *tmp, *tmp3;
3081  type_pat **tmp2;
3082  int ntot;
3083 
3084  if(ifile>1 || ifile<0) return(1);
3085 /* scanf("%d",&j); */
3086  if(ExamplesMemory==0)
3087  {
3088  ExamplesMemory=1;
3089  PAT.Pond = (type_pat **) malloc(2*sizeof(dbl*));
3090  PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
3091  PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
3092  PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
3093  if(PAT.Pond == 0 || PAT.Rin == 0
3094  || PAT.Rans == 0 || PAT.vRin == 0) return -111;
3095  }
3096 
3097 
3098 /* if iadd=0, check that memory not already allocated. Otherwise free it */
3099  if(iadd==0 && PatMemory[ifile]!=0)
3100  {
3101  FreePatterns(ifile);
3102  }
3103 
3104 /* allocate memory and initialize ponderations */
3105  if(iadd==0 || PatMemory[ifile]==0)
3106  {
3107  PatMemory[ifile] = 1;
3108  PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
3109  if(PAT.Pond[ifile] == 0) return -111;
3110  for(j=0; j<npat; j++)
3111  PAT.Pond[ifile][j] = 1;
3112 
3113  PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3114  if(PAT.Rin[ifile] == 0) return -111;
3115  PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3116  if(PAT.Rans[ifile] == 0) return -111;
3117 
3118  PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
3119  sizeof(type_pat));
3120  if(PAT.vRin[ifile] == 0) return -111;
3121 
3122  for(j=0; j<npat; j++)
3123  {
3124  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3125  PAT.vRin[ifile][j*(nin+1)] = 1;
3126  }
3127  for(j=0; j<npat; j++)
3128  {
3129  PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
3130  if(PAT.Rans[ifile][j] == 0) return -111;
3131  }
3132  PAT.Npat[ifile] = npat;
3133 
3134  if(ifile==0)
3135  {
3136  ExamplesIndex = (int *) malloc(npat*sizeof(int));
3137  if(ExamplesIndex == 0) return -111;
3138  for(j=0; j<npat; j++) ExamplesIndex[j] = j;
3139  }
3140  }
3141  else /* add examples */
3142  {
3143  ntot = PAT.Npat[ifile]+npat;
3144 
3145 /* event weighting */
3146  tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
3147  if(tmp == 0) return -111;
3148 
3149  for(j=0; j<PAT.Npat[ifile]; j++)
3150  {
3151  tmp[j] = PAT.Pond[ifile][j];
3152  }
3153  for(j=PAT.Npat[ifile];j<ntot;j++)
3154  {
3155  tmp[j] = 1;
3156  }
3157  if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
3158  PAT.Pond[ifile] = tmp;
3159 
3160 /* examples */
3161 /* tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3162  for(j=0; j<PAT.Npat[ifile]; j++)
3163  {
3164  tmp2[j] = PAT.Rin[ifile][j];
3165  }
3166  for(j=PAT.Npat[ifile];j<ntot;j++)
3167  {
3168  tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
3169  }
3170  if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
3171  PAT.Rin[ifile] = tmp2; */
3172 
3173  tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
3174  if(tmp3 == 0) return -111;
3175 
3176  for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
3177  {
3178  tmp3[j] = PAT.vRin[ifile][j];
3179  }
3180  if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
3181  PAT.vRin[ifile] = tmp3;
3182  for(j=0; j<ntot; j++)
3183  {
3184  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3185  PAT.vRin[ifile][j*(nin+1)] = 1;
3186  }
3187 
3188  tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3189  if(tmp2 == 0) return -111;
3190  for(j=0; j<PAT.Npat[ifile]; j++)
3191  {
3192  tmp2[j] = PAT.Rans[ifile][j];
3193  }
3194  for(j=PAT.Npat[ifile];j<ntot;j++)
3195  {
3196  tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
3197  if(tmp2[j] == 0) return -111;
3198  }
3199  if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
3200  PAT.Rans[ifile] = tmp2;
3201  PAT.Npat[ifile] = ntot;
3202  PatMemory[ifile] = 1;
3203 
3204 /* indices */
3205  if(ifile==0)
3206  {
3207  free(ExamplesIndex);
3208  ExamplesIndex = (int *) malloc(ntot*sizeof(int));
3209  if(ExamplesIndex == 0) return -111;
3210  for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
3211  }
3212  }
3213 
3214  return 0;
3215 }
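
A usage sketch with illustrative sizes. ifile selects one of the two pattern stores (0 or 1); iadd=0 replaces any set already allocated there, iadd=1 appends to it:

    /* room for 1000 patterns of 4 inputs and 1 output in store 0 */
    if(AllocPatterns(0, 1000, 4, 1, 0) != 0)
        printf("pattern allocation failed\n");
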
void BFGSdir ( int  Nweights)

Definition at line 1340 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

1341 {
1342  dbl *g, *s;
1343  int kk=0;
1344  int il,i,j,in,jn;
1345 
1346  g = (dbl*) malloc(NET.Nweights*sizeof(dbl));
1347  s = (dbl*) malloc(Nweights*sizeof(dbl));
1348 
1349  for(il=1; kk<Nweights; il++)
1350  for(in=0; in<NET.Nneur[il]; in++)
1351  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1352  {
1353  g[kk] = LEARN.DeDw[il][in][jn];
1354  kk++;
1355  }
1356  for(i=0; i<Nweights; i++)
1357  {
1358  s[i] = 0;
1359  for(j=0; j<Nweights; j++)
1360  {
1361  s[i] += BFGSH[i][j] * g[j];
1362  }
1363  }
1364 
1365  kk = 0;
1366  for(il=1; kk<Nweights; il++)
1367  for(in=0; in<NET.Nneur[il]; in++)
1368  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1369  {
1370  dir[il][in][jn] = -s[kk];
1371  kk++;
1372  }
1373  free(g);
1374  free(s);
1375 }
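
Stripped of the weight-indexing loops, the function computes the quasi-Newton search direction

    d = -H g

where g is the gradient gathered from LEARN.DeDw and H is the inverse-Hessian approximation held in BFGSH; the result is scattered back into dir.
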
void CGDir ( dbl  beta)

Definition at line 1266 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

1267 {
1268  int il,in,jn;
1269  for(il=1; il<NET.Nlayer; il++)
1270  for(in=0; in<NET.Nneur[il]; in++)
1271  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1272  {
1273  dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
1274  beta*dir[il][in][jn];
1275  }
1276 }
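
This is the conjugate-gradient direction update

    d_k = -g_k + beta * d_{k-1}

where MLP_Epoch() supplies beta = (||g_k||^2 - g_k.g_{k-1}) / ||g_{k-1}||^2 for the Polak-Ribiere variant (LEARN.Meth==4) and beta = ||g_k||^2 / ||g_{k-1}||^2 for Fletcher-Reeves (LEARN.Meth==5), as can be read off the DeDwNorm()/DeDwProd() calls there.
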
int CountLexemes ( char *  s)

Definition at line 2585 of file mlp_gen.cc.


Referenced by getLexemes(), and ReadPatterns().

2586 {
2587  char tmp[1024];
2588  int i=0;
2589 
2590  strcpy(tmp,s);
2591  if (strtok(tmp," "))
2592  {
2593  i=1;
2594  while (strtok(NULL," ")) i++;
2595  }
2596  return i;
2597 }
int DecreaseSearch ( dbl alpmin,
int *  Ntest,
dbl  Err0 
)

Definition at line 1616 of file mlp_gen.cc.

References LEARN, MLP_Line(), MLP_Test(), and NET.

1617 {
1618  dbl ***w0;
1619  dbl alpha2;
1620  dbl err1, err2;
1621  dbl tau;
1622  int icount, il, in, jn;
1623 
1624  tau=LEARN.Tau;
1625 
1626 /* store weights before line search */
1627 
1628  *Ntest = 0;
1629  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1630  for(il=1; il<NET.Nlayer; il++)
1631  {
1632  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1633  for(in=0; in<NET.Nneur[il]; in++)
1634  {
1635  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1636  sizeof(dbl));
1637  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1638  {
1639  w0[il][in][jn] = NET.Weights[il][in][jn];
1640  }
1641  }
1642  }
1643 
1644 /* compute error(w0) */
1645 
1646 /* err1 = MLP_Test(0,0);
1647  (*Ntest) ++;*/
1648  err1 = Err0;
1649 
1650  if(NET.Debug>=4) printf("err depart= %f\n",err1);
1651 
1652  *alpmin = 0;
1653  alpha2 = 0.05;
1654  MLP_Line(w0,alpha2);
1655  err2 = MLP_Test(0,0);
1656  (*Ntest) ++;
1657 
1658  if(err2<err1)
1659  {
1660  *alpmin = alpha2;
1661  }
1662  else
1663  {
1664 
1665 
1666  for(icount=1;icount<=100;icount++)
1667  {
1668  alpha2 = alpha2/tau;
1669  MLP_Line(w0,alpha2);
1670  err2 = MLP_Test(0,0);
1671  (*Ntest) ++;
1672  if(err1>err2) break;
1673  }
1674  if(icount>=100) /* line search fails */
1675  {
1676  MLP_Line(w0,0); /* reset weights */
1677  free(w0);
1678  return(1);
1679  }
1680  *alpmin = alpha2;
1681  }
1682 
1683 /* set the weights */
1684  MLP_Line(w0,*alpmin);
1685 
1686 /* store weight changes */
1687  for(il=1; il<NET.Nlayer; il++)
1688  for(in=0; in<NET.Nneur[il]; in++)
1689  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1690  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1691  - w0[il][in][jn];
1692 
1693  for(il=1; il<NET.Nlayer; il++)
1694  for(in=0; in<NET.Nneur[il]; in++)
1695  free(w0[il][in]);
1696  for(il=1; il<NET.Nlayer; il++)
1697  free(w0[il]);
1698  free(w0);
1699 
1700  return(0);
1701 }
dbl DeDwNorm ( )

Definition at line 998 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

999 {
1000  int il,in,jn;
1001  dbl dd=0;
1002  for(il=1; il<NET.Nlayer; il++)
1003  for(in=0; in<NET.Nneur[il]; in++)
1004  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1005  dd += LEARN.DeDw[il][in][jn]*
1006  LEARN.DeDw[il][in][jn];
1007  return(dd);
1008 }
dbl DeDwProd ( )

Definition at line 1020 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

1021 {
1022  int il,in,jn;
1023  dbl dd=0;
1024  for(il=1; il<NET.Nlayer; il++)
1025  for(in=0; in<NET.Nneur[il]; in++)
1026  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1027  dd += LEARN.DeDw[il][in][jn]*
1028  LEARN.ODeDw[il][in][jn];
1029  return(dd);
1030 }
void DeDwSave ( )

Definition at line 1078 of file mlp_gen.cc.

References LEARN and NET.

1079 {
1080  int il, in, jn;
1081  for(il=1; il<NET.Nlayer; il++)
1082  for(in=0; in<NET.Nneur[il]; in++)
1083  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1084  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1085 }
void DeDwSaveZero ( )

Definition at line 1097 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

1098 {
1099  int il, in, jn;
1100  for(il=1; il<NET.Nlayer; il++)
1101  for(in=0; in<NET.Nneur[il]; in++)
1102  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1103  {
1104  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1105  LEARN.DeDw[il][in][jn] = 0;
1106  }
1107 }
void DeDwScale ( int  Nexamples)

Definition at line 1060 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

1061 {
1062  int il, in, jn;
1063  for(il=1; il<NET.Nlayer; il++)
1064  for(in=0; in<NET.Nneur[il]; in++)
1065  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1066  LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
1067 }
int DeDwSum ( type_pat ans,
dbl out,
int  ipat 
)

Definition at line 1123 of file mlp_gen.cc.

References LEARN, NET, and PAT.

Referenced by MLP_Train().

1124 {
1125  int il, in1, in, ii;
1126 /* dbl err[NMAX][4]; */
1127  dbl deriv;
1128  dbl *pout, *pdedw, *pdelta;
1129  register dbl a, b;
1130 /* char buf[50];*/
1131 
1132 /* output layer */
1133  b = (dbl) PAT.Pond[0][ipat];
1134  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
1135  {
1136  deriv = NET.Deriv1[NET.Nlayer-1][in];
1137  NET.Delta[NET.Nlayer-1][in] =
1138  (out[in] - (dbl) ans[in])*deriv*b;
1139  }
1140 
1141  for(il=NET.Nlayer-2; il>0; il--)
1142  {
1143 
1144  for(in=0; in<NET.Nneur[il]; in++)
1145  {
1146  deriv = NET.Deriv1[il][in];
1147  a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
1148  pdelta = &(NET.Delta[il+1][1]);
1149  for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++)
1150  {
1151  a += *pdelta * NET.Weights[il+1][in1][in+1];
1152  }
1153  NET.Delta[il][in] = a * deriv;
1154  }
1155  }
1156 
1157  for(il=1; il<NET.Nlayer; il++)
1158  {
1159  ii = NET.Nneur[il-1];
1160  for(in=0; in<NET.Nneur[il]; in++)
1161  {
1162  a = NET.Delta[il][in];
1163  LEARN.DeDw[il][in][0] += a;
1164  LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
1165  pout = &(NET.Outn[il-1][1]);
1166  pdedw = &(LEARN.DeDw[il][in][2]);
1167  for(in1=1; in1<ii; ++in1, ++pout, ++pdedw)
1168  {
1169  (*pdedw) += a * (*pout);
1170  }
1171  }
1172  }
1173 
1174  return(0);
1175 }
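
In formulas, DeDwSum() performs one backpropagation pass for pattern ipat, with Pond the event weight PAT.Pond[0][ipat] and f' the activation derivatives kept in NET.Deriv1 by the forward pass:

    output layer L-1:  Delta[L-1][i] = (out[i] - ans[i]) * f'[i] * Pond
    hidden layer il:   Delta[il][i]  = f'[i] * sum_j Delta[il+1][j] * Weights[il+1][j][i+1]
    gradient:          DeDw[il][i][0] += Delta[il][i]
                       DeDw[il][i][j] += Delta[il][i] * Outn[il-1][j-1]   (j >= 1)

(index 0 is the bias weight, whose input is implicitly 1).
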
void DeDwZero ( )

Definition at line 1041 of file mlp_gen.cc.

References LEARN and NET.

1042 {
1043  int il, in, jn;
1044  for(il=1; il<NET.Nlayer; il++)
1045  for(in=0; in<NET.Nneur[il]; in++)
1046  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1047  LEARN.DeDw[il][in][jn] = 0;
1048 }
dbl DerivDir ( )

Definition at line 1288 of file mlp_gen.cc.

References LEARN and NET.

Referenced by MLP_Epoch().

1289 {
1290  int il,in,jn;
1291  dbl ddir = 0;
1292 
1293  for(il=1; il<NET.Nlayer; il++)
1294  for(in=0; in<NET.Nneur[il]; in++)
1295  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1296  {
1297  ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
1298  }
1299  return(ddir);
1300 }
int dgels_ ( char *  trans,
int *  m,
int *  n,
int *  nrhs,
double *  a,
int *  lda,
double *  b,
int *  ldb,
double *  work,
int *  lwork,
int *  info 
)

Referenced by MLP_ResLin().

void EtaDecay ( )

Definition at line 2086 of file mlp_gen.cc.

References LEARN.

Referenced by MLP_Stochastic().

2087 {
2088  LEARN.eta *= LEARN.Decay;
2089 }
int FixedStep ( dbl  alpha)

Definition at line 1704 of file mlp_gen.cc.

References LEARN, MLP_Line(), and NET.

1705 {
1706  dbl ***w0;
1707  int il, in, jn;
1708 
1709  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1710  for(il=1; il<NET.Nlayer; il++)
1711  {
1712  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1713  for(in=0; in<NET.Nneur[il]; in++)
1714  {
1715  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1716  sizeof(dbl));
1717  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1718  {
1719  w0[il][in][jn] = NET.Weights[il][in][jn];
1720  }
1721  }
1722  }
1723 
1724 
1725 /* set the weights */
1726  MLP_Line(w0,alpha);
1727 
1728 /* store weight changes */
1729  for(il=1; il<NET.Nlayer; il++)
1730  for(in=0; in<NET.Nneur[il]; in++)
1731  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1732  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1733  - w0[il][in][jn];
1734 
1735  for(il=1; il<NET.Nlayer; il++)
1736  for(in=0; in<NET.Nneur[il]; in++)
1737  free(w0[il][in]);
1738  for(il=1; il<NET.Nlayer; il++)
1739  free(w0[il]);
1740  free(w0);
1741 
1742  return(0);
1743 }
void FreeNetwork ( )

Definition at line 3531 of file mlp_gen.cc.

References LEARN and NET.

Referenced by AllocNetwork().

3532 {
3533  int i, j;
3534  for(i=1; i<NET.Nlayer; i++)
3535  {
3536  for(j=0; j<NET.Nneur[i]; j++)
3537  {
3538 /* free(NET.Weights[i][j]); */
3539  free(LEARN.Odw[i][j]);
3540  free(LEARN.ODeDw[i][j]);
3541  free(LEARN.DeDw[i][j]);
3542  }
3543  free(NET.vWeights[i]);
3544  free(NET.Weights[i]);
3545  free(LEARN.Odw[i]);
3546  free(LEARN.ODeDw[i]);
3547  free(LEARN.DeDw[i]);
3548  }
3549  free(NET.Weights);
3550  free(LEARN.Odw);
3551  free(LEARN.ODeDw);
3552  free(LEARN.DeDw);
3553 
3554  free(NET.Nneur);
3555 
3556  for(i=0; i<NET.Nlayer; i++)
3557  {
3558  free(NET.T_func[i]);
3559  free(NET.Deriv1[i]);
3560  free(NET.Inn[i]);
3561  free(NET.Outn[i]);
3562  free(NET.Delta[i]);
3563  }
3564  free(NET.T_func);
3565  free(NET.Deriv1);
3566  free(NET.Inn);
3567  free(NET.Outn);
3568  free(NET.Delta);
3569 
3570  NetMemory = 0;
3571 }
int FreePatterns ( int  ifile)

Definition at line 3232 of file mlp_gen.cc.

References PAT.

Referenced by AllocPatterns(), and PhysicsTools::MLP::clear().

3233 {
3234  int i;
3235 
3236  if(ifile>1 || ifile<0) return 1;
3237 /* printf("%d %d \n",ifile,PatMemory[ifile]);*/
3238  if(PatMemory[ifile]==0) return 2;
3239 
3240  free(PAT.Pond[ifile]);
3241  for(i=0; i<PAT.Npat[ifile]; i++)
3242  {
3243 /* free(PAT.Rin[ifile][i]); */
3244  free(PAT.Rans[ifile][i]);
3245  }
3246  free(PAT.Rin[ifile]);
3247  free(PAT.Rans[ifile]);
3248  free(PAT.vRin[ifile]);
3249  PatMemory[ifile] = 0;
3250  PAT.Npat[ifile] = 0;
3251 
3252  return 0;
3253 }
int GetBFGSH ( int  Nweights)

Definition at line 1418 of file mlp_gen.cc.

References BFGSH, delta, and Gamma.

Referenced by MLP_Epoch().

1419 {
1420  typedef double dble;
1421  dble deltaTgamma=0;
1422  dble factor=0;
1423  dble *Hgamma;
1424  dble *tmp;
1425  register dble a, b;
1426  int i,j;
1427 
1428  Hgamma = (dble *) malloc(Nweights*sizeof(dble));
1429  tmp = (dble *) malloc(Nweights*sizeof(dble));
1430 
1431  for(i=0; i<Nweights; i++)
1432  {
1433  deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
1434  a = 0;
1435  b = 0;
1436  for(j=0; j<Nweights; j++)
1437  {
1438  a += (dble) BFGSH[i][j] * (dble) Gamma[j];
1439  b += (dble) Gamma[j] * (dble) BFGSH[j][i];
1440  }
1441  Hgamma[i] = a;
1442  tmp[i] = b;
1443  factor += (dble) Gamma[i]*Hgamma[i];
1444  }
1445  if(deltaTgamma == 0)
1446  {
1447  free(tmp);
1448  free(Hgamma);
1449  return 1;
1450  }
1451  a = 1 / deltaTgamma;
1452  factor = 1 + factor*a;
1453 
1454  for(i=0; i<Nweights; i++)
1455  {
1456  b = (dble) delta[i];
1457  for(j=0; j<Nweights; j++)
1458  BFGSH[i][j] += (dbl) (factor*b* (dble)
1459  delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;
1460  }
1461  free(Hgamma);
1462  free(tmp);
1463  return 0;
1464 }
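
The loops implement the standard BFGS update of the inverse-Hessian approximation,

    H <- H + (1 + Gamma^T H Gamma / delta^T Gamma) * (delta delta^T)/(delta^T Gamma)
           - (H Gamma delta^T + delta Gamma^T H)/(delta^T Gamma)

with delta the last weight change and Gamma the gradient difference, both filled by GetGammaDelta(). The early "return 1" on delta^T Gamma = 0 lets MLP_Epoch() fall back to a steepest-descent step and reinitialize BFGSH.
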
void GetGammaDelta ( )

Definition at line 1313 of file mlp_gen.cc.

References delta, Gamma, LEARN, and NET.

Referenced by MLP_Epoch().

1314 {
1315  int i=0;
1316  int il,in,jn;
1317  for(il=1; il<NET.Nlayer; il++)
1318  for(in=0; in<NET.Nneur[il]; in++)
1319  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1320  {
1321  Gamma[i] = LEARN.DeDw[il][in][jn]-
1322  LEARN.ODeDw[il][in][jn];
1323  delta[i] = LEARN.Odw[il][in][jn];
1324  i++;
1325  }
1326 }
void getLexemes ( char *  s,
char **  ss 
)

Definition at line 2615 of file mlp_gen.cc.

References CountLexemes().

2616 {
2617  char tmp[1024];
2618  int i,n;
2619 
2620  strcpy(tmp,s);
2621  n=CountLexemes(tmp);
2622  if (n>0)
2623  {
2624  strcpy(ss[0],strtok(tmp," "));
2625  for (i=1;i<n;i++)
2626  strcpy(ss[i],strtok(NULL," "));
2627  }
2628 }
int GetNetStructure ( char *  s,
int *  Nlayer,
int *  Nneur 
)

Definition at line 3592 of file mlp_gen.cc.

References NLMAX.

Referenced by ReadPatterns().

3593 {
3594  int i=0;
3595  char tmp[1024];
3596 
3597  if(strlen(s)==0) return -1;
3598  if(strlen(s)>1024) return -2;
3599 
3600  strcpy(tmp,s);
3601  if (strtok(tmp,","))
3602  {
3603  i=1;
3604  while (strtok(NULL,",")) i++;
3605  }
3606  *Nlayer = i;
3607  if(i > NLMAX) return -3;
3608 
3609  strcpy(tmp,s);
3610  if (*Nlayer>0)
3611  {
3612  sscanf(strtok(tmp,","),"%d",&(Nneur[0]));
3613  for (i=1;i<*Nlayer;i++)
3614  sscanf(strtok(NULL,","),"%d",&(Nneur[i]));
3615  }
3616 
3617  return 0;
3618 }
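
A parsing sketch (the structure string and variable names are illustrative):

    char s[] = "2,10,1";
    int nlayer, nneur[NLMAX];
    if(GetNetStructure(s, &nlayer, nneur) == 0)
        printf("%d layers: %d inputs, %d outputs\n",
               nlayer, nneur[0], nneur[nlayer-1]);
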
void getnLexemes ( int  n,
char *  s,
char **  ss 
)

Definition at line 2601 of file mlp_gen.cc.


Referenced by ReadPatterns().

2602 {
2603  char tmp[1024];
2604  int i;
2605  strcpy(tmp,s);
2606  if (n>0)
2607  {
2608  strcpy(ss[0],strtok(tmp," "));
2609  for (i=1;i<n;i++)
2610  strcpy(ss[i],strtok(NULL," "));
2611  }
2612 }
void InitBFGSH ( int  Nweights)

Definition at line 1388 of file mlp_gen.cc.

References BFGSH.

Referenced by MLP_Epoch().

1389 {
1390  int i,j;
1391  for(i=0; i<Nweights; i++)
1392  for(j=0; j<Nweights; j++)
1393  {
1394  BFGSH[i][j] = 0;
1395  if(i==j) BFGSH[i][j] = 1;
1396  }
1397 }
void InitWeights ( )

Definition at line 2147 of file mlp_gen.cc.

References MLP_Rand() and NET.

Referenced by PhysicsTools::MLP::MLP().

2148 {
2149  int ilayer,ineur,i;
2150 
2151  for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
2152  for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
2153  for(i=0;i<=NET.Nneur[ilayer-1];i++)
2154  NET.Weights[ilayer][ineur][i]=
2155  (dbl) MLP_Rand(-0.5, 0.5);
2156 }
int LearnAlloc ( )

Definition at line 2685 of file mlp_gen.cc.

References LEARN, LearnFree(), and NET.

Referenced by PhysicsTools::MLP::MLP().

2686 {
2687  int il,in,i;
2688  int Nweights = 0;
2689 
2690  if(LearnMemory != 0) LearnFree();
2691  LearnMemory = 1;
2692  dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
2693  if(dir == 0) return -111;
2694 
2695  for(il=0; il<NET.Nlayer; il++)
2696  {
2697  dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
2698  if(dir[il] == 0) return -111;
2699  for(in=0; in<NET.Nneur[il]; in++)
2700  {
2701  if(il==0)
2702  {
2703 /* TODO: understand implications of hard-coded 101 */
2704  dir[0][in] = (dbl *)
2705  malloc(101*sizeof(dbl));
2706  if(dir[0][in] == 0) return -111;
2707  }
2708  else
2709  {
2710  dir[il][in] = (dbl *)
2711  malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
2712  if(dir[il][in] == 0) return -111;
2713  Nweights += NET.Nneur[il-1]+1;
2714  }
2715  }
2716  }
2717  NET.Nweights = Nweights;
2718 
2719  if(BFGSMemory==0 && LEARN.Meth>= 6)
2720  {
2721  BFGSMemory = 1;
2722  Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
2723  delta = (dbl*) malloc(Nweights*sizeof(dbl));
2724  BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
2725  if(Gamma == 0 || delta == 0 || BFGSH == 0)
2726  return -111;
2727 
2728  for(i=0; i<Nweights; i++)
2729  {
2730  BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
2731  if(BFGSH[i] == 0) return -111;
2732  }
2733  }
2734 
2735 /* if(JacobianMemory==0)
2736  {
2737  JacobianMemory = 1;
2738  printf("JacobianMemory = %d\n",JacobianMemory);
2739  JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
2740  for(i=0; i<PAT.Npat[0]; i++)
2741  JacobianMatrix[i] =
2742  (dbl*) malloc(Nweights*sizeof(dbl));
2743  printf("end memory alloc\n");
2744  }
2745 
2746  if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
2747 
2748  return 0;
2749 }
void LearnFree ( )

Definition at line 2640 of file mlp_gen.cc.

References NET.

Referenced by LearnAlloc(), and PhysicsTools::MLP::~MLP().

2641 {
2642  int il,in;
2643  if(LearnMemory==0) return;
2644  LearnMemory = 0;
2645  for(il=0; il<NET.Nlayer; il++)
2646  {
2647  for(in=0; in<NET.Nneur[il]; in++)
2648  {
2649  free(dir[il][in]);
2650  }
2651  free(dir[il]);
2652  }
2653  free(dir);
2654  if(BFGSMemory==0) return;
2655  BFGSMemory = 0;
2656  for(il=0; il<NET.Nweights; il++)
2657  {
2658  free(BFGSH[il]);
2659  }
2660  free(BFGSH);
2661  free(Gamma);
2662  free(delta);
2663 
2664 /* if(JacobianMemory == 0) return;
2665  JacobianMemory = 0;
2666  for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
2667  free(JacobianMatrix); */
2668 }
int LineSearch ( dbl alpmin,
int *  Ntest,
dbl  Err0 
)

Definition at line 1478 of file mlp_gen.cc.

References LastAlpha, LEARN, MLP_Line(), MLP_Test(), and NET.

Referenced by MLP_Epoch().

1479 {
1480  dbl ***w0;
1481  dbl alpha1, alpha2, alpha3;
1482  dbl err1, err2, err3;
1483  dbl tau;
1484  int icount, il, in, jn;
1485 
1486  tau=LEARN.Tau;
1487 
1488 /* store weights before line search */
1489 
1490  *Ntest = 0;
1491  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1492  for(il=1; il<NET.Nlayer; il++)
1493  {
1494  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1495  for(in=0; in<NET.Nneur[il]; in++)
1496  {
1497  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1498  sizeof(dbl));
1499  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1500  {
1501  w0[il][in][jn] = NET.Weights[il][in][jn];
1502  }
1503  }
1504  }
1505 
1506 /* compute error(w0) */
1507 
1508 /* err1 = MLP_Test(0,0);
1509  (*Ntest) ++;*/
1510  err1 = Err0;
1511 
1512  if(NET.Debug>=4) printf("err depart= %f\n",err1);
1513 
1514  *alpmin = 0;
1515  alpha1 = 0;
1516 /* alpha2 = 0.05;
1517  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1518  alpha2 = LastAlpha;
1519  if(alpha2 < 0.01) alpha2 = 0.01;
1520  if(alpha2 > 2.0) alpha2 = 2.0;
1521  MLP_Line(w0,alpha2);
1522  err2 = MLP_Test(0,0);
1523  (*Ntest) ++;
1524  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1525 
1526  alpha3 = alpha2;
1527  err3 = err2;
1528 
1529 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1530  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1531 
1532  if(err1>err2)
1533  {
1534  for(icount=1;icount<=100;icount++)
1535  {
1536  alpha3 = alpha3*tau;
1537  MLP_Line(w0,alpha3);
1538  err3 =MLP_Test(0,0);
1539  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);
1540  (*Ntest) ++;
1541  if(err3>err2) break;
1542  alpha1 = alpha2;
1543  err1 = err2;
1544  alpha2 = alpha3;
1545  err2 = err3;
1546  }
1547  if(icount>=100) /* line search fails */
1548  {
1549  MLP_Line(w0,0); /* reset weights */
1550  free(w0);
1551  return(1);
1552  }
1553  }
1554  else
1555  {
1556  for(icount=1;icount<=100;icount++)
1557  {
1558  alpha2 = alpha2/tau;
1559  MLP_Line(w0,alpha2);
1560  err2 = MLP_Test(0,0);
1561  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1562  (*Ntest) ++;
1563  if(err1>err2) break;
1564  alpha3 = alpha2;
1565  err3 = err2;
1566  }
1567  if(icount>=100) /* line search fails */
1568  {
1569  MLP_Line(w0,0); /* reset weights */
1570  free(w0);
1571  LastAlpha = 0.05; /* try to be safe */
1572  return(1);
1573  }
1574  }
1575 
1576 /* find bottom of parabola */
1577 
1578  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1579  -(err2-err1)/(alpha2-alpha1)));
1580  if(*alpmin>10000) *alpmin=10000;
1581 
1582 /* set the weights */
1583  MLP_Line(w0,*alpmin);
1584  LastAlpha = *alpmin;
1585 
1586 /* store weight changes */
1587  for(il=1; il<NET.Nlayer; il++)
1588  for(in=0; in<NET.Nneur[il]; in++)
1589  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1590  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1591  - w0[il][in][jn];
1592 
1593  for(il=1; il<NET.Nlayer; il++)
1594  for(in=0; in<NET.Nneur[il]; in++)
1595  free(w0[il][in]);
1596  for(il=1; il<NET.Nlayer; il++)
1597  free(w0[il]);
1598  free(w0);
1599 
1600  return(0);
1601 }
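
Once a triplet with err1 > err2 < err3 has been bracketed, the "bottom of parabola" step at line 1578 jumps to the minimum of the parabola through (alpha1,err1), (alpha2,err2), (alpha3,err3):

    alpmin = ( alpha1 + alpha3
             - (err3-err1) / ((err3-err2)/(alpha3-alpha2) - (err2-err1)/(alpha2-alpha1)) ) / 2

clipped at 10000; alpmin is then remembered in LastAlpha as the starting step of the next search.
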
int LineSearchHyb ( dbl alpmin,
int *  Ntest 
)

Definition at line 1781 of file mlp_gen.cc.

References LastAlpha, LEARN, MLP_LineHyb(), MLP_Test(), and NET.

Referenced by MLP_Epoch().

1782 {
1783  dbl ***w0;
1784  dbl alpha1, alpha2, alpha3;
1785  dbl err1, err2, err3;
1786  dbl tau;
1787  int icount, il, in, jn;
1788 
1789 /* char buf [50];
1790  sprintf (buf,"entree linesearchhyb\n");
1791  MessageBoxA (0,buf,"dans FreePatterns",MB_OK);*/
1792 
1793  if(NET.Debug>=4){
1794  printf(" entry LineSearchHyb \n");
1795  }
1796  tau=LEARN.Tau;
1797 
1798 /* store weights before line search */
1799 
1800  *Ntest = 0;
1801  w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
1802  for(il=1; il<NET.Nlayer-1; il++)
1803  {
1804  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1805  for(in=0; in<NET.Nneur[il]; in++)
1806  {
1807  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1808  sizeof(dbl));
1809  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1810  {
1811  w0[il][in][jn] = NET.Weights[il][in][jn];
1812  }
1813  }
1814  }
1815 
1816 /* compute error(w0) */
1817  err1 = MLP_Test(0,1);
1818  (*Ntest) ++;
1819  if(NET.Debug>=4) printf("LinesearchHyb err depart= %f\n",err1);
1820 
1821  *alpmin = 0;
1822  alpha1 = 0;
1823 /* alpha2 = 0.05;
1824  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1825  alpha2 = LastAlpha;
1826  if(alpha2 < 0.01) alpha2 = 0.01;
1827  if(alpha2 > 2.0) alpha2 = 2.0;
1828  MLP_LineHyb(w0,alpha2);
1829  err2 = MLP_Test(0,1);
1830  (*Ntest) ++;
1831 
1832  alpha3 = alpha2;
1833  err3 = err2;
1834 
1835 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1836  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1837 
1838  if(err1>err2)
1839  {
1840  for(icount=1;icount<=100;icount++)
1841  {
1842  alpha3 = alpha3*tau;
1843  MLP_LineHyb(w0,alpha3);
1844  err3 = MLP_Test(0,1);
1845  (*Ntest) ++;
1846  if(err3>err2) break;
1847  alpha1 = alpha2;
1848  err1 = err2;
1849  alpha2 = alpha3;
1850  err2 = err3;
1851  }
1852  if(icount>=100) /* line search fails */
1853  {
1854  MLP_LineHyb(w0,0); /* reset weights */
1855  free(w0);
1856  return(1);
1857  }
1858  }
1859  else
1860  {
1861  for(icount=1;icount<=100;icount++)
1862  {
1863  alpha2 = alpha2/tau;
1864  MLP_LineHyb(w0,alpha2);
1865  err2 = MLP_Test(0,1);
1866  (*Ntest) ++;
1867  if(err1>err2) break;
1868  alpha3 = alpha2;
1869  err3 = err2;
1870  }
1871  if(icount>=100) /* line search fails */
1872  {
1873  MLP_LineHyb(w0,0); /* reset weights */
1874  free(w0);
1875  return(1);
1876  }
1877  }
1878 
1879 /* find bottom of parabola */
1880 
1881  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1882  -(err2-err1)/(alpha2-alpha1)));
1883  if(*alpmin>10000) *alpmin=10000;
1884 
1885 /* set the weights */
1886  MLP_LineHyb(w0,*alpmin);
1887  LastAlpha = *alpmin;
1888 
1889 /* store weight changes */
1890  for(il=1; il<NET.Nlayer-1; il++)
1891  for(in=0; in<NET.Nneur[il]; in++)
1892  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1893  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1894  - w0[il][in][jn];
1895 
1896  for(il=1; il<NET.Nlayer-1; il++)
1897  for(in=0; in<NET.Nneur[il]; in++)
1898  free(w0[il][in]);
1899  for(il=1; il<NET.Nlayer-1; il++)
1900  free(w0[il]);
1901  free(w0);
1902  if(NET.Debug>=4){
1903  printf(" exit LineSearchHyb \n");
1904  }
1905 
1906  return(0);
1907 }
int LoadWeights ( char *  filename,
int *  iepoch 
)

Definition at line 3022 of file mlp_gen.cc.

References NET.

Referenced by PhysicsTools::MLP::load().

3023 {
3024  FILE *W;
3025  int ilayer,ineur,i;
3026  double p;
3027  char s[80];
3028 
3029  W=fopen(filename,"r");
3030  if(W==0) return -1;
3031  do
3032  {
3033  fgets(s,80,W);
3034  }
3035  while(*s == '#');
3036  sscanf(s," %d",iepoch);
3037  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
3038  {
3039  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
3040  {
3041  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
3042  {
3043  fscanf(W," %le",&p);
3044  NET.Weights[ilayer][ineur][i] = (dbl) p;
3045  }
3046  }
3047  }
3048 
3049  fclose(W);
3050  return 0;
3051 }
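
The accepted layout, as implied by the read loop (and presumably what SaveWeights() writes): optional leading comment lines starting with '#', the epoch number, then whitespace-separated weights read in layer/neuron/input order. An illustrative file:

    # MLPfit weights (illustrative)
     150
     1.234567e-01
    -4.200000e-02
     ...
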
dbl MLP_Epoch ( int  iepoch,
dbl alpmin,
int *  Ntest 
)

Definition at line 706 of file mlp_gen.cc.

References BFGSdir(), CGDir(), DeDwNorm(), DeDwProd(), DeDwSaveZero(), DeDwScale(), DerivDir(), GetBFGSH(), GetGammaDelta(), InitBFGSH(), LEARN, LineSearch(), LineSearchHyb(), MLP_ResLin(), MLP_Stochastic(), MLP_Train(), NET, PAT, PrintWeights(), SetLambda(), ShuffleExamples(), SteepestDir(), and StochStep().

Referenced by PhysicsTools::MLP::train().

707 {
708  dbl err, ONorm, beta, prod, ddir;
709 /* int *index;*/
710  int Nweights, Nlinear, ipat, ierr;
711  int nn;
712 
713  err = 0;
714  *alpmin = 0.;
715 
716  Nweights = NET.Nweights;
717  Nlinear = NET.Nneur[NET.Nlayer-2] + 1;
718 
719  if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
720 /* stochastic minimization */
721  if(LEARN.Meth==1)
722  {
723 
724  err = MLP_Stochastic();
725 
726  }
727  else
728  {
729  if(iepoch==1 && LEARN.Meth==7)
730  {
731  SetLambda(10000);
732  MLP_ResLin();
733  if(NET.Debug>=2) PrintWeights();
734  }
735 
736 /* save previous gradient and reset current one */
737  DeDwSaveZero();
738  if(LEARN.Meth==16)
739  {
740  ShuffleExamples(PAT.Npat[0],ExamplesIndex);
741  nn = PAT.Npat[0];
742  PAT.Npat[0] = nn/10;
743  for(ipat=0;ipat<nn;ipat++)
744  {
745  ierr = MLP_Train(&ExamplesIndex[ipat],&err);
746  if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
747  }
748  }
749  else
750  {
751  for(ipat=0;ipat<PAT.Npat[0];ipat++)
752  {
753  ierr = MLP_Train(&ipat,&err);
754  if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
755  }
756  }
757  DeDwScale(PAT.Npat[0]);
758  if(LEARN.Meth==2) StochStep();
759  if(LEARN.Meth==3)
760  {
761  SteepestDir();
762  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
763  }
764 
765 /* Conjugate Gradients Ribiere - Polak */
766  if(LEARN.Meth==4)
767  {
768  if((iepoch-1)%LEARN.Nreset==0)
769  {
770  LEARN.Norm = DeDwNorm(); /* for next epoch */
771  SteepestDir();
772  }
773  else
774  {
775  ONorm = LEARN.Norm;
776  LEARN.Norm = DeDwNorm();
777  prod = DeDwProd();
778  beta = (LEARN.Norm-prod)/ONorm;
779  CGDir(beta);
780  }
781  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
782  }
783 
784 /* Conjugate Gradients Fletcher - Reeves */
785  if(LEARN.Meth==5)
786  {
787  if((iepoch-1)%LEARN.Nreset==0)
788  {
789  LEARN.Norm = DeDwNorm(); /* for next epoch */
790  SteepestDir();
791  }
792  else
793  {
794  ONorm = LEARN.Norm;
795  LEARN.Norm = DeDwNorm();
796  beta = LEARN.Norm/ONorm;
797  CGDir(beta);
798  }
799  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
800  }
801  if(LEARN.Meth==6)
802  {
803  if((iepoch-1)%LEARN.Nreset==0)
804  {
805  SteepestDir();
806  InitBFGSH(Nweights);
807  }
808  else
809  {
810  GetGammaDelta();
811  ierr = GetBFGSH(Nweights);
812  if(ierr)
813  {
814  SteepestDir();
815  InitBFGSH(Nweights);
816  }
817  else
818  {
819  BFGSdir(Nweights);
820  }
821  }
822  ddir = DerivDir();
823  if(ddir>0)
824  {
825  SteepestDir();
826  InitBFGSH(Nweights);
827  ddir = DerivDir();
828  }
829  if(LineSearch(alpmin,Ntest,err)==1)
830  {
831  InitBFGSH(Nweights);
832  SteepestDir();
833  if(LineSearch(alpmin,Ntest,err)==1)
834  {
835  printf("Line search fail \n");
836  }
837  }
838  }
839  if(LEARN.Meth==7)
840  {
841  if((iepoch-1)%LEARN.Nreset==0)
842  {
843  SteepestDir();
844  InitBFGSH(Nweights-Nlinear);
845  }
846  else
847  {
848  if(NET.Debug>=5) printf("Before GetGammaDelta \n");
849  GetGammaDelta();
850  if(NET.Debug>=5) printf("After GetGammaDelta \n");
851  ierr = GetBFGSH(Nweights-Nlinear);
852  if(NET.Debug>=5) printf("After GetBFGSH \n");
853  if(ierr)
854  {
855  SteepestDir();
856  InitBFGSH(Nweights-Nlinear);
857  }
858  else
859  {
860  BFGSdir(Nweights-Nlinear);
861  }
862  if(NET.Debug>=5) printf("After BFGSdir \n");
863  }
864  SetLambda(10000);
865  if(LineSearchHyb(alpmin,Ntest)==1)
866  {
867  InitBFGSH(Nweights-Nlinear);
868  SteepestDir();
869  if(LineSearchHyb(alpmin,Ntest)==1)
870  {
871  printf("Line search fail \n");
872  }
873  }
874  }
875  }
876 
877  if(NET.Debug>=5) printf(" End MLP_Epoch\n");
878  return(err);
879 }
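
A training-loop sketch (epoch count and reporting are illustrative; in CMSSW the loop lives in PhysicsTools::MLP::train()):

    dbl err, alpmin;
    int iepoch, Ntest;
    for(iepoch = 1; iepoch <= 100; iepoch++)
    {
        err = MLP_Epoch(iepoch, &alpmin, &Ntest);
        printf("epoch %d: err = %f\n", iepoch, (double) err);
    }
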
void MLP_Line ( dbl ***  w0,
dbl  alpha 
)

Definition at line 1757 of file mlp_gen.cc.

References NET.

Referenced by DecreaseSearch(), FixedStep(), and LineSearch().

1758 {
1759  register int il,in,jn;
1760 
1761  for(il=1; il<NET.Nlayer; il++)
1762  for(in=0; in<NET.Nneur[il]; in++)
1763  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1764  NET.Weights[il][in][jn] = w0[il][in][jn]+
1765  alpha*dir[il][in][jn];
1766 
1767 }
void MLP_LineHyb ( dbl ***  w0,
dbl  alpha 
)

Definition at line 1923 of file mlp_gen.cc.

References MLP_ResLin() and NET.

Referenced by LineSearchHyb().

1924 {
1925  int il,in,jn;
1926  for(il=1; il<NET.Nlayer-1; il++)
1927  for(in=0; in<NET.Nneur[il]; in++)
1928  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1929  {
1930  NET.Weights[il][in][jn] = w0[il][in][jn]+
1931  alpha*dir[il][in][jn];
1932  }
1933  MLP_ResLin();
1934 }
void MLP_MatrixVector ( dbl M,
type_pat v,
dbl r,
int  n,
int  m 
)

Definition at line 3748 of file mlp_gen.cc.


Referenced by MLP_Test_MM().

3749 {
3750  int i,j;
3751  register dbl a1, a2, a3, a4, c, d;
3752  dbl *pM1 = M;
3753  dbl *pM2 = &(M[m]);
3754  dbl *pM3 = &(M[2*m]);
3755  dbl *pM4 = &(M[3*m]);
3756  dbl *pr = r;
3757  int mp1 = m;
3758 
3759  for(i=0; i<n-3;
3760  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3761  pr+=4)
3762  {
3763  a1 = 0;
3764  a2 = 0;
3765  a3 = 0;
3766  a4 = 0;
3767  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3768  {
3769  c = v[j];
3770  d = v[j+1];
3771  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3772  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3773  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3774  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3775  }
3776  for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3777  {
3778  c = v[j];
3779  a1 = a1 + *pM1 * c;
3780  a2 = a2 + *pM2 * c;
3781  a3 = a3 + *pM3 * c;
3782  a4 = a4 + *pM4 * c;
3783  }
3784  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3785  }
3786  for(i=i; i<n; i++)
3787  {
3788  pM1 = &(M[i*m]);
3789  a1 = 0;
3790  for(j=0; j<m; j++, pM1++)
3791  {
3792  a1 = a1 + *pM1 * v[j];
3793  }
3794  r[i] = a1;
3795  }
3796 }
void MLP_MatrixVectorBias ( dbl M,
dbl v,
dbl r,
int  n,
int  m 
)

Definition at line 3682 of file mlp_gen.cc.


Referenced by MLP_Out(), and MLP_Out2().

3683 {
3684  int i,j;
3685  register dbl a1, a2, a3, a4, c, d;
3686  dbl *pM1 = M;
3687  dbl *pM2 = &(M[m+1]);
3688  dbl *pM3 = &(M[2*(m+1)]);
3689  dbl *pM4 = &(M[3*(m+1)]);
3690  dbl *pr = r;
3691  int mp1 = m+1;
3692 
3693  for(i=0; i<n-3;
3694  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3695  pr+=4)
3696  {
3697  a1 = *pM1;
3698  a2 = *pM2;
3699  a3 = *pM3;
3700  a4 = *pM4;
3701  pM1++; pM2++; pM3++; pM4++;
3702  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3703  {
3704  c = v[j];
3705  d = v[j+1];
3706  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3707  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3708  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3709  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3710  }
3711  for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3712  {
3713  c = v[j];
3714  a1 = a1 + *pM1 * c;
3715  a2 = a2 + *pM2 * c;
3716  a3 = a3 + *pM3 * c;
3717  a4 = a4 + *pM4 * c;
3718  }
3719  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3720  }
3721  for(i=i; i<n; i++)
3722  {
3723  pM1 = &(M[i*(m+1)]);
3724  a1 = *pM1;
3725  pM1++;
3726  for(j=0; j<m; j++, pM1++)
3727  {
3728  a1 = a1 + *pM1 * v[j];
3729  }
3730  r[i] = a1;
3731  }
3732 }
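
The 4-row/2-column unrolling above hides a simple operation; an equivalent straight-line reference version (a sketch, not part of the file):

    /* r = M * (1,v): M is n rows by (m+1) columns, column 0 is the bias */
    void MLP_MatrixVectorBias_ref(dbl *M, dbl *v, dbl *r, int n, int m)
    {
        int i, j;
        for(i = 0; i < n; i++)
        {
            dbl a = M[i*(m+1)];                  /* bias term */
            for(j = 0; j < m; j++)
                a += M[i*(m+1) + j + 1] * v[j];  /* weighted inputs */
            r[i] = a;
        }
    }
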
void MLP_MM2rows ( dbl c,
type_pat a,
dbl b,
int  Ni,
int  Nj,
int  Nk,
int  NaOffs,
int  NbOffs 
)

Definition at line 3817 of file mlp_gen.cc.


Referenced by MLP_Test_MM().

3819 {
3820 //int i,j,k;
3821 int j,k;
3822 dbl s00,s01,s10,s11;
3823 type_pat *pa0,*pa1;
3824 dbl *pb0,*pb1,*pc0,*pc1;
3825 
3826  for (j=0; j<=Nj-2; j+=2)
3827  {
3828  pc0 = c+j;
3829  pc1 = c+j+Nj;
3830  s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;
3831 
3832  for (k=0,pb0=b+k+NbOffs*j,
3833  pb1=b+k+NbOffs*(j+1),
3834  pa0=a+k,
3835  pa1=a+k+NaOffs;
3836  k<Nk;
3837  k++,pa0++,
3838  pa1++,
3839  pb0++,
3840  pb1++)
3841  {
3842  s00 += (*pa0)*(*pb0);
3843  s01 += (*pa0)*(*pb1);
3844  s10 += (*pa1)*(*pb0);
3845  s11 += (*pa1)*(*pb1);
3846  }
3847  *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
3848  }
3849  for (j=j; j<Nj; j++)
3850  {
3851  pc0 = c+j;
3852  pc1 = c+j+Nj;
3853  s00 = 0.0; s10 = 0.0;
3854  for (k=0,pb0=b+k+NbOffs*j,
3855  pa0=a+k,
3856  pa1=a+k+NaOffs;
3857  k<Nk;
3858  k++,pa0++,
3859  pa1++,
3860  pb0++)
3861  {
3862  s00 += (*pa0)*(*pb0);
3863  s10 += (*pa1)*(*pb0);
3864  }
3865  *pc0 = s00; *pc1 = s10;
3866  }
3867 }
void MLP_Out ( type_pat rrin,
dbl rrout 
)

Definition at line 62 of file mlp_gen.cc.

References MLP_MatrixVectorBias(), MLP_vSigmoideDeriv(), and NET.

Referenced by MLP_ResLin().

63 {
64  int i, il, in, j, m, mp1;
65  dbl **deriv1;
66 
67 /* input layer */
68 
69  deriv1 = NET.Deriv1;
70  m = NET.Nneur[0]%4;
71  if(m==0) goto L10;
72  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
73 L10:
74  mp1 = m+1;
75  for(i=mp1; i<=NET.Nneur[0]; i+=4)
76  {
77  NET.Outn[0][i-1] = rrin[i-1];
78  NET.Outn[0][i] = rrin[i];
79  NET.Outn[0][i+1] = rrin[i+1];
80  NET.Outn[0][i+2] = rrin[i+2];
81  }
82 
83 /* hidden and output layers */
84 
85  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
86  NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);
87 
88  for(il=2; il<NET.Nlayer; il++)
89  {
90  MLP_vSigmoideDeriv(NET.Outn[il-1],
91  deriv1[il-1],NET.Nneur[il-1]);
92  MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
93  NET.Outn[il],NET.Nneur[il],
94  NET.Nneur[il-1]);
95  }
96  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
97  {
98  deriv1[NET.Nlayer-1][in] = 1;
99  }
100 }
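
Ignoring the 4-way copy unrolling, the forward pass is

    Outn[0] = rrin
    Outn[il] = Weights[il] * (1, sigma(Outn[il-1]))   for il = 1..Nlayer-1

where the sigmoid sigma(x) = 1/(1+exp(-x)) (applied in place, together with its derivative, by MLP_vSigmoideDeriv()) acts on every hidden layer only: the output layer stays linear and its derivatives are set to 1.
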
void MLP_Out2 ( type_pat rrin)

Definition at line 184 of file mlp_gen.cc.

References MLP_MatrixVectorBias(), MLP_vSigmoideDeriv(), and NET.

Referenced by MLP_Stochastic(), and MLP_Train().

185 {
186  int il, in, m, mp1;
187  register int i;
188  dbl **rrout, **deriv1;
189  register dbl *prrout;
190  type_pat *prrin;
191  int nhid = NET.Nneur[1];
192  int nin = NET.Nneur[0];
193 
194  rrout = NET.Outn;
195  deriv1 = NET.Deriv1;
196 
197  m = NET.Nneur[0]%4;
198  if(m==0) goto L10;
199  if(m==1)
200  {
201  rrout[0][0] = rrin[1];
202  goto L10;
203  }
204  else if(m==2)
205  {
206  rrout[0][0] = rrin[1];
207  rrout[0][1] = rrin[2];
208  goto L10;
209  }
210  else if(m==3)
211  {
212  rrout[0][0] = rrin[1];
213  rrout[0][1] = rrin[2];
214  rrout[0][2] = rrin[3];
215  goto L10;
216  }
217 L10:
218  mp1 = m+1;
219  prrout = &(rrout[0][mp1]);
220  prrin = &(rrin[mp1+1]);
221  for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
222  {
223  *(prrout-1) = *(prrin-1);
224  *prrout = *prrin;
225  *(prrout+1)= *(prrin+1);
226  *(prrout+2) = *(prrin+2);
227  }
228 
229 /* input layer */
230 
231  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
232  NET.Outn[1],nhid,nin);
233 
234 
235 /* hidden and output layers */
236 
237  for(il=2; il<NET.Nlayer; il++)
238  {
239  MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
240  MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
241  NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
242  }
243  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
244  deriv1[NET.Nlayer-1][in] = 1;
245 }
void MLP_Out_T ( type_pat rrin)

Definition at line 113 of file mlp_gen.cc.

References MLP_Sigmoide() and NET.

Referenced by PhysicsTools::MLP::eval(), and MLP_Test().

114 {
115  int i, il, in, j, ilm1, m, mp1;
116  register dbl a;
117 
118 /* input layer */
119 
120  m = NET.Nneur[0]%4;
121  if(m==0) goto L10;
122  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
123 L10:
124  mp1 = m+1;
125  for(i=mp1; i<=NET.Nneur[0]; i+=4)
126  {
127  NET.Outn[0][i-1] = rrin[i-1];
128  NET.Outn[0][i] = rrin[i];
129  NET.Outn[0][i+1] = rrin[i+1];
130  NET.Outn[0][i+2] = rrin[i+2];
131  }
132 
133 /* hidden and output layers */
134 
135 /* for(in=0;in<NET.Nneur[0]; in++) printf("%e %e\n",
136  NET.Outn[0][in],NET.Weights[1][0][in]);
137  printf("\n"); */
138  for(il=1; il<NET.Nlayer; il++)
139  {
140  ilm1 = il-1;
141  m = NET.Nneur[ilm1]%4;
142  for(in=0; in<NET.Nneur[il]; in++)
143  {
144  a = NET.Weights[il][in][0];
145  if(m==0) goto L20;
146  for(j=1;j<=m;j++) a +=
147  NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
148 L20:
149  mp1 = m+1;
150  for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
151  {
152  a +=
153  NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
154  NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
155  NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
156  NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
157  }
158  switch(NET.T_func[il][in])
159  {
160  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
161  break;
162  case 1: NET.Outn[il][in] = a;
163  break;
164  case 0: NET.Outn[il][in] = 0;
165  break;
166  }
167  }
168  }
169 }
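Note on the transfer-function codes used in the switch above: throughout this file, 0 switches the neuron off (output forced to 0), 1 is a linear response, and 2 is the sigmoid. SetDefaultFuncs() installs code 2 everywhere except on the output layer, which gets code 1.
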
int MLP_PrCFun ( char *  filename)

Definition at line 2878 of file mlp_gen.cc.

References DIVERS, recoMuon::in, NET, and STAT.

2879 {
2880  int il,in,jn;
2881  FILE *W;
2882 
2883  W=fopen(filename,"w");
2884  if(W==0) return -1;
2885 
2886  fprintf(W,"double sigmoid(double x)\n");
2887  fprintf(W,"{\n");
2888  fprintf(W,"return 1/(1+exp(-x));\n");
2889  fprintf(W,"}\n");
2890  fprintf(W,"void rnnfun(double *rin,double *rout)\n");
2891  fprintf(W,"{\n");
2892  fprintf(W," double out1[%d];\n",NET.Nneur[0]);
2893  fprintf(W," double out2[%d];\n",NET.Nneur[1]);
2894  if(NET.Nlayer>=3) fprintf(W," double out3[%d];\n",NET.Nneur[2]);
2895  if(NET.Nlayer>=4) fprintf(W," double out4[%d];\n",NET.Nneur[3]);
2896  fprintf(W,"\n");
2897 
2898  for(in=0; in<NET.Nneur[0]; in++)
2899  {
2900  if(DIVERS.Norm==0)
2901  {
2902  fprintf(W," out1[%d] = rin[%d];\n",in,in);
2903  }
2904  else
2905  {
2906  fprintf(W," out1[%d] = (rin[%d]-%e)/%e;\n",
2907  in,in,
2908  STAT.mean[in],STAT.sigma[in]);
2909  }
2910  }
2911 
2912  for(il=1; il<=NET.Nlayer-1; il++)
2913  {
2914  fprintf(W,"\n");
2915  fprintf(W,"/* layer %d */\n",il+1);
2916  for(in=0; in<NET.Nneur[il]; in++)
2917  {
2918  fprintf(W," out%d[%d] = %e\n",il+1,in,
2919  (double) NET.Weights[il][in][0]);
2920  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2921  fprintf(W," +(%e) * out%d[%d]\n",
2922  (double) NET.Weights[il][in][jn],il,jn-1);
2923  fprintf(W," ;\n");
2924  }
2925  fprintf(W,"\n");
2926  for(in=0; in<NET.Nneur[il]; in++)
2927  {
2928  if(NET.T_func[il][in]==0)
2929  {
2930  fprintf(W," out%d[%d] = 0;\n",il+1,in);
2931  }
2932  else if(NET.T_func[il][in]==1)
2933  {
2934  }
2935  else if(NET.T_func[il][in]==2)
2936  {
2937  fprintf(W," out%d[%d] = sigmoid(out%d[%d]);\n",
2938  il+1,in,il+1,in);
2939  }
2940  }
2941  }
2942  il = NET.Nlayer-1;
2943  for(in=0; in<NET.Nneur[il]; in++)
2944  {
2945  fprintf(W," rout[%d] = out%d[%d];\n",in,il+1,in);
2946  }
2947  fprintf(W,"}\n");
2948  fclose(W);
2949  return 0;
2950 }
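A minimal usage sketch (the wrapper and the output file name are hypothetical, not part of the library): dump the current network as a standalone C function once training has finished.

    #include <stdio.h>
    #include "mlp_gen.h"

    /* Sketch: export the trained network as C code; MLP_PrCFun() returns
       -1 if the output file cannot be opened, 0 otherwise. */
    void export_net_as_c(void)
    {
        if (MLP_PrCFun("netfun.c") != 0)
            printf("MLP_PrCFun: could not write netfun.c\n");
    }
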
int MLP_PrFFun ( char *  filename)

Definition at line 2769 of file mlp_gen.cc.

References DIVERS, recoMuon::in, NET, and STAT.

2770 {
2771  int il,in,jn;
2772  FILE *W;
2773 
2774  W=fopen(filename,"w");
2775  if(W==0) return -1;
2776  fprintf(W," SUBROUTINE RNNFUN(rin,rout)\n");
2777  fprintf(W," DIMENSION RIN(%d)\n",NET.Nneur[0]);
2778  fprintf(W," DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
2779  fprintf(W,"C\n");
2780 
2781  for(in=0; in<NET.Nneur[0]; in++)
2782  {
2783  if(DIVERS.Norm==0)
2784  {
2785  fprintf(W," OUT%d = RIN(%d)\n",in+1,in+1);
2786  }
2787  else
2788  {
2789  fprintf(W," OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
2790  STAT.mean[in],STAT.sigma[in]);
2791  }
2792  }
2793  for(il=1; il<NET.Nlayer-1; il++)
2794  {
2795  fprintf(W,"C\n");
2796  fprintf(W,"C layer %d\n",il+1);
2797  for(in=0; in<NET.Nneur[il]; in++)
2798  {
2799  fprintf(W," RIN%d = %e\n",in+1,
2800  (double) NET.Weights[il][in][0]);
2801  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2802  fprintf(W," > +(%e) * OUT%d\n",
2803  (double) NET.Weights[il][in][jn],jn);
2804  }
2805  fprintf(W,"C\n");
2806  for(in=0; in<NET.Nneur[il]; in++)
2807  {
2808  if(NET.T_func[il][in]==0)
2809  {
2810  fprintf(W," OUT%d = 0\n",in+1);
2811  }
2812  else if(NET.T_func[il][in]==1)
2813  {
2814  fprintf(W," OUT%d = RIN%d\n",in+1,in+1);
2815  }
2816  else if(NET.T_func[il][in]==2)
2817  {
2818  fprintf(W," OUT%d = SIGMOID(RIN%d)\n",
2819  in+1,in+1);
2820  }
2821  }
2822  }
2823  il = NET.Nlayer-1;
2824  fprintf(W,"C\n");
2825  fprintf(W,"C layer %d\n",il+1);
2826  for(in=0; in<NET.Nneur[il]; in++)
2827  {
2828  fprintf(W," RIN%d = %e\n",in+1,
2829  (double) NET.Weights[il][in][0]);
2830  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2831  fprintf(W," > +(%e) * OUT%d\n",
2832  (double) NET.Weights[il][in][jn],jn);
2833  }
2834  fprintf(W,"C\n");
2835  for(in=0; in<NET.Nneur[il]; in++)
2836  {
2837  if(NET.T_func[il][in]==0)
2838  {
2839  fprintf(W," ROUT(%d) = 0\n",in+1);
2840  }
2841  else if(NET.T_func[il][in]==1)
2842  {
2843  fprintf(W," ROUT(%d) = RIN%d\n",in+1,in+1);
2844  }
2845  else if(NET.T_func[il][in]==2)
2846  {
2847  fprintf(W," ROUT(%d) = SIGMOID(RIN%d)\n",
2848  in+1,in+1);
2849  }
2850  }
2851 
2852  fprintf(W,"C\n");
2853  fprintf(W," END\n");
2854  fprintf(W," REAL FUNCTION SIGMOID(X)\n");
2855  fprintf(W," SIGMOID = 1./(1.+EXP(-X))\n");
2856  fprintf(W," END\n");
2857 
2858  fclose(W);
2859  return 0;
2860 }
int MLP_PrintInputStat ( )

Definition at line 3332 of file mlp_gen.cc.

References j, timingPdfMaker::mean, MLP_StatInputs(), NET, PAT, and stat_::sigma.

3333 {
3334  int j;
3335  dbl *mean, *sigma, *minimum, *maximum;
3336 
3337 /* allocate memory */
3338  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3339  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3340  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3341  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3342 
3343  if(mean == 0 || sigma == 0 || minimum == 0
3344  || maximum == 0) return -111;
3345 
3346  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
3347  mean,sigma,minimum,maximum);
3348 
3349  printf("\t mean \t\t RMS \t\t min \t\t max\n");
3350  for(j=0;j<NET.Nneur[0];j++)
3351  {
3352  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3353  mean[j],sigma[j],minimum[j],maximum[j]);
3354  }
3355 
3356  free(mean);
3357  free(sigma);
3358  free(minimum);
3359  free(maximum);
3360  printf("\n");
3361  return 0;
3362 }
double MLP_Rand ( dbl  mini,
dbl  maxi 
)

Definition at line 2131 of file mlp_gen.cc.

References random.

Referenced by InitWeights(), and ShuffleExamples().

2132 {
2133 return mini+(maxi-mini)*random()/RAND_MAX;
2134 }
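MLP_Rand() maps random() onto [mini, maxi] by dividing by RAND_MAX. Strictly speaking RAND_MAX is the bound for rand(), so the scaling is exact only on platforms where random() shares the same range (2^31-1, as on glibc). A minimal sketch (the wrapper name and the seed are illustrative only):

    #include <stdlib.h>
    #include "mlp_gen.h"

    /* Sketch: seed the underlying generator, then draw one value
       uniformly in [-0.5, 0.5]. */
    double example_rand(void)
    {
        srandom(42);
        return MLP_Rand(-0.5, 0.5);
    }
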
void MLP_ResLin ( )

Definition at line 1971 of file mlp_gen.cc.

References dgels_(), recoMuon::in, LEARN, MLP_Out(), MLP_Test(), NET, PAT, and mathSSE::sqrt().

Referenced by MLP_Epoch(), MLP_LineHyb(), and StochStepHyb().

1972 {
1973 /* dbl rrans[NMAX], rrout[NMAX];*/
1974 /* type_pat rrin[NMAX];*/
1975  double *HR,*dpat; //,*wlin,*SV;
1976  double err,lambda,lambda2;
1977  int Nl,M,Nhr,khr,nrhs,iret,ierr;
1978  int il, in, inl, ipat;
1979  /*register dbl a;*/ //a unused
1980  char Trans = 'N';
1981 
1982 
1983 /* int rank; */
1984 // double rcond = -1; /* use machine precision */
1985 
1986  lambda2 = LEARN.Alambda;
1987 
1988 /* Nl = number of linear weights
1989  M = number of terms in linear system = number of examples + regularisation*/
1990  Nl = NET.Nneur[NET.Nlayer-2] + 1;
1991  M = PAT.Npat[0]+Nl;
1992 
1993  int Lwork = 5 * M;
1994  double *Work = (double*) malloc((int) Lwork*sizeof(double));
1995 
1996 /* memory allocation */
1997  dpat = (double*) malloc((int) M*sizeof(double));
1998 // wlin = (double*) malloc((int) Nl*sizeof(double));
1999 // SV = (double*) malloc((int) Nl*sizeof(double));
2000 
2001  Nhr = M * Nl;
2002  HR = (double*) malloc((int) Nhr*sizeof(double));
2003  err = 0.;
2004  for(ipat=0;ipat<PAT.Npat[0];ipat++)
2005  {
2006 /* *** Filling dpat and HR *** */
2007 /* for(in=0; in<NET.Nneur[0]; in++)
2008  {
2009  rrin[in] = PAT.Rin[0][ipat][in];
2010  }*/
2011 
2012  MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
2013 /* MLP_Out(rrin,rrout);*/
2014  /*for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
2015  {
2016  a = (dbl) PAT.Rans[0][ipat][in]; //a was not used
2017  } */
2018  il = NET.Nlayer-2;
2019  dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
2020  khr = ipat;
2021  HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
2022  for(in=0;in<NET.Nneur[il];in++)
2023  {
2024  khr = M *(in+1) + ipat;
2025  HR[khr] = NET.Outn[il][in]*
2026  (dbl) sqrt(PAT.Pond[0][ipat]);
2027  }
2028  }
2029  il = NET.Nlayer-2;
2030  lambda = sqrt(lambda2);
2031  for(ipat=0;ipat<=NET.Nneur[il];ipat++)
2032  {
2033  dpat[ipat+PAT.Npat[0]] = 0;
2034  for(in=0;in<=NET.Nneur[il];in++)
2035  {
2036  khr = M *in + ipat + PAT.Npat[0];
2037  HR[khr] = 0;
2038  if(in==ipat) HR[khr]=lambda;
2039  }
2040  }
2041  if(NET.Debug>=4)
2042  {
2043  err = MLP_Test(0,0);
2044  printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err);
2045  }
2046 /* */
2047 /* Find the linear weights by solving a linear system */
2048 /* */
2049  nrhs = 1;
2050  ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
2051  &Lwork,&iret);
2052  if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
2053  if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);
2054 
2055 /* ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
2056  &iret);
2057  if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
2058  if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/
2059 
2060  il = NET.Nlayer-1;
2061  for (inl=0; inl<=NET.Nneur[il-1];inl++)
2062  {
2063  NET.Weights[il][0][inl] = dpat[inl];
2064  }
2065  if(NET.Debug>=4)
2066  {
2067  err = MLP_Test(0,0);
2068  printf("ResLin, apres tlsfor, err= %f\n",err);
2069  }
2070  free(Work);
2071  free(dpat);
2072 // free(wlin);
2073  free(HR);
2074 // free(SV);
2075 }
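In effect this routine re-fits only the output-layer weights by weighted ridge regression: each pattern p contributes a row sqrt(Pond_p) * (1, h_p) of the design matrix HR and the target sqrt(Pond_p) * t_p in dpat, where h_p holds the last-hidden-layer outputs, and Nl extra rows equal to sqrt(lambda) times the identity append the regularisation term. The LAPACK call dgels_ then solves

    min over w of ||HR w - dpat||^2
      = sum_p Pond_p (t_p - w0 - w . h_p)^2 + lambda ||w||^2

and the solution vector is copied back into NET.Weights for the output layer.
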
int MLP_SetNet ( int *  nl,
int *  nn 
)

Definition at line 3639 of file mlp_gen.cc.

References AllocNetwork(), NET, NLMAX, and SetDefaultFuncs().

Referenced by PhysicsTools::MLP::MLP(), and ReadPatterns().

3640 {
3641  int il,ierr;
3642 
3643  if((*nl)>NLMAX) return(1);
3644  if((*nl)<2) return(2);
3645 
3646 /* LearnFree(); */
3647 /* allocate memory */
3648  ierr = AllocNetwork(*nl,nn);
3649  if(ierr != 0) return ierr;
3650 
3651 /* set number of layers */
3652  NET.Nlayer = (int) *nl;
3653 
3654 /* set number of neurons */
3655  for(il=0; il<NET.Nlayer; il++) {
3656  NET.Nneur[il] = nn[il];
3657  }
3658 
3659 /* set transfer functions */
3660  SetDefaultFuncs();
3661 /* LearnAlloc(); */
3662 
3663  return(0);
3664 }
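A minimal usage sketch (the wrapper name is hypothetical): define a 4-8-1 network. MLP_SetNet() allocates the network, stores the structure and installs the default transfer functions (sigmoid in the hidden layers, linear on the output layer).

    #include <stdio.h>
    #include "mlp_gen.h"

    void example_setnet(void)
    {
        int nlayer = 3;
        int nneur[3] = {4, 8, 1};      /* inputs, hidden, outputs */
        int ierr = MLP_SetNet(&nlayer, nneur);
        if (ierr != 0)                 /* 1: too many layers, 2: fewer than 2 */
            printf("MLP_SetNet failed: %d\n", ierr);
    }
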
int MLP_StatInputs ( int  Nexamples,
int  Ninputs,
type_pat **  inputs,
dbl *  mean,
dbl *  sigma,
dbl *  minimum,
dbl *  maximum 
)

Definition at line 3275 of file mlp_gen.cc.

References j, and mathSSE::sqrt().

Referenced by MLP_PrintInputStat(), and NormalizeInputs().

3277 {
3278  dbl *fmean;
3279  int j, ipat, nmax;
3280 
3281 /* first compute a fast rough mean using the first 100 events */
3282  fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
3283  nmax = 100;
3284  if(Nexamples<100) nmax=Nexamples;
3285 
3286  for(j=0;j<Ninputs;j++)
3287  {
3288  fmean[j] = 0;
3289  for(ipat=0;ipat<nmax;ipat++)
3290  {
3291  fmean[j] += (dbl) inputs[ipat][j];
3292  }
3293  fmean[j] = fmean[j]/(dbl) nmax;
3294 
3295 /* now compute real mean and sigma, min and max */
3296  mean[j] = 0;
3297  sigma[j] = 0;
3298  minimum[j] = 99999;
3299  maximum[j] = -99999;
3300  for(ipat=0;ipat<Nexamples;ipat++)
3301  {
3302  mean[j] += (dbl) inputs[ipat][j];
3303  sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
3304  ((dbl) inputs[ipat][j]-fmean[j]);
3305  if((dbl) inputs[ipat][j] > maximum[j])
3306  maximum[j]=(dbl) inputs[ipat][j];
3307  if((dbl) inputs[ipat][j] < minimum[j])
3308  minimum[j]=(dbl) inputs[ipat][j];
3309  }
3310  mean[j] = mean[j]/(dbl) Nexamples;
3311  sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples -
3312  (mean[j]-fmean[j])*
3313  (mean[j]-fmean[j]));
3314  }
3315  free(fmean);
3316  return 0;
3317 }
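The rough mean over the first (up to) 100 examples is used as a shift for a numerically safer one-pass variance: with shift f, Var(x) = E[(x-f)^2] - (E[x]-f)^2, which is exactly what the final sqrt(...) expression evaluates. A standalone illustration of the same identity (hypothetical helper, not part of the library):

    #include <math.h>

    /* One-pass RMS around a shift f: Var(x) = E[(x-f)^2] - (E[x]-f)^2 */
    double shifted_rms(const double *x, int n, double f)
    {
        double mean = 0, s2 = 0;
        int i;
        for (i = 0; i < n; i++) {
            mean += x[i];
            s2 += (x[i] - f) * (x[i] - f);
        }
        mean /= n;
        return sqrt(s2 / n - (mean - f) * (mean - f));
    }
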
dbl MLP_Stochastic ( )

Definition at line 518 of file mlp_gen.cc.

References a, b, createTree::dd, eta, EtaDecay(), cuy::ii, recoMuon::in, LEARN, MLP_Out2(), NET, PAT, ShuffleExamples(), and create_public_pileup_plots::weights.

Referenced by MLP_Epoch().

519 {
520  int ipat, ii, inm1;
521  dbl err = 0;
522  int il, in1, in, itest2;
523  dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
524  dbl eta, eps;
525  register dbl a, b, dd, a1, a2, a3, a4;
526  dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
527  dbl ***weights;
528 
529  if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
530  weights = NET.Weights;
531 /* shuffle patterns */
532  ShuffleExamples(PAT.Npat[0],ExamplesIndex);
533 
534 /* reduce learning parameter */
535  if(LEARN.Decay<1) EtaDecay();
536 
537  eta = -LEARN.eta;
538  eps = LEARN.epsilon;
539 
540 /* loop on the examples */
541  for(ipat=0;ipat<PAT.Npat[0];ipat++)
542  {
543  ii = ExamplesIndex[ipat];
544  pond = PAT.Pond[0][ii];
545 
546  MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)]));
547 
548 /* next lines are equivalent to DeDwSum */
549  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
550  {
551  deriv = NET.Deriv1[NET.Nlayer-1][in];
552  a = (dbl) PAT.Rans[0][ii][in];
553  b = NET.Outn[NET.Nlayer-1][in]-a;
554  err += b*b*pond;
555  NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
556  }
557 
558  for(il=NET.Nlayer-2; il>0; il--)
559  {
560  dd = NET.Delta[il+1][0];
561  for(in=0; in<NET.Nneur[il]-3; in+=4)
562  {
563  deriv1 = NET.Deriv1[il][in];
564  deriv2 = NET.Deriv1[il][in+1];
565  deriv3 = NET.Deriv1[il][in+2];
566  deriv4 = NET.Deriv1[il][in+3];
567  itest2 = (NET.Nneur[il+1]==1);
568  a1 = dd*weights[il+1][0][in+1];
569  a2 = dd*weights[il+1][0][in+2];
570  a3 = dd*weights[il+1][0][in+3];
571  a4 = dd*weights[il+1][0][in+4];
572  if(itest2) goto L1;
573  pdelta = &(NET.Delta[il+1][1]);
574  for(in1=1; in1<NET.Nneur[il+1];
575  in1++, pdelta++)
576  {
577  a1 += *pdelta * weights[il+1][in1][in+1];
578  a2 += *pdelta * weights[il+1][in1][in+2];
579  a3 += *pdelta * weights[il+1][in1][in+3];
580  a4 += *pdelta * weights[il+1][in1][in+4];
581  }
582 L1: NET.Delta[il][in] = a1*deriv1;
583  NET.Delta[il][in+1] = a2*deriv2;
584  NET.Delta[il][in+2] = a3*deriv3;
585  NET.Delta[il][in+3] = a4*deriv4;
586  }
587  for(in=in; in<NET.Nneur[il]; in++)
588  {
589  deriv = NET.Deriv1[il][in];
590  itest2 = (NET.Nneur[il+1]==1);
591  a = dd*weights[il+1][0][in+1];
592  if(itest2) goto L2;
593  pdelta = &(NET.Delta[il+1][1]);
594  for(in1=1; in1<NET.Nneur[il+1];
595  in1++, pdelta++)
596  {
597  a += *pdelta *
598  weights[il+1][in1][in+1];
599  }
600 L2: NET.Delta[il][in] = a*deriv;
601  }
602 
603  } /* end of loop on layers */
604 
605 
606 /* update the weights */
607  if(eps==0)
608  {
609  for(il=1; il<NET.Nlayer; il++)
610  {
611  inm1 = NET.Nneur[il-1];
612  for(in=0; in<NET.Nneur[il]-3; in+=4)
613  {
614  a1 = NET.Delta[il][in];
615  a2 = NET.Delta[il][in+1];
616  a3 = NET.Delta[il][in+2];
617  a4 = NET.Delta[il][in+3];
618  pout = &(NET.Outn[il-1][0]);
619  weights[il][in][0] += a1;
620  weights[il][in+1][0] += a2;
621  weights[il][in+2][0] += a3;
622  weights[il][in+3][0] += a4;
623  weights[il][in][1] += a1* (*pout);
624  weights[il][in+1][1] += a2* (*pout);
625  weights[il][in+2][1] += a3* (*pout);
626  weights[il][in+3][1] += a4* (*pout);
627  pout++;
628  pw1 = &(weights[il][in][2]);
629  pw2 = &(weights[il][in+1][2]);
630  pw3 = &(weights[il][in+2][2]);
631  pw4 = &(weights[il][in+3][2]);
632  for(in1=2; in1<=inm1;
633  ++in1, ++pout, ++pw1, ++pw2,
634  ++pw3, ++pw4)
635  {
636  *pw1 += a1 * *pout;
637  *pw2 += a2 * *pout;
638  *pw3 += a3 * *pout;
639  *pw4 += a4 * *pout;
640  }
641  }
642  for(in=in; in<NET.Nneur[il]; in++)
643  {
644  a1 = NET.Delta[il][in];
645  pout = &(NET.Outn[il-1][0]);
646  weights[il][in][0] += a1;
647  weights[il][in][1] += a1* (*pout);
648  pout++;
649  pw1 = &(weights[il][in][2]);
650  for(in1=2; in1<=inm1;
651  ++in1, ++pout, ++pw1)
652  {
653  *pw1 += a1 * *pout;
654  }
655  }
656  }
657  }
658  else
659  {
660  for(il=1; il<NET.Nlayer; il++)
661  {
662  for(in=0; in<NET.Nneur[il]; in++)
663  {
664 
665  a = NET.Delta[il][in];
666  LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
667  NET.Weights[il][in][0] += LEARN.Odw[il][in][0];
668 
669  b = a*NET.Outn[il-1][0];
670  LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
671  NET.Weights[il][in][1] += LEARN.Odw[il][in][1];
672 
673  for(in1=2; in1<=NET.Nneur[il-1]; in1++)
674  {
675  b = a*NET.Outn[il-1][in1-1];
676  LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
677  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
678  }
679  }
680  }
681  }
682 
683  } /* end of loop on examples */
684  return(err);
685 }
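This is the online (per-pattern) learning method: the examples are shuffled, then after each forward pass the deltas are back-propagated layer by layer (with the inner loops hand-unrolled in steps of four) and the weights are updated immediately, either directly when the momentum term eps is zero or through the LEARN.Odw history otherwise. The return value is the weighted quadratic error accumulated over the whole pass.
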
dbl MLP_Test ( int  ifile,
int  regul 
)

Definition at line 446 of file mlp_gen.cc.

References compare_using_db::ifile, recoMuon::in, LEARN, MLP_Out_T(), MLP_Test_MM(), NET, PAT, and tmp.

Referenced by DecreaseSearch(), LineSearch(), LineSearchHyb(), MLP_ResLin(), and SetLambda().

447 {
448  dbl err, rrans;
449  int in,jn,ipat,ipati;
450 
451  dbl *tmp;
452 
453  tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
454  if(tmp == 0) /* not enough memory */
455  {
456  printf("not enough memory in MLP_Test\n");
457  err = 0;
458  for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
459  {
460  if(ifile==0)
461  {
462  ipati = ExamplesIndex[ipat];
463  }
464  else
465  {
466  ipati = ipat;
467  }
468  MLP_Out_T(PAT.Rin[ifile][ipati]);
469  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
470  {
471  rrans = (dbl) PAT.Rans[ifile][ipati][in];
472  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
473  (rrans-NET.Outn[NET.Nlayer-1][in])*
474  PAT.Pond[ifile][ipati];
475  }
476  }
477 
478  if(regul>=1)
479  {
480  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
481  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
482  {
483  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
484  NET.Weights[NET.Nlayer-1][in][jn];
485  }
486  }
487  free(tmp);
488  return(err);
489  }
490  else /* computation using matrix - matrix multiply */
491  {
492  err = MLP_Test_MM(ifile, tmp);
493  if(regul>=1)
494  {
495  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
496  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
497  {
498  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
499  NET.Weights[NET.Nlayer-1][in][jn];
500  }
501  }
502  free(tmp);
503  return(err);
504  }
505 }
dbl MLP_Test_MM ( int  ifile,
dbl *  tmp 
)

Definition at line 263 of file mlp_gen.cc.

References a, compare_using_db::ifile, recoMuon::in, j, visualization-live-secondInstance_cfg::m, MLP_MatrixVector(), MLP_MM2rows(), MLP_Sigmoide(), MLP_vSigmoide(), NET, nin, and PAT.

Referenced by MLP_Test().

264 {
265  int ipat;
266  int npat = PAT.Npat[ifile];
267  int nhid = NET.Nneur[1];
268  int nin = NET.Nneur[0];
269  int jpat, j, il, ilm1, m, in, mp1;
270  dbl err, a, rrans;
271  dbl *pweights, *ptmp;
272 
273  err = 0;
274  for(ipat=0; ipat<npat-1; ipat+=2)
275  {
276  MLP_MM2rows(tmp, &(PAT.vRin[ifile][ipat*(nin+1)]),
277  NET.vWeights[1], 2, nhid, nin+1,
278  nin+1, nin+1);
279 
280  switch(NET.T_func[1][0])
281  {
282  case 2:
283  ptmp = &(tmp[0]);
284  MLP_vSigmoide(ptmp,2*nhid);
285  break;
286 
287  case 1:
288  break;
289 
290  case 0:
291  for(jpat=0; jpat<2; jpat++)
292  {
293  for(j=0; j<nhid; j++)
294  {
295  tmp[j+jpat*nhid] = 0;
296  }
297  }
298  break;
299  }
300 
301  for(jpat=0; jpat<2; jpat++)
302  {
303  for(in=0; in<nhid; in++)
304  {
305  NET.Outn[1][in] = tmp[jpat*nhid+in];
306  }
307  for(il=2; il<NET.Nlayer; il++)
308  {
309  ilm1 = il-1;
310  m = NET.Nneur[ilm1]%4;
311  for(in=0; in<NET.Nneur[il]; in++)
312  {
313  pweights = &(NET.Weights[il][in][0]);
314  a = *pweights;
315  pweights++;
316  if(m==0) goto L20;
317  for(j=1;j<=m;j++,pweights++) a +=
318  (*pweights)*NET.Outn[ilm1][j-1];
319 L20:
320  mp1 = m+1;
321  for(j=mp1; j<=NET.Nneur[ilm1];
322  j+=4, pweights+=4)
323  {
324  a +=
325  *(pweights+3)*NET.Outn[ilm1][j+2]+
326  *(pweights+2)*NET.Outn[ilm1][j+1]+
327  *(pweights+1)*NET.Outn[ilm1][j]+
328  *(pweights )*NET.Outn[ilm1][j-1];
329  }
330  switch(NET.T_func[il][in])
331  {
332  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
333  break;
334  case 1: NET.Outn[il][in] = a;
335  break;
336  case 0: NET.Outn[il][in] = 0;
337  break;
338  }
339  }
340  if(il == NET.Nlayer-1)
341  {
342  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
343  {
344  rrans = (dbl) PAT.Rans[ifile][ipat+jpat][in];
345  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
346  (rrans-NET.Outn[NET.Nlayer-1][in])*
347  PAT.Pond[ifile][ipat+jpat];
348  }
349  }
350  }
351  }
352  }
353 
354 /* case of an odd npat */
355  for(ipat=ipat; ipat<npat; ipat++)
356  {
357  MLP_MatrixVector(NET.vWeights[1],
358  &(PAT.vRin[ifile][ipat*(nin+1)]),tmp,
359  nhid,nin+1);
360 
361  switch(NET.T_func[1][0])
362  {
363  case 2:
364  ptmp = &(tmp[0]);
365  MLP_vSigmoide(ptmp,2*nhid);
366  break;
367 
368  case 1:
369  break;
370 
371  case 0:
372  for(j=0; j<nhid; j++)
373  {
374  tmp[j] = 0;
375  }
376  break;
377  }
378 
379  for(in=0; in<nhid; in++)
380  {
381  NET.Outn[1][in] = tmp[in];
382  }
383  for(il=2; il<NET.Nlayer; il++)
384  {
385  ilm1 = il-1;
386  m = NET.Nneur[ilm1]%4;
387  for(in=0; in<NET.Nneur[il]; in++)
388  {
389  pweights = &(NET.Weights[il][in][0]);
390  a = *pweights;
391  pweights++;
392  if(m==0) goto L25;
393  for(j=1;j<=m;j++,pweights++) a +=
394  (*pweights)*NET.Outn[ilm1][j-1];
395 L25:
396  mp1 = m+1;
397  for(j=mp1; j<=NET.Nneur[ilm1];
398  j+=4, pweights+=4)
399  {
400  a +=
401  *(pweights+3)*NET.Outn[ilm1][j+2]+
402  *(pweights+2)*NET.Outn[ilm1][j+1]+
403  *(pweights+1)*NET.Outn[ilm1][j]+
404  *(pweights )*NET.Outn[ilm1][j-1];
405  }
406  switch(NET.T_func[il][in])
407  {
408  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
409  break;
410  case 1: NET.Outn[il][in] = a;
411  break;
412  case 0: NET.Outn[il][in] = 0;
413  break;
414  }
415  }
416  if(il == NET.Nlayer-1)
417  {
418  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
419  {
420  rrans = (dbl) PAT.Rans[ifile][ipat][in];
421  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
422  (rrans-NET.Outn[NET.Nlayer-1][in])*
423  PAT.Pond[ifile][ipat];
424  }
425  }
426  }
427  }
428  return(err);
429 }
int MLP_Train ( int *  ipat,
dbl *  err 
)

Definition at line 895 of file mlp_gen.cc.

References DeDwSum(), recoMuon::in, MLP_Out2(), NET, and PAT.

Referenced by MLP_Epoch().

896 {
897  int in;
898 
899 /* if(*ipat>=PAT.Npat[0]) return(1);*/
900  if(*ipat<0) return(2);
901 
902 /* MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
903  MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
904  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
905  {
906  *err += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
907  *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
908  PAT.Pond[0][*ipat];
909  }
910  DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
911  return(0);
912 }
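A minimal sketch of the kind of driver loop that uses it (hypothetical wrapper; MLP_Epoch plays this role for the batch methods): accumulate the error and the gradient over one pass through the learning sample. The DeDw sums are assumed to have been zeroed beforehand (see DeDwZero()).

    #include "mlp_gen.h"

    dbl example_train_pass(void)
    {
        dbl err = 0;
        int ipat;
        for (ipat = 0; ipat < PAT.Npat[0]; ipat++)
            MLP_Train(&ipat, &err);   /* adds this pattern's error and gradient */
        return err;
    }
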
int NormalizeInputs ( )

Definition at line 3378 of file mlp_gen.cc.

References j, timingPdfMaker::mean, MLP_StatInputs(), NET, PAT, stat_::sigma, and STAT.

3379 {
3380  int j, ipat;
3381  dbl *mean, *sigma, *minimum, *maximum;
3382 
3383 /* allocate memory */
3384  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3385  if (mean == 0) return -111;
3386  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3387  if (sigma == 0) return -111;
3388  STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3389  if (STAT.mean == 0) return -111;
3390  STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3391  if (STAT.sigma == 0) return -111;
3392  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3393  if (minimum == 0) return -111;
3394  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3395  if (maximum == 0) return -111;
3396 
3397  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
3398  mean,sigma,minimum,maximum);
3399 
3400  if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
3401  for(j=0;j<NET.Nneur[0];j++)
3402  {
3403  if(NET.Debug>=1)
3404  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3405  mean[j],sigma[j],minimum[j],maximum[j]);
3406 
3407 /* store mean and sigma for output function */
3408  STAT.mean[j] = mean[j];
3409  STAT.sigma[j] = sigma[j];
3410 
3411 /* finally apply the normalization */
3412  for(ipat=0;ipat<PAT.Npat[0];ipat++)
3413  {
3414  PAT.Rin[0][ipat][j] =
3415  (PAT.Rin[0][ipat][j]-(float) mean[j])/
3416  (float) sigma[j];
3417  }
3418  for(ipat=0;ipat<PAT.Npat[1];ipat++)
3419  {
3420  PAT.Rin[1][ipat][j] =
3421  (PAT.Rin[1][ipat][j]-(float) mean[j])/
3422  (float) sigma[j];
3423  }
3424  }
3425 
3426  free(mean);
3427  free(sigma);
3428  free(minimum);
3429  free(maximum);
3430  if(NET.Debug>=1) printf("\n");
3431  return 0;
3432 }
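A minimal usage sketch (hypothetical wrapper): call once after the patterns are loaded; both the learning file (0) and the test file (1) are transformed in place, and the means and sigmas are kept in STAT so that the code generated by MLP_PrFFun()/MLP_PrCFun() can apply the same normalisation.

    #include <stdio.h>
    #include "mlp_gen.h"

    void example_normalize(void)
    {
        if (NormalizeInputs() != 0)    /* -111 signals an allocation failure */
            printf("NormalizeInputs: out of memory\n");
    }
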
void PrintWeights ( )

Definition at line 2168 of file mlp_gen.cc.

References i, and NET.

Referenced by MLP_Epoch().

2169 {
2170  int ilayer,ineur,i;
2171 
2172  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2173  {
2174  if(MessLang==1)
2175  {
2176  printf("Couche %d\n",ilayer);
2177  }
2178  else
2179  {
2180  printf("Layer %d\n",ilayer);
2181  }
2182  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2183  {
2184  if(MessLang==1)
2185  {
2186  printf("Neurone %d",ineur);
2187  }
2188  else
2189  {
2190  printf("Neuron %d",ineur);
2191  }
2192  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2193  {
2194  printf(" %f",
2195  (double) NET.Weights[ilayer][ineur][i]);
2196  }
2197  printf("\n");
2198  }
2199  printf("\n");
2200  }
2201 }
int ReadPatterns ( char *  filename,
int  ifile,
int *  inet,
int *  ilearn,
int *  iexamples 
)

Definition at line 2229 of file mlp_gen.cc.

References AllocPatterns(), CLEN, CountLexemes(), DIVERS, GetNetStructure(), getnLexemes(), i, compare_using_db::ifile, cmsLHEtoEOSManager::l, LEARN, geometryCSVtoXML::line, MLP_SetNet(), NET, nin, NLMAX, nout, np, AlCaHLTBitMon_ParallelJobs::p, PAT, ReadPatterns(), alignCSCRings::s, indexGen::s2, and contentValuesCheck::ss.

Referenced by ReadPatterns().

2231 {
2232 char s[CLEN], s2[CLEN], cc[6], cc2[6];
2233 char otherfile[CLEN];
2234 double p;
2235 //int line,i,j;
2236 int line,i;
2237 //int l,ll,ipat,nmax,il,in,tf;
2238 int l,ll,ipat,nmax;
2239 int np=0; /* number of examples */
2240 int nin=0; /* number of inputs */
2241 int nout=0; /* number of outputs */
2242 int npon=0;
2243 int ntot, ierr;
2244 //char **ss;
2245 char **ss=0;
2246 FILE *LVQpat;
2247 int nlayer, nneur[NLMAX];
2248 
2249 printf("\nLoading file %s\n",filename);
2250 LVQpat=fopen(filename,"r");
2251 if(LVQpat == 0) return -1;
2252 
2253 line=0;
2254 
2255 while(fgets(s,CLEN,LVQpat))
2256  {
2257  if(*s=='N')
2258  {
2259  if(*(s+1)=='N') /* NNEU */
2260  {
2261  printf("Number of neurons %s",s);
2262  *inet = 1;
2263  sscanf(s,"%s %s",cc,s2);
2264  ierr = GetNetStructure(s2,&nlayer,nneur);
2265  if(ierr != 0) return ierr;
2266  ierr = MLP_SetNet(&nlayer,nneur);
2267  if(ierr != 0) return ierr;
2268  }
2269  else
2270  {
2271  sscanf(s,"%s %d",cc,&l);
2272  if(*(cc+1)=='P') /* NPAT */
2273  {
2274  np=l;
2275  printf("Number of patterns %d\n",np);
2276  }
2277  else if(*(cc+1)=='I') /* NINP */
2278  {
2279  nin=l;
2280  PAT.Nin = nin;
2281  printf("Number of inputs %d\n",nin);
2282  }
2283  else if(*(cc+1)=='O' && *(cc+2)=='U') /* NOUT */
2284  {
2285  nout=l;
2286  PAT.Nout = nout;
2287  printf("Number of outputs %d\n",nout);
2288  }
2289  else if(*(cc+1)=='O' && *(cc+2)=='R') /* NORM */
2290  {
2291  DIVERS.Norm=l;
2292  if(l==1) printf("Normalize inputs\n");
2293  }
2294 /* obsolete datacard NLAY */
2295  else if(*(cc+1)=='L')
2296  {
2297  printf("NLAY datacard is no longer needed\n");
2298  }
2299  else if(*(cc+1)=='E') /* NEPO */
2300  {
2301  LEARN.Nepoch=l;
2302  printf("Number of epochs %d\n",l);
2303  }
2304  else if(*(cc+1)=='R') /* NRES */
2305  {
2306  LEARN.Nreset=l;
2307  printf(
2308  "Reset to steepest descent every %d epochs\n",
2309  l);
2310  }
2311  }
2312  }
2313  else if(*s=='L')
2314  {
2315  if(*(s+1)=='P') /* LPAR */
2316  {
2317  sscanf(s,"%s %le",cc,&p);
2318  printf("Learning parameter %f\n",p);
2319  LEARN.eta = (dbl) p;
2320  }
2321  else if(*(s+1)=='M') /* LMET */
2322  {
2323  *ilearn = 1;
2324  sscanf(s,"%s %d",cc,&(LEARN.Meth));
2325  printf("Learning method = ");
2326  switch(LEARN.Meth)
2327  {
2328  case 1: printf("Stochastic Minimization\n");
2329  break;
2330  case 2: printf("Steepest descent with fixed step\n");
2331  break;
2332  case 3: printf("Steepest descent with line search\n"); break;
2333  case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
2334  case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
2335  break;
2336  case 6: printf("BFGS\n");
2337  break;
2338  case 7: printf("Hybrid BFGS-linear\n");
2339  break;
2340  default: printf("Error: unknown method\n"); break;
2341  }
2342 
2343  }
2344  else if(*(s+1)=='T') /* LTAU */
2345  {
2346  sscanf(s,"%s %lf",cc,&p);
2347  printf("Tau %f\n",p);
2348  LEARN.Tau = (dbl) p;
2349  }
2350  else if(*(s+1)=='A') /* LAMB */
2351  {
2352  sscanf(s,"%s %lf",cc,&p);
2353  printf("Lambda %f\n",p);
2354  LEARN.Lambda = (dbl) p;
2355  }
2356  }
2357  else if(*s=='F')
2358  {
2359  if(*(s+1)=='S') /* FSPO */
2360  {
2361  sscanf(s,"%s %le",cc,&p);
2362  printf("Flat spot elimination parameter %f\n",p);
2363  LEARN.delta = (dbl) p;
2364  }
2365  else if(*(s+1)=='I') /* FILE */
2366  {
2367  sscanf(s,"%s %s",cc,otherfile);
2368  ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
2369  if(ierr != 0) return ierr;
2370  }
2371  }
2372  else if(*s=='M') /* momentum */
2373  {
2374  sscanf(s,"%s %le",cc,&p);
2375  printf("Momentum term %f\n",p);
2376  LEARN.epsilon = (dbl) p;
2377  }
2378  else if(*s=='O') /* OUTx */
2379  {
2380  if(*(s+3)=='W') /* OUTW */
2381  {
2382  sscanf(s,"%s %d",cc,&OutputWeights);
2383  if(OutputWeights == 0)
2384  {
2385  printf("Never write file weights.out\n");
2386  }
2387  else if(OutputWeights == -1)
2388  {
2389  printf("Write weights to output file at the end\n");
2390  }
2391  else
2392  {
2393  printf("Write weights to file every %d epochs\n",
2394  OutputWeights);
2395  }
2396  }
2397  else if(*(s+3)=='F') /* OUTF */
2398  {
2399  sscanf(s,"%s %s",cc,cc2);
2400  if(*cc2=='F' || *cc2=='C')
2401  {
2402  DIVERS.Outf = *cc2;
2403  }
2404  else
2405  {
2406  printf(" *** Error while loading file %s at line %s :",
2407  filename,s);
2408  printf(" unknown language\n");
2409  }
2410  }
2411  else
2412  {
2413  printf(" *** Error while loading file %s at line %s\n",
2414  filename,s);
2415  }
2416  }
2417  else if(*s=='R') /* RDWT */
2418  {
2419  sscanf(s,"%s %d",cc,&(NET.Rdwt));
2420  if(NET.Rdwt == 0)
2421  {
2422  printf("Random weights \n");
2423  }
2424  else
2425  {
2426  printf("Read weights from file weights.in\n");
2427  }
2428  }
2429  else if(*s=='S') /* STAT */
2430  {
2431  sscanf(s,"%s %d",cc,&(DIVERS.Stat));
2432  }
2433 /* else if(*s=='T') TFUN
2434  {
2435  sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
2436  SetTransFunc(il,in,tf);
2437  } */
2438  else if(*s=='H') /* HESS */
2439  {
2440  sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
2441  }
2442  else if(*s=='D')
2443  {
2444  if(*(s+1)=='C') /* DCAY */
2445  {
2446  sscanf(s,"%s %le",cc,&p);
2447  LEARN.Decay = p;
2448  printf("Learning parameter decay %f\n",
2449  (double) LEARN.Decay);
2450  }
2451  if(*(s+1)=='B') /* DBIN */
2452  {
2453  sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
2454  printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
2455  }
2456  if(*(s+1)=='E') /* DEBU */
2457  {
2458  sscanf(s,"%s %d",cc,&(NET.Debug));
2459  printf("Debug mode %d\n",NET.Debug);
2460  }
2461  }
2462  else if(*s=='P') /* POND */
2463  {
2464  npon = CountLexemes(s);
2465  if(npon==2)
2466  {
2467  sscanf(s,"%s %d",cc,&(PAT.Iponde));
2468  }
2469  else
2470  {
2471  ss = (char**) malloc((npon+1)*sizeof(char*));
2472  for(i=0;i<=npon;i++)
2473  ss[i]=(char*) malloc(40*sizeof(char));
2474  getnLexemes(npon,s,ss);
2475  sscanf(ss[1],"%d",&(PAT.Iponde));
2476  for(i=2;i<npon;i++)
2477  {
2478  sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
2479  }
2480  }
2481  if(PAT.Iponde==0)
2482  {
2483  npon = 0;
2484  }
2485  else
2486  {
2487  npon = 1;
2488  }
2489  }
2490  else if(*s=='#') /* comments */
2491  {
2492  }
2493  else /* example itself */
2494  {
2495  if(np==0) return 1;
2496  if(nin==0) return 2;
2497  if(nout==0) return 3;
2498 
2499 
2500 /* store number of examples and allocate memory */
2501  if(line==0)
2502  {
2503  PAT.Npat[ifile] = np;
2504  ierr = AllocPatterns(ifile,np,nin,nout,0);
2505  if(ierr != 0) return ierr;
2506  *iexamples = 1;
2507  }
2508 
2509 /* now get example */
2510 
2511  line++;
2512  ll = (line-1)%2;
2513  ipat = (line-1)/2;
2514  /* printf("Loading event \t %d\r",ipat);*/
2515 /* if(ipat>NPMAX)
2516  {
2517  printf("Too many examples in file\n");
2518  printf("Loading %d examples\n",NPMAX);
2519  PAT.Npat[ifile] = NPMAX;
2520  break;
2521  }
2522 */
2523 
2524 /* allocate the number of lines */
2525 
2526  if(line==1)
2527  {
2528 
2529  nmax = nin;
2530  if(nout>nin) nmax=nout;
2531  ss = (char**) malloc((nmax+1)*sizeof(char*));
2532  if(ss == 0) return -111;
2533  for(i=0;i<=nmax;i++)
2534  {
2535  ss[i]=(char*) malloc(40*sizeof(char));
2536  if(ss[i] == 0) return -111;
2537  }
2538  }
2539 
2540  if(ll==0) /* inputs */
2541  {
2542  getnLexemes(nin,s,ss);
2543  for(i=0;i<nin;i++)
2544  {
2545  sscanf(ss[i],"%le",&p);
2546  PAT.Rin[ifile][ipat][i] = (type_pat) p;
2547  }
2548  }
2549  else /* answers */
2550  {
2551  ntot=nout+npon;
2552  getnLexemes(ntot,s,ss);
2553  for(i=0;i<ntot;i++)
2554  {
2555  sscanf(ss[i],"%le",&p);
2556  if(i<nout)
2557  {
2558  PAT.Rans[ifile][ipat][i] = (type_pat) p;
2559  }
2560  else
2561  {
2562  if(PAT.Iponde==1)
2563  {
2564  PAT.Pond[ifile][ipat] =
2565  (type_pat) p;
2566  }
2567  else
2568  {
2569  PAT.Pond[ifile][ipat] =
2570  (type_pat) PAT.Ponds[(int) p -1];
2571  }
2572  }
2573  }
2574  }
2575  }
2576  }
2577  printf("%d examples loaded \n\n",PAT.Npat[ifile]);
2578  fclose(LVQpat);
2579  return 0;
2580 }
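To make the datacard grammar concrete, a minimal pattern file for a 2-input, 1-output problem could look as follows (hypothetical values; the network structure is assumed to be set elsewhere, e.g. through an NNEU datacard or MLP_SetNet()). Datacards come first; each example is then one line of inputs followed by one line of answers, and '#' lines are comments:

    # two training examples (hypothetical)
    NPAT 2
    NINP 2
    NOUT 1
    0.5 1.2
    1.0
    -0.3 0.7
    0.0

Note that blank lines are not skipped by the parser and would be read as example data, so they should be avoided.
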
int SaveWeights ( char *  filename,
int  iepoch 
)

Definition at line 2972 of file mlp_gen.cc.

References i, and NET.

Referenced by PhysicsTools::MLP::save().

2973 {
2974  FILE *W;
2975  int ilayer,ineur,i;
2976 
2977  W=fopen(filename,"w");
2978  if(W==0) return -1;
2979 
2980  fprintf(W,"# network structure ");
2981  for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
2982  {
2983  fprintf(W,"%d ",NET.Nneur[ilayer]);
2984  }
2985 
2986  fprintf(W,"\n %d\n",iepoch);
2987  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2988  {
2989  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2990  {
2991  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2992  {
2993  fprintf(W," %1.15e\n",
2994  (double) NET.Weights[ilayer][ineur][i]);
2995  }
2996  }
2997  }
2998  fclose(W);
2999  return 0;
3000 }
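A minimal usage sketch (hypothetical wrapper): checkpoint the weights at a given epoch and read them back. SaveWeights() returns -1 when the file cannot be opened; LoadWeights() is assumed here to follow the same convention.

    #include <stdio.h>
    #include "mlp_gen.h"

    void example_weights_io(int iepoch)
    {
        if (SaveWeights("weights.out", iepoch) < 0)
            printf("cannot write weights.out\n");
        if (LoadWeights("weights.out", &iepoch) < 0)
            printf("cannot read weights.out\n");
    }
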
void SetDefaultFuncs ( )

Definition at line 1223 of file mlp_gen.cc.

References recoMuon::in, and NET.

Referenced by MLP_SetNet().

1224 {
1225  int il,in;
1226  for(il=0; il<NET.Nlayer; il++) {
1227  for(in=0; in<NET.Nneur[il]; in++) {
1228  NET.T_func[il][in] = 2;
1229  if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
1230  }
1231  }
1232 
1233 }
void SetLambda ( double  Wmax)

Definition at line 1949 of file mlp_gen.cc.

References LEARN, MLP_Test(), and NET.

Referenced by MLP_Epoch().

1950 {
1951  dbl err;
1952  err = MLP_Test(0,0);
1953  LEARN.Alambda =
1954  LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
1955 }
int SetTransFunc ( int  layer,
int  neuron,
int  func 
)

Definition at line 1200 of file mlp_gen.cc.

References RecoJets_EventContent_cff::func, NET, and NLMAX.

1202 {
1203  if(layer>NLMAX) return(1);
1204 /* if(neuron>NMAX) return(2);*/
1205 
1206  NET.T_func[layer-1][neuron-1] = func;
1207 
1208  return(0);
1209 }
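Note the 1-based arguments: layer and neuron are decremented before indexing T_func, so the first hidden layer is layer 2 and its first neuron is neuron 1. A minimal sketch (hypothetical wrapper; codes: 0 off, 1 linear, 2 sigmoid):

    #include "mlp_gen.h"

    void example_transfunc(void)
    {
        /* force a linear response on neuron 1 of layer 2 */
        if (SetTransFunc(2, 1, 1) != 0) {
            /* non-zero return: layer number above NLMAX */
        }
    }
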
int ShuffleExamples ( int  n,
int *  index 
)

Definition at line 2102 of file mlp_gen.cc.

References a, i, cuy::ii, MLP_Rand(), and gen::n.

Referenced by MLP_Epoch(), and MLP_Stochastic().

2103 {
2104  int i,ii,itmp;
2105  dbl a = (dbl) (n-1);
2106 
2107  for(i=0;i<n;i++)
2108  {
2109  ii = (int) MLP_Rand(0.,a);
2110  itmp = index[ii];
2111  index[ii] = index[i];
2112  index[i] = itmp;
2113  }
2114  return 0;
2115 }
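Each position is swapped with a position drawn uniformly over the whole array (a swap-all shuffle rather than a strict Fisher-Yates walk). A minimal sketch (hypothetical wrapper), mirroring what the stochastic learning path does with ExamplesIndex:

    #include <stdlib.h>
    #include "mlp_gen.h"

    void example_shuffle(int n)
    {
        int i;
        int *index = (int *) malloc(n * sizeof(int));
        if (index == 0) return;
        for (i = 0; i < n; i++) index[i] = i;  /* identity permutation */
        ShuffleExamples(n, index);             /* shuffle it in place */
        free(index);
    }
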
void SteepestDir ( )

Definition at line 1244 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1245 {
1246  int il,in,jn;
1247  for(il=1; il<NET.Nlayer; il++)
1248  for(in=0; in<NET.Nneur[il]; in++)
1249  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1250  dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
1251 }
int StochStep ( )

Definition at line 965 of file mlp_gen.cc.

References eta, recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

966 {
967  int il, in1, in;
968  dbl eta, eps, epseta;
969 
970  eta = -LEARN.eta;
971  eps = LEARN.epsilon;
972  epseta = eps/eta;
973  for(il=NET.Nlayer-1; il>0; il--) {
974  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
975 
976  /* compute delta weights */
977  for(in=0; in<NET.Nneur[il]; in++) {
978  LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
979  + epseta * LEARN.Odw[il][in][in1]);
980  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
981  }
982 
983  }
984  }
985 
986  return(0);
987 }
int StochStepHyb ( )

Definition at line 926 of file mlp_gen.cc.

References eta, recoMuon::in, LEARN, MLP_ResLin(), and NET.

927 {
928  int il, in1, in;
929  dbl eta, eps;
930 
931  eta = LEARN.eta;
932  eps = LEARN.epsilon;
933  for(il=NET.Nlayer-2; il>0; il--) {
934 
935  for(in=0; in<NET.Nneur[il]; in++) {
936 
937  /* compute delta weights */
938  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
939  LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
940  + eps * LEARN.Odw[il][in][in1];
941  }
942 
943  /* update weights */
944  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
945  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
946  }
947  }
948  }
949  MLP_ResLin();
950  return(0);
951 }

Variable Documentation

dbl** BFGSH

Definition at line 37 of file mlp_gen.cc.

int BFGSMemory = 0

Definition at line 27 of file mlp_gen.cc.

dbl* delta

Definition at line 36 of file mlp_gen.cc.

Referenced by npstat::absDifference(), ReferenceTrajectory::addMaterialEffectsBrl(), FWMuonDetailView::addSceneInfo(), FWElectronDetailView::addSceneInfo(), PhotonsWithConversionsAnalyzer::analyze(), SimpleConvertedPhotonAnalyzer::analyze(), SimplePhotonAnalyzer::analyze(), DQMEventInfo::analyze(), V0Monitor::analyze(), TestHits::analyze(), TestSmoothHits::analyze(), TestTrackHits::analyze(), TestOutliers::analyze(), PhotonValidator::analyze(), TauTagValidation::analyze(), JetCrystalsAssociator::associate(), npstat::NUHistoAxis::binNumber(), FWPhotonLegoProxyBuilder::build(), FWElectronLegoProxyBuilder::build(), EcalShapeBase::buildMe(), PFRecoTauTagInfoAlgorithm::buildPFTauTagInfo(), ThirdHitPrediction::calculateRangesBarrel(), QGTagger::calcVariables(), FWGeometryTableManager::checkRegionOfInterest(), npstat::NUHistoAxis::closestValidBin(), HcalQie::codeToQ(), MultipleScatteringSimulator::compute(), VolumeEnergyLossEstimator::computeBetheBloch(), MVAJetPuId::computeIdVariables(), DTDigitizer::computeTime(), TGeoMgrFromDdd::createShape(), TGeoFromDddService::createShape(), ThirdHitPredictionFromInvLine::crossing(), InterpolatedPulse< 1500U >::derivative(), DTTMax::DTTMax(), CSCPairResidualsConstraint::error(), SiPixelSCurveCalibrationAnalysis::estimateSCurveParameters(), DDEcalBarrelAlgo::execute(), DDEcalBarrelNewAlgo::execute(), TFParams::f3deg(), fastProp(), FWECALCaloDataDetailViewBuilder::fillData(), RPCStripsRing::fillWithVirtualStrips(), PFFilter::filter(), PFMETFilter::filter(), JetHTJetPlusHOFilter::filter(), SinglePhotonJetPlusHOFilter::filter(), ThirdHitPredictionFromInvParabola::findPointAtCurve(), DTLinearFit::fit(), KinematicConstrainedVertexFitter::fit(), KinematicConstrainedVertexFitterT< nTrk, nConstraint >::fit(), hitfit::Chisq_Constrainer::fit(), PulseFitWithFunction::Fit_electronic(), TFParams::fitpj(), DTTimingExtractor::fitT0(), CSCOfflineMonitor::fitX(), CSCValidation::fitX(), npstat::NUHistoAxis::fltBinNumber(), TSFit::fpol3dg(), TrackerMap::getcolor(), TAPDPulse::getDelta(), TiXmlBase::GetEntity(), DTTimeBoxFitter::getFitSeeds(), GlobalTrackerMuonAlignment::gradientGlobal(), GlobalTrackerMuonAlignment::gradientLocal(), CkfDebugger::hasDelta(), fastTrackingUtilities::hitLocalError(), BaseParticlePropagator::increaseRCyl(), SiTrivialInduceChargeOnStrips::induceVector(), edm::StreamSchedule::initializeEarlyDelete(), CSCPairResidualsConstraint::isFiducial(), fftjetcms::LinInterpolatedTable1D::isMonotonous(), fireworks::localSiStrip(), HLTTauRefCombiner::match(), HLTTauDQMPlotter::match(), TotemRPIncludeAlignments::Merge(), heppy::RochCor::momcor_mc(), MuonMETAlgo::MuonMETAlgo_run(), heppy::Davismt2::nsols(), heppy::Davismt2::nsols_massless(), LowPassFilterTiming::operator()(), CSCDetIdSameDetLayerComparator::operator()(), fftjetcms::LinInterpolatedTable1D::operator()(), InterpolatedPulse< 1500U >::operator()(), operator<<(), VertexKinematicConstraint::parametersDerivative(), EcalUncalibRecHitFixedAlphaBetaAlgo< C >::PerformAnalyticFit(), PetrukhinFunc(), reco::EMIsolatedTauTagInfo::pIsol(), SiPixelTemplateReco::PixelTempReco2D(), SiPixelTemplateSplit::PixelTempSplit(), MuonBadTrackFilter::printMuonProperties(), PFMuonAlgo::printMuonProperties(), FSRWeightProducer::produce(), EgammaHLTCaloTowerProducer::produce(), SimpleTrackListMerger::produce(), TrackListMerger::produce(), CaloTowerFromL1TCreatorForTauHLT::produce(), CaloTowerCreatorForTauHLT::produce(), FastTrackerRecHitMatcher::projectOnly(), BaseParticlePropagator::propagate(), 
tauImpactParameter::ErrorMatrixPropagator::propagateError(), DDG4SolidConverter::pseudotrap(), DTSegmentUpdator::rejectBadHits(), Type1PFMET::run(), InterpolatedPulse< 1500U >::secondDerivative(), MuonHitsChamberResidual::segment_fit(), FFTJetProducer::selectTreeNodes(), Cone::side(), muon::sigmaSwitch(), sigmaSwitch(), SiStripTemplateReco::StripTempReco1D(), SiStripTemplateSplit::StripTempSplit(), heppy::mt2w_bisect::mt2w::teco(), TPedValues::terminate(), reco::IsolatedPixelTrackCandidate::towerIndex(), ValidationHcalIsoTrackAlCaReco::towerIndex(), funct::trapezoid_integral(), TrapezoidalCartesianMFGrid::TrapezoidalCartesianMFGrid(), TrapezoidalCylindricalMFGrid::TrapezoidalCylindricalMFGrid(), DAClusterizerInZ::update(), DAClusterizerInZ_vect::update(), VertexKinematicConstraint::value(), CSCPairResidualsConstraint::value(), and heppy::FSRWeightAlgo::weight().

dbl*** dir

Definition at line 35 of file mlp_gen.cc.

Referenced by Config.ProcessFragment::__dir__(), conddblib.EnumMetaclass::__init__(), python.rootplot.core.Options::__init__(), TFileDirectory::_cd(), usedOutput::_getModulesFromOp(), printPaths::_printOp(), Vispa.Plugins.ConfigEditor.ConfigDataAccessor.ConfigDataAccessor::_readRecursive(), python.rootplot.core::add_from_config_files(), helpers::addESProducers(), SiStripTFile::addPath(), reco::IsoDeposit::algoWithin(), l1t::L1TStage2CaloAnalyzer::analyze(), PatBJetVertexAnalyzer::analyze(), TrackAnalyzer::analyze(), DTTrigTest::analyze(), DTLocalTriggerSynchTask::analyze(), fwlite::AnalyzerWrapper< T >::AnalyzerWrapper(), python.rootplot.core.Options::append_from_package(), LayerCrossingSide::barrelSide(), GenMuonRadCorrAnalyzer::beginJob(), SiStripCommissioningSource::beginRun(), SiStripCorrelateNoise::beginRun(), DQMStore::book(), SUSYDQMAnalyzer::bookHistograms(), HiggsValidation::bookHistograms(), TTbarSpinCorrHepMCAnalyzer::bookHistograms(), IsolatedTracksNxN::BookHistograms(), FWCSCSegmentProxyBuilder::build(), FWDTSegmentProxyBuilder::buildViewType(), CalculatePt_FullPrecision(), EcalHitMaker::cellLine(), CalibrationScanTask::checkAndSave(), CalibrationTask::checkAndSave(), evf::DirManager::checkDirEmpty(), ExternalLHEProducer::closeDescriptors(), SignedDecayLength3D::closestApproachToJet(), SignedImpactParameter3D::closestApproachToJet(), IPTools::closestApproachToJet(), MEtoMEComparitor::compare(), NuclearInteractionFTFSimulator::compute(), DTDigitizer::computeTime(), pos::PixelConfigFile::configurationDataExists(), EcalHitMaker::configureGeometry(), EcalHitMaker::correspondingEdge(), SiStripCommissioningSource::createCablingTasks(), SiStripCommissioningSource::createTasks(), SimpleNavigableLayer::crossingState(), MaterialBudgetData::dataStartTrack(), dd_to_html(), reco::IsoDeposit::depositAndCountWithin(), MultipleScatteringGeometry::detLayers(), HelixArbitraryPlaneCrossing::direction(), HelixArbitraryPlaneCrossing2Order::direction(), TangentCircle::direction(), TangentHelix::directionAtVertex(), BeamHaloPropagator::directionCheck(), SignedImpactParameter3D::distanceWithJetAxis(), SiStripCorrelateNoise::DoPlots(), EmDQMPostProcessor::dqmEndJob(), JetMETDQMPostProcessor::dqmEndJob(), Comparator::DrawGaussSigmaOverMeanSlice(), Comparator::DrawGaussSigmaOverMeanXSlice(), Comparator::DrawGaussSigmaSlice(), Comparator::DrawMeanSlice(), Comparator::DrawSigmaSlice(), CaloTower::emP4(), FWLiteEnabler::enable(), SiStripCommissioningSource::endJob(), EcalPedHists::endJob(), MEtoEDMConverter::endJob(), EcalURecHitHists::endJob(), EcalCosmicsHists::endJob(), DQMHistNormalizer::endRun(), ZGlobalVsSAIsolationAnalyzer::evaluate(), FWGUIManager::exportImagesOfAllViews(), Config.Process::extend(), helpers::extendWithPrePostfix(), CombinedSVComputer::fillCommonVariables(), MaterialBudgetCastorHistos::fillStartTrack(), MaterialBudgetHcalHistos::fillStartTrack(), TrackCountingTagPlotter::finalize(), TrackProbabilityTagPlotter::finalize(), PFClusterShapeAlgo::find_e3x2(), evf::DirManager::findHighestRun(), evf::DirManager::findHighestRunDir(), LocalFileSystem::findMount(), DQMStore::findObject(), evf::DirManager::findRunDir(), InOutConversionSeedFinder::findSeeds(), SiStripElectronSeedGenerator::findSeedsFromCluster(), DTSegmentUpdator::fit(), util.rrapi.RRApi::get(), rrapi.RRApi::get(), pos::PixelConfigFile::get(), DQMStore::get(), EDMtoMEConverter::getData(), ClusterShapeHitFilter::getDrift(), ClusterShapeTrackFilter::getGlobalDirs(), 
CrossingPtBasedLinearizationPointFinder::getLinearizationPoint(), getObject(), Vispa.Plugins.EdmBrowser.EdmDataAccessor.EdmDataAccessor::getObjectProperties(), pos::PixelConfigFile::getPath(), getplot(), TotemRPGeometry::GlobalToLocalDirection(), CaloTower::hadP4(), EcalHitMaker::hcalCellLine(), HCovarianceVSxy::HCovarianceVSxy(), HelixBarrelPlaneCrossing2OrderLocal::HelixBarrelPlaneCrossing2OrderLocal(), Comparator::Histo(), RectangularEtaPhiTrackingRegion::hits(), CosmicTrackingRegion::hits_(), HResolution::HResolution(), reco::GhostTrackPrediction::init(), SiStripDetKey::initFromPath(), SiStripFecKey::initFromPath(), SiStripFedKey::initFromPath(), RunManager::initG4(), RunManagerMT::initG4(), DTChamberEfficiencyTask::interpolate(), RKPropagatorInS::invertDirection(), ThirdHitPrediction::isCompatibleWithMultipleScattering(), ZMuMuTrackUserData::isolation(), ZMuMuMuonUserData::isolation(), ZToMuMuIsoDepositSelector< Isolator >::isolation(), isolation(), StraightLinePropagator::jacobian(), L1TEfficiency_Harvesting::L1TEfficiency_Harvesting(), L1TMuonEndCapForestESProducer::L1TMuonEndCapForestESProducer(), L3NominalEfficiencyConfigurator::L3NominalEfficiencyConfigurator(), LightRay::LightRay(), IPTools::linearImpactParameter(), TotemRPGeometry::LocalToGlobalDirection(), ls_cert_type(), main(), Types::makeCppPSet(), SETSeedFinder::makeSeed(), mergePath(), CaloGeometryHelper::move(), PFECALHashNavigator::move(), Vispa.Plugins.ConfigEditor.ConfigDataAccessor.ConfigDataAccessor::open(), PropagationDirectionChooser::operator()(), GroupedCkfTrajectoryBuilder::oppositeDirection(), python.rootplot.core::option_diff(), TrajectoryFactoryBase::orderedTrajectoryMeasurements(), CaloTower::p4(), CaloTower::p4_HO(), edmplugin::PluginManager::PluginManager(), HelixBarrelPlaneCrossing2OrderLocal::positionOnly(), EcalHitMaker::preshowerCellLine(), ClusterShape::processColumn(), StraightLinePropagatorESProducer::produce(), SteppingHelixPropagatorESProducer::produce(), AnalyticalPropagatorESProducer::produce(), PropagatorWithMaterialESProducer::produce(), GeantPropagatorESProducer::produce(), TemplatedInclusiveVertexFinder< InputContainer, VTX >::produce(), SETMuonSeedProducer::produce(), L1MuGlobalMuonTrigger::produce(), TemplatedSecondaryVertexProducer< IPTI, VTX >::produce(), TestHits::projectHit(), TestSmoothHits::projectHit(), TestTrackHits::projectHit(), SiStripRecHitsValid::projectHit(), GlobalRecHitsAnalyzer::projectHit(), GlobalRecHitsProducer::projectHit(), SiStripTrackingRecHitsValid::projectHit(), FWGUIManager::promptForConfigurationFile(), FWViewBase::promptForSaveImageTo(), SteppingHelixPropagator::propagate(), StraightLinePropagator::propagateParametersOnCylinder(), RKPropagatorInS::propagateParametersOnPlane(), pos::PixelConfigFile::put(), DTLocalTriggerBaseTask::runTMAnalysis(), CommissioningHistograms::save(), Styles::SavePlot(), SeedFromGenericPairOrTriplet::seedFromTriplet(), ElectronSeedGenerator::seedsFromThisCluster(), DTCombinatorialPatternReco4D::segmentSpecialZed(), VariablePlotter::setDir(), DTSegmentCand::setDirection(), DTRecSegment4D::setDirection(), DTRecSegment2D::setDirection(), Benchmark::setDirectory(), Propagator::setPropagationDirection(), EcalHitMaker::setTrackParameters(), vid_id_tools::setupAllVIDIdsInModule(), RecoTauValidation_cfi::SetYmodulesToLog(), InOutConversionSeedFinder::startSeed(), LaserSorter::streamFileName(), reco::IsoDeposit::sumWithin(), TrajectorySeed::swap(), DQMStore::tag(), TangentCircle::TangentCircle(), TB06Tree::TB06Tree(), 
TB06TreeH2::TB06TreeH2(), reco::GhostTrackPrediction::track(), TrackCountingTagPlotter::TrackCountingTagPlotter(), TrackerDpgAnalysis::TrackerDpgAnalysis(), TrackProbabilityTagPlotter::TrackProbabilityTagPlotter(), reco::JetSignalVertexCompatibilityAlgo::trackVertexCompat(), trackVertexCompat(), CosmicMuonTrajectoryBuilder::trajectories(), DTTrigGeomUtils::trigDir(), DTSegmentUpdator::update(), rerunParticleFlow::updateInputTags(), L1MuonPixelTrackFitter::valPhi(), muonisolation::TrackExtractor::veto(), muonisolation::PixelTrackExtractor::veto(), egammaisolation::EgammaTrackExtractor::veto(), muonisolation::CandViewExtractor::veto(), PFCandWithSuperClusterExtractor::veto(), muonisolation::PixelTrackExtractor::vetos(), muonisolation::TrackExtractor::vetos(), egammaisolation::EgammaTrackExtractor::vetos(), WatcherStreamFileReader::WatcherStreamFileReader(), PhysicsTools::TrainerMonitoring::write(), TH1Store::write(), and ApeEstimatorSummary::writeHists().

int* ExamplesIndex

Definition at line 40 of file mlp_gen.cc.

int ExamplesMemory = 0

Definition at line 24 of file mlp_gen.cc.

dbl* Gamma
 
Definition at line 38 of file mlp_gen.cc.
 
dbl** Hessian

Definition at line 41 of file mlp_gen.cc.

dbl** JacobianMatrix

Definition at line 39 of file mlp_gen.cc.

int JacobianMemory = 0

Definition at line 28 of file mlp_gen.cc.

dbl LastAlpha = 0

Definition at line 32 of file mlp_gen.cc.

Referenced by LineSearch(), and LineSearchHyb().

int LearnMemory = 0

Definition at line 29 of file mlp_gen.cc.

int MessLang = 0

Definition at line 22 of file mlp_gen.cc.

struct net_ net_ MLP_HIDDEN

Definition at line 16 of file mlp_gen.cc.

float MLPfitVersion = (float) 1.40

Definition at line 31 of file mlp_gen.cc.

int NetMemory = 0

Definition at line 30 of file mlp_gen.cc.

int NLineSearchFail = 0

Definition at line 33 of file mlp_gen.cc.

int OutputWeights = 100

Definition at line 23 of file mlp_gen.cc.

int PatMemory[2] = {0,0}

Definition at line 26 of file mlp_gen.cc.

int WeightsMemory = 0

Definition at line 25 of file mlp_gen.cc.