Classes | Macros | Typedefs | Functions | Variables
mlp_gen.h File Reference

Go to the source code of this file.

Classes

struct  divers_
 
struct  learn_
 
struct  net_
 
struct  pat_
 
struct  stat_
 

Macros

#define DIVERS   divers_
 
#define LEARN   learn_
 
#define NET   net_
 
#define PAT   pat_
 
#define STAT   stat_
 

Typedefs

typedef double dbl
 
typedef double type_pat
 

Functions

int AllocNetwork (int Nlayer, int *Neurons) MLP_HIDDEN
 
int AllocPatterns (int ifile, int npat, int nin, int nout, int iadd) MLP_HIDDEN
 
void AllocWeights () MLP_HIDDEN
 
void BFGSdir (int Nweights) MLP_HIDDEN
 
void CGDir (dbl beta) MLP_HIDDEN
 
int CountLexemes (char *s) MLP_HIDDEN
 
int DecreaseSearch (dbl *alpmin, int *Ntest, dbl Err0) MLP_HIDDEN
 
dbl DeDwNorm () MLP_HIDDEN
 
dbl DeDwProd () MLP_HIDDEN
 
void DeDwSave () MLP_HIDDEN
 
void DeDwSaveZero () MLP_HIDDEN
 
void DeDwScale (int Nexamples) MLP_HIDDEN
 
int DeDwSum (type_pat *ans, dbl *out, int ipat) MLP_HIDDEN
 
void DeDwZero () MLP_HIDDEN
 
dbl DerivDir () MLP_HIDDEN
 
void EtaDecay () MLP_HIDDEN
 
int FixedStep (dbl alpha) MLP_HIDDEN
 
void FreeNetwork () MLP_HIDDEN
 
int FreePatterns (int ifile) MLP_HIDDEN
 
void FreeWeights () MLP_HIDDEN
 
int GetBFGSH (int Nweights) MLP_HIDDEN
 
void GetGammaDelta () MLP_HIDDEN
 
void getLexemes (char *s, char **ss) MLP_HIDDEN
 
int GetNetStructure (char *s, int *Nlayer, int *Nneur) MLP_HIDDEN
 
void getnLexemes (int n, char *s, char **ss) MLP_HIDDEN
 
void InitBFGSH (int Nweights) MLP_HIDDEN
 
void InitWeights () MLP_HIDDEN
 
int LearnAlloc () MLP_HIDDEN
 
void LearnFree () MLP_HIDDEN
 
int LineSearch (dbl *alpmin, int *Ntest, dbl Err0) MLP_HIDDEN
 
int LineSearchHyb (dbl *alpmin, int *Ntest) MLP_HIDDEN
 
int LoadWeights (char *filename, int *iepoch) MLP_HIDDEN
 
dbl MLP_Epoch (int iepoch, dbl *alpmin, int *Ntest) MLP_HIDDEN
 
void MLP_Line (dbl ***w0, dbl alpha) MLP_HIDDEN
 
void MLP_LineHyb (dbl ***w0, dbl alpha) MLP_HIDDEN
 
void MLP_MatrixVector (dbl *M, type_pat *v, dbl *r, int n, int m) MLP_HIDDEN
 
void MLP_MatrixVectorBias (dbl *M, dbl *v, dbl *r, int n, int m) MLP_HIDDEN
 
void MLP_MM2rows (dbl *c, type_pat *a, dbl *b, int Ni, int Nj, int Nk, int NaOffs, int NbOffs) MLP_HIDDEN
 
void MLP_Out (type_pat *rrin, dbl *rrout) MLP_HIDDEN
 
void MLP_Out2 (type_pat *rrin) MLP_HIDDEN
 
void MLP_Out_T (type_pat *rrin) MLP_HIDDEN
 
int MLP_PrCFun (char *filename) MLP_HIDDEN
 
int MLP_PrFFun (char *filename) MLP_HIDDEN
 
int MLP_PrintInputStat () MLP_HIDDEN
 
double MLP_Rand (dbl min, dbl max) MLP_HIDDEN
 
void MLP_ResLin () MLP_HIDDEN
 
int MLP_SetNet (int *nl, int *nn) MLP_HIDDEN
 
int MLP_StatInputs (int Nexamples, int Ninputs, type_pat **inputs, dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum) MLP_HIDDEN
 
dbl MLP_Stochastic () MLP_HIDDEN
 
dbl MLP_Test (int ifile, int regul) MLP_HIDDEN
 
int MLP_Train (int *ipat, dbl *err) MLP_HIDDEN
 
int NormalizeInputs () MLP_HIDDEN
 
void PrintWeights () MLP_HIDDEN
 
int ReadPatterns (char *filename, int ifile, int *inet, int *ilearn, int *iexamples) MLP_HIDDEN
 
int SaveWeights (char *filename, int iepoch) MLP_HIDDEN
 
void SetDefaultFuncs () MLP_HIDDEN
 
void SetLambda (double Wmax) MLP_HIDDEN
 
int SetTransFunc (int layer, int neuron, int func) MLP_HIDDEN
 
int ShuffleExamples (int n, int *index) MLP_HIDDEN
 
void SteepestDir () MLP_HIDDEN
 
int StochStep () MLP_HIDDEN
 
int StochStepHyb () MLP_HIDDEN
 

Variables

struct net_ MLP_HIDDEN
 

Macro Definition Documentation

#define DIVERS   divers_

Definition at line 54 of file mlp_gen.h.

Referenced by MLP_PrCFun(), MLP_PrFFun(), and ReadPatterns().

#define LEARN   learn_

Definition at line 36 of file mlp_gen.h.

#define NET   net_

Definition at line 25 of file mlp_gen.h.

#define PAT   pat_

Definition at line 45 of file mlp_gen.h.

#define STAT   stat_

Definition at line 60 of file mlp_gen.h.

Referenced by MLP_PrCFun(), MLP_PrFFun(), and NormalizeInputs().

Typedef Documentation

typedef double dbl

Definition at line 12 of file mlp_gen.h.

typedef double type_pat

Definition at line 13 of file mlp_gen.h.

Function Documentation

int AllocNetwork ( int  Nlayer,
int *  Neurons 
)

Definition at line 3445 of file mlp_gen.cc.

References FreeNetwork(), LEARN, and NET.

Referenced by MLP_SetNet().

3446 {
3447  int i, j, k, l;
3448 
3449  if(NetMemory != 0) FreeNetwork();
3450  NetMemory = 1;
3451 
3452  NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
3453  if(NET.Nneur == 0) return -111;
3454 
3455  NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
3456  NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
3457  NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3458  NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3459  NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
3460  if(NET.T_func == 0 || NET.Deriv1 == 0
3461  || NET.Inn == 0 || NET.Outn == 0
3462  || NET.Delta == 0) return -111;
3463 
3464  for(i=0; i<Nlayer; i++)
3465  {
3466  NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
3467  NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3468  NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3469  NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3470  NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3471  if(NET.T_func[i] == 0 || NET.Deriv1[i] == 0
3472  || NET.Inn[i] == 0 || NET.Outn[i] == 0
3473  || NET.Delta[i] ==0 ) return -111;
3474  }
3475 
3476  NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3477  NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
3478  LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3479  LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3480  LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3481  if(NET.Weights == 0 || NET.vWeights == 0
3482  || LEARN.Odw == 0 || LEARN.ODeDw == 0
3483  || LEARN.DeDw == 0) return -111;
3484 
3485  for(i=1; i<Nlayer; i++)
3486  {
3487  k = Neurons[i-1]+1;
3488  NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
3489  sizeof(dbl));
3490  NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3491  LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3492  LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3493  LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3494  if(NET.Weights[i] == 0 || NET.vWeights[i] == 0
3495  || LEARN.Odw[i] == 0 || LEARN.ODeDw[i] == 0
3496  || LEARN.DeDw[i] == 0) return -111;
3497 
3498  for(j=0; j<Neurons[i]; j++)
3499  {
3500  NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
3501  LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3502  LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3503  LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3504  if(LEARN.Odw[i][j] == 0
3505  || LEARN.ODeDw[i][j] == 0
3506  || LEARN.DeDw[i][j] == 0) return -111;
3507 
3508  for(l=0; l<k; l++)
3509  {
3510  LEARN.Odw[i][j][l] = 0;
3511  LEARN.ODeDw[i][j][l] = 0;
3512  }
3513  }
3514  }
3515  return 0;
3516 }
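
A minimal usage sketch (not from the package; it assumes mlp_gen.h declares the functions listed above). MLP_SetNet() is the usual entry point, as the "Referenced by" note indicates, and calls AllocNetwork() itself:

#include "mlp_gen.h"

int main(void)
{
    int nlayer = 3;
    int neurons[3] = {2, 10, 1};   /* inputs, hidden units, outputs */

    /* MLP_SetNet() records the structure in NET and calls AllocNetwork();
       -111 propagated from AllocNetwork() means a malloc failed. */
    if (MLP_SetNet(&nlayer, neurons) != 0)
        return 1;

    /* ... set transfer functions, load patterns, train ... */

    FreeNetwork();   /* releases everything AllocNetwork() acquired */
    return 0;
}
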
int AllocPatterns ( int  ifile,
int  npat,
int  nin,
int  nout,
int  iadd 
)

Definition at line 3075 of file mlp_gen.cc.

References FreePatterns(), and PAT.

Referenced by PhysicsTools::MLP::init(), and ReadPatterns().

3076 {
3077  int j;
3078  type_pat *tmp, *tmp3;
3079  type_pat **tmp2;
3080  int ntot;
3081 
3082  if(ifile>1 || ifile<0) return(1);
3083 /* scanf("%d",&j); */
3084  if(ExamplesMemory==0)
3085  {
3086  ExamplesMemory=1;
3087  PAT.Pond = (type_pat **) malloc(2*sizeof(dbl*));
3088  PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
3089  PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
3090  PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
3091  if(PAT.Pond == 0 || PAT.Rin == 0
3092  || PAT.Rans == 0 || PAT.vRin == 0) return -111;
3093  }
3094 
3095 
3096 /* if iadd=0, check that memory not already allocated. Otherwise free it */
3097  if(iadd==0 && PatMemory[ifile]!=0)
3098  {
3099  FreePatterns(ifile);
3100  }
3101 
3102 /* allocate memory and initialize ponderations */
3103  if(iadd==0 || PatMemory[ifile]==0)
3104  {
3105  PatMemory[ifile] = 1;
3106  PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
3107  if(PAT.Pond[ifile] == 0) return -111;
3108  for(j=0; j<npat; j++)
3109  PAT.Pond[ifile][j] = 1;
3110 
3111  PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3112  if(PAT.Rin[ifile] == 0) return -111;
3113  PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3114  if(PAT.Rans[ifile] == 0) return -111;
3115 
3116  PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
3117  sizeof(type_pat));
3118  if(PAT.vRin[ifile] == 0) return -111;
3119 
3120  for(j=0; j<npat; j++)
3121  {
3122  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3123  PAT.vRin[ifile][j*(nin+1)] = 1;
3124  }
3125  for(j=0; j<npat; j++)
3126  {
3127  PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
3128  if(PAT.Rans[ifile][j] == 0) return -111;
3129  }
3130  PAT.Npat[ifile] = npat;
3131 
3132  if(ifile==0)
3133  {
3134  ExamplesIndex = (int *) malloc(npat*sizeof(int));
3135  if(ExamplesIndex == 0) return -111;
3136  for(j=0; j<npat; j++) ExamplesIndex[j] = j;
3137  }
3138  }
3139  else /* add examples */
3140  {
3141  ntot = PAT.Npat[ifile]+npat;
3142 
3143 /* event weighting */
3144  tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
3145  if(tmp == 0) return -111;
3146 
3147  for(j=0; j<PAT.Npat[ifile]; j++)
3148  {
3149  tmp[j] = PAT.Pond[ifile][j];
3150  }
3151  for(j=PAT.Npat[ifile];j<ntot;j++)
3152  {
3153  tmp[j] = 1;
3154  }
3155  if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
3156  PAT.Pond[ifile] = tmp;
3157 
3158 /* examples */
3159 /* tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3160  for(j=0; j<PAT.Npat[ifile]; j++)
3161  {
3162  tmp2[j] = PAT.Rin[ifile][j];
3163  }
3164  for(j=PAT.Npat[ifile];j<ntot;j++)
3165  {
3166  tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
3167  }
3168  if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
3169  PAT.Rin[ifile] = tmp2; */
3170 
3171  tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
3172  if(tmp3 == 0) return -111;
3173 
3174  for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
3175  {
3176  tmp3[j] = PAT.vRin[ifile][j];
3177  }
3178  if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
3179  PAT.vRin[ifile] = tmp3;
3180  for(j=0; j<ntot; j++)
3181  {
3182  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3183  PAT.vRin[ifile][j*(nin+1)] = 1;
3184  }
3185 
3186  tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3187  if(tmp2 == 0) return -111;
3188  for(j=0; j<PAT.Npat[ifile]; j++)
3189  {
3190  tmp2[j] = PAT.Rans[ifile][j];
3191  }
3192  for(j=PAT.Npat[ifile];j<ntot;j++)
3193  {
3194  tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
3195  if(tmp2[j] == 0) return -111;
3196  }
3197  if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
3198  PAT.Rans[ifile] = tmp2;
3199  PAT.Npat[ifile] = ntot;
3200  PatMemory[ifile] = 1;
3201 
3202 /* indices */
3203  if(ifile==0)
3204  {
3205  free(ExamplesIndex);
3206  ExamplesIndex = (int *) malloc(ntot*sizeof(int));
3207  if(ExamplesIndex == 0) return -111;
3208  for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
3209  }
3210  }
3211 
3212  return 0;
3213 }
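
A matching sketch for the pattern store (the sizes are hypothetical): ifile 0 holds the training set and ifile 1 the test set, and iadd=1 appends to an existing block instead of replacing it.

#include "mlp_gen.h"

int main(void)
{
    /* room for 1000 training patterns of 2 inputs and 1 output each;
       returns 1 for a bad ifile, -111 for a failed malloc */
    if (AllocPatterns(0, 1000, 2, 1, 0) != 0)
        return 1;

    /* ... fill PAT.Rin[0][ipat], PAT.Rans[0][ipat] and, if needed, the
       event weights PAT.Pond[0][ipat] (initialized to 1 above) ... */

    FreePatterns(0);
    return 0;
}
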
void AllocWeights ( )
void BFGSdir ( int  Nweights)

Definition at line 1343 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1344 {
1345  dbl *g, *s;
1346  int kk=0;
1347  int il,i,j,in,jn;
1348 
1349  g = (dbl*) malloc(NET.Nweights*sizeof(dbl));
1350  s = (dbl*) malloc(Nweights*sizeof(dbl));
1351 
1352  for(il=1; kk<Nweights; il++)
1353  for(in=0; in<NET.Nneur[il]; in++)
1354  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1355  {
1356  g[kk] = LEARN.DeDw[il][in][jn];
1357  kk++;
1358  }
1359  for(i=0; i<Nweights; i++)
1360  {
1361  s[i] = 0;
1362  for(j=0; j<Nweights; j++)
1363  {
1364  s[i] += BFGSH[i][j] * g[j];
1365  }
1366  }
1367 
1368  kk = 0;
1369  for(il=1; kk<Nweights; il++)
1370  for(in=0; in<NET.Nneur[il]; in++)
1371  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1372  {
1373  dir[il][in][jn] = -s[kk];
1374  kk++;
1375  }
1376  free(g);
1377  free(s);
1378 }
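
In matrix form, the loops above flatten the gradient stored in LEARN.DeDw into a vector g and set the quasi-Newton search direction from the inverse-Hessian approximation BFGSH (built by InitBFGSH() and updated by GetBFGSH()):

\[ d = -H\,g \]
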
void CGDir ( dbl  beta)

Definition at line 1269 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1270 {
1271  int il,in,jn;
1272  for(il=1; il<NET.Nlayer; il++)
1273  for(in=0; in<NET.Nneur[il]; in++)
1274  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1275  {
1276  dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
1277  beta*dir[il][in][jn];
1278  }
1279 }
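
This is the conjugate-gradient direction update, with g the current gradient (LEARN.DeDw) and d the previous direction:

\[ d \;\leftarrow\; -g + \beta\, d \]

MLP_Epoch() supplies the coefficient, restarting from steepest descent every LEARN.Nreset epochs: the Polak-Ribière choice for LEARN.Meth==4 and the Fletcher-Reeves choice for LEARN.Meth==5,

\[
\beta_{\mathrm{PR}} = \frac{\lVert g\rVert^{2} - g^{T} g_{\mathrm{old}}}{\lVert g_{\mathrm{old}}\rVert^{2}},
\qquad
\beta_{\mathrm{FR}} = \frac{\lVert g\rVert^{2}}{\lVert g_{\mathrm{old}}\rVert^{2}}
\]
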
int CountLexemes ( char *  s)

Definition at line 2583 of file mlp_gen.cc.

References NULL.

Referenced by getLexemes(), and ReadPatterns().

2584 {
2585  char tmp[1024];
2586  int i=0;
2587 
2588  strcpy(tmp,s);
2589  if (strtok(tmp," "))
2590  {
2591  i=1;
2592  while (strtok(NULL," ")) i++;
2593  }
2594  return i;
2595 }
int DecreaseSearch ( dbl alpmin,
int *  Ntest,
dbl  Err0 
)

Definition at line 1614 of file mlp_gen.cc.

References LEARN, MLP_Line(), MLP_Test(), and NET.

1615 {
1616  dbl ***w0;
1617  dbl alpha2;
1618  dbl err1, err2;
1619  dbl tau;
1620  int icount, il, in, jn;
1621 
1622  tau=LEARN.Tau;
1623 
1624 /* store weights before line search */
1625 
1626  *Ntest = 0;
1627  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1628  for(il=1; il<NET.Nlayer; il++)
1629  {
1630  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1631  for(in=0; in<NET.Nneur[il]; in++)
1632  {
1633  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1634  sizeof(dbl));
1635  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1636  {
1637  w0[il][in][jn] = NET.Weights[il][in][jn];
1638  }
1639  }
1640  }
1641 
1642 /* compute error(w0) */
1643 
1644 /* err1 = MLP_Test(0,0);
1645  (*Ntest) ++;*/
1646  err1 = Err0;
1647 
1648  if(NET.Debug>=4) printf("err depart= %f\n",err1);
1649 
1650  *alpmin = 0;
1651  alpha2 = 0.05;
1652  MLP_Line(w0,alpha2);
1653  err2 = MLP_Test(0,0);
1654  (*Ntest) ++;
1655 
1656  if(err2<err1)
1657  {
1658  *alpmin = alpha2;
1659  }
1660  else
1661  {
1662 
1663 
1664  for(icount=1;icount<=100;icount++)
1665  {
1666  alpha2 = alpha2/tau;
1667  MLP_Line(w0,alpha2);
1668  err2 = MLP_Test(0,0);
1669  (*Ntest) ++;
1670  if(err1>err2) break;
1671  }
1672  if(icount>=100) /* line search fails */
1673  {
1674  MLP_Line(w0,0); /* reset weights */
1675  free(w0);
1676  return(1);
1677  }
1678  *alpmin = alpha2;
1679  }
1680 
1681 /* set the weights */
1682  MLP_Line(w0,*alpmin);
1683 
1684 /* store weight changes */
1685  for(il=1; il<NET.Nlayer; il++)
1686  for(in=0; in<NET.Nneur[il]; in++)
1687  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1688  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1689  - w0[il][in][jn];
1690 
1691  for(il=1; il<NET.Nlayer; il++)
1692  for(in=0; in<NET.Nneur[il]; in++)
1693  free(w0[il][in]);
1694  for(il=1; il<NET.Nlayer; il++)
1695  free(w0[il]);
1696  free(w0);
1697 
1698  return(0);
1699 }
dbl DeDwNorm ( )

Definition at line 1001 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1002 {
1003  int il,in,jn;
1004  dbl dd=0;
1005  for(il=1; il<NET.Nlayer; il++)
1006  for(in=0; in<NET.Nneur[il]; in++)
1007  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1008  dd += LEARN.DeDw[il][in][jn]*
1009  LEARN.DeDw[il][in][jn];
1010  return(dd);
1011 }
dbl DeDwProd ( )

Definition at line 1023 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1024 {
1025  int il,in,jn;
1026  dbl dd=0;
1027  for(il=1; il<NET.Nlayer; il++)
1028  for(in=0; in<NET.Nneur[il]; in++)
1029  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1030  dd += LEARN.DeDw[il][in][jn]*
1031  LEARN.ODeDw[il][in][jn];
1032  return(dd);
1033 }
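
Together with DeDwNorm() above, this supplies the two scalars from which MLP_Epoch() builds the conjugate-gradient β:

\[
\texttt{DeDwNorm()} = \lVert g\rVert^{2} = \sum_{w}\Bigl(\frac{\partial E}{\partial w}\Bigr)^{2},
\qquad
\texttt{DeDwProd()} = g^{T} g_{\mathrm{old}} = \sum_{w}\frac{\partial E}{\partial w}\,\frac{\partial E_{\mathrm{old}}}{\partial w}
\]
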
void DeDwSave ( )

Definition at line 1081 of file mlp_gen.cc.

References LEARN, and NET.

1082 {
1083  int il, in, jn;
1084  for(il=1; il<NET.Nlayer; il++)
1085  for(in=0; in<NET.Nneur[il]; in++)
1086  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1087  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1088 }
void DeDwSaveZero ( )

Definition at line 1100 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1101 {
1102  int il, in, jn;
1103  for(il=1; il<NET.Nlayer; il++)
1104  for(in=0; in<NET.Nneur[il]; in++)
1105  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1106  {
1107  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1108  LEARN.DeDw[il][in][jn] = 0;
1109  }
1110 }
void DeDwScale ( int  Nexamples)

Definition at line 1063 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1064 {
1065  int il, in, jn;
1066  for(il=1; il<NET.Nlayer; il++)
1067  for(in=0; in<NET.Nneur[il]; in++)
1068  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1069  LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
1070 }
int DeDwSum ( type_pat ans,
dbl out,
int  ipat 
)

Definition at line 1126 of file mlp_gen.cc.

References LEARN, NET, and PAT.

Referenced by MLP_Train().

1127 {
1128  int il, in1, in, ii;
1129 /* dbl err[NMAX][4]; */
1130  dbl deriv;
1131  dbl *pout, *pdedw, *pdelta;
1132  register dbl a, b;
1133 /* char buf[50];*/
1134 
1135 /* output layer */
1136  b = (dbl) PAT.Pond[0][ipat];
1137  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
1138  {
1139  deriv = NET.Deriv1[NET.Nlayer-1][in];
1140  NET.Delta[NET.Nlayer-1][in] =
1141  (out[in] - (dbl) ans[in])*deriv*b;
1142  }
1143 
1144  for(il=NET.Nlayer-2; il>0; il--)
1145  {
1146 
1147  for(in=0; in<NET.Nneur[il]; in++)
1148  {
1149  deriv = NET.Deriv1[il][in];
1150  a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
1151  pdelta = &(NET.Delta[il+1][1]);
1152  for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++)
1153  {
1154  a += *pdelta * NET.Weights[il+1][in1][in+1];
1155  }
1156  NET.Delta[il][in] = a * deriv;
1157  }
1158  }
1159 
1160  for(il=1; il<NET.Nlayer; il++)
1161  {
1162  ii = NET.Nneur[il-1];
1163  for(in=0; in<NET.Nneur[il]; in++)
1164  {
1165  a = NET.Delta[il][in];
1166  LEARN.DeDw[il][in][0] += a;
1167  LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
1168  pout = &(NET.Outn[il-1][1]);
1169  pdedw = &(LEARN.DeDw[il][in][2]);
1170  for(in1=1; in1<ii; ++in1, ++pout, ++pdedw)
1171  {
1172  (*pdedw) += a * (*pout);
1173  }
1174  }
1175  }
1176 
1177  return(0);
1178 }
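
These loops are standard backpropagation. Writing o for the neuron outputs (NET.Outn), f' for the stored activation derivatives (NET.Deriv1), t for the target vector ans, and p for the event weight (PAT.Pond), the deltas and gradient contributions are:

\[
\delta^{(L)}_{i} = \bigl(o^{(L)}_{i} - t_{i}\bigr)\, f'_{i}\, p,
\qquad
\delta^{(l)}_{i} = f'_{i} \sum_{k} w^{(l+1)}_{k,i}\,\delta^{(l+1)}_{k},
\qquad
\frac{\partial E}{\partial w^{(l)}_{i,j}} \mathrel{+}= \delta^{(l)}_{i}\, o^{(l-1)}_{j}
\]

with o^{(l-1)}_0 ≡ 1 supplying the bias term; the sums are accumulated over patterns until DeDwScale() divides by the number of examples.
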
void DeDwZero ( )

Definition at line 1044 of file mlp_gen.cc.

References LEARN, and NET.

1045 {
1046  int il, in, jn;
1047  for(il=1; il<NET.Nlayer; il++)
1048  for(in=0; in<NET.Nneur[il]; in++)
1049  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1050  LEARN.DeDw[il][in][jn] = 0;
1051 }
dbl DerivDir ( )

Definition at line 1291 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by MLP_Epoch().

1292 {
1293  int il,in,jn;
1294  dbl ddir = 0;
1295 
1296  for(il=1; il<NET.Nlayer; il++)
1297  for(in=0; in<NET.Nneur[il]; in++)
1298  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1299  {
1300  ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
1301  }
1302  return(ddir);
1303 }
void EtaDecay ( )

Definition at line 2084 of file mlp_gen.cc.

References LEARN.

Referenced by MLP_Stochastic().

2085 {
2086  LEARN.eta *= LEARN.Decay;
2087 }
int FixedStep ( dbl  alpha)

Definition at line 1702 of file mlp_gen.cc.

References LEARN, MLP_Line(), and NET.

1703 {
1704  dbl ***w0;
1705  int il, in, jn;
1706 
1707  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1708  for(il=1; il<NET.Nlayer; il++)
1709  {
1710  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1711  for(in=0; in<NET.Nneur[il]; in++)
1712  {
1713  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1714  sizeof(dbl));
1715  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1716  {
1717  w0[il][in][jn] = NET.Weights[il][in][jn];
1718  }
1719  }
1720  }
1721 
1722 
1723 /* set the weights */
1724  MLP_Line(w0,alpha);
1725 
1726 /* store weight changes */
1727  for(il=1; il<NET.Nlayer; il++)
1728  for(in=0; in<NET.Nneur[il]; in++)
1729  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1730  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1731  - w0[il][in][jn];
1732 
1733  for(il=1; il<NET.Nlayer; il++)
1734  for(in=0; in<NET.Nneur[il]; in++)
1735  free(w0[il][in]);
1736  for(il=1; il<NET.Nlayer; il++)
1737  free(w0[il]);
1738  free(w0);
1739 
1740  return(0);
1741 }
void FreeNetwork ( )

Definition at line 3527 of file mlp_gen.cc.

References LEARN, and NET.

Referenced by AllocNetwork().

3528 {
3529  int i, j;
3530  for(i=1; i<NET.Nlayer; i++)
3531  {
3532  for(j=0; j<NET.Nneur[i]; j++)
3533  {
3534 /* free(NET.Weights[i][j]); */
3535  free(LEARN.Odw[i][j]);
3536  free(LEARN.ODeDw[i][j]);
3537  free(LEARN.DeDw[i][j]);
3538  }
3539  free(NET.vWeights[i]);
3540  free(NET.Weights[i]);
3541  free(LEARN.Odw[i]);
3542  free(LEARN.ODeDw[i]);
3543  free(LEARN.DeDw[i]);
3544  }
3545  free(NET.Weights);
3546  free(LEARN.Odw);
3547  free(LEARN.ODeDw);
3548  free(LEARN.DeDw);
3549 
3550  free(NET.Nneur);
3551 
3552  for(i=0; i<NET.Nlayer; i++)
3553  {
3554  free(NET.T_func[i]);
3555  free(NET.Deriv1[i]);
3556  free(NET.Inn[i]);
3557  free(NET.Outn[i]);
3558  free(NET.Delta[i]);
3559  }
3560  free(NET.T_func);
3561  free(NET.Deriv1);
3562  free(NET.Inn);
3563  free(NET.Outn);
3564  free(NET.Delta);
3565 
3566  NetMemory = 0;
3567 }
int FreePatterns ( int  ifile)

Definition at line 3230 of file mlp_gen.cc.

References PAT.

Referenced by AllocPatterns(), and PhysicsTools::MLP::clear().

3231 {
3232  int i;
3233 
3234  if(ifile>1 || ifile<0) return 1;
3235 /* printf("%d %d \n",ifile,PatMemory[ifile]);*/
3236  if(PatMemory[ifile]==0) return 2;
3237 
3238  free(PAT.Pond[ifile]);
3239  for(i=0; i<PAT.Npat[ifile]; i++)
3240  {
3241 /* free(PAT.Rin[ifile][i]); */
3242  free(PAT.Rans[ifile][i]);
3243  }
3244  free(PAT.Rin[ifile]);
3245  free(PAT.Rans[ifile]);
3246  free(PAT.vRin[ifile]);
3247  PatMemory[ifile] = 0;
3248  PAT.Npat[ifile] = 0;
3249 
3250  return 0;
3251 }
void FreeWeights ( )
int GetBFGSH ( int  Nweights)

Definition at line 1421 of file mlp_gen.cc.

References BFGSH, delta, and Gamma.

Referenced by MLP_Epoch().

1422 {
1423  typedef double dble;
1424  dble deltaTgamma=0;
1425  dble factor=0;
1426  dble *Hgamma;
1427  dble *tmp;
1428  register dble a, b;
1429  int i,j;
1430 
1431  Hgamma = (dble *) malloc(Nweights*sizeof(dble));
1432  tmp = (dble *) malloc(Nweights*sizeof(dble));
1433 
1434  for(i=0; i<Nweights; i++)
1435  {
1436  deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
1437  a = 0;
1438  b = 0;
1439  for(j=0; j<Nweights; j++)
1440  {
1441  a += (dble) BFGSH[i][j] * (dble) Gamma[j];
1442  b += (dble) Gamma[j] * (dble) BFGSH[j][i];
1443  }
1444  Hgamma[i] = a;
1445  tmp[i] = b;
1446  factor += (dble) Gamma[i]*Hgamma[i];
1447  }
1448  if(deltaTgamma == 0) return 1;
1449  a = 1 / deltaTgamma;
1450  factor = 1 + factor*a;
1451 
1452  for(i=0; i<Nweights; i++)
1453  {
1454  b = (dble) delta[i];
1455  for(j=0; j<Nweights; j++)
1456  BFGSH[i][j] += (dbl) (factor*b* (dble)
1457  delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;
1458  }
1459  free(Hgamma);
1460  free(tmp);
1461  return 0;
1462 }
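
With delta the last weight change and Gamma the gradient change (both filled by GetGammaDelta()), the loop above applies the standard BFGS inverse-Hessian update; in matrix form:

\[
H \;\leftarrow\; H
  + \left(1 + \frac{\gamma^{T} H \gamma}{\delta^{T}\gamma}\right)
    \frac{\delta\,\delta^{T}}{\delta^{T}\gamma}
  - \frac{H\gamma\,\delta^{T} + \delta\,\gamma^{T} H}{\delta^{T}\gamma}
\]

A return value of 1 flags a vanishing δᵀγ; MLP_Epoch() then falls back to a steepest-descent step and resets H to the identity via InitBFGSH().
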
void GetGammaDelta ( )

Definition at line 1316 of file mlp_gen.cc.

References delta, Gamma, LEARN, and NET.

Referenced by MLP_Epoch().

1317 {
1318  int i=0;
1319  int il,in,jn;
1320  for(il=1; il<NET.Nlayer; il++)
1321  for(in=0; in<NET.Nneur[il]; in++)
1322  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1323  {
1324  Gamma[i] = LEARN.DeDw[il][in][jn]-
1325  LEARN.ODeDw[il][in][jn];
1326  delta[i] = LEARN.Odw[il][in][jn];
1327  i++;
1328  }
1329 }
void getLexemes ( char *  s,
char **  ss 
)

Definition at line 2613 of file mlp_gen.cc.

References CountLexemes(), and NULL.

2614 {
2615  char tmp[1024];
2616  int i,n;
2617 
2618  strcpy(tmp,s);
2619  n=CountLexemes(tmp);
2620  if (n>0)
2621  {
2622  strcpy(ss[0],strtok(tmp," "));
2623  for (i=1;i<n;i++)
2624  strcpy(ss[i],strtok(NULL," "));
2625  }
2626 }
int GetNetStructure ( char *  s,
int *  Nlayer,
int *  Nneur 
)

Definition at line 3588 of file mlp_gen.cc.

References NLMAX, and NULL.

Referenced by ReadPatterns().

3589 {
3590  int i=0;
3591  char tmp[1024];
3592 
3593  if(strlen(s)==0) return -1;
3594  if(strlen(s)>1024) return -2;
3595 
3596  strcpy(tmp,s);
3597  if (strtok(tmp,","))
3598  {
3599  i=1;
3600  while (strtok(NULL,",")) i++;
3601  }
3602  *Nlayer = i;
3603  if(i > NLMAX) return -3;
3604 
3605  strcpy(tmp,s);
3606  if (*Nlayer>0)
3607  {
3608  sscanf(strtok(tmp,","),"%d",&(Nneur[0]));
3609  for (i=1;i<*Nlayer;i++)
3610  sscanf(strtok(NULL,","),"%d",&(Nneur[i]));
3611  }
3612 
3613  return 0;
3614 }
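
A small usage sketch (the structure string is hypothetical), mirroring what ReadPatterns() does with a network description:

#include <stdio.h>
#include "mlp_gen.h"

int main(void)
{
    int nlayer;
    int nneur[4];

    /* "2,10,1" -> 3 layers: 2 inputs, 10 hidden neurons, 1 output;
       returns -3 if more than NLMAX layers are requested */
    if (GetNetStructure("2,10,1", &nlayer, nneur) != 0)
        return 1;
    printf("%d layers, %d inputs, %d outputs\n",
           nlayer, nneur[0], nneur[nlayer - 1]);
    return 0;
}
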
void getnLexemes ( int  n,
char *  s,
char **  ss 
)

Definition at line 2599 of file mlp_gen.cc.

References NULL.

Referenced by ReadPatterns().

2600 {
2601  char tmp[1024];
2602  int i;
2603  strcpy(tmp,s);
2604  if (n>0)
2605  {
2606  strcpy(ss[0],strtok(tmp," "));
2607  for (i=1;i<n;i++)
2608  strcpy(ss[i],strtok(NULL," "));
2609  }
2610 }
void InitBFGSH ( int  Nweights)

Definition at line 1391 of file mlp_gen.cc.

References BFGSH.

Referenced by MLP_Epoch().

1392 {
1393  int i,j;
1394  for(i=0; i<Nweights; i++)
1395  for(j=0; j<Nweights; j++)
1396  {
1397  BFGSH[i][j] = 0;
1398  if(i==j) BFGSH[i][j] = 1;
1399  }
1400 }
void InitWeights ( )

Definition at line 2145 of file mlp_gen.cc.

References MLP_Rand(), and NET.

Referenced by PhysicsTools::MLP::MLP().

2146 {
2147  int ilayer,ineur,i;
2148 
2149  for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
2150  for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
2151  for(i=0;i<=NET.Nneur[ilayer-1];i++)
2152  NET.Weights[ilayer][ineur][i]=
2153  (dbl) MLP_Rand(-0.5, 0.5);
2154 }
int LearnAlloc ( )

Definition at line 2683 of file mlp_gen.cc.

References LEARN, LearnFree(), and NET.

Referenced by PhysicsTools::MLP::MLP().

2684 {
2685  int il,in,i;
2686  int Nweights = 0;
2687 
2688  if(LearnMemory != 0) LearnFree();
2689  LearnMemory = 1;
2690  dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
2691  if(dir == 0) return -111;
2692 
2693  for(il=0; il<NET.Nlayer; il++)
2694  {
2695  dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
2696  if(dir[il] == 0) return -111;
2697  for(in=0; in<NET.Nneur[il]; in++)
2698  {
2699  if(il==0)
2700  {
2701 /* TODO: understand implications of hard-coded 101 */
2702  dir[0][in] = (dbl *)
2703  malloc(101*sizeof(dbl));
2704  if(dir[0][in] == 0) return -111;
2705  }
2706  else
2707  {
2708  dir[il][in] = (dbl *)
2709  malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
2710  if(dir[il][in] == 0) return -111;
2711  Nweights += NET.Nneur[il-1]+1;
2712  }
2713  }
2714  }
2715  NET.Nweights = Nweights;
2716 
2717  if(BFGSMemory==0 && LEARN.Meth>= 6)
2718  {
2719  BFGSMemory = 1;
2720  Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
2721  delta = (dbl*) malloc(Nweights*sizeof(dbl));
2722  BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
2723  if(Gamma == 0 || delta == 0 || BFGSH == 0)
2724  return -111;
2725 
2726  for(i=0; i<Nweights; i++)
2727  {
2728  BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
2729  if(BFGSH[i] == 0) return -111;
2730  }
2731  }
2732 
2733 /* if(JacobianMemory==0)
2734  {
2735  JacobianMemory = 1;
2736  printf("JacobianMemory = %d\n",JacobianMemory);
2737  JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
2738  for(i=0; i<PAT.Npat[0]; i++)
2739  JacobianMatrix[i] =
2740  (dbl*) malloc(Nweights*sizeof(dbl));
2741  printf("end memory alloc\n");
2742  }
2743 
2744  if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
2745 
2746  return 0;
2747 }
void LearnFree ( )

Definition at line 2638 of file mlp_gen.cc.

References NET.

Referenced by LearnAlloc(), and PhysicsTools::MLP::~MLP().

2639 {
2640  int il,in;
2641  if(LearnMemory==0) return;
2642  LearnMemory = 0;
2643  for(il=0; il<NET.Nlayer; il++)
2644  {
2645  for(in=0; in<NET.Nneur[il]; in++)
2646  {
2647  free(dir[il][in]);
2648  }
2649  free(dir[il]);
2650  }
2651  free(dir);
2652  if(BFGSMemory==0) return;
2653  BFGSMemory = 0;
2654  for(il=0; il<NET.Nweights; il++)
2655  {
2656  free(BFGSH[il]);
2657  }
2658  free(BFGSH);
2659  free(Gamma);
2660  free(delta);
2661 
2662 /* if(JacobianMemory == 0) return;
2663  JacobianMemory = 0;
2664  for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
2665  free(JacobianMatrix); */
2666 }
int LineSearch ( dbl alpmin,
int *  Ntest,
dbl  Err0 
)

Definition at line 1476 of file mlp_gen.cc.

References LastAlpha, LEARN, MLP_Line(), MLP_Test(), and NET.

Referenced by MLP_Epoch().

1477 {
1478  dbl ***w0;
1479  dbl alpha1, alpha2, alpha3;
1480  dbl err1, err2, err3;
1481  dbl tau;
1482  int icount, il, in, jn;
1483 
1484  tau=LEARN.Tau;
1485 
1486 /* store weights before line search */
1487 
1488  *Ntest = 0;
1489  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1490  for(il=1; il<NET.Nlayer; il++)
1491  {
1492  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1493  for(in=0; in<NET.Nneur[il]; in++)
1494  {
1495  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1496  sizeof(dbl));
1497  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1498  {
1499  w0[il][in][jn] = NET.Weights[il][in][jn];
1500  }
1501  }
1502  }
1503 
1504 /* compute error(w0) */
1505 
1506 /* err1 = MLP_Test(0,0);
1507  (*Ntest) ++;*/
1508  err1 = Err0;
1509 
1510  if(NET.Debug>=4) printf("err depart= %f\n",err1);
1511 
1512  *alpmin = 0;
1513  alpha1 = 0;
1514 /* alpha2 = 0.05;
1515  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1516  alpha2 = LastAlpha;
1517  if(alpha2 < 0.01) alpha2 = 0.01;
1518  if(alpha2 > 2.0) alpha2 = 2.0;
1519  MLP_Line(w0,alpha2);
1520  err2 = MLP_Test(0,0);
1521  (*Ntest) ++;
1522  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1523 
1524  alpha3 = alpha2;
1525  err3 = err2;
1526 
1527 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1528  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1529 
1530  if(err1>err2)
1531  {
1532  for(icount=1;icount<=100;icount++)
1533  {
1534  alpha3 = alpha3*tau;
1535  MLP_Line(w0,alpha3);
1536  err3 =MLP_Test(0,0);
1537  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);
1538  (*Ntest) ++;
1539  if(err3>err2) break;
1540  alpha1 = alpha2;
1541  err1 = err2;
1542  alpha2 = alpha3;
1543  err2 = err3;
1544  }
1545  if(icount>=100) /* line search fails */
1546  {
1547  MLP_Line(w0,0); /* reset weights */
1548  free(w0);
1549  return(1);
1550  }
1551  }
1552  else
1553  {
1554  for(icount=1;icount<=100;icount++)
1555  {
1556  alpha2 = alpha2/tau;
1557  MLP_Line(w0,alpha2);
1558  err2 = MLP_Test(0,0);
1559  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1560  (*Ntest) ++;
1561  if(err1>err2) break;
1562  alpha3 = alpha2;
1563  err3 = err2;
1564  }
1565  if(icount>=100) /* line search fails */
1566  {
1567  MLP_Line(w0,0); /* reset weights */
1568  free(w0);
1569  LastAlpha = 0.05; /* try to be safe */
1570  return(1);
1571  }
1572  }
1573 
1574 /* find bottom of parabola */
1575 
1576  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1577  -(err2-err1)/(alpha2-alpha1)));
1578  if(*alpmin>10000) *alpmin=10000;
1579 
1580 /* set the weights */
1581  MLP_Line(w0,*alpmin);
1582  LastAlpha = *alpmin;
1583 
1584 /* store weight changes */
1585  for(il=1; il<NET.Nlayer; il++)
1586  for(in=0; in<NET.Nneur[il]; in++)
1587  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1588  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1589  - w0[il][in][jn];
1590 
1591  for(il=1; il<NET.Nlayer; il++)
1592  for(in=0; in<NET.Nneur[il]; in++)
1593  free(w0[il][in]);
1594  for(il=1; il<NET.Nlayer; il++)
1595  free(w0[il]);
1596  free(w0);
1597 
1598  return(0);
1599 }
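
Once the triplet brackets a minimum, the "bottom of parabola" step at line 1576 is the vertex of the quadratic through (α₁,e₁), (α₂,e₂), (α₃,e₃), where e denotes the test error returned by MLP_Test():

\[
\alpha_{\min} = \frac{1}{2}\left[\alpha_1 + \alpha_3 -
  \frac{e_3 - e_1}{\dfrac{e_3 - e_2}{\alpha_3 - \alpha_2} - \dfrac{e_2 - e_1}{\alpha_2 - \alpha_1}}\right]
\]

The result is clamped to 10000 and remembered in LastAlpha as the starting guess for the next search.
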
int LineSearchHyb ( dbl alpmin,
int *  Ntest 
)

Definition at line 1779 of file mlp_gen.cc.

References LastAlpha, LEARN, MLP_LineHyb(), MLP_Test(), and NET.

Referenced by MLP_Epoch().

1780 {
1781  dbl ***w0;
1782  dbl alpha1, alpha2, alpha3;
1783  dbl err1, err2, err3;
1784  dbl tau;
1785  int icount, il, in, jn;
1786 
1787 /* char buf [50];
1788  sprintf (buf,"entree linesearchhyb\n");
1789  MessageBoxA (0,buf,"dans FreePatterns",MB_OK);*/
1790 
1791  if(NET.Debug>=4){
1792  printf(" entry LineSearchHyb \n");
1793  }
1794  tau=LEARN.Tau;
1795 
1796 /* store weights before line search */
1797 
1798  *Ntest = 0;
1799  w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
1800  for(il=1; il<NET.Nlayer-1; il++)
1801  {
1802  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1803  for(in=0; in<NET.Nneur[il]; in++)
1804  {
1805  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1806  sizeof(dbl));
1807  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1808  {
1809  w0[il][in][jn] = NET.Weights[il][in][jn];
1810  }
1811  }
1812  }
1813 
1814 /* compute error(w0) */
1815  err1 = MLP_Test(0,1);
1816  (*Ntest) ++;
1817  if(NET.Debug>=4) printf("LinesearchHyb err depart= %f\n",err1);
1818 
1819  *alpmin = 0;
1820  alpha1 = 0;
1821 /* alpha2 = 0.05;
1822  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1823  alpha2 = LastAlpha;
1824  if(alpha2 < 0.01) alpha2 = 0.01;
1825  if(alpha2 > 2.0) alpha2 = 2.0;
1826  MLP_LineHyb(w0,alpha2);
1827  err2 = MLP_Test(0,1);
1828  (*Ntest) ++;
1829 
1830  alpha3 = alpha2;
1831  err3 = err2;
1832 
1833 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1834  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1835 
1836  if(err1>err2)
1837  {
1838  for(icount=1;icount<=100;icount++)
1839  {
1840  alpha3 = alpha3*tau;
1841  MLP_LineHyb(w0,alpha3);
1842  err3 = MLP_Test(0,1);
1843  (*Ntest) ++;
1844  if(err3>err2) break;
1845  alpha1 = alpha2;
1846  err1 = err2;
1847  alpha2 = alpha3;
1848  err2 = err3;
1849  }
1850  if(icount>=100) /* line search fails */
1851  {
1852  MLP_LineHyb(w0,0); /* reset weights */
1853  free(w0);
1854  return(1);
1855  }
1856  }
1857  else
1858  {
1859  for(icount=1;icount<=100;icount++)
1860  {
1861  alpha2 = alpha2/tau;
1862  MLP_LineHyb(w0,alpha2);
1863  err2 = MLP_Test(0,1);
1864  (*Ntest) ++;
1865  if(err1>err2) break;
1866  alpha3 = alpha2;
1867  err3 = err2;
1868  }
1869  if(icount>=100) /* line search fails */
1870  {
1871  MLP_LineHyb(w0,0); /* reset weights */
1872  free(w0);
1873  return(1);
1874  }
1875  }
1876 
1877 /* find bottom of parabola */
1878 
1879  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1880  -(err2-err1)/(alpha2-alpha1)));
1881  if(*alpmin>10000) *alpmin=10000;
1882 
1883 /* set the weights */
1884  MLP_LineHyb(w0,*alpmin);
1885  LastAlpha = *alpmin;
1886 
1887 /* store weight changes */
1888  for(il=1; il<NET.Nlayer-1; il++)
1889  for(in=0; in<NET.Nneur[il]; in++)
1890  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1891  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1892  - w0[il][in][jn];
1893 
1894  for(il=1; il<NET.Nlayer-1; il++)
1895  for(in=0; in<NET.Nneur[il]; in++)
1896  free(w0[il][in]);
1897  for(il=1; il<NET.Nlayer-1; il++)
1898  free(w0[il]);
1899  free(w0);
1900  if(NET.Debug>=4){
1901  printf(" exit LineSearchHyb \n");
1902  }
1903 
1904  return(0);
1905 }
int LoadWeights ( char *  filename,
int *  iepoch 
)

Definition at line 3020 of file mlp_gen.cc.

References NET.

Referenced by PhysicsTools::MLP::load().

3021 {
3022  FILE *W;
3023  int ilayer,ineur,i;
3024  double p;
3025  char s[80];
3026 
3027  W=fopen(filename,"r");
3028  if(W==0) return -1;
3029  do
3030  {
3031  fgets(s,80,W);
3032  }
3033  while(*s == '#');
3034  sscanf(s," %d",iepoch);
3035  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
3036  {
3037  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
3038  {
3039  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
3040  {
3041  fscanf(W," %le",&p);
3042  NET.Weights[ilayer][ineur][i] = (dbl) p;
3043  }
3044  }
3045  }
3046 
3047  fclose(W);
3048  return 0;
3049 }
dbl MLP_Epoch ( int  iepoch,
dbl alpmin,
int *  ntest 
)

Definition at line 709 of file mlp_gen.cc.

References BFGSdir(), CGDir(), DeDwNorm(), DeDwProd(), DeDwSaveZero(), DeDwScale(), DerivDir(), GetBFGSH(), GetGammaDelta(), InitBFGSH(), LEARN, LineSearch(), LineSearchHyb(), MLP_ResLin(), MLP_Stochastic(), MLP_Train(), NET, PAT, PrintWeights(), SetLambda(), ShuffleExamples(), SteepestDir(), and StochStep().

Referenced by PhysicsTools::MLP::train().

710 {
711  dbl err, ONorm, beta, prod, ddir;
712 /* int *index;*/
713  int Nweights, Nlinear, ipat, ierr;
714  int nn;
715 
716  err = 0;
717  *alpmin = 0.;
718 
719  Nweights = NET.Nweights;
720  Nlinear = NET.Nneur[NET.Nlayer-2] + 1;
721 
722  if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
723 /* stochastic minimization */
724  if(LEARN.Meth==1)
725  {
726 
727  err = MLP_Stochastic();
728 
729  }
730  else
731  {
732  if(iepoch==1 && LEARN.Meth==7)
733  {
734  SetLambda(10000);
735  MLP_ResLin();
736  if(NET.Debug>=2) PrintWeights();
737  }
738 
739 /* save previous gradient and reset current one */
740  DeDwSaveZero();
741  if(LEARN.Meth==16)
742  {
743  ShuffleExamples(PAT.Npat[0],ExamplesIndex);
744  nn = PAT.Npat[0];
745  PAT.Npat[0] = nn/10;
746  for(ipat=0;ipat<nn;ipat++)
747  {
748  ierr = MLP_Train(&ExamplesIndex[ipat],&err);
749  if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
750  }
751  }
752  else
753  {
754  for(ipat=0;ipat<PAT.Npat[0];ipat++)
755  {
756  ierr = MLP_Train(&ipat,&err);
757  if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
758  }
759  }
760  DeDwScale(PAT.Npat[0]);
761  if(LEARN.Meth==2) StochStep();
762  if(LEARN.Meth==3)
763  {
764  SteepestDir();
765  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
766  }
767 
768 /* Conjugate Gradients Ribiere - Polak */
769  if(LEARN.Meth==4)
770  {
771  if((iepoch-1)%LEARN.Nreset==0)
772  {
773  LEARN.Norm = DeDwNorm(); /* for next epoch */
774  SteepestDir();
775  }
776  else
777  {
778  ONorm = LEARN.Norm;
779  LEARN.Norm = DeDwNorm();
780  prod = DeDwProd();
781  beta = (LEARN.Norm-prod)/ONorm;
782  CGDir(beta);
783  }
784  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
785  }
786 
787 /* Conjugate Gradients Fletcher - Reeves */
788  if(LEARN.Meth==5)
789  {
790  if((iepoch-1)%LEARN.Nreset==0)
791  {
792  LEARN.Norm = DeDwNorm(); /* for next epoch */
793  SteepestDir();
794  }
795  else
796  {
797  ONorm = LEARN.Norm;
798  LEARN.Norm = DeDwNorm();
799  beta = LEARN.Norm/ONorm;
800  CGDir(beta);
801  }
802  if(LineSearch(alpmin,Ntest,err)==1) StochStep();
803  }
804  if(LEARN.Meth==6)
805  {
806  if((iepoch-1)%LEARN.Nreset==0)
807  {
808  SteepestDir();
809  InitBFGSH(Nweights);
810  }
811  else
812  {
813  GetGammaDelta();
814  ierr = GetBFGSH(Nweights);
815  if(ierr)
816  {
817  SteepestDir();
818  InitBFGSH(Nweights);
819  }
820  else
821  {
822  BFGSdir(Nweights);
823  }
824  }
825  ddir = DerivDir();
826  if(ddir>0)
827  {
828  SteepestDir();
829  InitBFGSH(Nweights);
830  ddir = DerivDir();
831  }
832  if(LineSearch(alpmin,Ntest,err)==1)
833  {
834  InitBFGSH(Nweights);
835  SteepestDir();
836  if(LineSearch(alpmin,Ntest,err)==1)
837  {
838  printf("Line search fail \n");
839  }
840  }
841  }
842  if(LEARN.Meth==7)
843  {
844  if((iepoch-1)%LEARN.Nreset==0)
845  {
846  SteepestDir();
847  InitBFGSH(Nweights-Nlinear);
848  }
849  else
850  {
851  if(NET.Debug>=5) printf("Before GetGammaDelta \n");
852  GetGammaDelta();
853  if(NET.Debug>=5) printf("After GetGammaDelta \n");
854  ierr = GetBFGSH(Nweights-Nlinear);
855  if(NET.Debug>=5) printf("After GetBFGSH \n");
856  if(ierr)
857  {
858  SteepestDir();
859  InitBFGSH(Nweights-Nlinear);
860  }
861  else
862  {
863  BFGSdir(Nweights-Nlinear);
864  }
865  if(NET.Debug>=5) printf("After BFGSdir \n");
866  }
867  SetLambda(10000);
868  if(LineSearchHyb(alpmin,Ntest)==1)
869  {
870  InitBFGSH(Nweights-Nlinear);
871  SteepestDir();
872  if(LineSearchHyb(alpmin,Ntest)==1)
873  {
874  printf("Line search fail \n");
875  }
876  }
877  }
878  }
879 
880  if(NET.Debug>=5) printf(" End MLP_Epoch\n");
881  return(err);
882 }
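
How the pieces fit together: a sketch of a training driver (the wiring is assumed from the "Referenced by" notes, the file names are hypothetical, and ReadPatterns() is taken to fill the NET/LEARN settings as well as the pattern store):

#include <stdio.h>
#include "mlp_gen.h"

int main(void)
{
    int inet, ilearn, iexamples, ntest, iepoch;
    dbl err, alpmin;

    if (ReadPatterns("train.pat", 0, &inet, &ilearn, &iexamples) != 0)
        return 1;
    if (LearnAlloc() != 0)          /* direction + BFGS workspace */
        return 1;
    InitWeights();                  /* random weights in [-0.5, 0.5] */

    for (iepoch = 1; iepoch <= 100; iepoch++)
    {
        err = MLP_Epoch(iepoch, &alpmin, &ntest);
        printf("epoch %3d  err = %f\n", iepoch, (double) err);
    }

    SaveWeights("weights.out", 100);
    LearnFree();
    return 0;
}
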
void MLP_Line ( dbl ***  w0,
dbl  alpha 
)

Definition at line 1755 of file mlp_gen.cc.

References NET.

Referenced by DecreaseSearch(), FixedStep(), and LineSearch().

1756 {
1757  register int il,in,jn;
1758 
1759  for(il=1; il<NET.Nlayer; il++)
1760  for(in=0; in<NET.Nneur[il]; in++)
1761  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1762  NET.Weights[il][in][jn] = w0[il][in][jn]+
1763  alpha*dir[il][in][jn];
1764 
1765 }
void MLP_LineHyb ( dbl ***  w0,
dbl  alpha 
)

Definition at line 1921 of file mlp_gen.cc.

References MLP_ResLin(), and NET.

Referenced by LineSearchHyb().

1922 {
1923  int il,in,jn;
1924  for(il=1; il<NET.Nlayer-1; il++)
1925  for(in=0; in<NET.Nneur[il]; in++)
1926  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1927  {
1928  NET.Weights[il][in][jn] = w0[il][in][jn]+
1929  alpha*dir[il][in][jn];
1930  }
1931  MLP_ResLin();
1932 }
void MLP_MatrixVector ( dbl M,
type_pat v,
dbl r,
int  n,
int  m 
)

Definition at line 3744 of file mlp_gen.cc.

Referenced by MLP_Test_MM().

3745 {
3746  int i,j;
3747  register dbl a1, a2, a3, a4, c, d;
3748  dbl *pM1 = M;
3749  dbl *pM2 = &(M[m]);
3750  dbl *pM3 = &(M[2*m]);
3751  dbl *pM4 = &(M[3*m]);
3752  dbl *pr = r;
3753  int mp1 = m;
3754 
3755  for(i=0; i<n-3;
3756  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3757  pr+=4)
3758  {
3759  a1 = 0;
3760  a2 = 0;
3761  a3 = 0;
3762  a4 = 0;
3763  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3764  {
3765  c = v[j];
3766  d = v[j+1];
3767  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3768  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3769  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3770  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3771  }
3772  for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3773  {
3774  c = v[j];
3775  a1 = a1 + *pM1 * c;
3776  a2 = a2 + *pM2 * c;
3777  a3 = a3 + *pM3 * c;
3778  a4 = a4 + *pM4 * c;
3779  }
3780  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3781  }
3782  for(i=i; i<n; i++)
3783  {
3784  pM1 = &(M[i*m]);
3785  a1 = 0;
3786  for(j=0; j<m; j++, pM1++)
3787  {
3788  a1 = a1 + *pM1 * v[j];
3789  }
3790  r[i] = a1;
3791  }
3792 }
void MLP_MatrixVectorBias ( dbl M,
dbl v,
dbl r,
int  n,
int  m 
)

Definition at line 3678 of file mlp_gen.cc.

Referenced by MLP_Out(), and MLP_Out2().

3679 {
3680  int i,j;
3681  register dbl a1, a2, a3, a4, c, d;
3682  dbl *pM1 = M;
3683  dbl *pM2 = &(M[m+1]);
3684  dbl *pM3 = &(M[2*(m+1)]);
3685  dbl *pM4 = &(M[3*(m+1)]);
3686  dbl *pr = r;
3687  int mp1 = m+1;
3688 
3689  for(i=0; i<n-3;
3690  i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
3691  pr+=4)
3692  {
3693  a1 = *pM1;
3694  a2 = *pM2;
3695  a3 = *pM3;
3696  a4 = *pM4;
3697  pM1++; pM2++; pM3++; pM4++;
3698  for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
3699  {
3700  c = v[j];
3701  d = v[j+1];
3702  a1 = a1 + *pM1 * c + *(pM1+1) * d;
3703  a2 = a2 + *pM2 * c + *(pM2+1) * d;
3704  a3 = a3 + *pM3 * c + *(pM3+1) * d;
3705  a4 = a4 + *pM4 * c + *(pM4+1) * d;
3706  }
3707  for(j=j; j<m; j++, pM1++, pM2++, pM3++, pM4++)
3708  {
3709  c = v[j];
3710  a1 = a1 + *pM1 * c;
3711  a2 = a2 + *pM2 * c;
3712  a3 = a3 + *pM3 * c;
3713  a4 = a4 + *pM4 * c;
3714  }
3715  *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
3716  }
3717  for(i=i; i<n; i++)
3718  {
3719  pM1 = &(M[i*(m+1)]);
3720  a1 = *pM1;
3721  pM1++;
3722  for(j=0; j<m; j++, pM1++)
3723  {
3724  a1 = a1 + *pM1 * v[j];
3725  }
3726  r[i] = a1;
3727  }
3728 }
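
Each row of M holds m+1 contiguous entries whose first element is the bias, so with the four-row unrolling stripped away the routine computes:

\[
r_{i} = M_{i,0} + \sum_{j=1}^{m} M_{i,j}\, v_{j-1}, \qquad 0 \le i < n
\]
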
void MLP_MM2rows ( dbl c,
type_pat a,
dbl b,
int  Ni,
int  Nj,
int  Nk,
int  NaOffs,
int  NbOffs 
)

Definition at line 3813 of file mlp_gen.cc.

Referenced by MLP_Test_MM().

3815 {
3816 //int i,j,k;
3817 int j,k;
3818 dbl s00,s01,s10,s11;
3819 type_pat *pa0,*pa1;
3820 dbl *pb0,*pb1,*pc0,*pc1;
3821 
3822  for (j=0; j<=Nj-2; j+=2)
3823  {
3824  pc0 = c+j;
3825  pc1 = c+j+Nj;
3826  s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;
3827 
3828  for (k=0,pb0=b+k+NbOffs*j,
3829  pb1=b+k+NbOffs*(j+1),
3830  pa0=a+k,
3831  pa1=a+k+NaOffs;
3832  k<Nk;
3833  k++,pa0++,
3834  pa1++,
3835  pb0++,
3836  pb1++)
3837  {
3838  s00 += (*pa0)*(*pb0);
3839  s01 += (*pa0)*(*pb1);
3840  s10 += (*pa1)*(*pb0);
3841  s11 += (*pa1)*(*pb1);
3842  }
3843  *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
3844  }
3845  for (j=j; j<Nj; j++)
3846  {
3847  pc0 = c+j;
3848  pc1 = c+j+Nj;
3849  s00 = 0.0; s10 = 0.0;
3850  for (k=0,pb0=b+k+NbOffs*j,
3851  pa0=a+k,
3852  pa1=a+k+NaOffs;
3853  k<Nk;
3854  k++,pa0++,
3855  pa1++,
3856  pb0++)
3857  {
3858  s00 += (*pa0)*(*pb0);
3859  s10 += (*pa1)*(*pb0);
3860  }
3861  *pc0 = s00; *pc1 = s10;
3862  }
3863 }
void MLP_Out ( type_pat rrin,
dbl rrout 
)

Definition at line 63 of file mlp_gen.cc.

References MLP_MatrixVectorBias(), MLP_vSigmoideDeriv(), and NET.

Referenced by MLP_ResLin().

64 {
65 // static int i, il, in, j, ilm1, m, mp1;
66  static int i, il, in, j, m, mp1;
67  dbl **deriv1;
68 
69 /* input layer */
70 
71  deriv1 = NET.Deriv1;
72  m = NET.Nneur[0]%4;
73  if(m==0) goto L10;
74  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
75 L10:
76  mp1 = m+1;
77  for(i=mp1; i<=NET.Nneur[0]; i+=4)
78  {
79  NET.Outn[0][i-1] = rrin[i-1];
80  NET.Outn[0][i] = rrin[i];
81  NET.Outn[0][i+1] = rrin[i+1];
82  NET.Outn[0][i+2] = rrin[i+2];
83  }
84 
85 /* hidden and output layers */
86 
87  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
88  NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);
89 
90  for(il=2; il<NET.Nlayer; il++)
91  {
92  MLP_vSigmoideDeriv(NET.Outn[il-1],
93  deriv1[il-1],NET.Nneur[il-1]);
94  MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
95  NET.Outn[il],NET.Nneur[il],
96  NET.Nneur[il-1]);
97  }
98  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
99  {
100  deriv1[NET.Nlayer-1][in] = 1;
101  }
102 }
void MLP_Out2 ( type_pat rrin)

Definition at line 186 of file mlp_gen.cc.

References MLP_MatrixVectorBias(), MLP_vSigmoideDeriv(), and NET.

Referenced by MLP_Stochastic(), and MLP_Train().

187 {
188 // static int il, in, m, mp1, i0, ilm1;
189  static int il, in, m, mp1;
190  register int i;
191  dbl **rrout, **deriv1;
192  register dbl *prrout;
193  type_pat *prrin;
194  int nhid = NET.Nneur[1];
195  int nin = NET.Nneur[0];
196 
197  rrout = NET.Outn;
198  deriv1 = NET.Deriv1;
199 
200  m = NET.Nneur[0]%4;
201  if(m==0) goto L10;
202  if(m==1)
203  {
204  rrout[0][0] = rrin[1];
205  goto L10;
206  }
207  else if(m==2)
208  {
209  rrout[0][0] = rrin[1];
210  rrout[0][1] = rrin[2];
211  goto L10;
212  }
213  else if(m==3)
214  {
215  rrout[0][0] = rrin[1];
216  rrout[0][1] = rrin[2];
217  rrout[0][2] = rrin[3];
218  goto L10;
219  }
220 L10:
221  mp1 = m+1;
222  prrout = &(rrout[0][mp1]);
223  prrin = &(rrin[mp1+1]);
224  for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
225  {
226  *(prrout-1) = *(prrin-1);
227  *prrout = *prrin;
228  *(prrout+1)= *(prrin+1);
229  *(prrout+2) = *(prrin+2);
230  }
231 
232 /* input layer */
233 
234  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
235  NET.Outn[1],nhid,nin);
236 
237 
238 /* hidden and output layers */
239 
240  for(il=2; il<NET.Nlayer; il++)
241  {
242  MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
243  MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
244  NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
245  }
246  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
247  deriv1[NET.Nlayer-1][in] = 1;
248 }
void MLP_Out_T ( type_pat rrin)

Definition at line 115 of file mlp_gen.cc.

References MLP_Sigmoide(), and NET.

Referenced by PhysicsTools::MLP::eval(), and MLP_Test().

116 {
117  static int i, il, in, j, ilm1, m, mp1;
118  register dbl a;
119 
120 /* input layer */
121 
122  m = NET.Nneur[0]%4;
123  if(m==0) goto L10;
124  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
125 L10:
126  mp1 = m+1;
127  for(i=mp1; i<=NET.Nneur[0]; i+=4)
128  {
129  NET.Outn[0][i-1] = rrin[i-1];
130  NET.Outn[0][i] = rrin[i];
131  NET.Outn[0][i+1] = rrin[i+1];
132  NET.Outn[0][i+2] = rrin[i+2];
133  }
134 
135 /* hidden and output layers */
136 
137 /* for(in=0;in<NET.Nneur[0]; in++) printf("%e %e\n",
138  NET.Outn[0][in],NET.Weights[1][0][in]);
139  printf("\n"); */
140  for(il=1; il<NET.Nlayer; il++)
141  {
142  ilm1 = il-1;
143  m = NET.Nneur[ilm1]%4;
144  for(in=0; in<NET.Nneur[il]; in++)
145  {
146  a = NET.Weights[il][in][0];
147  if(m==0) goto L20;
148  for(j=1;j<=m;j++) a +=
149  NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
150 L20:
151  mp1 = m+1;
152  for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
153  {
154  a +=
155  NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
156  NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
157  NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
158  NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
159  }
160  switch(NET.T_func[il][in])
161  {
162  case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
163  break;
164  case 1: NET.Outn[il][in] = a;
165  break;
166  case 0: NET.Outn[il][in] = 0;
167  break;
168  }
169  }
170  }
171 }
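The switch at the end of the neuron loop selects the transfer function; written as a helper, the rule applied to the accumulated sum a is simply:

/* Transfer-function codes used above: 2 = sigmoid, 1 = linear, 0 = forced to zero */
dbl apply_tfunc(int tfunc, dbl a)
{
 switch(tfunc)
 {
  case 2:  return MLP_Sigmoide(a);   /* 1/(1+exp(-a)) */
  case 1:  return a;
  default: return 0;
 }
}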
int MLP_PrCFun ( char *  filename)

Definition at line 2876 of file mlp_gen.cc.

References DIVERS, in, NET, and STAT.

2877 {
2878  int il,in,jn;
2879  FILE *W;
2880 
2881  W=fopen(filename,"w");
2882  if(W==0) return -1;
2883 
2884  fprintf(W,"double sigmoid(double x)\n");
2885  fprintf(W,"{\n");
2886  fprintf(W,"return 1/(1+exp(-x));\n");
2887  fprintf(W,"}\n");
2888  fprintf(W,"void rnnfun(double *rin,double *rout)\n");
2889  fprintf(W,"{\n");
2890  fprintf(W," double out1[%d];\n",NET.Nneur[0]);
2891  fprintf(W," double out2[%d];\n",NET.Nneur[1]);
2892  if(NET.Nlayer>=3) fprintf(W," double out3[%d];\n",NET.Nneur[2]);
2893  if(NET.Nlayer>=4) fprintf(W," double out4[%d];\n",NET.Nneur[3]);
2894  fprintf(W,"\n");
2895 
2896  for(in=0; in<NET.Nneur[0]; in++)
2897  {
2898  if(DIVERS.Norm==0)
2899  {
2900  fprintf(W," out1[%d] = rin[%d];\n",in,in);
2901  }
2902  else
2903  {
2904  fprintf(W," out1[%d] = (rin[%d]-%e)/%e;\n",
2905  in,in,
2906  STAT.mean[in],STAT.sigma[in]);
2907  }
2908  }
2909 
2910  for(il=1; il<=NET.Nlayer-1; il++)
2911  {
2912  fprintf(W,"\n");
2913  fprintf(W,"/* layer %d */\n",il+1);
2914  for(in=0; in<NET.Nneur[il]; in++)
2915  {
2916  fprintf(W," out%d[%d] = %e\n",il+1,in,
2917  (double) NET.Weights[il][in][0]);
2918  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2919  fprintf(W," +(%e) * out%d[%d]\n",
2920  (double) NET.Weights[il][in][jn],il,jn-1);
2921  fprintf(W," ;\n");
2922  }
2923  fprintf(W,"\n");
2924  for(in=0; in<NET.Nneur[il]; in++)
2925  {
2926  if(NET.T_func[il][in]==0)
2927  {
2928  fprintf(W," out%d[%d] = 0;\n",il+1,in);
2929  }
2930  else if(NET.T_func[il][in]==1)
2931  {
2932  }
2933  else if(NET.T_func[il][in]==2)
2934  {
2935  fprintf(W," out%d[%d] = sigmoid(out%d[%d]);\n",
2936  il+1,in,il+1,in);
2937  }
2938  }
2939  }
2940  il = NET.Nlayer-1;
2941  for(in=0; in<NET.Nneur[il]; in++)
2942  {
2943  fprintf(W," rout[%d] = out%d[%d];\n",in,il+1,in);
2944  }
2945  fprintf(W,"}\n");
2946  fclose(W);
2947  return 0;
2948 }
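Assembling the fprintf calls above, the file written for a small 2-1-1 network (two inputs, one sigmoid hidden neuron, one linear output, no input normalisation) would look roughly as follows; the weight values are placeholders:

double sigmoid(double x)
{
return 1/(1+exp(-x));
}
void rnnfun(double *rin,double *rout)
{
 double out1[2];
 double out2[1];
 double out3[1];

 out1[0] = rin[0];
 out1[1] = rin[1];

/* layer 2 */
 out2[0] = 1.000000e-01
 +(2.000000e-01) * out1[0]
 +(-3.000000e-01) * out1[1]
 ;

 out2[0] = sigmoid(out2[0]);

/* layer 3 */
 out3[0] = 5.000000e-02
 +(7.000000e-01) * out2[0]
 ;

 rout[0] = out3[0];
}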
int MLP_PrFFun ( char *  filename)

Definition at line 2767 of file mlp_gen.cc.

References DIVERS, in, NET, and STAT.

2768 {
2769  int il,in,jn;
2770  FILE *W;
2771 
2772  W=fopen(filename,"w");
2773  if(W==0) return -1;
2774  fprintf(W," SUBROUTINE RNNFUN(rin,rout)\n");
2775  fprintf(W," DIMENSION RIN(%d)\n",NET.Nneur[0]);
2776  fprintf(W," DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
2777  fprintf(W,"C\n");
2778 
2779  for(in=0; in<NET.Nneur[0]; in++)
2780  {
2781  if(DIVERS.Norm==0)
2782  {
2783  fprintf(W," OUT%d = RIN(%d)\n",in+1,in+1);
2784  }
2785  else
2786  {
2787  fprintf(W," OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
2788  STAT.mean[in],STAT.sigma[in]);
2789  }
2790  }
2791  for(il=1; il<NET.Nlayer-1; il++)
2792  {
2793  fprintf(W,"C\n");
2794  fprintf(W,"C layer %d\n",il+1);
2795  for(in=0; in<NET.Nneur[il]; in++)
2796  {
2797  fprintf(W," RIN%d = %e\n",in+1,
2798  (double) NET.Weights[il][in][0]);
2799  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2800  fprintf(W," > +(%e) * OUT%d\n",
2801  (double) NET.Weights[il][in][jn],jn);
2802  }
2803  fprintf(W,"C\n");
2804  for(in=0; in<NET.Nneur[il]; in++)
2805  {
2806  if(NET.T_func[il][in]==0)
2807  {
2808  fprintf(W," OUT%d = 0\n",in+1);
2809  }
2810  else if(NET.T_func[il][in]==1)
2811  {
2812  fprintf(W," OUT%d = RIN%d\n",in+1,in+1);
2813  }
2814  else if(NET.T_func[il][in]==2)
2815  {
2816  fprintf(W," OUT%d = SIGMOID(RIN%d)\n",
2817  in+1,in+1);
2818  }
2819  }
2820  }
2821  il = NET.Nlayer-1;
2822  fprintf(W,"C\n");
2823  fprintf(W,"C layer %d\n",il+1);
2824  for(in=0; in<NET.Nneur[il]; in++)
2825  {
2826  fprintf(W," RIN%d = %e\n",in+1,
2827  (double) NET.Weights[il][in][0]);
2828  for(jn=1;jn<=NET.Nneur[il-1]; jn++)
2829  fprintf(W," > +(%e) * OUT%d\n",
2830  (double) NET.Weights[il][in][jn],jn);
2831  }
2832  fprintf(W,"C\n");
2833  for(in=0; in<NET.Nneur[il]; in++)
2834  {
2835  if(NET.T_func[il][in]==0)
2836  {
2837  fprintf(W," ROUT(%d) = 0\n",in+1);
2838  }
2839  else if(NET.T_func[il][in]==1)
2840  {
2841  fprintf(W," ROUT(%d) = RIN%d\n",in+1,in+1);
2842  }
2843  else if(NET.T_func[il][in]==2)
2844  {
2845  fprintf(W," ROUT(%d) = SIGMOID(RIN%d)\n",
2846  in+1,in+1);
2847  }
2848  }
2849 
2850  fprintf(W,"C\n");
2851  fprintf(W," END\n");
2852  fprintf(W," REAL FUNCTION SIGMOID(X)\n");
2853  fprintf(W," SIGMOID = 1./(1.+EXP(-X))\n");
2854  fprintf(W," END\n");
2855 
2856  fclose(W);
2857  return 0;
2858 }
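ReadPatterns() below stores the OUTF datacard value ('F' or 'C') in DIVERS.Outf, so a caller can dispatch between the two printers. A hedged sketch, with arbitrary output file names:

/* Sketch: write the trained network in the language chosen by the OUTF datacard */
int dump_net(void)
{
 if(DIVERS.Outf == 'F')
  return MLP_PrFFun("rnnfun.f");   /* FORTRAN source */
 return MLP_PrCFun("rnnfun.c");    /* C source */
}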
int MLP_PrintInputStat ( )

Definition at line 3330 of file mlp_gen.cc.

References j, mean, MLP_StatInputs(), NET, PAT, and sigma.

3331 {
3332  int j;
3333  dbl *mean, *sigma, *minimum, *maximum;
3334 
3335 /* allocate memory */
3336  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3337  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3338  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3339  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3340 
3341  if(mean == 0 || sigma == 0 || minimum == 0
3342  || maximum == 0) return -111;
3343 
3344  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
3345  mean,sigma,minimum,maximum);
3346 
3347  printf("\t mean \t\t RMS \t\t min \t\t max\n");
3348  for(j=0;j<NET.Nneur[0];j++)
3349  {
3350  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3351  mean[j],sigma[j],minimum[j],maximum[j]);
3352  }
3353 
3354  free(mean);
3355  free(sigma);
3356  free(minimum);
3357  free(maximum);
3358  printf("\n");
3359  return 0;
3360 }
double MLP_Rand ( dbl  min,
dbl  max 
)

Definition at line 2129 of file mlp_gen.cc.

References random.

Referenced by InitWeights(), and ShuffleExamples().

2130 {
2131 return mini+(maxi-mini)*random()/RAND_MAX;
2132 }
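The body is a plain linear rescaling of random()/RAND_MAX onto [mini,maxi]. Strictly speaking RAND_MAX is specified for rand(), not random(); the code assumes both share the same range, which holds on common glibc builds where both are 2^31-1. A usage sketch with an assumed initialisation interval:

/* Sketch: draw one initial weight uniformly in [-0.5, 0.5] */
dbl draw_weight(void)
{
 return MLP_Rand(-0.5, 0.5);
}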
void MLP_ResLin ( )

Definition at line 1969 of file mlp_gen.cc.

References dgels_(), in, LEARN, MLP_Out(), MLP_Test(), NET, PAT, and sqrt().

Referenced by MLP_Epoch(), MLP_LineHyb(), and StochStepHyb().

1970 {
1971 /* dbl rrans[NMAX], rrout[NMAX];*/
1972 /* type_pat rrin[NMAX];*/
1973  doublereal *HR,*dpat; //,*wlin,*SV;
1974  double err,lambda,lambda2;
1975  integer Nl,M,Nhr,khr,nrhs,iret,ierr;
1976  int il, in, inl, ipat;
1977  /*register dbl a;*/ //a unused
1978  char Trans = 'N';
1979 
1980 
1981 /* integer rank; */
1982 // doublereal rcond = -1; /* use machine precision */
1983 
1984  lambda2 = LEARN.Alambda;
1985 
1986 /* Nl = number of linear weights
1987  M = number of terms in linear system = number of examples + regularisation*/
1988  Nl = NET.Nneur[NET.Nlayer-2] + 1;
1989  M = PAT.Npat[0]+Nl;
1990 
1991  integer Lwork = 5 * M;
1992  doublereal *Work = (doublereal*) malloc((int) Lwork*sizeof(doublereal));
1993 
1994 /* memory allocation */
1995  dpat = (doublereal*) malloc((int) M*sizeof(doublereal));
1996 // wlin = (doublereal*) malloc((int) Nl*sizeof(doublereal));
1997 // SV = (doublereal*) malloc((int) Nl*sizeof(doublereal));
1998 
1999  Nhr = M * Nl;
2000  HR = (doublereal*) malloc((int) Nhr*sizeof(doublereal));
2001  err = 0.;
2002  for(ipat=0;ipat<PAT.Npat[0];ipat++)
2003  {
2004 /* *** Filling dpat and HR *** */
2005 /* for(in=0; in<NET.Nneur[0]; in++)
2006  {
2007  rrin[in] = PAT.Rin[0][ipat][in];
2008  }*/
2009 
2010  MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
2011 /* MLP_Out(rrin,rrout);*/
2012  /*for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
2013  {
2014  a = (dbl) PAT.Rans[0][ipat][in]; //a was not used
2015  } */
2016  il = NET.Nlayer-2;
2017  dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
2018  khr = ipat;
2019  HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
2020  for(in=0;in<NET.Nneur[il];in++)
2021  {
2022  khr = M *(in+1) + ipat;
2023  HR[khr] = NET.Outn[il][in]*
2024  (dbl) sqrt(PAT.Pond[0][ipat]);
2025  }
2026  }
2027  il = NET.Nlayer-2;
2028  lambda = sqrt(lambda2);
2029  for(ipat=0;ipat<=NET.Nneur[il];ipat++)
2030  {
2031  dpat[ipat+PAT.Npat[0]] = 0;
2032  for(in=0;in<=NET.Nneur[il];in++)
2033  {
2034  khr = M *in + ipat + PAT.Npat[0];
2035  HR[khr] = 0;
2036  if(in==ipat) HR[khr]=lambda;
2037  }
2038  }
2039  if(NET.Debug>=4)
2040  {
2041  err = MLP_Test(0,0);
2042  printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err);
2043  }
2044 /* */
2045 /* Find the linear weights by solving the linear system */
2046 /* */
2047  nrhs = 1;
2048  ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
2049  &Lwork,&iret);
2050  if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
2051  if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);
2052 
2053 /* ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
2054  &iret);
2055  if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
2056  if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/
2057 
2058  il = NET.Nlayer-1;
2059  for (inl=0; inl<=NET.Nneur[il-1];inl++)
2060  {
2061  NET.Weights[il][0][inl] = dpat[inl];
2062  }
2063  if(NET.Debug>=4)
2064  {
2065  err = MLP_Test(0,0);
2066  printf("ResLin, apres tlsfor, err= %f\n",err);
2067  }
2068  free(Work);
2069  free(dpat);
2070 // free(wlin);
2071  free(HR);
2072 // free(SV);
2073 }
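In matrix form, the system handed to dgels_ is a ridge-regularised linear least-squares fit of the output-layer weights w (single output neuron). With \omega_p = PAT.Pond[0][p], t_p the target of example p, o_j(p) the last hidden layer outputs, and \lambda = LEARN.Alambda as set by SetLambda():

\min_{w}\;\left\|\begin{pmatrix} H \\ \sqrt{\lambda}\,I \end{pmatrix} w - \begin{pmatrix} d \\ 0 \end{pmatrix}\right\|_2^2,
\qquad
H_{p,0}=\sqrt{\omega_p},\quad
H_{p,j+1}=\sqrt{\omega_p}\,o_j(p),\quad
d_p=\sqrt{\omega_p}\,t_p .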
int MLP_SetNet ( int *  nl,
int *  nn 
)

Definition at line 3635 of file mlp_gen.cc.

References AllocNetwork(), NET, NLMAX, and SetDefaultFuncs().

Referenced by PhysicsTools::MLP::MLP(), and ReadPatterns().

3636 {
3637  int il,ierr;
3638 
3639  if((*nl)>NLMAX) return(1);
3640  if((*nl)<2) return(2);
3641 
3642 /* LearnFree(); */
3643 /* allocate memory */
3644  ierr = AllocNetwork(*nl,nn);
3645  if(ierr != 0) return ierr;
3646 
3647 /* set number of layers */
3648  NET.Nlayer = (int) *nl;
3649 
3650 /* set number of neurons */
3651  for(il=0; il<NET.Nlayer; il++) {
3652  NET.Nneur[il] = nn[il];
3653  }
3654 
3655 /* set transfer functions */
3656  SetDefaultFuncs();
3657 /* LearnAlloc(); */
3658 
3659  return(0);
3660 }
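A usage sketch (the layer count and sizes are arbitrary); the first entry of nn is the input layer, the last the output layer:

/* Sketch: declare a 4-8-1 network (input, one hidden, output layer) */
int build_net(void)
{
 int nl = 3;
 int nn[3] = {4, 8, 1};
 return MLP_SetNet(&nl, nn);   /* 1: nl>NLMAX, 2: nl<2, else AllocNetwork's status */
}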
int MLP_StatInputs ( int  Nexamples,
int  Ninputs,
type_pat **  inputs,
dbl mean,
dbl sigma,
dbl minimum,
dbl maximum 
)

Definition at line 3273 of file mlp_gen.cc.

References j, and sqrt().

Referenced by MLP_PrintInputStat(), and NormalizeInputs().

3275 {
3276  dbl *fmean;
3277  int j, ipat, nmax;
3278 
3279 /* first compute a fast rough mean using the first 100 events */
3280  fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
3281  nmax = 100;
3282  if(Nexamples<100) nmax=Nexamples;
3283 
3284  for(j=0;j<Ninputs;j++)
3285  {
3286  fmean[j] = 0;
3287  for(ipat=0;ipat<nmax;ipat++)
3288  {
3289  fmean[j] += (dbl) inputs[ipat][j];
3290  }
3291  fmean[j] = fmean[j]/(dbl) nmax;
3292 
3293 /* now compute real mean and sigma, min and max */
3294  mean[j] = 0;
3295  sigma[j] = 0;
3296  minimum[j] = 99999;
3297  maximum[j] = -99999;
3298  for(ipat=0;ipat<Nexamples;ipat++)
3299  {
3300  mean[j] += (dbl) inputs[ipat][j];
3301  sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
3302  ((dbl) inputs[ipat][j]-fmean[j]);
3303  if((dbl) inputs[ipat][j] > maximum[j])
3304  maximum[j]=(dbl) inputs[ipat][j];
3305  if((dbl) inputs[ipat][j] < minimum[j])
3306  minimum[j]=(dbl) inputs[ipat][j];
3307  }
3308  mean[j] = mean[j]/(dbl) Nexamples;
3309  sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples -
3310  (mean[j]-fmean[j])*
3311  (mean[j]-fmean[j]));
3312  }
3313  free(fmean);
3314  return 0;
3315 }
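The rough mean over the first (at most) 100 examples serves as an offset for a numerically safer variance; the second loop implements the shifted-data formula

\sigma_j=\sqrt{\frac{1}{N}\sum_{p=1}^{N}\left(x_{pj}-m^{0}_{j}\right)^{2}-\left(\bar{x}_{j}-m^{0}_{j}\right)^{2}}

with m^0 the rough mean (fmean) and \bar{x} the exact mean.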
dbl MLP_Stochastic ( )

Definition at line 521 of file mlp_gen.cc.

References a, b, dd, eta, EtaDecay(), ii, in, LEARN, MLP_Out2(), NET, PAT, ShuffleExamples(), and weights.

Referenced by MLP_Epoch().

522 {
523  int ipat, ii, inm1;
524  dbl err = 0;
525  int il, in1, in, itest2;
526  dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
527  dbl eta, eps;
528  register dbl a, b, dd, a1, a2, a3, a4;
529  dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
530  dbl ***weights;
531 
532  if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
533  weights = NET.Weights;
534 /* shuffle patterns */
535  ShuffleExamples(PAT.Npat[0],ExamplesIndex);
536 
537 /* reduce learning parameter */
538  if(LEARN.Decay<1) EtaDecay();
539 
540  eta = -LEARN.eta;
541  eps = LEARN.epsilon;
542 
543 /* loop on the examples */
544  for(ipat=0;ipat<PAT.Npat[0];ipat++)
545  {
546  ii = ExamplesIndex[ipat];
547  pond = PAT.Pond[0][ii];
548 
549  MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)]));
550 
551 /* next lines are equivalent to DeDwSum */
552  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
553  {
554  deriv = NET.Deriv1[NET.Nlayer-1][in];
555  a = (dbl) PAT.Rans[0][ii][in];
556  b = NET.Outn[NET.Nlayer-1][in]-a;
557  err += b*b*pond;
558  NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
559  }
560 
561  for(il=NET.Nlayer-2; il>0; il--)
562  {
563  dd = NET.Delta[il+1][0];
564  for(in=0; in<NET.Nneur[il]-3; in+=4)
565  {
566  deriv1 = NET.Deriv1[il][in];
567  deriv2 = NET.Deriv1[il][in+1];
568  deriv3 = NET.Deriv1[il][in+2];
569  deriv4 = NET.Deriv1[il][in+3];
570  itest2 = (NET.Nneur[il+1]==1);
571  a1 = dd*weights[il+1][0][in+1];
572  a2 = dd*weights[il+1][0][in+2];
573  a3 = dd*weights[il+1][0][in+3];
574  a4 = dd*weights[il+1][0][in+4];
575  if(itest2) goto L1;
576  pdelta = &(NET.Delta[il+1][1]);
577  for(in1=1; in1<NET.Nneur[il+1];
578  in1++, pdelta++)
579  {
580  a1 += *pdelta * weights[il+1][in1][in+1];
581  a2 += *pdelta * weights[il+1][in1][in+2];
582  a3 += *pdelta * weights[il+1][in1][in+3];
583  a4 += *pdelta * weights[il+1][in1][in+4];
584  }
585 L1: NET.Delta[il][in] = a1*deriv1;
586  NET.Delta[il][in+1] = a2*deriv2;
587  NET.Delta[il][in+2] = a3*deriv3;
588  NET.Delta[il][in+3] = a4*deriv4;
589  }
590  for(in=in; in<NET.Nneur[il]; in++)
591  {
592  deriv = NET.Deriv1[il][in];
593  itest2 = (NET.Nneur[il+1]==1);
594  a = dd*weights[il+1][0][in+1];
595  if(itest2) goto L2;
596  pdelta = &(NET.Delta[il+1][1]);
597  for(in1=1; in1<NET.Nneur[il+1];
598  in1++, pdelta++)
599  {
600  a += *pdelta *
601  weights[il+1][in1][in+1];
602  }
603 L2: NET.Delta[il][in] = a*deriv;
604  }
605 
606  } /* end of loop on layers */
607 
608 
609 /* update the weights */
610  if(eps==0)
611  {
612  for(il=1; il<NET.Nlayer; il++)
613  {
614  inm1 = NET.Nneur[il-1];
615  for(in=0; in<NET.Nneur[il]-3; in+=4)
616  {
617  a1 = NET.Delta[il][in];
618  a2 = NET.Delta[il][in+1];
619  a3 = NET.Delta[il][in+2];
620  a4 = NET.Delta[il][in+3];
621  pout = &(NET.Outn[il-1][0]);
622  weights[il][in][0] += a1;
623  weights[il][in+1][0] += a2;
624  weights[il][in+2][0] += a3;
625  weights[il][in+3][0] += a4;
626  weights[il][in][1] += a1* (*pout);
627  weights[il][in+1][1] += a2* (*pout);
628  weights[il][in+2][1] += a3* (*pout);
629  weights[il][in+3][1] += a4* (*pout);
630  pout++;
631  pw1 = &(weights[il][in][2]);
632  pw2 = &(weights[il][in+1][2]);
633  pw3 = &(weights[il][in+2][2]);
634  pw4 = &(weights[il][in+3][2]);
635  for(in1=2; in1<=inm1;
636  ++in1, ++pout, ++pw1, ++pw2,
637  ++pw3, ++pw4)
638  {
639  *pw1 += a1 * *pout;
640  *pw2 += a2 * *pout;
641  *pw3 += a3 * *pout;
642  *pw4 += a4 * *pout;
643  }
644  }
645  for(in=in; in<NET.Nneur[il]; in++)
646  {
647  a1 = NET.Delta[il][in];
648  pout = &(NET.Outn[il-1][0]);
649  weights[il][in][0] += a1;
650  weights[il][in][1] += a1* (*pout);
651  pout++;
652  pw1 = &(weights[il][in][2]);
653  for(in1=2; in1<=inm1;
654  ++in1, ++pout, ++pw1)
655  {
656  *pw1 += a1 * *pout;
657  }
658  }
659  }
660  }
661  else
662  {
663  for(il=1; il<NET.Nlayer; il++)
664  {
665  for(in=0; in<NET.Nneur[il]; in++)
666  {
667 
668  a = NET.Delta[il][in];
669  LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
670  NET.Weights[il][in][0] += LEARN.Odw[il][in][0];
671 
672  b = a*NET.Outn[il-1][0];
673  LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
674  NET.Weights[il][in][1] += LEARN.Odw[il][in][1];
675 
676  for(in1=2; in1<=NET.Nneur[il-1]; in1++)
677  {
678  b = a*NET.Outn[il-1][in1-1];
679  LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
680  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
681  }
682  }
683  }
684  }
685 
686  } /* end of loop on examples */
687  return(err);
688 }
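The two nested layer loops are standard backpropagation with the (negated) learning rate and the example weight folded into the output deltas. In formulas, with \eta = LEARN.eta, \omega the ponderation and f' the stored derivative Deriv1:

\delta^{(L)}_i=-\eta\,\omega\,\bigl(o^{(L)}_i-t_i\bigr)\,f'\!\bigl(a^{(L)}_i\bigr),\qquad
\delta^{(l)}_i=f'\!\bigl(a^{(l)}_i\bigr)\sum_k \delta^{(l+1)}_k\,w^{(l+1)}_{k,i},\qquad
\Delta w^{(l)}_{ij}=\delta^{(l)}_i\,o^{(l-1)}_j+\varepsilon\,\Delta w^{(l)}_{ij}\Big|_{\mathrm{prev}},

the momentum term \varepsilon (LEARN.epsilon) appearing only in the second, non-unrolled update branch.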
dbl MLP_Test ( int  ifile,
int  regul 
)

Definition at line 449 of file mlp_gen.cc.

References ifile, in, LEARN, MLP_Out_T(), MLP_Test_MM(), NET, PAT, and tmp.

Referenced by DecreaseSearch(), LineSearch(), LineSearchHyb(), MLP_ResLin(), and SetLambda().

450 {
451  dbl err, rrans;
452  int in,jn,ipat,ipati;
453 
454  dbl *tmp;
455 
456  tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
457  if(tmp == 0) /* not enough memory */
458  {
459  printf("not enough memory in MLP_Test\n");
460  err = 0;
461  for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
462  {
463  if(ifile==0)
464  {
465  ipati = ExamplesIndex[ipat];
466  }
467  else
468  {
469  ipati = ipat;
470  }
471  MLP_Out_T(PAT.Rin[ifile][ipati]);
472  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
473  {
474  rrans = (dbl) PAT.Rans[ifile][ipati][in];
475  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
476  (rrans-NET.Outn[NET.Nlayer-1][in])*
477  PAT.Pond[ifile][ipati];
478  }
479  }
480 
481  if(regul>=1)
482  {
483  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
484  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
485  {
486  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
487  NET.Weights[NET.Nlayer-1][in][jn];
488  }
489  }
490  free(tmp);
491  return(err);
492  }
493  else /* computation using matrix - matrix multiply */
494  {
495  err = MLP_Test_MM(ifile, tmp);
496  if(regul>=1)
497  {
498  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
499  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
500  {
501  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
502  NET.Weights[NET.Nlayer-1][in][jn];
503  }
504  }
505  free(tmp);
506  return(err);
507  }
508 }
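Both branches compute the same weighted quadratic error; with regul >= 1 the output-layer weights are additionally penalised:

E=\sum_{p}\omega_p\sum_{i}\bigl(t_{pi}-o^{(L)}_{pi}\bigr)^{2}
 +\lambda\sum_{i}\sum_{j}\bigl(w^{(L)}_{ij}\bigr)^{2},
\qquad \lambda=\mathrm{LEARN.Alambda}.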
int MLP_Train ( int *  ipat,
dbl *  err 
)

Definition at line 898 of file mlp_gen.cc.

References DeDwSum(), in, MLP_Out2(), NET, and PAT.

Referenced by MLP_Epoch().

899 {
900  int in;
901 
902 /* if(*ipat>=PAT.Npat[0]) return(1);*/
903  if(*ipat<0) return(2);
904 
905 /* MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
906  MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
907  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
908  {
909  *err += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
910  *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
911  PAT.Pond[0][*ipat];
912  }
913  DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
914  return(0);
915 }
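MLP_Epoch(), the only listed caller, is not reproduced on this page. A minimal sketch of a batch epoch built from this routine, assuming ExamplesIndex has already been allocated:

/* Sketch: one batch epoch accumulating error and gradients */
dbl epoch_error(void)
{
 dbl err = 0;
 int ipat, ii;
 ShuffleExamples(PAT.Npat[0], ExamplesIndex);
 for(ipat=0; ipat<PAT.Npat[0]; ipat++)
 {
  ii = ExamplesIndex[ipat];
  MLP_Train(&ii, &err);   /* forward pass + DeDwSum gradient accumulation */
 }
 return err;
}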
int NormalizeInputs ( )

Definition at line 3376 of file mlp_gen.cc.

References j, mean, MLP_StatInputs(), NET, PAT, sigma, and STAT.

3377 {
3378  int j, ipat;
3379  dbl *mean, *sigma, *minimum, *maximum;
3380 
3381 /* allocate memory */
3382  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3383  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3384  STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3385  STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3386  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3387  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3388 
3389  if(mean == 0 || sigma == 0 || minimum == 0
3390  || maximum == 0 || STAT.mean == 0 ||
3391  STAT.sigma == 0) return -111;
3392 
3393  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],
3394  mean,sigma,minimum,maximum);
3395 
3396  if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
3397  for(j=0;j<NET.Nneur[0];j++)
3398  {
3399  if(NET.Debug>=1)
3400  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3401  mean[j],sigma[j],minimum[j],maximum[j]);
3402 
3403 /* store mean and sigma for output function */
3404  STAT.mean[j] = mean[j];
3405  STAT.sigma[j] = sigma[j];
3406 
3407 /* finally apply the normalization */
3408  for(ipat=0;ipat<PAT.Npat[0];ipat++)
3409  {
3410  PAT.Rin[0][ipat][j] =
3411  (PAT.Rin[0][ipat][j]-(float) mean[j])/
3412  (float) sigma[j];
3413  }
3414  for(ipat=0;ipat<PAT.Npat[1];ipat++)
3415  {
3416  PAT.Rin[1][ipat][j] =
3417  (PAT.Rin[1][ipat][j]-(float) mean[j])/
3418  (float) sigma[j];
3419  }
3420  }
3421 
3422  free(mean);
3423  free(sigma);
3424  free(minimum);
3425  free(maximum);
3426  if(NET.Debug>=1) printf("\n");
3427  return 0;
3428 }
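The mean and sigma stored in STAT here are the same values that MLP_PrCFun() and MLP_PrFFun() later embed in the generated code when the NORM datacard is set, so an exported network applies exactly the normalisation it was trained with.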
void PrintWeights ( )

Definition at line 2166 of file mlp_gen.cc.

References i, and NET.

Referenced by MLP_Epoch().

2167 {
2168  int ilayer,ineur,i;
2169 
2170  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2171  {
2172  if(MessLang==1)
2173  {
2174  printf("Couche %d\n",ilayer);
2175  }
2176  else
2177  {
2178  printf("Layer %d\n",ilayer);
2179  }
2180  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2181  {
2182  if(MessLang==1)
2183  {
2184  printf("Neurone %d",ineur);
2185  }
2186  else
2187  {
2188  printf("Neuron %d",ineur);
2189  }
2190  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2191  {
2192  printf(" %f",
2193  (double) NET.Weights[ilayer][ineur][i]);
2194  }
2195  printf("\n");
2196  }
2197  printf("\n");
2198  }
2199 }
int ReadPatterns ( char *  filename,
int  ifile,
int *  inet,
int *  ilearn,
int *  iexamples 
)

Definition at line 2227 of file mlp_gen.cc.

References AllocPatterns(), CLEN, CountLexemes(), DIVERS, GetNetStructure(), getnLexemes(), i, ifile, l, LEARN, line, MLP_SetNet(), NET, nin, NLMAX, nout, np, p, PAT, ReadPatterns(), s, and s2.

Referenced by ReadPatterns().

2229 {
2230 char s[CLEN], s2[CLEN], cc[6], cc2[6];
2231 char otherfile[CLEN];
2232 double p;
2233 //int line,i,j;
2234 int line,i;
2235 //int l,ll,ipat,nmax,il,in,tf;
2236 int l,ll,ipat,nmax;
2237 int np=0; /* number of examples */
2238 int nin=0; /* number of inputs */
2239 int nout=0; /* number of outputs */
2240 int npon=0;
2241 int ntot, ierr;
2242 //char **ss;
2243 char **ss=0;
2244 FILE *LVQpat;
2245 int nlayer, nneur[NLMAX];
2246 
2247 printf("\nLoading file %s\n",filename);
2248 LVQpat=fopen(filename,"r");
2249 if(LVQpat == 0) return -1;
2250 
2251 line=0;
2252 
2253 while(fgets(s,CLEN,LVQpat))
2254  {
2255  if(*s=='N')
2256  {
2257  if(*(s+1)=='N') /* NNEU */
2258  {
2259  printf("Number of neurons %s",s);
2260  *inet = 1;
2261  sscanf(s,"%s %s",cc,s2);
2262  ierr = GetNetStructure(s2,&nlayer,nneur);
2263  if(ierr != 0) return ierr;
2264  ierr = MLP_SetNet(&nlayer,nneur);
2265  if(ierr != 0) return ierr;
2266  }
2267  else
2268  {
2269  sscanf(s,"%s %d",cc,&l);
2270  if(*(cc+1)=='P') /* NPAT */
2271  {
2272  np=l;
2273  printf("Number of patterns %d\n",np);
2274  }
2275  else if(*(cc+1)=='I') /* NINP */
2276  {
2277  nin=l;
2278  PAT.Nin = nin;
2279  printf("Number of inputs %d\n",nin);
2280  }
2281  else if(*(cc+1)=='O' && *(cc+2)=='U') /* NOUT */
2282  {
2283  nout=l;
2284  PAT.Nout = nout;
2285  printf("Number of outputs %d\n",nout);
2286  }
2287  else if(*(cc+1)=='O' && *(cc+2)=='R') /* NORM */
2288  {
2289  DIVERS.Norm=l;
2290  if(l==1) printf("Normalize inputs\n");
2291  }
2292 /* obsolete datacard NLAY */
2293  else if(*(cc+1)=='L')
2294  {
2295  printf("NLAY datacard is no longer needed\n");
2296  }
2297  else if(*(cc+1)=='E') /* NEPO */
2298  {
2299  LEARN.Nepoch=l;
2300  printf("Number of epochs %d\n",l);
2301  }
2302  else if(*(cc+1)=='R') /* NRES */
2303  {
2304  LEARN.Nreset=l;
2305  printf(
2306  "Reset to steepest descent every %d epochs\n",
2307  l);
2308  }
2309  }
2310  }
2311  else if(*s=='L')
2312  {
2313  if(*(s+1)=='P') /* LPAR */
2314  {
2315  sscanf(s,"%s %le",cc,&p);
2316  printf("Learning parameter %f\n",p);
2317  LEARN.eta = (dbl) p;
2318  }
2319  else if(*(s+1)=='M') /* LMET */
2320  {
2321  *ilearn = 1;
2322  sscanf(s,"%s %d",cc,&(LEARN.Meth));
2323  printf("Learning method = ");
2324  switch(LEARN.Meth)
2325  {
2326  case 1: printf("Stochastic Minimization\n");
2327  break;
2328  case 2: printf("Steepest descent with fixed step\n");
2329  break;
2330  case 3: printf("Steepest descent with line search\n"); break;
2331  case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
2332  case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
2333  break;
2334  case 6: printf("BFGS\n");
2335  break;
2336  case 7: printf("Hybrid BFGS-linear\n");
2337  break;
2338  default: printf("Error: unknown method\n"); break;
2339  }
2340 
2341  }
2342  else if(*(s+1)=='T') /* LTAU */
2343  {
2344  sscanf(s,"%s %lf",cc,&p);
2345  printf("Tau %f\n",p);
2346  LEARN.Tau = (dbl) p;
2347  }
2348  else if(*(s+1)=='A') /* LAMB */
2349  {
2350  sscanf(s,"%s %lf",cc,&p);
2351  printf("Lambda %f\n",p);
2352  LEARN.Lambda = (dbl) p;
2353  }
2354  }
2355  else if(*s=='F')
2356  {
2357  if(*(s+1)=='S') /* FSPO */
2358  {
2359  sscanf(s,"%s %le",cc,&p);
2360  printf("Flat spot elimination parameter %f\n",p);
2361  LEARN.delta = (dbl) p;
2362  }
2363  else if(*(s+1)=='I') /* FILE */
2364  {
2365  sscanf(s,"%s %s",cc,otherfile);
2366  ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
2367  if(ierr != 0) return ierr;
2368  }
2369  }
2370  else if(*s=='M') /* momentum */
2371  {
2372  sscanf(s,"%s %le",cc,&p);
2373  printf("Momentum term %f\n",p);
2374  LEARN.epsilon = (dbl) p;
2375  }
2376  else if(*s=='O') /* OUTx */
2377  {
2378  if(*(s+3)=='W') /* OUTW */
2379  {
2380  sscanf(s,"%s %d",cc,&OutputWeights);
2381  if(OutputWeights == 0)
2382  {
2383  printf("Never write file weights.out\n");
2384  }
2385  else if(OutputWeights == -1)
2386  {
2387  printf("Write weights to output file at the end\n");
2388  }
2389  else
2390  {
2391  printf("Write weights to file every %d epochs\n",
2392  OutputWeights);
2393  }
2394  }
2395  else if(*(s+3)=='F') /* OUTF */
2396  {
2397  sscanf(s,"%s %s",cc,cc2);
2398  if(*cc2=='F' || *cc2=='C')
2399  {
2400  DIVERS.Outf = *cc2;
2401  }
2402  else
2403  {
2404  printf(" *** Error while loading file %s at line %s :",
2405  filename,s);
2406  printf(" unknown language\n");
2407  }
2408  }
2409  else
2410  {
2411  printf(" *** Error while loading file %s at line %s\n",
2412  filename,s);
2413  }
2414  }
2415  else if(*s=='R') /* RDWT */
2416  {
2417  sscanf(s,"%s %d",cc,&(NET.Rdwt));
2418  if(NET.Rdwt == 0)
2419  {
2420  printf("Random weights \n");
2421  }
2422  else
2423  {
2424  printf("Read weights from file weights.in\n");
2425  }
2426  }
2427  else if(*s=='S') /* STAT */
2428  {
2429  sscanf(s,"%s %d",cc,&(DIVERS.Stat));
2430  }
2431 /* else if(*s=='T') TFUN
2432  {
2433  sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
2434  SetTransFunc(il,in,tf);
2435  } */
2436  else if(*s=='H') /* HESS */
2437  {
2438  sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
2439  }
2440  else if(*s=='D')
2441  {
2442  if(*(s+1)=='C') /* DCAY */
2443  {
2444  sscanf(s,"%s %le",cc,&p);
2445  LEARN.Decay = p;
2446  printf("Learning parameter decay %f\n",
2447  (double) LEARN.Decay);
2448  }
2449  if(*(s+1)=='B') /* DBIN */
2450  {
2451  sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
2452  printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
2453  }
2454  if(*(s+1)=='E') /* DEBU */
2455  {
2456  sscanf(s,"%s %d",cc,&(NET.Debug));
2457  printf("Debug mode %d\n",NET.Debug);
2458  }
2459  }
2460  else if(*s=='P') /* POND */
2461  {
2462  npon = CountLexemes(s);
2463  if(npon==2)
2464  {
2465  sscanf(s,"%s %d",cc,&(PAT.Iponde));
2466  }
2467  else
2468  {
2469  ss = (char**) malloc((npon+1)*sizeof(char*));
2470  for(i=0;i<=npon;i++)
2471  ss[i]=(char*) malloc(40*sizeof(char));
2472  getnLexemes(npon,s,ss);
2473  sscanf(ss[1],"%d",&(PAT.Iponde));
2474  for(i=2;i<npon;i++)
2475  {
2476  sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
2477  }
2478  }
2479  if(PAT.Iponde==0)
2480  {
2481  npon = 0;
2482  }
2483  else
2484  {
2485  npon = 1;
2486  }
2487  }
2488  else if(*s=='#') /* comments */
2489  {
2490  }
2491  else /* example itself */
2492  {
2493  if(np==0) return 1;
2494  if(nin==0) return 2;
2495  if(nout==0) return 3;
2496 
2497 
2498 /* store number of examples and allocate memory */
2499  if(line==0)
2500  {
2501  PAT.Npat[ifile] = np;
2502  ierr = AllocPatterns(ifile,np,nin,nout,0);
2503  if(ierr != 0) return ierr;
2504  *iexamples = 1;
2505  }
2506 
2507 /* now get example */
2508 
2509  line++;
2510  ll = (line-1)%2;
2511  ipat = (line-1)/2;
2512  /* printf("Loading event \t %d\r",ipat);*/
2513 /* if(ipat>NPMAX)
2514  {
2515  printf("Too many examples in file\n");
2516  printf("Loading %d examples\n",NPMAX);
2517  PAT.Npat[ifile] = NPMAX;
2518  break;
2519  }
2520 */
2521 
2522 /* allocate the number of lines */
2523 
2524  if(line==1)
2525  {
2526 
2527  nmax = nin;
2528  if(nout>nin) nmax=nout;
2529  ss = (char**) malloc((nmax+1)*sizeof(char*));
2530  if(ss == 0) return -111;
2531  for(i=0;i<=nmax;i++)
2532  {
2533  ss[i]=(char*) malloc(40*sizeof(char));
2534  if(ss[i] == 0) return -111;
2535  }
2536  }
2537 
2538  if(ll==0) /* inputs */
2539  {
2540  getnLexemes(nin,s,ss);
2541  for(i=0;i<nin;i++)
2542  {
2543  sscanf(ss[i],"%le",&p);
2544  PAT.Rin[ifile][ipat][i] = (type_pat) p;
2545  }
2546  }
2547  else /* answers */
2548  {
2549  ntot=nout+npon;
2550  getnLexemes(ntot,s,ss);
2551  for(i=0;i<ntot;i++)
2552  {
2553  sscanf(ss[i],"%le",&p);
2554  if(i<nout)
2555  {
2556  PAT.Rans[ifile][ipat][i] = (type_pat) p;
2557  }
2558  else
2559  {
2560  if(PAT.Iponde==1)
2561  {
2562  PAT.Pond[ifile][ipat] =
2563  (type_pat) p;
2564  }
2565  else
2566  {
2567  PAT.Pond[ifile][ipat] =
2568  (type_pat) PAT.Ponds[(int) p -1];
2569  }
2570  }
2571  }
2572  }
2573  }
2574  }
2575  printf("%d examples loaded \n\n",PAT.Npat[ifile]);
2576  fclose(LVQpat);
2577  return 0;
2578 }
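Collecting the datacards handled above, a minimal pattern file could look like the following. Input and answer lines alternate (ll = (line-1)%2); the comma-separated NNEU argument is an assumption, since GetNetStructure() is not listed on this page, and all values are arbitrary:

# two examples, 2 inputs, 1 output, hybrid BFGS-linear training
NNEU 2,2,1
NPAT 2
NINP 2
NOUT 1
LMET 7
LPAR 0.05
0.1 0.9
1.0
0.8 0.2
0.0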
int SaveWeights ( char *  filename,
int  iepoch 
)

Definition at line 2970 of file mlp_gen.cc.

References i, and NET.

Referenced by PhysicsTools::MLP::save().

2971 {
2972  FILE *W;
2973  int ilayer,ineur,i;
2974 
2975  W=fopen(filename,"w");
2976  if(W==0) return -1;
2977 
2978  fprintf(W,"# network structure ");
2979  for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
2980  {
2981  fprintf(W,"%d ",NET.Nneur[ilayer]);
2982  }
2983 
2984  fprintf(W,"\n %d\n",iepoch);
2985  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2986  {
2987  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2988  {
2989  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2990  {
2991  fprintf(W," %1.15e\n",
2992  (double) NET.Weights[ilayer][ineur][i]);
2993  }
2994  }
2995  }
2996  fclose(W);
2997  return 0;
2998 }
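For a 2-2-1 network saved at epoch 100 the file has the following shape (weight values are placeholders): the structure header, the epoch number, then one weight per line, bias first within each neuron, hidden layer before output layer:

# network structure 2 2 1 
 100
 1.000000000000000e-01
 2.000000000000000e-01
 -3.000000000000000e-01
 4.000000000000000e-01
 -5.000000000000000e-01
 6.000000000000000e-01
 7.000000000000000e-02
 8.000000000000000e-01
 9.000000000000000e-01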
void SetDefaultFuncs ( )

Definition at line 1226 of file mlp_gen.cc.

References in, and NET.

Referenced by MLP_SetNet().

1227 {
1228  int il,in;
1229  for(il=0; il<NET.Nlayer; il++) {
1230  for(in=0; in<NET.Nneur[il]; in++) {
1231  NET.T_func[il][in] = 2;
1232  if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
1233  }
1234  }
1235 
1236 }
void SetLambda ( double  Wmax)

Definition at line 1947 of file mlp_gen.cc.

References LEARN, MLP_Test(), and NET.

Referenced by MLP_Epoch().

1948 {
1949  dbl err;
1950  err = MLP_Test(0,0);
1951  LEARN.Alambda =
1952  LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
1953 }
int SetTransFunc ( int  layer,
int  neuron,
int  func 
)

Definition at line 1203 of file mlp_gen.cc.

References NET, and NLMAX.

1205 {
1206  if(layer>NLMAX) return(1);
1207 /* if(neuron>NMAX) return(2);*/
1208 
1209  NET.T_func[layer-1][neuron-1] = func;
1210 
1211  return(0);
1212 }
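Unlike the internal loops, layer and neuron are 1-based in this call (the assignment lands at [layer-1][neuron-1]). A sketch:

/* Sketch: give the first output neuron a sigmoid response (code 2) */
int make_output_sigmoid(void)
{
 return SetTransFunc(NET.Nlayer, 1, 2);   /* 2 = sigmoid, 1 = linear, 0 = zero */
}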
int ShuffleExamples ( int  n,
int *  index 
)

Definition at line 2100 of file mlp_gen.cc.

References a, i, ii, MLP_Rand(), and n.

Referenced by MLP_Epoch(), and MLP_Stochastic().

2101 {
2102  int i,ii,itmp;
2103  dbl a = (dbl) (n-1);
2104 
2105  for(i=0;i<n;i++)
2106  {
2107  ii = (int) MLP_Rand(0.,a);
2108  itmp = index[ii];
2109  index[ii] = index[i];
2110  index[i] = itmp;
2111  }
2112  return 0;
2113 }
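Each step swaps position i with a position drawn uniformly over the whole array, i.e. a random-transposition shuffle rather than the strictly unbiased Fisher-Yates variant (which would draw ii from [i, n-1]); for randomising the order of training examples the residual bias is immaterial.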
void SteepestDir ( )

Definition at line 1247 of file mlp_gen.cc.

References in, LEARN, and NET.

Referenced by MLP_Epoch().

1248 {
1249  int il,in,jn;
1250  for(il=1; il<NET.Nlayer; il++)
1251  for(in=0; in<NET.Nneur[il]; in++)
1252  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1253  dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
1254 }
int StochStep ( )

Definition at line 968 of file mlp_gen.cc.

References eta, in, LEARN, and NET.

Referenced by MLP_Epoch().

969 {
970  int il, in1, in;
971  dbl eta, eps, epseta;
972 
973  eta = -LEARN.eta;
974  eps = LEARN.epsilon;
975  epseta = eps/eta;
976  for(il=NET.Nlayer-1; il>0; il--) {
977  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
978 
979  /* compute delta weights */
980  for(in=0; in<NET.Nneur[il]; in++) {
981  LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
982  + epseta * LEARN.Odw[il][in][in1]);
983  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
984  }
985 
986  }
987  }
988 
989  return(0);
990 }
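With eta = -LEARN.eta and epseta = epsilon/eta, the inner update is the classic gradient-descent-with-momentum step:

\Delta w_t = -\eta\,\frac{\partial E}{\partial w} + \varepsilon\,\Delta w_{t-1},
\qquad w \leftarrow w + \Delta w_t .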
int StochStepHyb ( )

Definition at line 929 of file mlp_gen.cc.

References eta, in, LEARN, MLP_ResLin(), and NET.

930 {
931  int il, in1, in;
932  dbl eta, eps;
933 
934  eta = LEARN.eta;
935  eps = LEARN.epsilon;
936  for(il=NET.Nlayer-2; il>0; il--) {
937 
938  for(in=0; in<NET.Nneur[il]; in++) {
939 
940  /* compute delta weights */
941  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
942  LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
943  + eps * LEARN.Odw[il][in][in1];
944  }
945 
946  /* update weights */
947  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
948  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
949  }
950  }
951  }
952  MLP_ResLin();
953  return(0);
954 }
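The layer loop deliberately stops short of the output layer (il starts at NET.Nlayer-2): in the hybrid method the linear output weights are not stepped by gradient descent but refitted exactly by the closing MLP_ResLin() call.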

Variable Documentation

dbl** Hessian MLP_HIDDEN

Definition at line 62 of file mlp_gen.h.