CMS 3D CMS Logo

Classes | Macros | Typedefs | Functions | Variables
mlp_gen.h File Reference

Go to the source code of this file.

Classes

struct  divers_
 
struct  learn_
 
struct  net_
 
struct  pat_
 
struct  stat_
 

Macros

#define DIVERS   divers_
 
#define LEARN   learn_
 
#define NET   net_
 
#define PAT   pat_
 
#define STAT   stat_
 

Typedefs

typedef double dbl
 
typedef double type_pat
 

Functions

int AllocNetwork (int Nlayer, int *Neurons) MLP_HIDDEN
 
int AllocPatterns (int ifile, int npat, int nin, int nout, int iadd) MLP_HIDDEN
 
void AllocWeights () MLP_HIDDEN
 
void BFGSdir (int Nweights) MLP_HIDDEN
 
void CGDir (dbl beta) MLP_HIDDEN
 
int CountLexemes (char *string) MLP_HIDDEN
 
int DecreaseSearch (dbl *alpmin, int *Ntest, dbl Err0) MLP_HIDDEN
 
dbl DeDwNorm () MLP_HIDDEN
 
dbl DeDwProd () MLP_HIDDEN
 
void DeDwSave () MLP_HIDDEN
 
void DeDwSaveZero () MLP_HIDDEN
 
void DeDwScale (int Nexamples) MLP_HIDDEN
 
int DeDwSum (type_pat *ans, dbl *out, int ipat) MLP_HIDDEN
 
void DeDwZero () MLP_HIDDEN
 
dbl DerivDir () MLP_HIDDEN
 
void EtaDecay () MLP_HIDDEN
 
int FixedStep (dbl alpha) MLP_HIDDEN
 
void FreeNetwork () MLP_HIDDEN
 
int FreePatterns (int ifile) MLP_HIDDEN
 
void FreeWeights () MLP_HIDDEN
 
int GetBFGSH (int Nweights) MLP_HIDDEN
 
void GetGammaDelta () MLP_HIDDEN
 
void getLexemes (char *s, char **ss) MLP_HIDDEN
 
int GetNetStructure (char *s, int *Nlayer, int *Nneur) MLP_HIDDEN
 
void getnLexemes (int n, char *s, char **ss) MLP_HIDDEN
 
void InitBFGSH (int Nweights) MLP_HIDDEN
 
void InitWeights () MLP_HIDDEN
 
int LearnAlloc () MLP_HIDDEN
 
void LearnFree () MLP_HIDDEN
 
int LineSearch (dbl *alpmin, int *Ntest, dbl Err0) MLP_HIDDEN
 
int LineSearchHyb (dbl *alpmin, int *Ntest) MLP_HIDDEN
 
int LoadWeights (char *filename, int *iepoch) MLP_HIDDEN
 
dbl MLP_Epoch (int iepoch, dbl *alpmin, int *ntest) MLP_HIDDEN
 
void MLP_Line (dbl ***w0, dbl alpha) MLP_HIDDEN
 
void MLP_LineHyb (dbl ***w0, dbl alpha) MLP_HIDDEN
 
void MLP_MatrixVector (dbl *M, type_pat *v, dbl *r, int n, int m) MLP_HIDDEN
 
void MLP_MatrixVectorBias (dbl *M, dbl *v, dbl *r, int n, int m) MLP_HIDDEN
 
void MLP_MM2rows (dbl *c, type_pat *a, dbl *b, int Ni, int Nj, int Nk, int NaOffs, int NbOffs) MLP_HIDDEN
 
void MLP_Out (type_pat *rrin, dbl *rrout) MLP_HIDDEN
 
void MLP_Out2 (type_pat *rrin) MLP_HIDDEN
 
void MLP_Out_T (type_pat *rrin) MLP_HIDDEN
 
int MLP_PrCFun (char *filename) MLP_HIDDEN
 
int MLP_PrFFun (char *filename) MLP_HIDDEN
 
int MLP_PrintInputStat () MLP_HIDDEN
 
double MLP_Rand (dbl min, dbl max) MLP_HIDDEN
 
void MLP_ResLin () MLP_HIDDEN
 
int MLP_SetNet (int *nl, int *nn) MLP_HIDDEN
 
int MLP_StatInputs (int Nexamples, int Ninputs, type_pat **inputs, dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum) MLP_HIDDEN
 
dbl MLP_Stochastic () MLP_HIDDEN
 
dbl MLP_Test (int ifile, int regul) MLP_HIDDEN
 
int MLP_Train (int *ipat, dbl *err) MLP_HIDDEN
 
int NormalizeInputs () MLP_HIDDEN
 
void PrintWeights () MLP_HIDDEN
 
int ReadPatterns (char *filename, int ifile, int *inet, int *ilearn, int *iexamples) MLP_HIDDEN
 
int SaveWeights (char *filename, int iepoch) MLP_HIDDEN
 
void SetDefaultFuncs () MLP_HIDDEN
 
void SetLambda (double Wmax) MLP_HIDDEN
 
int SetTransFunc (int layer, int neuron, int func) MLP_HIDDEN
 
int ShuffleExamples (int n, int *index) MLP_HIDDEN
 
void SteepestDir () MLP_HIDDEN
 
int StochStep () MLP_HIDDEN
 
int StochStepHyb () MLP_HIDDEN
 

Variables

struct net_ MLP_HIDDEN
 

Macro Definition Documentation

#define DIVERS   divers_

Definition at line 54 of file mlp_gen.h.

Referenced by MLP_PrCFun(), MLP_PrFFun(), and ReadPatterns().

#define LEARN   learn_

Definition at line 36 of file mlp_gen.h.

#define NET   net_

Definition at line 25 of file mlp_gen.h.

#define PAT   pat_

Definition at line 45 of file mlp_gen.h.

#define STAT   stat_

Definition at line 60 of file mlp_gen.h.

Referenced by MLP_PrCFun(), MLP_PrFFun(), and NormalizeInputs().

Typedef Documentation

typedef double dbl

Definition at line 12 of file mlp_gen.h.

typedef double type_pat

Definition at line 13 of file mlp_gen.h.

Function Documentation

int AllocNetwork ( int  Nlayer,
int *  Neurons 
)

Definition at line 3451 of file mlp_gen.cc.

References FreeNetwork(), mps_fire::i, gen::k, checklumidiff::l, LEARN, and NET.

Referenced by MLP_SetNet().

3452 {
3453  int i, j, k, l;
3454 
3455  if(NetMemory != 0) FreeNetwork();
3456  NetMemory = 1;
3457 
3458  NET.Nneur = (int *) malloc(Nlayer*sizeof(int));
3459  if(NET.Nneur == nullptr) return -111;
3460 
3461  NET.T_func = (int **) malloc(Nlayer*sizeof(int *));
3462  NET.Deriv1 = (dbl **) malloc(Nlayer*sizeof(dbl *));
3463  NET.Inn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3464  NET.Outn = (dbl **) malloc(Nlayer*sizeof(dbl *));
3465  NET.Delta = (dbl **) malloc(Nlayer*sizeof(dbl *));
3466  if(NET.T_func == nullptr || NET.Deriv1 == nullptr
3467  || NET.Inn == nullptr || NET.Outn == nullptr
3468  || NET.Delta == nullptr) return -111;
3469 
3470  for(i=0; i<Nlayer; i++)
3471  {
3472  NET.T_func[i] = (int *) malloc(Neurons[i]*sizeof(int));
3473  NET.Deriv1[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3474  NET.Inn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3475  NET.Outn[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3476  NET.Delta[i] = (dbl *) malloc(Neurons[i]*sizeof(dbl));
3477  if(NET.T_func[i] == nullptr || NET.Deriv1[i] == nullptr
3478  || NET.Inn[i] == nullptr || NET.Outn[i] == nullptr
3479  || NET.Delta[i] ==nullptr ) return -111;
3480  }
3481 
3482  NET.Weights = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3483  NET.vWeights = (dbl **) malloc(Nlayer*sizeof(dbl *));
3484  LEARN.Odw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3485  LEARN.ODeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3486  LEARN.DeDw = (dbl ***) malloc(Nlayer*sizeof(dbl **));
3487  if(NET.Weights == nullptr || NET.vWeights == nullptr
3488  || LEARN.Odw == nullptr || LEARN.ODeDw == nullptr
3489  || LEARN.DeDw == nullptr) return -111;
3490 
3491  for(i=1; i<Nlayer; i++)
3492  {
3493  k = Neurons[i-1]+1;
3494  NET.vWeights[i] = (dbl *) malloc(k * Neurons[i] *
3495  sizeof(dbl));
3496  NET.Weights[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3497  LEARN.Odw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3498  LEARN.ODeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3499  LEARN.DeDw[i] = (dbl **) malloc(Neurons[i]*sizeof(dbl *));
3500  if(NET.Weights[i] == nullptr || NET.vWeights[i] == nullptr
3501  || LEARN.Odw[i] == nullptr || LEARN.ODeDw[i] == nullptr
3502  || LEARN.DeDw[i] == nullptr) return -111;
3503 
3504  for(j=0; j<Neurons[i]; j++)
3505  {
3506  NET.Weights[i][j] = &(NET.vWeights[i][j*k]);
3507  LEARN.Odw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3508  LEARN.ODeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3509  LEARN.DeDw[i][j] = (dbl *) malloc(k*sizeof(dbl));
3510  if(LEARN.Odw[i][j] == nullptr
3511  || LEARN.ODeDw[i][j] == nullptr
3512  || LEARN.DeDw[i][j] == nullptr) return -111;
3513 
3514  for(l=0; l<k; l++)
3515  {
3516  LEARN.Odw[i][j][l] = 0;
3517  LEARN.ODeDw[i][j][l] = 0;
3518  }
3519  }
3520  }
3521  return 0;
3522 }
#define NET
Definition: mlp_gen.h:25
int NetMemory
Definition: mlp_gen.cc:30
int k[5][pyjets_maxn]
#define LEARN
Definition: mlp_gen.h:36
void FreeNetwork()
Definition: mlp_gen.cc:3533
double dbl
Definition: mlp_gen.h:12
int AllocPatterns ( int  ifile,
int  npat,
int  nin,
int  nout,
int  iadd 
)

Definition at line 3080 of file mlp_gen.cc.

References FreePatterns(), compare_using_db::ifile, PAT, and tmp.

Referenced by PhysicsTools::MLP::init(), and ReadPatterns().

3081 {
3082  int j;
3083  type_pat *tmp, *tmp3;
3084  type_pat **tmp2;
3085  int ntot;
3086 
3087  if(ifile>1 || ifile<0) return(1);
3088 /* scanf("%d",&j); */
3089  if(ExamplesMemory==0)
3090  {
3091  ExamplesMemory=1;
3092  PAT.Pond = (type_pat **) malloc(2*sizeof(dbl*));
3093  PAT.Rin = (type_pat***) malloc(2*sizeof(type_pat**));
3094  PAT.Rans = (type_pat***) malloc(2*sizeof(type_pat**));
3095  PAT.vRin = (type_pat**) malloc(2*sizeof(type_pat*));
3096  if(PAT.Pond == nullptr || PAT.Rin == nullptr
3097  || PAT.Rans == nullptr || PAT.vRin == nullptr) return -111;
3098  }
3099 
3100 
3101 /* if iadd=0, check that memory not already allocated. Otherwise free it */
3102  if(iadd==0 && PatMemory[ifile]!=0)
3103  {
3104  FreePatterns(ifile);
3105  }
3106 
3107 /* allocate memory and initialize ponderations */
3108  if(iadd==0 || PatMemory[ifile]==0)
3109  {
3110  PatMemory[ifile] = 1;
3111  PAT.Pond[ifile] = (type_pat*) malloc(npat*sizeof(type_pat));
3112  if(PAT.Pond[ifile] == nullptr) return -111;
3113  for(j=0; j<npat; j++)
3114  PAT.Pond[ifile][j] = 1;
3115 
3116  PAT.Rin[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3117  if(PAT.Rin[ifile] == nullptr) return -111;
3118  PAT.Rans[ifile] = (type_pat**) malloc(npat*sizeof(type_pat*));
3119  if(PAT.Rans[ifile] == nullptr) return -111;
3120 
3121  PAT.vRin[ifile] = (type_pat *) malloc(npat*(nin+1)*
3122  sizeof(type_pat));
3123  if(PAT.vRin[ifile] == nullptr) return -111;
3124 
3125  for(j=0; j<npat; j++)
3126  {
3127  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3128  PAT.vRin[ifile][j*(nin+1)] = 1;
3129  }
3130  for(j=0; j<npat; j++)
3131  {
3132  PAT.Rans[ifile][j] = (type_pat*) malloc(nout*sizeof(type_pat));
3133  if(PAT.Rans[ifile][j] == nullptr) return -111;
3134  }
3135  PAT.Npat[ifile] = npat;
3136 
3137  if(ifile==0)
3138  {
3139  ExamplesIndex = (int *) malloc(npat*sizeof(int));
3140  if(ExamplesIndex == nullptr) return -111;
3141  for(j=0; j<npat; j++) ExamplesIndex[j] = j;
3142  }
3143  }
3144  else /* add examples */
3145  {
3146  ntot = PAT.Npat[ifile]+npat;
3147 
3148 /* event weighting */
3149  tmp = (type_pat *) malloc(ntot*sizeof(type_pat));
3150  if(tmp == nullptr) return -111;
3151 
3152  for(j=0; j<PAT.Npat[ifile]; j++)
3153  {
3154  tmp[j] = PAT.Pond[ifile][j];
3155  }
3156  for(j=PAT.Npat[ifile];j<ntot;j++)
3157  {
3158  tmp[j] = 1;
3159  }
3160  if(PatMemory[ifile]==1) free(PAT.Pond[ifile]);
3161  PAT.Pond[ifile] = tmp;
3162 
3163 /* examples */
3164 /* tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3165  for(j=0; j<PAT.Npat[ifile]; j++)
3166  {
3167  tmp2[j] = PAT.Rin[ifile][j];
3168  }
3169  for(j=PAT.Npat[ifile];j<ntot;j++)
3170  {
3171  tmp2[j] = (type_pat*) malloc(nin*sizeof(type_pat));
3172  }
3173  if(PatMemory[ifile]==1) free(PAT.Rin[ifile]);
3174  PAT.Rin[ifile] = tmp2; */
3175 
3176  tmp3 = (type_pat *) malloc(ntot*(nin+1)*sizeof(type_pat));
3177  if(tmp3 == nullptr) return -111;
3178 
3179  for(j=0; j<PAT.Npat[ifile]*(nin+1); j++)
3180  {
3181  tmp3[j] = PAT.vRin[ifile][j];
3182  }
3183  if(PatMemory[ifile]==1) free(PAT.vRin[ifile]);
3184  PAT.vRin[ifile] = tmp3;
3185  for(j=0; j<ntot; j++)
3186  {
3187  PAT.Rin[ifile][j] = &(PAT.vRin[ifile][j*(nin+1)+1]);
3188  PAT.vRin[ifile][j*(nin+1)] = 1;
3189  }
3190 
3191  tmp2 = (type_pat **) malloc(ntot*sizeof(type_pat*));
3192  if(tmp2 == nullptr) return -111;
3193  for(j=0; j<PAT.Npat[ifile]; j++)
3194  {
3195  tmp2[j] = PAT.Rans[ifile][j];
3196  }
3197  for(j=PAT.Npat[ifile];j<ntot;j++)
3198  {
3199  tmp2[j] = (type_pat*) malloc(nout*sizeof(type_pat));
3200  if(tmp2[j] == nullptr) return -111;
3201  }
3202  if(PatMemory[ifile]==1) free(PAT.Rans[ifile]);
3203  PAT.Rans[ifile] = tmp2;
3204  PAT.Npat[ifile] = ntot;
3205  PatMemory[ifile] = 1;
3206 
3207 /* indices */
3208  if(ifile==0)
3209  {
3210  free(ExamplesIndex);
3211  ExamplesIndex = (int *) malloc(ntot*sizeof(int));
3212  if(ExamplesIndex == nullptr) return -111;
3213  for(j=0; j<ntot; j++) ExamplesIndex[j] = j;
3214  }
3215  }
3216 
3217  return 0;
3218 }
int * ExamplesIndex
Definition: mlp_gen.cc:40
#define PAT
Definition: mlp_gen.h:45
int nin
double type_pat
Definition: mlp_gen.h:13
int PatMemory[2]
Definition: mlp_gen.cc:26
int ExamplesMemory
Definition: mlp_gen.cc:24
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
int nout
double dbl
Definition: mlp_gen.h:12
int FreePatterns(int ifile)
Definition: mlp_gen.cc:3235
void AllocWeights ( )
void BFGSdir ( int  Nweights)

Definition at line 1340 of file mlp_gen.cc.

References g, mps_fire::i, recoMuon::in, GetRecoTauVFromDQM_MC_cff::kk, LEARN, NET, and alignCSCRings::s.

Referenced by MLP_Epoch().

1341 {
1342  dbl *g, *s;
1343  int kk=0;
1344  int il,i,j,in,jn;
1345 
1346  g = (dbl*) malloc(NET.Nweights*sizeof(dbl));
1347  s = (dbl*) malloc(Nweights*sizeof(dbl));
1348 
1349  for(il=1; kk<Nweights; il++)
1350  for(in=0; in<NET.Nneur[il]; in++)
1351  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1352  {
1353  g[kk] = LEARN.DeDw[il][in][jn];
1354  kk++;
1355  }
1356  for(i=0; i<Nweights; i++)
1357  {
1358  s[i] = 0;
1359  for(j=0; j<Nweights; j++)
1360  {
1361  s[i] += BFGSH[i][j] * g[j];
1362  }
1363  }
1364 
1365  kk = 0;
1366  for(il=1; kk<Nweights; il++)
1367  for(in=0; in<NET.Nneur[il]; in++)
1368  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1369  {
1370  dir[il][in][jn] = -s[kk];
1371  kk++;
1372  }
1373  free(g);
1374  free(s);
1375 }
The Signals That Services Can Subscribe To. This is based on ActivityRegistry and is current per Services. Services can connect to the signals distributed by the ActivityRegistry in order to monitor the activity of the application. Each possible callback has some defined signature, which we here list in angle brackets, e.g. g
Definition: Activities.doc:4
dbl ** BFGSH
Definition: mlp_gen.cc:37
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl *** dir
Definition: mlp_gen.cc:35
void CGDir ( dbl  beta)

Definition at line 1266 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1267 {
1268  int il,in,jn;
1269  for(il=1; il<NET.Nlayer; il++)
1270  for(in=0; in<NET.Nneur[il]; in++)
1271  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1272  {
1273  dir[il][in][jn] = -LEARN.DeDw[il][in][jn]+
1274  beta*dir[il][in][jn];
1275  }
1276 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
dbl *** dir
Definition: mlp_gen.cc:35
int CountLexemes ( char *  string)

Definition at line 2585 of file mlp_gen.cc.

References mps_fire::i, and tmp.

Referenced by getLexemes(), and ReadPatterns().

2586 {
2587  char tmp[1024];
2588  int i=0;
2589 
2590  strcpy(tmp,s);
2591  char* saveptr;
2592  if (strtok_r(tmp," ",&saveptr))
2593  {
2594  i=1;
2595  while (strtok_r(nullptr," ",&saveptr)) i++;
2596  }
2597  return i;
2598 }
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
int DecreaseSearch ( dbl alpmin,
int *  Ntest,
dbl  Err0 
)

Definition at line 1616 of file mlp_gen.cc.

References recoMuon::in, LEARN, MLP_Line(), MLP_Test(), NET, and metsig::tau.

1617 {
1618  dbl ***w0;
1619  dbl alpha2;
1620  dbl err1, err2;
1621  dbl tau;
1622  int icount, il, in, jn;
1623 
1624  tau=LEARN.Tau;
1625 
1626 /* store weights before line search */
1627 
1628  *Ntest = 0;
1629  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1630  for(il=1; il<NET.Nlayer; il++)
1631  {
1632  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1633  for(in=0; in<NET.Nneur[il]; in++)
1634  {
1635  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1636  sizeof(dbl));
1637  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1638  {
1639  w0[il][in][jn] = NET.Weights[il][in][jn];
1640  }
1641  }
1642  }
1643 
1644 /* compute error(w0) */
1645 
1646 /* err1 = MLP_Test(0,0);
1647  (*Ntest) ++;*/
1648  err1 = Err0;
1649 
1650  if(NET.Debug>=4) printf("err depart= %f\n",err1);
1651 
1652  *alpmin = 0;
1653  alpha2 = 0.05;
1654  MLP_Line(w0,alpha2);
1655  err2 = MLP_Test(0,0);
1656  (*Ntest) ++;
1657 
1658  if(err2<err1)
1659  {
1660  *alpmin = alpha2;
1661  }
1662  else
1663  {
1664 
1665 
1666  for(icount=1;icount<=100;icount++)
1667  {
1668  alpha2 = alpha2/tau;
1669  MLP_Line(w0,alpha2);
1670  err2 = MLP_Test(0,0);
1671  (*Ntest) ++;
1672  if(err1>err2) break;
1673  }
1674  if(icount>=100) /* line search fails */
1675  {
1676  MLP_Line(w0,0); /* reset weights */
1677  free(w0);
1678  return(1);
1679  }
1680  *alpmin = alpha2;
1681  }
1682 
1683 /* set the weights */
1684  MLP_Line(w0,*alpmin);
1685 
1686 /* store weight changes */
1687  for(il=1; il<NET.Nlayer; il++)
1688  for(in=0; in<NET.Nneur[il]; in++)
1689  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1690  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1691  - w0[il][in][jn];
1692 
1693  for(il=1; il<NET.Nlayer; il++)
1694  for(in=0; in<NET.Nneur[il]; in++)
1695  free(w0[il][in]);
1696  for(il=1; il<NET.Nlayer; il++)
1697  free(w0[il]);
1698  free(w0);
1699 
1700  return(0);
1701 }
#define NET
Definition: mlp_gen.h:25
void MLP_Line(dbl ***w0, dbl alpha)
Definition: mlp_gen.cc:1757
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl MLP_Test(int ifile, int regul)
Definition: mlp_gen.cc:446
dbl DeDwNorm ( )

Definition at line 998 of file mlp_gen.cc.

References createTree::dd, recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

999 {
1000  int il,in,jn;
1001  dbl dd=0;
1002  for(il=1; il<NET.Nlayer; il++)
1003  for(in=0; in<NET.Nneur[il]; in++)
1004  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1005  dd += LEARN.DeDw[il][in][jn]*
1006  LEARN.DeDw[il][in][jn];
1007  return(dd);
1008 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl DeDwProd ( )

Definition at line 1020 of file mlp_gen.cc.

References createTree::dd, recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1021 {
1022  int il,in,jn;
1023  dbl dd=0;
1024  for(il=1; il<NET.Nlayer; il++)
1025  for(in=0; in<NET.Nneur[il]; in++)
1026  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1027  dd += LEARN.DeDw[il][in][jn]*
1028  LEARN.ODeDw[il][in][jn];
1029  return(dd);
1030 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
void DeDwSave ( )

Definition at line 1078 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

1079 {
1080  int il, in, jn;
1081  for(il=1; il<NET.Nlayer; il++)
1082  for(in=0; in<NET.Nneur[il]; in++)
1083  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1084  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1085 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
void DeDwSaveZero ( )

Definition at line 1097 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1098 {
1099  int il, in, jn;
1100  for(il=1; il<NET.Nlayer; il++)
1101  for(in=0; in<NET.Nneur[il]; in++)
1102  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1103  {
1104  LEARN.ODeDw[il][in][jn] = LEARN.DeDw[il][in][jn];
1105  LEARN.DeDw[il][in][jn] = 0;
1106  }
1107 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
void DeDwScale ( int  Nexamples)

Definition at line 1060 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1061 {
1062  int il, in, jn;
1063  for(il=1; il<NET.Nlayer; il++)
1064  for(in=0; in<NET.Nneur[il]; in++)
1065  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1066  LEARN.DeDw[il][in][jn] /= (dbl) Nexamples;
1067 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
int DeDwSum ( type_pat ans,
dbl out,
int  ipat 
)

Definition at line 1123 of file mlp_gen.cc.

References a, b, cuy::ii, recoMuon::in, LEARN, NET, and PAT.

Referenced by MLP_Train().

1124 {
1125  int il, in1, in, ii;
1126 /* dbl err[NMAX][4]; */
1127  dbl deriv;
1128  dbl *pout, *pdedw, *pdelta;
1129  dbl a, b;
1130 /* char buf[50];*/
1131 
1132 /* output layer */
1133  b = (dbl) PAT.Pond[0][ipat];
1134  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
1135  {
1136  deriv = NET.Deriv1[NET.Nlayer-1][in];
1137  NET.Delta[NET.Nlayer-1][in] =
1138  (out[in] - (dbl) ans[in])*deriv*b;
1139  }
1140 
1141  for(il=NET.Nlayer-2; il>0; il--)
1142  {
1143 
1144  for(in=0; in<NET.Nneur[il]; in++)
1145  {
1146  deriv = NET.Deriv1[il][in];
1147  a = NET.Delta[il+1][0] * NET.Weights[il+1][0][in+1];
1148  pdelta = &(NET.Delta[il+1][1]);
1149  for(in1=1; in1<NET.Nneur[il+1]; in1++, pdelta++)
1150  {
1151  a += *pdelta * NET.Weights[il+1][in1][in+1];
1152  }
1153  NET.Delta[il][in] = a * deriv;
1154  }
1155  }
1156 
1157  for(il=1; il<NET.Nlayer; il++)
1158  {
1159  ii = NET.Nneur[il-1];
1160  for(in=0; in<NET.Nneur[il]; in++)
1161  {
1162  a = NET.Delta[il][in];
1163  LEARN.DeDw[il][in][0] += a;
1164  LEARN.DeDw[il][in][1] += a * NET.Outn[il-1][0];
1165  pout = &(NET.Outn[il-1][1]);
1166  pdedw = &(LEARN.DeDw[il][in][2]);
1167  for(in1=1; in1<ii; ++in1, ++pout, ++pdedw)
1168  {
1169  (*pdedw) += a * (*pout);
1170  }
1171  }
1172  }
1173 
1174  return(0);
1175 }
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
ii
Definition: cuy.py:590
#define LEARN
Definition: mlp_gen.h:36
double b
Definition: hdecay.h:120
double a
Definition: hdecay.h:121
double dbl
Definition: mlp_gen.h:12
void DeDwZero ( )

Definition at line 1041 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

1042 {
1043  int il, in, jn;
1044  for(il=1; il<NET.Nlayer; il++)
1045  for(in=0; in<NET.Nneur[il]; in++)
1046  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1047  LEARN.DeDw[il][in][jn] = 0;
1048 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
dbl DerivDir ( )

Definition at line 1288 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1289 {
1290  int il,in,jn;
1291  dbl ddir = 0;
1292 
1293  for(il=1; il<NET.Nlayer; il++)
1294  for(in=0; in<NET.Nneur[il]; in++)
1295  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1296  {
1297  ddir += LEARN.DeDw[il][in][jn]*dir[il][in][jn];
1298  }
1299  return(ddir);
1300 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl *** dir
Definition: mlp_gen.cc:35
void EtaDecay ( )

Definition at line 2086 of file mlp_gen.cc.

References LEARN.

Referenced by MLP_Stochastic().

2087 {
2088  LEARN.eta *= LEARN.Decay;
2089 }
#define LEARN
Definition: mlp_gen.h:36
int FixedStep ( dbl  alpha)

Definition at line 1704 of file mlp_gen.cc.

References recoMuon::in, LEARN, MLP_Line(), and NET.

1705 {
1706  dbl ***w0;
1707  int il, in, jn;
1708 
1709  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1710  for(il=1; il<NET.Nlayer; il++)
1711  {
1712  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1713  for(in=0; in<NET.Nneur[il]; in++)
1714  {
1715  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1716  sizeof(dbl));
1717  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1718  {
1719  w0[il][in][jn] = NET.Weights[il][in][jn];
1720  }
1721  }
1722  }
1723 
1724 
1725 /* set the weights */
1726  MLP_Line(w0,alpha);
1727 
1728 /* store weight changes */
1729  for(il=1; il<NET.Nlayer; il++)
1730  for(in=0; in<NET.Nneur[il]; in++)
1731  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1732  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1733  - w0[il][in][jn];
1734 
1735  for(il=1; il<NET.Nlayer; il++)
1736  for(in=0; in<NET.Nneur[il]; in++)
1737  free(w0[il][in]);
1738  for(il=1; il<NET.Nlayer; il++)
1739  free(w0[il]);
1740  free(w0);
1741 
1742  return(0);
1743 }
float alpha
Definition: AMPTWrapper.h:95
#define NET
Definition: mlp_gen.h:25
void MLP_Line(dbl ***w0, dbl alpha)
Definition: mlp_gen.cc:1757
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
void FreeNetwork ( )

Definition at line 3533 of file mlp_gen.cc.

References mps_fire::i, LEARN, and NET.

Referenced by AllocNetwork().

3534 {
3535  int i, j;
3536  for(i=1; i<NET.Nlayer; i++)
3537  {
3538  for(j=0; j<NET.Nneur[i]; j++)
3539  {
3540 /* free(NET.Weights[i][j]); */
3541  free(LEARN.Odw[i][j]);
3542  free(LEARN.ODeDw[i][j]);
3543  free(LEARN.DeDw[i][j]);
3544  }
3545  free(NET.vWeights[i]);
3546  free(NET.Weights[i]);
3547  free(LEARN.Odw[i]);
3548  free(LEARN.ODeDw[i]);
3549  free(LEARN.DeDw[i]);
3550  }
3551  free(NET.Weights);
3552  free(LEARN.Odw);
3553  free(LEARN.ODeDw);
3554  free(LEARN.DeDw);
3555 
3556  free(NET.Nneur);
3557 
3558  for(i=0; i<NET.Nlayer; i++)
3559  {
3560  free(NET.T_func[i]);
3561  free(NET.Deriv1[i]);
3562  free(NET.Inn[i]);
3563  free(NET.Outn[i]);
3564  free(NET.Delta[i]);
3565  }
3566  free(NET.T_func);
3567  free(NET.Deriv1);
3568  free(NET.Inn);
3569  free(NET.Outn);
3570  free(NET.Delta);
3571 
3572  NetMemory = 0;
3573 }
#define NET
Definition: mlp_gen.h:25
int NetMemory
Definition: mlp_gen.cc:30
#define LEARN
Definition: mlp_gen.h:36
int FreePatterns ( int  ifile)

Definition at line 3235 of file mlp_gen.cc.

References mps_fire::i, compare_using_db::ifile, and PAT.

Referenced by AllocPatterns(), and PhysicsTools::MLP::clear().

3236 {
3237  int i;
3238 
3239  if(ifile>1 || ifile<0) return 1;
3240 /* printf("%d %d \n",ifile,PatMemory[ifile]);*/
3241  if(PatMemory[ifile]==0) return 2;
3242 
3243  free(PAT.Pond[ifile]);
3244  for(i=0; i<PAT.Npat[ifile]; i++)
3245  {
3246 /* free(PAT.Rin[ifile][i]); */
3247  free(PAT.Rans[ifile][i]);
3248  }
3249  free(PAT.Rin[ifile]);
3250  free(PAT.Rans[ifile]);
3251  free(PAT.vRin[ifile]);
3252  PatMemory[ifile] = 0;
3253  PAT.Npat[ifile] = 0;
3254 
3255  return 0;
3256 }
#define PAT
Definition: mlp_gen.h:45
int PatMemory[2]
Definition: mlp_gen.cc:26
void FreeWeights ( )
int GetBFGSH ( int  Nweights)

Definition at line 1418 of file mlp_gen.cc.

References a, b, mps_fire::i, and tmp.

Referenced by MLP_Epoch().

1419 {
1420  typedef double dble;
1421  dble deltaTgamma=0;
1422  dble factor=0;
1423  dble *Hgamma;
1424  dble *tmp;
1425  dble a, b;
1426  int i,j;
1427 
1428  Hgamma = (dble *) malloc(Nweights*sizeof(dble));
1429  tmp = (dble *) malloc(Nweights*sizeof(dble));
1430 
1431  for(i=0; i<Nweights; i++)
1432  {
1433  deltaTgamma += (dble) delta[i] * (dble) Gamma[i];
1434  a = 0;
1435  b = 0;
1436  for(j=0; j<Nweights; j++)
1437  {
1438  a += (dble) BFGSH[i][j] * (dble) Gamma[j];
1439  b += (dble) Gamma[j] * (dble) BFGSH[j][i];
1440  }
1441  Hgamma[i] = a;
1442  tmp[i] = b;
1443  factor += (dble) Gamma[i]*Hgamma[i];
1444  }
1445  if(deltaTgamma == 0)
1446  {
1447  free(tmp);
1448  free(Hgamma);
1449  return 1;
1450  }
1451  a = 1 / deltaTgamma;
1452  factor = 1 + factor*a;
1453 
1454  for(i=0; i<Nweights; i++)
1455  {
1456  b = (dble) delta[i];
1457  for(j=0; j<Nweights; j++)
1458  BFGSH[i][j] += (dbl) (factor*b* (dble)
1459  delta[j]-(tmp[j]*b+Hgamma[i]*(dble)delta[j]))*a;
1460  }
1461  free(Hgamma);
1462  free(tmp);
1463  return 0;
1464 }
dbl * delta
Definition: mlp_gen.cc:36
dbl ** BFGSH
Definition: mlp_gen.cc:37
return((rh^lh)&mask)
double b
Definition: hdecay.h:120
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
double a
Definition: hdecay.h:121
dbl * Gamma
Definition: mlp_gen.cc:38
double dbl
Definition: mlp_gen.h:12
void GetGammaDelta ( )

Definition at line 1313 of file mlp_gen.cc.

References mps_fire::i, recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1314 {
1315  int i=0;
1316  int il,in,jn;
1317  for(il=1; il<NET.Nlayer; il++)
1318  for(in=0; in<NET.Nneur[il]; in++)
1319  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1320  {
1321  Gamma[i] = LEARN.DeDw[il][in][jn]-
1322  LEARN.ODeDw[il][in][jn];
1323  delta[i] = LEARN.Odw[il][in][jn];
1324  i++;
1325  }
1326 }
dbl * delta
Definition: mlp_gen.cc:36
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
dbl * Gamma
Definition: mlp_gen.cc:38
void getLexemes ( char *  s,
char **  ss 
)

Definition at line 2617 of file mlp_gen.cc.

References CountLexemes(), mps_fire::i, gen::n, and tmp.

2618 {
2619  char tmp[1024];
2620  int i,n;
2621 
2622  strcpy(tmp,s);
2623  n=CountLexemes(tmp);
2624  if (n>0)
2625  {
2626  char* saveptr;
2627  strcpy(ss[0],strtok_r(tmp," ",&saveptr));
2628  for (i=1;i<n;i++)
2629  strcpy(ss[i],strtok_r(nullptr," ",&saveptr));
2630  }
2631 }
int CountLexemes(char *s)
Definition: mlp_gen.cc:2585
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
int GetNetStructure ( char *  s,
int *  Nlayer,
int *  Nneur 
)

Definition at line 3594 of file mlp_gen.cc.

References mps_fire::i, NLMAX, and tmp.

Referenced by ReadPatterns().

3595 {
3596  int i=0;
3597  char tmp[1024];
3598 
3599  if(strlen(s)==0) return -1;
3600  if(strlen(s)>1024) return -2;
3601 
3602  strcpy(tmp,s);
3603  char* saveptr;
3604  if (strtok_r(tmp,",",&saveptr))
3605  {
3606  i=1;
3607  while (strtok_r(nullptr,",",&saveptr)) i++;
3608  }
3609  *Nlayer = i;
3610  if(i > NLMAX) return -3;
3611 
3612  strcpy(tmp,s);
3613  if (*Nlayer>0)
3614  {
3615  sscanf(strtok_r(tmp,",",&saveptr),"%d",&(Nneur[0]));
3616  for (i=1;i<*Nlayer;i++)
3617  sscanf(strtok_r(nullptr,",",&saveptr),"%d",&(Nneur[i]));
3618  }
3619 
3620  return 0;
3621 }
#define NLMAX
Definition: mlp_gen.cc:14
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
void getnLexemes ( int  n,
char *  s,
char **  ss 
)

Definition at line 2602 of file mlp_gen.cc.

References mps_fire::i, gen::n, and tmp.

Referenced by ReadPatterns().

2603 {
2604  char tmp[1024];
2605  int i;
2606  strcpy(tmp,s);
2607  if (n>0)
2608  {
2609  char* saveptr;
2610  strcpy(ss[0],strtok_r(tmp," ",&saveptr));
2611  for (i=1;i<n;i++)
2612  strcpy(ss[i],strtok_r(nullptr," ",&saveptr));
2613  }
2614 }
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
void InitBFGSH ( int  Nweights)

Definition at line 1388 of file mlp_gen.cc.

References mps_fire::i.

Referenced by MLP_Epoch().

1389 {
1390  int i,j;
1391  for(i=0; i<Nweights; i++)
1392  for(j=0; j<Nweights; j++)
1393  {
1394  BFGSH[i][j] = 0;
1395  if(i==j) BFGSH[i][j] = 1;
1396  }
1397 }
dbl ** BFGSH
Definition: mlp_gen.cc:37
void InitWeights ( )

Definition at line 2147 of file mlp_gen.cc.

References mps_fire::i, MLP_Rand(), and NET.

Referenced by PhysicsTools::MLP::MLP().

2148 {
2149  int ilayer,ineur,i;
2150 
2151  for(ilayer=1;ilayer<NET.Nlayer;ilayer++)
2152  for(ineur=0;ineur<NET.Nneur[ilayer];ineur++)
2153  for(i=0;i<=NET.Nneur[ilayer-1];i++)
2154  NET.Weights[ilayer][ineur][i]=
2155  (dbl) MLP_Rand(-0.5, 0.5);
2156 }
#define NET
Definition: mlp_gen.h:25
double MLP_Rand(dbl mini, dbl maxi)
Definition: mlp_gen.cc:2131
double dbl
Definition: mlp_gen.h:12
int LearnAlloc ( )

Definition at line 2688 of file mlp_gen.cc.

References mps_fire::i, recoMuon::in, LEARN, LearnFree(), and NET.

Referenced by PhysicsTools::MLP::MLP().

2689 {
2690  int il,in,i;
2691  int Nweights = 0;
2692 
2693  if(LearnMemory != 0) LearnFree();
2694  LearnMemory = 1;
2695  dir = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
2696  if(dir == nullptr) return -111;
2697 
2698  for(il=0; il<NET.Nlayer; il++)
2699  {
2700  dir[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
2701  if(dir[il] == nullptr) return -111;
2702  for(in=0; in<NET.Nneur[il]; in++)
2703  {
2704  if(il==0)
2705  {
2706 /* TODO: understand implications of hard-coded 101 */
2707  dir[0][in] = (dbl *)
2708  malloc(101*sizeof(dbl));
2709  if(dir[0][in] == nullptr) return -111;
2710  }
2711  else
2712  {
2713  dir[il][in] = (dbl *)
2714  malloc((NET.Nneur[il-1]+1)*sizeof(dbl));
2715  if(dir[il][in] == nullptr) return -111;
2716  Nweights += NET.Nneur[il-1]+1;
2717  }
2718  }
2719  }
2720  NET.Nweights = Nweights;
2721 
2722  if(BFGSMemory==0 && LEARN.Meth>= 6)
2723  {
2724  BFGSMemory = 1;
2725  Gamma = (dbl*) malloc(Nweights*sizeof(dbl));
2726  delta = (dbl*) malloc(Nweights*sizeof(dbl));
2727  BFGSH = (dbl**) malloc(Nweights*sizeof(dbl*));
2728  if(Gamma == nullptr || delta == nullptr || BFGSH == nullptr)
2729  return -111;
2730 
2731  for(i=0; i<Nweights; i++)
2732  {
2733  BFGSH[i] = (dbl*) malloc(Nweights*sizeof(dbl));
2734  if(BFGSH[i] == nullptr) return -111;
2735  }
2736  }
2737 
2738 /* if(JacobianMemory==0)
2739  {
2740  JacobianMemory = 1;
2741  printf("JacobianMemory = %d\n",JacobianMemory);
2742  JacobianMatrix = (dbl **) malloc(PAT.Npat[0]*sizeof(dbl *));
2743  for(i=0; i<PAT.Npat[0]; i++)
2744  JacobianMatrix[i] =
2745  (dbl*) malloc(Nweights*sizeof(dbl));
2746  printf("end memory alloc\n");
2747  }
2748 
2749  if(DIVERS.Ihess==1) HessianAlloc(Nweights);*/
2750 
2751  return 0;
2752 }
dbl * delta
Definition: mlp_gen.cc:36
void LearnFree()
Definition: mlp_gen.cc:2643
int LearnMemory
Definition: mlp_gen.cc:29
dbl ** BFGSH
Definition: mlp_gen.cc:37
#define NET
Definition: mlp_gen.h:25
int BFGSMemory
Definition: mlp_gen.cc:27
#define LEARN
Definition: mlp_gen.h:36
dbl * Gamma
Definition: mlp_gen.cc:38
double dbl
Definition: mlp_gen.h:12
dbl *** dir
Definition: mlp_gen.cc:35
void LearnFree ( )

Definition at line 2643 of file mlp_gen.cc.

References recoMuon::in, and NET.

Referenced by LearnAlloc(), and PhysicsTools::MLP::~MLP().

2644 {
2645  int il,in;
2646  if(LearnMemory==0) return;
2647  LearnMemory = 0;
2648  for(il=0; il<NET.Nlayer; il++)
2649  {
2650  for(in=0; in<NET.Nneur[il]; in++)
2651  {
2652  free(dir[il][in]);
2653  }
2654  free(dir[il]);
2655  }
2656  free(dir);
2657  if(BFGSMemory==0) return;
2658  BFGSMemory = 0;
2659  for(il=0; il<NET.Nweights; il++)
2660  {
2661  free(BFGSH[il]);
2662  }
2663  free(BFGSH);
2664  free(Gamma);
2665  free(delta);
2666 
2667 /* if(JacobianMemory == 0) return;
2668  JacobianMemory = 0;
2669  for(il=0; il<PAT.Npat[0]; il++) free(JacobianMatrix[il]);
2670  free(JacobianMatrix); */
2671 }
dbl * delta
Definition: mlp_gen.cc:36
int LearnMemory
Definition: mlp_gen.cc:29
dbl ** BFGSH
Definition: mlp_gen.cc:37
#define NET
Definition: mlp_gen.h:25
int BFGSMemory
Definition: mlp_gen.cc:27
dbl * Gamma
Definition: mlp_gen.cc:38
dbl *** dir
Definition: mlp_gen.cc:35
int LineSearch ( dbl alpmin,
int *  Ntest,
dbl  Err0 
)

Definition at line 1478 of file mlp_gen.cc.

References recoMuon::in, LastAlpha, LEARN, MLP_Line(), MLP_Test(), NET, and metsig::tau.

Referenced by MLP_Epoch().

1479 {
1480  dbl ***w0;
1481  dbl alpha1, alpha2, alpha3;
1482  dbl err1, err2, err3;
1483  dbl tau;
1484  int icount, il, in, jn;
1485 
1486  tau=LEARN.Tau;
1487 
1488 /* store weights before line search */
1489 
1490  *Ntest = 0;
1491  w0 = (dbl ***) malloc(NET.Nlayer*sizeof(dbl**));
1492  for(il=1; il<NET.Nlayer; il++)
1493  {
1494  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1495  for(in=0; in<NET.Nneur[il]; in++)
1496  {
1497  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1498  sizeof(dbl));
1499  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1500  {
1501  w0[il][in][jn] = NET.Weights[il][in][jn];
1502  }
1503  }
1504  }
1505 
1506 /* compute error(w0) */
1507 
1508 /* err1 = MLP_Test(0,0);
1509  (*Ntest) ++;*/
1510  err1 = Err0;
1511 
1512  if(NET.Debug>=4) printf("err depart= %f\n",err1);
1513 
1514  *alpmin = 0;
1515  alpha1 = 0;
1516 /* alpha2 = 0.05;
1517  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1518  alpha2 = LastAlpha;
1519  if(alpha2 < 0.01) alpha2 = 0.01;
1520  if(alpha2 > 2.0) alpha2 = 2.0;
1521  MLP_Line(w0,alpha2);
1522  err2 = MLP_Test(0,0);
1523  (*Ntest) ++;
1524  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1525 
1526  alpha3 = alpha2;
1527  err3 = err2;
1528 
1529 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1530  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1531 
1532  if(err1>err2)
1533  {
1534  for(icount=1;icount<=100;icount++)
1535  {
1536  alpha3 = alpha3*tau;
1537  MLP_Line(w0,alpha3);
1538  err3 =MLP_Test(0,0);
1539  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha3,err3);
1540  (*Ntest) ++;
1541  if(err3>err2) break;
1542  alpha1 = alpha2;
1543  err1 = err2;
1544  alpha2 = alpha3;
1545  err2 = err3;
1546  }
1547  if(icount>=100) /* line search fails */
1548  {
1549  MLP_Line(w0,0); /* reset weights */
1550  free(w0);
1551  return(1);
1552  }
1553  }
1554  else
1555  {
1556  for(icount=1;icount<=100;icount++)
1557  {
1558  alpha2 = alpha2/tau;
1559  MLP_Line(w0,alpha2);
1560  err2 = MLP_Test(0,0);
1561  if(NET.Debug>=4) printf("alpha, err= %e %e\n",alpha2,err2);
1562  (*Ntest) ++;
1563  if(err1>err2) break;
1564  alpha3 = alpha2;
1565  err3 = err2;
1566  }
1567  if(icount>=100) /* line search fails */
1568  {
1569  MLP_Line(w0,0); /* reset weights */
1570  free(w0);
1571  LastAlpha = 0.05; /* try to be safe */
1572  return(1);
1573  }
1574  }
1575 
1576 /* find bottom of parabola */
1577 
1578  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1579  -(err2-err1)/(alpha2-alpha1)));
1580  if(*alpmin>10000) *alpmin=10000;
1581 
1582 /* set the weights */
1583  MLP_Line(w0,*alpmin);
1584  LastAlpha = *alpmin;
1585 
1586 /* store weight changes */
1587  for(il=1; il<NET.Nlayer; il++)
1588  for(in=0; in<NET.Nneur[il]; in++)
1589  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1590  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1591  - w0[il][in][jn];
1592 
1593  for(il=1; il<NET.Nlayer; il++)
1594  for(in=0; in<NET.Nneur[il]; in++)
1595  free(w0[il][in]);
1596  for(il=1; il<NET.Nlayer; il++)
1597  free(w0[il]);
1598  free(w0);
1599 
1600  return(0);
1601 }
#define NET
Definition: mlp_gen.h:25
dbl LastAlpha
Definition: mlp_gen.cc:32
void MLP_Line(dbl ***w0, dbl alpha)
Definition: mlp_gen.cc:1757
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl MLP_Test(int ifile, int regul)
Definition: mlp_gen.cc:446
int LineSearchHyb ( dbl alpmin,
int *  Ntest 
)

Definition at line 1781 of file mlp_gen.cc.

References recoMuon::in, LastAlpha, LEARN, MLP_LineHyb(), MLP_Test(), NET, and metsig::tau.

Referenced by MLP_Epoch().

1782 {
1783  dbl ***w0;
1784  dbl alpha1, alpha2, alpha3;
1785  dbl err1, err2, err3;
1786  dbl tau;
1787  int icount, il, in, jn;
1788 
1789 /* char buf [50];
1790  sprintf (buf,"entree linesearchhyb\n");
1791  MessageBoxA (0,buf,"dans FreePatterns",MB_OK);*/
1792 
1793  if(NET.Debug>=4){
1794  printf(" entry LineSearchHyb \n");
1795  }
1796  tau=LEARN.Tau;
1797 
1798 /* store weights before line search */
1799 
1800  *Ntest = 0;
1801  w0 = (dbl ***) malloc((NET.Nlayer-1)*sizeof(dbl**));
1802  for(il=1; il<NET.Nlayer-1; il++)
1803  {
1804  w0[il] = (dbl **) malloc(NET.Nneur[il]*sizeof(dbl*));
1805  for(in=0; in<NET.Nneur[il]; in++)
1806  {
1807  w0[il][in] = (dbl *) malloc((NET.Nneur[il-1]+1)*
1808  sizeof(dbl));
1809  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1810  {
1811  w0[il][in][jn] = NET.Weights[il][in][jn];
1812  }
1813  }
1814  }
1815 
1816 /* compute error(w0) */
1817  err1 = MLP_Test(0,1);
1818  (*Ntest) ++;
1819  if(NET.Debug>=4) printf("LinesearchHyb err depart= %f\n",err1);
1820 
1821  *alpmin = 0;
1822  alpha1 = 0;
1823 /* alpha2 = 0.05;
1824  if(LastAlpha != 0) alpha2 = LastAlpha;*/
1825  alpha2 = LastAlpha;
1826  if(alpha2 < 0.01) alpha2 = 0.01;
1827  if(alpha2 > 2.0) alpha2 = 2.0;
1828  MLP_LineHyb(w0,alpha2);
1829  err2 = MLP_Test(0,1);
1830  (*Ntest) ++;
1831 
1832  alpha3 = alpha2;
1833  err3 = err2;
1834 
1835 /* try to find a triplet (alpha1, alpha2, alpha3) such that
1836  Error(alpha1)>Error(alpha2)<Error(alpha3) */
1837 
1838  if(err1>err2)
1839  {
1840  for(icount=1;icount<=100;icount++)
1841  {
1842  alpha3 = alpha3*tau;
1843  MLP_LineHyb(w0,alpha3);
1844  err3 = MLP_Test(0,1);
1845  (*Ntest) ++;
1846  if(err3>err2) break;
1847  alpha1 = alpha2;
1848  err1 = err2;
1849  alpha2 = alpha3;
1850  err2 = err3;
1851  }
1852  if(icount>=100) /* line search fails */
1853  {
1854  MLP_LineHyb(w0,0); /* reset weights */
1855  free(w0);
1856  return(1);
1857  }
1858  }
1859  else
1860  {
1861  for(icount=1;icount<=100;icount++)
1862  {
1863  alpha2 = alpha2/tau;
1864  MLP_LineHyb(w0,alpha2);
1865  err2 = MLP_Test(0,1);
1866  (*Ntest) ++;
1867  if(err1>err2) break;
1868  alpha3 = alpha2;
1869  err3 = err2;
1870  }
1871  if(icount>=100) /* line search fails */
1872  {
1873  MLP_LineHyb(w0,0); /* reset weights */
1874  free(w0);
1875  return(1);
1876  }
1877  }
1878 
1879 /* find bottom of parabola */
1880 
1881  *alpmin = 0.5*(alpha1+alpha3-(err3-err1)/((err3-err2)/(alpha3-alpha2)
1882  -(err2-err1)/(alpha2-alpha1)));
1883  if(*alpmin>10000) *alpmin=10000;
1884 
1885 /* set the weights */
1886  MLP_LineHyb(w0,*alpmin);
1887  LastAlpha = *alpmin;
1888 
1889 /* store weight changes */
1890  for(il=1; il<NET.Nlayer-1; il++)
1891  for(in=0; in<NET.Nneur[il]; in++)
1892  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1893  LEARN.Odw[il][in][jn] = NET.Weights[il][in][jn]
1894  - w0[il][in][jn];
1895 
1896  for(il=1; il<NET.Nlayer-1; il++)
1897  for(in=0; in<NET.Nneur[il]; in++)
1898  free(w0[il][in]);
1899  for(il=1; il<NET.Nlayer-1; il++)
1900  free(w0[il]);
1901  free(w0);
1902  if(NET.Debug>=4){
1903  printf(" exit LineSearchHyb \n");
1904  }
1905 
1906  return(0);
1907 }
#define NET
Definition: mlp_gen.h:25
dbl LastAlpha
Definition: mlp_gen.cc:32
void MLP_LineHyb(dbl ***w0, dbl alpha)
Definition: mlp_gen.cc:1923
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl MLP_Test(int ifile, int regul)
Definition: mlp_gen.cc:446
int LoadWeights ( char *  filename,
int *  iepoch 
)

Definition at line 3025 of file mlp_gen.cc.

References mps_fire::i, NET, AlCaHLTBitMon_ParallelJobs::p, and alignCSCRings::s.

Referenced by PhysicsTools::MLP::load().

3026 {
3027  FILE *W;
3028  int ilayer,ineur,i;
3029  double p;
3030  char s[80];
3031 
3032  W=fopen(filename,"r");
3033  if(W==nullptr) return -1;
3034  do
3035  {
3036  fgets(s,80,W);
3037  }
3038  while(*s == '#');
3039  sscanf(s," %d",iepoch);
3040  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
3041  {
3042  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
3043  {
3044  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
3045  {
3046  fscanf(W," %le",&p);
3047  NET.Weights[ilayer][ineur][i] = (dbl) p;
3048  }
3049  }
3050  }
3051 
3052  fclose(W);
3053  return 0;
3054 }
#define NET
Definition: mlp_gen.h:25
double dbl
Definition: mlp_gen.h:12
dbl MLP_Epoch ( int  iepoch,
dbl alpmin,
int *  ntest 
)

Definition at line 706 of file mlp_gen.cc.

References pfBoostedDoubleSVAK8TagInfos_cfi::beta, BFGSdir(), CGDir(), DeDwNorm(), DeDwProd(), DeDwSaveZero(), DeDwScale(), DerivDir(), GetBFGSH(), GetGammaDelta(), InitBFGSH(), LEARN, LineSearch(), LineSearchHyb(), MLP_ResLin(), MLP_Stochastic(), MLP_Train(), NET, groupFilesInBlocks::nn, PAT, PrintWeights(), parseEventContent::prod, SetLambda(), ShuffleExamples(), SteepestDir(), and StochStep().

Referenced by PhysicsTools::MLP::train().

/*
 * MLP_Epoch - perform one full training epoch and return the error.
 *
 * iepoch: current epoch number (1-based; used to reset the
 *         CG/BFGS state every LEARN.Nreset epochs).
 * alpmin: output, step length chosen by the line search
 *         (0 for the purely stochastic method).
 * Ntest:  output, number of error evaluations used by the line search.
 *
 * The minimisation method is selected by LEARN.Meth:
 *   1  stochastic, 2  batch + fixed step (StochStep),
 *   3  steepest descent + line search,
 *   4  conjugate gradients (Ribiere-Polak),
 *   5  conjugate gradients (Fletcher-Reeves),
 *   6  BFGS, 7  hybrid BFGS with exact linear output layer,
 *   16 batch gradient from a 10% subsample.
 */
dbl MLP_Epoch(int iepoch, dbl *alpmin, int *Ntest)
{
  dbl err, ONorm, beta, prod, ddir;
  int Nweights, Nlinear, ipat, ierr;
  int nn;

  err = 0;
  *alpmin = 0.;

  Nweights = NET.Nweights;
  /* number of weights of the linear output layer (incl. bias) */
  Nlinear = NET.Nneur[NET.Nlayer-2] + 1;

  if(NET.Debug>=5) printf(" Entry MLP_Epoch\n");
/* stochastic minimization */
  if(LEARN.Meth==1)
  {
    err = MLP_Stochastic();
  }
  else
  {
    if(iepoch==1 && LEARN.Meth==7)
    {
      /* hybrid method: start from the exact solution of the
         linear output layer */
      SetLambda(10000);
      MLP_ResLin();
      if(NET.Debug>=2) PrintWeights();
    }

/* save previous gradient and reset current one */
    DeDwSaveZero();
    if(LEARN.Meth==16)
    {
      /* NOTE(review): the listing skips its source line 740 here;
         the cross-references mention ShuffleExamples(), so a call
         shuffling ExamplesIndex is presumably missing from this
         excerpt -- confirm against mlp_gen.cc */
      nn = PAT.Npat[0];
      PAT.Npat[0] = nn/10;   /* gradient from a 10% subsample */
      for(ipat=0;ipat<nn;ipat++)
      {
        ierr = MLP_Train(&ExamplesIndex[ipat],&err);
        if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
      }
    }
    else
    {
      /* accumulate the gradient over the whole learning sample */
      for(ipat=0;ipat<PAT.Npat[0];ipat++)
      {
        ierr = MLP_Train(&ipat,&err);
        if(ierr!=0) printf("Epoch: ierr= %d\n",ierr);
      }
    }
    DeDwScale(PAT.Npat[0]);
    if(LEARN.Meth==2) StochStep();
    if(LEARN.Meth==3)
    {
      /* steepest descent; fall back to a fixed step if the
         line search fails */
      SteepestDir();
      if(LineSearch(alpmin,Ntest,err)==1) StochStep();
    }

/* Conjugate Gradients Ribiere - Polak */
    if(LEARN.Meth==4)
    {
      if((iepoch-1)%LEARN.Nreset==0)
      {
        LEARN.Norm = DeDwNorm(); /* for next epoch */
        SteepestDir();
      }
      else
      {
        ONorm = LEARN.Norm;
        LEARN.Norm = DeDwNorm();
        prod = DeDwProd();
        beta = (LEARN.Norm-prod)/ONorm;
        CGDir(beta);
      }
      if(LineSearch(alpmin,Ntest,err)==1) StochStep();
    }

/* Conjugate Gradients Fletcher - Reeves */
    if(LEARN.Meth==5)
    {
      if((iepoch-1)%LEARN.Nreset==0)
      {
        LEARN.Norm = DeDwNorm(); /* for next epoch */
        SteepestDir();
      }
      else
      {
        ONorm = LEARN.Norm;
        LEARN.Norm = DeDwNorm();
        beta = LEARN.Norm/ONorm;
        CGDir(beta);
      }
      if(LineSearch(alpmin,Ntest,err)==1) StochStep();
    }
    if(LEARN.Meth==6)
    {
      /* BFGS: restart from steepest descent every Nreset epochs
         or whenever the Hessian update fails */
      if((iepoch-1)%LEARN.Nreset==0)
      {
        SteepestDir();
        InitBFGSH(Nweights);
      }
      else
      {
        GetGammaDelta();
        ierr = GetBFGSH(Nweights);
        if(ierr)
        {
          SteepestDir();
          InitBFGSH(Nweights);
        }
        else
        {
          BFGSdir(Nweights);
        }
      }
      /* if the BFGS direction is not a descent direction,
         restart from steepest descent */
      ddir = DerivDir();
      if(ddir>0)
      {
        SteepestDir();
        InitBFGSH(Nweights);
        ddir = DerivDir();
      }
      if(LineSearch(alpmin,Ntest,err)==1)
      {
        InitBFGSH(Nweights);
        SteepestDir();
        if(LineSearch(alpmin,Ntest,err)==1)
        {
          printf("Line search fail \n");
        }
      }
    }
    if(LEARN.Meth==7)
    {
      /* hybrid: BFGS on the hidden layers only (the Nlinear
         output weights are solved exactly), hence the
         Nweights-Nlinear dimension everywhere */
      if((iepoch-1)%LEARN.Nreset==0)
      {
        SteepestDir();
        InitBFGSH(Nweights-Nlinear);
      }
      else
      {
        if(NET.Debug>=5) printf("Before GetGammaDelta \n");
        GetGammaDelta();
        if(NET.Debug>=5) printf("After GetGammaDelta \n");
        ierr = GetBFGSH(Nweights-Nlinear);
        if(NET.Debug>=5) printf("After GetBFGSH \n");
        if(ierr)
        {
          SteepestDir();
          InitBFGSH(Nweights-Nlinear);
        }
        else
        {
          BFGSdir(Nweights-Nlinear);
        }
        if(NET.Debug>=5) printf("After BFGSdir \n");
      }
      SetLambda(10000);
      if(LineSearchHyb(alpmin,Ntest)==1)
      {
        InitBFGSH(Nweights-Nlinear);
        SteepestDir();
        if(LineSearchHyb(alpmin,Ntest)==1)
        {
          printf("Line search fail \n");
        }
      }
    }
  }

  if(NET.Debug>=5) printf(" End MLP_Epoch\n");
  return(err);
}
dbl DeDwProd()
Definition: mlp_gen.cc:1020
void CGDir(dbl beta)
Definition: mlp_gen.cc:1266
void DeDwSaveZero()
Definition: mlp_gen.cc:1097
int * ExamplesIndex
Definition: mlp_gen.cc:40
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
void SetLambda(double Wmax)
Definition: mlp_gen.cc:1949
void SteepestDir()
Definition: mlp_gen.cc:1244
int GetBFGSH(int Nweights)
Definition: mlp_gen.cc:1418
void PrintWeights()
Definition: mlp_gen.cc:2168
int StochStep()
Definition: mlp_gen.cc:965
dbl MLP_Stochastic()
Definition: mlp_gen.cc:518
int MLP_Train(int *ipat, dbl *err)
Definition: mlp_gen.cc:895
int ShuffleExamples(int n, int *index)
Definition: mlp_gen.cc:2102
dbl DeDwNorm()
Definition: mlp_gen.cc:998
void InitBFGSH(int Nweights)
Definition: mlp_gen.cc:1388
void GetGammaDelta()
Definition: mlp_gen.cc:1313
void MLP_ResLin()
Definition: mlp_gen.cc:1971
void DeDwScale(int Nexamples)
Definition: mlp_gen.cc:1060
#define LEARN
Definition: mlp_gen.h:36
int LineSearch(dbl *alpmin, int *Ntest, dbl Err0)
Definition: mlp_gen.cc:1478
void BFGSdir(int Nweights)
Definition: mlp_gen.cc:1340
double dbl
Definition: mlp_gen.h:12
int LineSearchHyb(dbl *alpmin, int *Ntest)
Definition: mlp_gen.cc:1781
dbl DerivDir()
Definition: mlp_gen.cc:1288
void MLP_Line ( dbl ***  w0,
dbl  alpha 
)

Definition at line 1757 of file mlp_gen.cc.

References recoMuon::in, and NET.

Referenced by DecreaseSearch(), FixedStep(), and LineSearch().

1758 {
1759  int il,in,jn;
1760 
1761  for(il=1; il<NET.Nlayer; il++)
1762  for(in=0; in<NET.Nneur[il]; in++)
1763  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1764  NET.Weights[il][in][jn] = w0[il][in][jn]+
1765  alpha*dir[il][in][jn];
1766 
1767 }
float alpha
Definition: AMPTWrapper.h:95
#define NET
Definition: mlp_gen.h:25
dbl *** dir
Definition: mlp_gen.cc:35
void MLP_LineHyb ( dbl ***  w0,
dbl  alpha 
)

Definition at line 1923 of file mlp_gen.cc.

References recoMuon::in, MLP_ResLin(), and NET.

Referenced by LineSearchHyb().

1924 {
1925  int il,in,jn;
1926  for(il=1; il<NET.Nlayer-1; il++)
1927  for(in=0; in<NET.Nneur[il]; in++)
1928  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1929  {
1930  NET.Weights[il][in][jn] = w0[il][in][jn]+
1931  alpha*dir[il][in][jn];
1932  }
1933  MLP_ResLin();
1934 }
float alpha
Definition: AMPTWrapper.h:95
#define NET
Definition: mlp_gen.h:25
void MLP_ResLin()
Definition: mlp_gen.cc:1971
dbl *** dir
Definition: mlp_gen.cc:35
void MLP_MatrixVector ( dbl M,
type_pat v,
dbl r,
int  n,
int  m 
)

Definition at line 3751 of file mlp_gen.cc.

References EnergyCorrector::c, edmIntegrityCheck::d, mps_fire::i, funct::m, gen::n, and alignCSCRings::r.

Referenced by MLP_Test_MM().

/*
 * MLP_MatrixVector - matrix-vector product r = M * v, where M is
 * an n x m row-major matrix and v has m elements.
 *
 * The loops are hand-unrolled: rows are processed four at a time
 * (pM1..pM4 walk four consecutive rows) and columns two at a time,
 * with scalar tail loops for leftover columns and rows.
 */
void MLP_MatrixVector(dbl *M, type_pat *v, dbl *r, int n, int m)
{
  int i,j;
  dbl a1, a2, a3, a4, c, d;
  dbl *pM1 = M;
  dbl *pM2 = &(M[m]);
  dbl *pM3 = &(M[2*m]);
  dbl *pM4 = &(M[3*m]);
  dbl *pr = r;
  int mp1 = m;          /* row stride */

  /* main loop, 4 rows per iteration; the inner loops already
     advanced each pMk by one row, hence the 3*mp1 increment */
  for(i=0; i<n-3;
      i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
      pr+=4)
  {
    a1 = 0;
    a2 = 0;
    a3 = 0;
    a4 = 0;
    /* columns two at a time */
    for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
    {
      c = v[j];
      d = v[j+1];
      a1 = a1 + *pM1 * c + *(pM1+1) * d;
      a2 = a2 + *pM2 * c + *(pM2+1) * d;
      a3 = a3 + *pM3 * c + *(pM3+1) * d;
      a4 = a4 + *pM4 * c + *(pM4+1) * d;
    }
    /* odd trailing column, if m is odd */
    for(/*j set above*/; j<m; j++, pM1++, pM2++, pM3++, pM4++)
    {
      c = v[j];
      a1 = a1 + *pM1 * c;
      a2 = a2 + *pM2 * c;
      a3 = a3 + *pM3 * c;
      a4 = a4 + *pM4 * c;
    }
    *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
  }
  /* remaining (at most 3) rows, one at a time */
  for(/*i set above*/; i<n; i++)
  {
    pM1 = &(M[i*m]);
    a1 = 0;
    for(j=0; j<m; j++, pM1++)
    {
      a1 = a1 + *pM1 * v[j];
    }
    r[i] = a1;
  }
}
double dbl
Definition: mlp_gen.h:12
void MLP_MatrixVectorBias ( dbl M,
dbl v,
dbl r,
int  n,
int  m 
)

Definition at line 3685 of file mlp_gen.cc.

References EnergyCorrector::c, edmIntegrityCheck::d, mps_fire::i, funct::m, gen::n, and alignCSCRings::r.

Referenced by MLP_Out(), and MLP_Out2().

/*
 * MLP_MatrixVectorBias - matrix-vector product with bias:
 * r[i] = M[i][0] + sum_j M[i][j+1]*v[j], i.e. the first element of
 * each row of M is a bias term. M is n x (m+1) row-major, v has m
 * elements. Same 4-row / 2-column unrolling as MLP_MatrixVector.
 */
void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
{
  int i,j;
  dbl a1, a2, a3, a4, c, d;
  dbl *pM1 = M;
  dbl *pM2 = &(M[m+1]);
  dbl *pM3 = &(M[2*(m+1)]);
  dbl *pM4 = &(M[3*(m+1)]);
  dbl *pr = r;
  int mp1 = m+1;        /* row stride, including the bias column */

  for(i=0; i<n-3;
      i+=4, pM1 += 3*mp1, pM2 += 3*mp1, pM3 += 3*mp1, pM4 += 3*mp1,
      pr+=4)
  {
    /* start each accumulator from the bias (first row element) */
    a1 = *pM1;
    a2 = *pM2;
    a3 = *pM3;
    a4 = *pM4;
    pM1++; pM2++; pM3++; pM4++;
    /* columns two at a time */
    for(j=0; j<m-1; j+=2, pM1+=2, pM2+=2, pM3+=2, pM4+=2)
    {
      c = v[j];
      d = v[j+1];
      a1 = a1 + *pM1 * c + *(pM1+1) * d;
      a2 = a2 + *pM2 * c + *(pM2+1) * d;
      a3 = a3 + *pM3 * c + *(pM3+1) * d;
      a4 = a4 + *pM4 * c + *(pM4+1) * d;
    }
    /* odd trailing column, if m is odd */
    for(/*j set above*/; j<m; j++, pM1++, pM2++, pM3++, pM4++)
    {
      c = v[j];
      a1 = a1 + *pM1 * c;
      a2 = a2 + *pM2 * c;
      a3 = a3 + *pM3 * c;
      a4 = a4 + *pM4 * c;
    }
    *pr = a1; *(pr+1) = a2; *(pr+2) = a3; *(pr+3) = a4;
  }
  /* remaining (at most 3) rows, one at a time */
  for(/*i set above*/; i<n; i++)
  {
    pM1 = &(M[i*(m+1)]);
    a1 = *pM1;          /* bias */
    pM1++;
    for(j=0; j<m; j++, pM1++)
    {
      a1 = a1 + *pM1 * v[j];
    }
    r[i] = a1;
  }
}
double dbl
Definition: mlp_gen.h:12
void MLP_MM2rows ( dbl c,
type_pat a,
dbl b,
int  Ni,
int  Nj,
int  Nk,
int  NaOffs,
int  NbOffs 
)

Definition at line 3820 of file mlp_gen.cc.

References gen::k.

Referenced by MLP_Test_MM().

/*
 * MLP_MM2rows - matrix product restricted to two rows of a:
 * c(2 x Nj) = a(2 x Nk) * b(Nk x Nj), where NaOffs and NbOffs are
 * the leading dimensions (row strides) of a and b. Ni is unused
 * (the routine always processes exactly two rows). Columns of c
 * are computed two at a time, with a scalar tail loop when Nj is
 * odd.
 */
void MLP_MM2rows(dbl* c, type_pat* a, dbl* b,
                 int Ni, int Nj, int Nk, int NaOffs, int NbOffs)
{
//int i,j,k;
  int j,k;
  dbl s00,s01,s10,s11;
  type_pat *pa0,*pa1;
  dbl *pb0,*pb1,*pc0,*pc1;

  for (j=0; j<=Nj-2; j+=2)
  {
    pc0 = c+j;
    pc1 = c+j+Nj;
    s00 = 0.0; s01 = 0.0; s10 = 0.0; s11 = 0.0;

    /* accumulate the 2x2 block: rows (a, a+NaOffs) times
       columns (j, j+1) of b */
    for (k=0,pb0=b+k+NbOffs*j,
         pb1=b+k+NbOffs*(j+1),
         pa0=a+k,
         pa1=a+k+NaOffs;
         k<Nk;
         k++,pa0++,
         pa1++,
         pb0++,
         pb1++)
    {
      s00 += (*pa0)*(*pb0);
      s01 += (*pa0)*(*pb1);
      s10 += (*pa1)*(*pb0);
      s11 += (*pa1)*(*pb1);
    }
    *pc0 = s00; *(pc0+1) = s01; *pc1 = s10; *(pc1+1) = s11;
  }
  /* last column of c when Nj is odd */
  for (/*j set above*/; j<Nj; j++)
  {
    pc0 = c+j;
    pc1 = c+j+Nj;
    s00 = 0.0; s10 = 0.0;
    for (k=0,pb0=b+k+NbOffs*j,
         pa0=a+k,
         pa1=a+k+NaOffs;
         k<Nk;
         k++,pa0++,
         pa1++,
         pb0++)
    {
      s00 += (*pa0)*(*pb0);
      s10 += (*pa1)*(*pb0);
    }
    *pc0 = s00; *pc1 = s10;
  }
}
double type_pat
Definition: mlp_gen.h:13
int k[5][pyjets_maxn]
double b
Definition: hdecay.h:120
double a
Definition: hdecay.h:121
double dbl
Definition: mlp_gen.h:12
void MLP_Out ( type_pat rrin,
dbl rrout 
)

Definition at line 62 of file mlp_gen.cc.

References mps_fire::i, recoMuon::in, funct::m, MLP_MatrixVectorBias(), MLP_vSigmoideDeriv(), and NET.

Referenced by MLP_ResLin().

/*
 * MLP_Out - forward propagation through the network.
 * rrin:  input values (NET.Nneur[0] of them, 0-based indexing).
 * rrout: not referenced in this body; the outputs are left in
 *        NET.Outn[NET.Nlayer-1].
 * Also fills NET.Deriv1 with the sigmoid derivatives of the
 * hidden layers (and 1 for the output layer) for use by
 * back-propagation.
 */
void MLP_Out(type_pat *rrin, dbl *rrout)
{
  int i, il, in, j, m, mp1;
  dbl **deriv1;

/* input layer: copy rrin into NET.Outn[0]; a prologue of
   m = Nneur[0]%4 elements, then a 4-way unrolled copy loop */
  deriv1 = NET.Deriv1;
  m = NET.Nneur[0]%4;
  if(m==0) goto L10;
  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
  mp1 = m+1;
  for(i=mp1; i<=NET.Nneur[0]; i+=4)
  {
    NET.Outn[0][i-1] = rrin[i-1];
    NET.Outn[0][i] = rrin[i];
    NET.Outn[0][i+1] = rrin[i+1];
    NET.Outn[0][i+2] = rrin[i+2];
  }

/* hidden and output layers */
  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                       NET.Outn[1],NET.Nneur[1],NET.Nneur[0]);

  for(il=2; il<NET.Nlayer; il++)
  {
    /* apply the sigmoid to layer il-1 (keeping its derivative),
       then propagate to layer il */
    MLP_vSigmoideDeriv(NET.Outn[il-1],
                       deriv1[il-1],NET.Nneur[il-1]);
    MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                         NET.Outn[il],NET.Nneur[il],
                         NET.Nneur[il-1]);
  }
  /* linear output layer: derivative is 1 */
  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
  {
    deriv1[NET.Nlayer-1][in] = 1;
  }
}
#define NET
Definition: mlp_gen.h:25
void MLP_vSigmoideDeriv(dbl *x, dbl *dy, int n)
void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
Definition: mlp_gen.cc:3685
double dbl
Definition: mlp_gen.h:12
void MLP_Out2 ( type_pat rrin)

Definition at line 184 of file mlp_gen.cc.

References mps_fire::i, recoMuon::in, funct::m, MLP_MatrixVectorBias(), MLP_vSigmoideDeriv(), NET, and nin.

Referenced by MLP_Stochastic(), and MLP_Train().

/*
 * MLP_Out2 - forward propagation used during training.
 * rrin: input pattern; note the 1-BASED indexing here
 * (rrin[1]..rrin[Nneur[0]]), unlike MLP_Out. Outputs are left in
 * NET.Outn[NET.Nlayer-1]; sigmoid derivatives of the hidden
 * layers go to NET.Deriv1 (1 for the linear output layer).
 */
void MLP_Out2(type_pat *rrin)
{
  int il, in, m, mp1;
  int i;
  dbl **rrout, **deriv1;
  dbl *prrout;
  type_pat *prrin;
  int nhid = NET.Nneur[1];
  int nin = NET.Nneur[0];

  rrout = NET.Outn;
  deriv1 = NET.Deriv1;

  /* copy the first m = Nneur[0]%4 inputs (prologue of the 4-way
     unrolled copy below); note the shift caused by the 1-based
     rrin indexing */
  m = NET.Nneur[0]%4;
  if(m==0) goto L10;
  if(m==1)
  {
    rrout[0][0] = rrin[1];
    goto L10;
  }
  else if(m==2)
  {
    rrout[0][0] = rrin[1];
    rrout[0][1] = rrin[2];
    goto L10;
  }
  else if(m==3)
  {
    rrout[0][0] = rrin[1];
    rrout[0][1] = rrin[2];
    rrout[0][2] = rrin[3];
    goto L10;
  }
L10:
  mp1 = m+1;
  prrout = &(rrout[0][mp1]);
  prrin = &(rrin[mp1+1]);
  /* copy the remaining inputs four at a time */
  for(i=mp1; i<=NET.Nneur[0]; i+=4, prrout+=4, prrin+=4)
  {
    *(prrout-1) = *(prrin-1);
    *prrout = *prrin;
    *(prrout+1)= *(prrin+1);
    *(prrout+2) = *(prrin+2);
  }

/* input layer */
  MLP_MatrixVectorBias(NET.vWeights[1],NET.Outn[0],
                       NET.Outn[1],nhid,nin);

/* hidden and output layers */
  for(il=2; il<NET.Nlayer; il++)
  {
    MLP_vSigmoideDeriv(NET.Outn[il-1],deriv1[il-1],NET.Nneur[il-1]);
    MLP_MatrixVectorBias(NET.vWeights[il],NET.Outn[il-1],
                         NET.Outn[il],NET.Nneur[il],NET.Nneur[il-1]);
  }
  /* linear output layer: derivative is 1 */
  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
    deriv1[NET.Nlayer-1][in] = 1;
}
int nin
#define NET
Definition: mlp_gen.h:25
void MLP_vSigmoideDeriv(dbl *x, dbl *dy, int n)
double type_pat
Definition: mlp_gen.h:13
void MLP_MatrixVectorBias(dbl *M, dbl *v, dbl *r, int n, int m)
Definition: mlp_gen.cc:3685
double dbl
Definition: mlp_gen.h:12
void MLP_Out_T ( type_pat rrin)

Definition at line 113 of file mlp_gen.cc.

References a, mps_fire::i, recoMuon::in, funct::m, MLP_Sigmoide(), and NET.

Referenced by PhysicsTools::MLP::eval(), and MLP_Test().

/*
 * MLP_Out_T - forward propagation for a test pattern.
 * rrin: input values (0-based). Outputs are left in
 * NET.Outn[NET.Nlayer-1]. Unlike MLP_Out, the transfer function of
 * each neuron is taken from NET.T_func (0 = off, 1 = linear,
 * 2 = sigmoid) and no derivatives are stored.
 */
void MLP_Out_T(type_pat *rrin)
{
  int i, il, in, j, ilm1, m, mp1;
  dbl a;

/* input layer: 4-way unrolled copy with a prologue of
   m = Nneur[0]%4 elements */
  m = NET.Nneur[0]%4;
  if(m==0) goto L10;
  for(j=0;j<m;j++) NET.Outn[0][j] = rrin[j];
L10:
  mp1 = m+1;
  for(i=mp1; i<=NET.Nneur[0]; i+=4)
  {
    NET.Outn[0][i-1] = rrin[i-1];
    NET.Outn[0][i] = rrin[i];
    NET.Outn[0][i+1] = rrin[i+1];
    NET.Outn[0][i+2] = rrin[i+2];
  }

/* hidden and output layers */
  for(il=1; il<NET.Nlayer; il++)
  {
    ilm1 = il-1;
    m = NET.Nneur[ilm1]%4;
    for(in=0; in<NET.Nneur[il]; in++)
    {
      /* weighted sum: bias term plus a 4-way unrolled dot product
         over the previous layer (prologue of m elements) */
      a = NET.Weights[il][in][0];
      if(m==0) goto L20;
      for(j=1;j<=m;j++) a +=
        NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
L20:
      mp1 = m+1;
      for(j=mp1; j<=NET.Nneur[ilm1]; j+=4)
      {
        a +=
          NET.Weights[il][in][j+3]*NET.Outn[ilm1][j+2]+
          NET.Weights[il][in][j+2]*NET.Outn[ilm1][j+1]+
          NET.Weights[il][in][j+1]*NET.Outn[ilm1][j]+
          NET.Weights[il][in][j]*NET.Outn[ilm1][j-1];
      }
      /* per-neuron transfer function */
      switch(NET.T_func[il][in])
      {
        case 2: NET.Outn[il][in] = MLP_Sigmoide(a);
                break;
        case 1: NET.Outn[il][in] = a;
                break;
        case 0: NET.Outn[il][in] = 0;
                break;
      }
    }
  }
}
#define NET
Definition: mlp_gen.h:25
double a
Definition: hdecay.h:121
double dbl
Definition: mlp_gen.h:12
dbl MLP_Sigmoide(dbl x)
Definition: mlp_sigmoide.cc:27
int MLP_PrCFun ( char *  filename)

Definition at line 2881 of file mlp_gen.cc.

References DIVERS, recoMuon::in, NET, and STAT.

/*
 * MLP_PrCFun - export the trained network as standalone C code:
 * writes a sigmoid() helper and a function
 * "rnnfun(double *rin, double *rout)" into file "filename".
 * When DIVERS.Norm != 0 the inputs are normalised with the
 * means/sigmas stored in STAT.
 * Returns 0 on success, -1 if the file cannot be opened.
 */
int MLP_PrCFun(char *filename)
{
  int il,in,jn;
  FILE *W;

  W=fopen(filename,"w");
  if(W==nullptr) return -1;

  fprintf(W,"double sigmoid(double x)\n");
  fprintf(W,"{\n");
  fprintf(W,"return 1/(1+exp(-x));\n");
  fprintf(W,"}\n");
  fprintf(W,"void rnnfun(double *rin,double *rout)\n");
  fprintf(W,"{\n");
  fprintf(W," double out1[%d];\n",NET.Nneur[0]);
  fprintf(W," double out2[%d];\n",NET.Nneur[1]);
  if(NET.Nlayer>=3) fprintf(W," double out3[%d];\n",NET.Nneur[2]);
  if(NET.Nlayer>=4) fprintf(W," double out4[%d];\n",NET.Nneur[3]);
  fprintf(W,"\n");

  /* input layer: raw copy or normalisation of the inputs */
  for(in=0; in<NET.Nneur[0]; in++)
  {
    if(DIVERS.Norm==0)
    {
      fprintf(W," out1[%d] = rin[%d];\n",in,in);
    }
    else
    {
      fprintf(W," out1[%d] = (rin[%d]-%e)/%e;\n",
              in,in,
              STAT.mean[in],STAT.sigma[in]);
    }
  }

  /* hidden and output layers: weighted sums, then the transfer
     function selected in NET.T_func (1 = identity, nothing to
     emit) */
  for(il=1; il<=NET.Nlayer-1; il++)
  {
    fprintf(W,"\n");
    fprintf(W,"/* layer %d */\n",il+1);
    for(in=0; in<NET.Nneur[il]; in++)
    {
      fprintf(W," out%d[%d] = %e\n",il+1,in,
              (double) NET.Weights[il][in][0]);
      for(jn=1;jn<=NET.Nneur[il-1]; jn++)
        fprintf(W,"      +(%e) * out%d[%d]\n",
                (double) NET.Weights[il][in][jn],il,jn-1);
      fprintf(W,"      ;\n");
    }
    fprintf(W,"\n");
    for(in=0; in<NET.Nneur[il]; in++)
    {
      if(NET.T_func[il][in]==0)
      {
        fprintf(W," out%d[%d] = 0;\n",il+1,in);
      }
      else if(NET.T_func[il][in]==1)
      {
      }
      else if(NET.T_func[il][in]==2)
      {
        fprintf(W," out%d[%d] = sigmoid(out%d[%d]);\n",
                il+1,in,il+1,in);
      }
    }
  }
  /* copy the last layer into the output array */
  il = NET.Nlayer-1;
  for(in=0; in<NET.Nneur[il]; in++)
  {
    fprintf(W," rout[%d] = out%d[%d];\n",in,il+1,in);
  }
  fprintf(W,"}\n");
  fclose(W);
  return 0;
}
#define STAT
Definition: mlp_gen.h:60
#define NET
Definition: mlp_gen.h:25
#define DIVERS
Definition: mlp_gen.h:54
int MLP_PrFFun ( char *  filename)

Definition at line 2772 of file mlp_gen.cc.

References DIVERS, recoMuon::in, NET, and STAT.

/*
 * MLP_PrFFun - export the trained network as standalone Fortran
 * code: writes "SUBROUTINE RNNFUN(rin,rout)" and a SIGMOID
 * function into file "filename". When DIVERS.Norm != 0 the inputs
 * are normalised with the means/sigmas stored in STAT.
 * Returns 0 on success, -1 if the file cannot be opened.
 */
int MLP_PrFFun(char *filename)
{
  int il,in,jn;
  FILE *W;

  W=fopen(filename,"w");
  if(W==nullptr) return -1;
  fprintf(W,"      SUBROUTINE RNNFUN(rin,rout)\n");
  fprintf(W,"      DIMENSION RIN(%d)\n",NET.Nneur[0]);
  fprintf(W,"      DIMENSION ROUT(%d)\n",NET.Nneur[NET.Nlayer-1]);
  fprintf(W,"C\n");

  /* input layer: raw copy or normalisation of the inputs */
  for(in=0; in<NET.Nneur[0]; in++)
  {
    if(DIVERS.Norm==0)
    {
      fprintf(W,"      OUT%d = RIN(%d)\n",in+1,in+1);
    }
    else
    {
      fprintf(W,"      OUT%d = (RIN(%d)-%e)/%e\n",in+1,in+1,
              STAT.mean[in],STAT.sigma[in]);
    }
  }
  /* hidden layers: weighted sums (RINx) then the transfer
     function selected in NET.T_func */
  for(il=1; il<NET.Nlayer-1; il++)
  {
    fprintf(W,"C\n");
    fprintf(W,"C     layer %d\n",il+1);
    for(in=0; in<NET.Nneur[il]; in++)
    {
      fprintf(W,"      RIN%d = %e\n",in+1,
              (double) NET.Weights[il][in][0]);
      for(jn=1;jn<=NET.Nneur[il-1]; jn++)
        fprintf(W,"     > +(%e) * OUT%d\n",
                (double) NET.Weights[il][in][jn],jn);
    }
    fprintf(W,"C\n");
    for(in=0; in<NET.Nneur[il]; in++)
    {
      if(NET.T_func[il][in]==0)
      {
        fprintf(W,"      OUT%d = 0\n",in+1);
      }
      else if(NET.T_func[il][in]==1)
      {
        fprintf(W,"      OUT%d = RIN%d\n",in+1,in+1);
      }
      else if(NET.T_func[il][in]==2)
      {
        fprintf(W,"      OUT%d = SIGMOID(RIN%d)\n",
                in+1,in+1);
      }
    }
  }
  /* output layer: same scheme, but written to ROUT(...) */
  il = NET.Nlayer-1;
  fprintf(W,"C\n");
  fprintf(W,"C     layer %d\n",il+1);
  for(in=0; in<NET.Nneur[il]; in++)
  {
    fprintf(W,"      RIN%d = %e\n",in+1,
            (double) NET.Weights[il][in][0]);
    for(jn=1;jn<=NET.Nneur[il-1]; jn++)
      fprintf(W,"     > +(%e) * OUT%d\n",
              (double) NET.Weights[il][in][jn],jn);
  }
  fprintf(W,"C\n");
  for(in=0; in<NET.Nneur[il]; in++)
  {
    if(NET.T_func[il][in]==0)
    {
      fprintf(W,"      ROUT(%d) = 0\n",in+1);
    }
    else if(NET.T_func[il][in]==1)
    {
      fprintf(W,"      ROUT(%d) = RIN%d\n",in+1,in+1);
    }
    else if(NET.T_func[il][in]==2)
    {
      fprintf(W,"      ROUT(%d) = SIGMOID(RIN%d)\n",
              in+1,in+1);
    }
  }

  fprintf(W,"C\n");
  fprintf(W,"      END\n");
  fprintf(W,"      REAL FUNCTION SIGMOID(X)\n");
  fprintf(W,"      SIGMOID = 1./(1.+EXP(-X))\n");
  fprintf(W,"      END\n");

  fclose(W);
  return 0;
}
#define STAT
Definition: mlp_gen.h:60
#define NET
Definition: mlp_gen.h:25
#define DIVERS
Definition: mlp_gen.h:54
int MLP_PrintInputStat ( )

Definition at line 3335 of file mlp_gen.cc.

References SiStripPI::mean, MLP_StatInputs(), NET, PAT, runEdmFileComparison::returnCode, and stat_::sigma.

3336 {
3337  int j;
3338  dbl *mean, *sigma, *minimum, *maximum;
3339 
3340 /* allocate memory */
3341  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3342  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3343  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3344  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3345  int returnCode = -111; // to return if any malloc failed
3346 
3347  if(mean && sigma && minimum && maximum) {
3348 
3349  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],mean,sigma,minimum,maximum);
3350 
3351  printf("\t mean \t\t RMS \t\t min \t\t max\n");
3352  for(j=0;j<NET.Nneur[0];j++)
3353  {
3354  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3355  mean[j],sigma[j],minimum[j],maximum[j]);
3356  }
3357  returnCode = 0; // everything went fine
3358  }
3359 
3360  free(mean);
3361  free(sigma);
3362  free(minimum);
3363  free(maximum);
3364  printf("\n");
3365  return returnCode;
3366 }
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
int MLP_StatInputs(int Nexamples, int Ninputs, type_pat **inputs, dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)
Definition: mlp_gen.cc:3278
double dbl
Definition: mlp_gen.h:12
double MLP_Rand ( dbl  min,
dbl  max 
)

Definition at line 2131 of file mlp_gen.cc.

References random.

Referenced by InitWeights(), and ShuffleExamples().

/*
 * MLP_Rand - pseudo-random number uniformly distributed in
 * [mini, maxi], based on the C library random() generator
 * (no seeding is done here).
 */
double MLP_Rand(dbl mini, dbl maxi)
{
return mini+(maxi-mini)*random()/RAND_MAX;
}
TRandom random
Definition: MVATrainer.cc:138
void MLP_ResLin ( )

Definition at line 1971 of file mlp_gen.cc.

References dgels_(), recoMuon::in, LEARN, MLP_Out(), MLP_Test(), NET, PAT, and mathSSE::sqrt().

Referenced by MLP_Epoch(), MLP_LineHyb(), and StochStepHyb().

1972 {
1973 /* dbl rrans[NMAX], rrout[NMAX];*/
1974 /* type_pat rrin[NMAX];*/
1975  double *HR,*dpat; //,*wlin,*SV;
1976  double err,lambda,lambda2;
1977  int Nl,M,Nhr,khr,nrhs,iret,ierr;
1978  int il, in, inl, ipat;
1979  /*register dbl a;*/ //a unused
1980  char Trans = 'N';
1981 
1982 
1983 /* int rank; */
1984 // double rcond = -1; /* use machine precision */
1985 
1986  lambda2 = LEARN.Alambda;
1987 
1988 /* Nl = number of linear weights
1989  M = number of terms in linear system = number of examples + regularisation*/
1990  Nl = NET.Nneur[NET.Nlayer-2] + 1;
1991  M = PAT.Npat[0]+Nl;
1992 
1993  int Lwork = 5 * M;
1994  double *Work = (double*) malloc((int) Lwork*sizeof(double));
1995 
1996 /* memory allocation */
1997  dpat = (double*) malloc((int) M*sizeof(double));
1998 // wlin = (double*) malloc((int) Nl*sizeof(double));
1999 // SV = (double*) malloc((int) Nl*sizeof(double));
2000 
2001  Nhr = M * Nl;
2002  HR = (double*) malloc((int) Nhr*sizeof(double));
2003  err = 0.;
2004  for(ipat=0;ipat<PAT.Npat[0];ipat++)
2005  {
2006 /* *** Filling dpat and HR *** */
2007 /* for(in=0; in<NET.Nneur[0]; in++)
2008  {
2009  rrin[in] = PAT.Rin[0][ipat][in];
2010  }*/
2011 
2012  MLP_Out(PAT.Rin[0][ipat],NET.Outn[NET.Nlayer-1]);
2013 /* MLP_Out(rrin,rrout);*/
2014  /*for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
2015  {
2016  a = (dbl) PAT.Rans[0][ipat][in]; //a was not used
2017  } */
2018  il = NET.Nlayer-2;
2019  dpat[ipat] = (dbl) PAT.Rans[0][ipat][0]*sqrt(PAT.Pond[0][ipat]);
2020  khr = ipat;
2021  HR[khr] = (dbl) sqrt(PAT.Pond[0][ipat]);
2022  for(in=0;in<NET.Nneur[il];in++)
2023  {
2024  khr = M *(in+1) + ipat;
2025  HR[khr] = NET.Outn[il][in]*
2026  (dbl) sqrt(PAT.Pond[0][ipat]);
2027  }
2028  }
2029  il = NET.Nlayer-2;
2030  lambda = sqrt(lambda2);
2031  for(ipat=0;ipat<=NET.Nneur[il];ipat++)
2032  {
2033  dpat[ipat+PAT.Npat[0]] = 0;
2034  for(in=0;in<=NET.Nneur[il];in++)
2035  {
2036  khr = M *in + ipat + PAT.Npat[0];
2037  HR[khr] = 0;
2038  if(in==ipat) HR[khr]=lambda;
2039  }
2040  }
2041  if(NET.Debug>=4)
2042  {
2043  err = MLP_Test(0,0);
2044  printf("entry ResLin, err=MLP_Test(0,0), err= %f\n",err);
2045  }
2046 /* */
2047 /* Trouve les poids lineaires par resolution lineaire */
2048 /* */
2049  nrhs = 1;
2050  ierr = dgels_(&Trans,&M,&Nl,&nrhs,HR,&M,dpat,&M,Work,
2051  &Lwork,&iret);
2052  if(iret != 0) printf("Warning from dgels: iret = %d\n",(int)iret);
2053  if(ierr != 0) printf("Warning from dgels: ierr = %d\n",(int)ierr);
2054 
2055 /* ierr = dgelss_(&M,&Nl,&nrhs,HR,&M,dpat,&M,SV,&rcond,&rank,Work,&Lwork,
2056  &iret);
2057  if(iret != 0) printf("Warning from dgelss: iret = %d\n",iret);
2058  if(ierr != 0) printf("Warning from dgelss: ierr = %d\n",ierr);*/
2059 
2060  il = NET.Nlayer-1;
2061  for (inl=0; inl<=NET.Nneur[il-1];inl++)
2062  {
2063  NET.Weights[il][0][inl] = dpat[inl];
2064  }
2065  if(NET.Debug>=4)
2066  {
2067  err = MLP_Test(0,0);
2068  printf("ResLin, apres tlsfor, err= %f\n",err);
2069  }
2070  free(Work);
2071  free(dpat);
2072 // free(wlin);
2073  free(HR);
2074 // free(SV);
2075 }
int dgels_(char *trans, int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *work, int *lwork, int *info)
void MLP_Out(type_pat *rrin, dbl *rrout)
Definition: mlp_gen.cc:62
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
T sqrt(T t)
Definition: SSEVec.h:18
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl MLP_Test(int ifile, int regul)
Definition: mlp_gen.cc:446
int MLP_SetNet ( int *  nl,
int *  nn 
)

Definition at line 3642 of file mlp_gen.cc.

References AllocNetwork(), createfilelist::int, NET, NLMAX, and SetDefaultFuncs().

Referenced by PhysicsTools::MLP::MLP(), and ReadPatterns().

3643 {
3644  int il,ierr;
3645 
3646  if((*nl)>NLMAX) return(1);
3647  if((*nl)<2) return(2);
3648 
3649 /* LearnFree(); */
3650 /* allocate memory */
3651  ierr = AllocNetwork(*nl,nn);
3652  if(ierr != 0) return ierr;
3653 
3654 /* set number of layers */
3655  NET.Nlayer = (int) *nl;
3656 
3657 /* set number of neurons */
3658  for(il=0; il<NET.Nlayer; il++) {
3659  NET.Nneur[il] = nn[il];
3660  }
3661 
3662 /* set transfer functions */
3663  SetDefaultFuncs();
3664 /* LearnAlloc(); */
3665 
3666  return(0);
3667 }
#define NLMAX
Definition: mlp_gen.cc:14
#define NET
Definition: mlp_gen.h:25
void SetDefaultFuncs()
Definition: mlp_gen.cc:1223
int AllocNetwork(int Nlayer, int *Neurons)
Definition: mlp_gen.cc:3451
int MLP_StatInputs ( int  Nexamples,
int  Ninputs,
type_pat **  inputs,
dbl mean,
dbl sigma,
dbl minimum,
dbl maximum 
)

Definition at line 3278 of file mlp_gen.cc.

References mathSSE::sqrt().

Referenced by MLP_PrintInputStat(), and NormalizeInputs().

3280 {
3281  dbl *fmean;
3282  int j, ipat, nmax;
3283 
3284 /* first compute a fast rough mean using the first 100 events */
3285  fmean = (dbl*) malloc(Ninputs*sizeof(dbl));
3286  nmax = 100;
3287  if(Nexamples<100) nmax=Nexamples;
3288 
3289  for(j=0;j<Ninputs;j++)
3290  {
3291  fmean[j] = 0;
3292  for(ipat=0;ipat<nmax;ipat++)
3293  {
3294  fmean[j] += (dbl) inputs[ipat][j];
3295  }
3296  fmean[j] = fmean[j]/(dbl) nmax;
3297 
3298 /* now compute real mean and sigma, min and max */
3299  mean[j] = 0;
3300  sigma[j] = 0;
3301  minimum[j] = 99999;
3302  maximum[j] = -99999;
3303  for(ipat=0;ipat<Nexamples;ipat++)
3304  {
3305  mean[j] += (dbl) inputs[ipat][j];
3306  sigma[j] += ((dbl) inputs[ipat][j]-fmean[j])*
3307  ((dbl) inputs[ipat][j]-fmean[j]);
3308  if((dbl) inputs[ipat][j] > maximum[j])
3309  maximum[j]=(dbl) inputs[ipat][j];
3310  if((dbl) inputs[ipat][j] < minimum[j])
3311  minimum[j]=(dbl) inputs[ipat][j];
3312  }
3313  mean[j] = mean[j]/(dbl) Nexamples;
3314  sigma[j] = sqrt(sigma[j]/ (dbl) Nexamples -
3315  (mean[j]-fmean[j])*
3316  (mean[j]-fmean[j]));
3317  }
3318  free(fmean);
3319  return 0;
3320 }
T sqrt(T t)
Definition: SSEVec.h:18
double dbl
Definition: mlp_gen.h:12
dbl MLP_Stochastic ( )

Definition at line 518 of file mlp_gen.cc.

References a, b, createTree::dd, PVValHelper::eta, EtaDecay(), cuy::ii, recoMuon::in, LEARN, MLP_Out2(), NET, PAT, ShuffleExamples(), and HGCalRecHit_cfi::weights.

Referenced by MLP_Epoch().

/*
 * One epoch of stochastic (online) learning over the learning sample.
 * For each example (taken in ExamplesIndex order): forward pass
 * (MLP_Out2), accumulation of the weighted quadratic error,
 * backpropagation of the deltas (with the learning rate eta already
 * folded into them), and an immediate weight update -- a plain update
 * when the momentum term eps is zero, a momentum update through
 * LEARN.Odw otherwise.  Returns the summed weighted quadratic error
 * of the epoch.
 *
 * NOTE(review): this listing jumps from line 531 to 533; the original
 * line 532 (presumably the ShuffleExamples() call announced by the
 * comment above it and by the cross-reference list) was lost in
 * extraction -- verify against mlp_gen.cc.
 *
 * NOTE(review): the backpropagation and update loops are manually
 * unrolled by 4 (in += 4), each followed by a scalar tail loop for
 * the remaining neurons; the goto L1/L2 shortcuts skip the inner
 * accumulation when the next layer has a single neuron.
 */
 519 {
 520  int ipat, ii, inm1;
 521  dbl err = 0;
 522  int il, in1, in, itest2;
 523  dbl deriv, deriv1, deriv2, deriv3, deriv4, pond;
 524  dbl eta, eps;
 525  dbl a, b, dd, a1, a2, a3, a4;
 526  dbl *pout, *pdelta, *pw1, *pw2, *pw3, *pw4;
 527  dbl ***weights;
 528 
 529  if(NET.Debug>=5) printf(" Entry MLP_Stochastic\n");
 530  weights = NET.Weights;
 531 /* shuffle patterns */
 533 
 534 /* reduce learning parameter */
 535  if(LEARN.Decay<1) EtaDecay();
 536 
 537  eta = -LEARN.eta;
 538  eps = LEARN.epsilon;
 539 
 540 /* loop on the examples */
 541  for(ipat=0;ipat<PAT.Npat[0];ipat++)
 542  {
 543  ii = ExamplesIndex[ipat];
 544  pond = PAT.Pond[0][ii];
 545 
 546  MLP_Out2(&(PAT.vRin[0][ii*(NET.Nneur[0]+1)]));
 547 
 548 /* next lines are equivalent to DeDwSum */
 549  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
 550  {
 551  deriv = NET.Deriv1[NET.Nlayer-1][in];
 552  a = (dbl) PAT.Rans[0][ii][in];
 553  b = NET.Outn[NET.Nlayer-1][in]-a;
 554  err += b*b*pond;
 555  NET.Delta[NET.Nlayer-1][in] = b*deriv*pond*eta;
 556  }
 557 
 558  for(il=NET.Nlayer-2; il>0; il--)
 559  {
 560  dd = NET.Delta[il+1][0];
 561  for(in=0; in<NET.Nneur[il]-3; in+=4)
 562  {
 563  deriv1 = NET.Deriv1[il][in];
 564  deriv2 = NET.Deriv1[il][in+1];
 565  deriv3 = NET.Deriv1[il][in+2];
 566  deriv4 = NET.Deriv1[il][in+3];
 567  itest2 = (NET.Nneur[il+1]==1);
 568  a1 = dd*weights[il+1][0][in+1];
 569  a2 = dd*weights[il+1][0][in+2];
 570  a3 = dd*weights[il+1][0][in+3];
 571  a4 = dd*weights[il+1][0][in+4];
 572  if(itest2) goto L1;
 573  pdelta = &(NET.Delta[il+1][1]);
 574  for(in1=1; in1<NET.Nneur[il+1];
 575  in1++, pdelta++)
 576  {
 577  a1 += *pdelta * weights[il+1][in1][in+1];
 578  a2 += *pdelta * weights[il+1][in1][in+2];
 579  a3 += *pdelta * weights[il+1][in1][in+3];
 580  a4 += *pdelta * weights[il+1][in1][in+4];
 581  }
 582 L1: NET.Delta[il][in] = a1*deriv1;
 583  NET.Delta[il][in+1] = a2*deriv2;
 584  NET.Delta[il][in+2] = a3*deriv3;
 585  NET.Delta[il][in+3] = a4*deriv4;
 586  }
 587  for(/*'in' set above*/; in<NET.Nneur[il]; in++)
 588  {
 589  deriv = NET.Deriv1[il][in];
 590  itest2 = (NET.Nneur[il+1]==1);
 591  a = dd*weights[il+1][0][in+1];
 592  if(itest2) goto L2;
 593  pdelta = &(NET.Delta[il+1][1]);
 594  for(in1=1; in1<NET.Nneur[il+1];
 595  in1++, pdelta++)
 596  {
 597  a += *pdelta *
 598  weights[il+1][in1][in+1];
 599  }
 600 L2: NET.Delta[il][in] = a*deriv;
 601  }
 602 
 603  } /* end of loop on layers */
 604 
 605 
 606 /* update the weights */
 607  if(eps==0)
 608  {
 609  for(il=1; il<NET.Nlayer; il++)
 610  {
 611  inm1 = NET.Nneur[il-1];
 612  for(in=0; in<NET.Nneur[il]-3; in+=4)
 613  {
 614  a1 = NET.Delta[il][in];
 615  a2 = NET.Delta[il][in+1];
 616  a3 = NET.Delta[il][in+2];
 617  a4 = NET.Delta[il][in+3];
 618  pout = &(NET.Outn[il-1][0]);
 619  weights[il][in][0] += a1;
 620  weights[il][in+1][0] += a2;
 621  weights[il][in+2][0] += a3;
 622  weights[il][in+3][0] += a4;
 623  weights[il][in][1] += a1* (*pout);
 624  weights[il][in+1][1] += a2* (*pout);
 625  weights[il][in+2][1] += a3* (*pout);
 626  weights[il][in+3][1] += a4* (*pout);
 627  pout++;
 628  pw1 = &(weights[il][in][2]);
 629  pw2 = &(weights[il][in+1][2]);
 630  pw3 = &(weights[il][in+2][2]);
 631  pw4 = &(weights[il][in+3][2]);
 632  for(in1=2; in1<=inm1;
 633  ++in1, ++pout, ++pw1, ++pw2,
 634  ++pw3, ++pw4)
 635  {
 636  *pw1 += a1 * *pout;
 637  *pw2 += a2 * *pout;
 638  *pw3 += a3 * *pout;
 639  *pw4 += a4 * *pout;
 640  }
 641  }
 642  for(/*'in' set above*/; in<NET.Nneur[il]; in++)
 643  {
 644  a1 = NET.Delta[il][in];
 645  pout = &(NET.Outn[il-1][0]);
 646  weights[il][in][0] += a1;
 647  weights[il][in][1] += a1* (*pout);
 648  pout++;
 649  pw1 = &(weights[il][in][2]);
 650  for(in1=2; in1<=inm1;
 651  ++in1, ++pout, ++pw1)
 652  {
 653  *pw1 += a1 * *pout;
 654  }
 655  }
 656  }
 657  }
 658  else
 659  {
/* momentum update: Odw <- Delta + eps*Odw, then Weights += Odw */
 660  for(il=1; il<NET.Nlayer; il++)
 661  {
 662  for(in=0; in<NET.Nneur[il]; in++)
 663  {
 664 
 665  a = NET.Delta[il][in];
 666  LEARN.Odw[il][in][0] = a + eps * LEARN.Odw[il][in][0];
 667  NET.Weights[il][in][0] += LEARN.Odw[il][in][0];
 668 
 669  b = a*NET.Outn[il-1][0];
 670  LEARN.Odw[il][in][1] = b + eps*LEARN.Odw[il][in][1];
 671  NET.Weights[il][in][1] += LEARN.Odw[il][in][1];
 672 
 673  for(in1=2; in1<=NET.Nneur[il-1]; in1++)
 674  {
 675  b = a*NET.Outn[il-1][in1-1];
 676  LEARN.Odw[il][in][in1] = b + eps*LEARN.Odw[il][in][in1];
 677  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
 678  }
 679  }
 680  }
 681  }
 682 
 683  } /* end of loop on examples */
 684  return(err);
 685 }
int * ExamplesIndex
Definition: mlp_gen.cc:40
void EtaDecay()
Definition: mlp_gen.cc:2086
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
int ShuffleExamples(int n, int *index)
Definition: mlp_gen.cc:2102
ii
Definition: cuy.py:590
#define LEARN
Definition: mlp_gen.h:36
double b
Definition: hdecay.h:120
double a
Definition: hdecay.h:121
double dbl
Definition: mlp_gen.h:12
void MLP_Out2(type_pat *rrin)
Definition: mlp_gen.cc:184
dbl MLP_Test ( int  ifile,
int  regul 
)

Definition at line 446 of file mlp_gen.cc.

References compare_using_db::ifile, recoMuon::in, LEARN, MLP_Out_T(), MLP_Test_MM(), NET, PAT, and tmp.

Referenced by DecreaseSearch(), LineSearch(), LineSearchHyb(), MLP_ResLin(), and SetLambda().

447 {
448  dbl err, rrans;
449  int in,jn,ipat,ipati;
450 
451  dbl *tmp;
452 
453  tmp = (dbl *) malloc(2 * NET.Nneur[1] * sizeof(dbl));
454  if(tmp == nullptr) /* not enough memory */
455  {
456  printf("not enough memory in MLP_Test\n");
457  err = 0;
458  for(ipat=0; ipat<PAT.Npat[ifile]; ipat++)
459  {
460  if(ifile==0)
461  {
462  ipati = ExamplesIndex[ipat];
463  }
464  else
465  {
466  ipati = ipat;
467  }
468  MLP_Out_T(PAT.Rin[ifile][ipati]);
469  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
470  {
471  rrans = (dbl) PAT.Rans[ifile][ipati][in];
472  err += (rrans-NET.Outn[NET.Nlayer-1][in])*
473  (rrans-NET.Outn[NET.Nlayer-1][in])*
474  PAT.Pond[ifile][ipati];
475  }
476  }
477 
478  if(regul>=1)
479  {
480  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
481  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
482  {
483  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
484  NET.Weights[NET.Nlayer-1][in][jn];
485  }
486  }
487  free(tmp);
488  return(err);
489  }
490  else /* computation using matrix - matrix multiply */
491  {
492  err = MLP_Test_MM(ifile, tmp);
493  if(regul>=1)
494  {
495  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
496  for(jn=0; jn<=NET.Nneur[NET.Nlayer-2]; jn++)
497  {
498  err += LEARN.Alambda*NET.Weights[NET.Nlayer-1][in][jn]*
499  NET.Weights[NET.Nlayer-1][in][jn];
500  }
501  }
502  free(tmp);
503  return(err);
504  }
505 }
dbl MLP_Test_MM(int ifile, dbl *tmp)
Definition: mlp_gen.cc:263
int * ExamplesIndex
Definition: mlp_gen.cc:40
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
void MLP_Out_T(type_pat *rrin)
Definition: mlp_gen.cc:113
#define LEARN
Definition: mlp_gen.h:36
std::vector< std::vector< double > > tmp
Definition: MVATrainer.cc:100
double dbl
Definition: mlp_gen.h:12
int MLP_Train ( int *  ipat,
dbl err 
)

Definition at line 895 of file mlp_gen.cc.

References DeDwSum(), recoMuon::in, MLP_Out2(), NET, and PAT.

Referenced by MLP_Epoch().

896 {
897  int in;
898 
899 /* if(*ipat>=PAT.Npat[0]) return(1);*/
900  if(*ipat<0) return(2);
901 
902 /* MLP_Out(PAT.Rin[0][*ipat],NET.Outn[NET.Nlayer-1]);*/
903  MLP_Out2(&(PAT.vRin[0][*ipat*(NET.Nneur[0]+1)]));
904  for(in=0; in<NET.Nneur[NET.Nlayer-1]; in++)
905  {
906  *err += ((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])
907  *((dbl) PAT.Rans[0][*ipat][in]-NET.Outn[NET.Nlayer-1][in])*
908  PAT.Pond[0][*ipat];
909  }
910  DeDwSum(PAT.Rans[0][*ipat],NET.Outn[NET.Nlayer-1],*ipat);
911  return(0);
912 }
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
int DeDwSum(type_pat *ans, dbl *out, int ipat)
Definition: mlp_gen.cc:1123
double dbl
Definition: mlp_gen.h:12
void MLP_Out2(type_pat *rrin)
Definition: mlp_gen.cc:184
int NormalizeInputs ( )

Definition at line 3382 of file mlp_gen.cc.

References objects.autophobj::float, SiStripPI::mean, MLP_StatInputs(), NET, PAT, runEdmFileComparison::returnCode, stat_::sigma, and STAT.

3383 {
3384  int j, ipat;
3385  dbl *mean, *sigma, *minimum, *maximum;
3386 
3387 /* allocate memory */
3388  mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3389  sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3390  STAT.mean = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3391  STAT.sigma = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3392  minimum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3393  maximum = (dbl *) malloc(NET.Nneur[0]*sizeof(dbl));
3394  int returnCode = -111; // to return if any malloc failed
3395 
3396  if(mean && sigma && minimum && maximum && STAT.mean && STAT.sigma) {
3397 
3398  MLP_StatInputs(PAT.Npat[0],NET.Nneur[0],PAT.Rin[0],mean,sigma,minimum,maximum);
3399 
3400  if(NET.Debug>=1) printf("\t mean \t\t RMS \t\t min \t\t max\n");
3401  for(j=0;j<NET.Nneur[0];j++)
3402  {
3403  if(NET.Debug>=1)
3404  printf("var%d \t %e \t %e \t %e \t %e\n",j+1,
3405  mean[j],sigma[j],minimum[j],maximum[j]);
3406 
3407  /* store mean and sigma for output function */
3408  STAT.mean[j] = mean[j];
3409  STAT.sigma[j] = sigma[j];
3410 
3411  /* finally apply the normalization */
3412  for(ipat=0;ipat<PAT.Npat[0];ipat++)
3413  {
3414  PAT.Rin[0][ipat][j] =
3415  (PAT.Rin[0][ipat][j]-(float) mean[j])/
3416  (float) sigma[j];
3417  }
3418  for(ipat=0;ipat<PAT.Npat[1];ipat++)
3419  {
3420  PAT.Rin[1][ipat][j] =
3421  (PAT.Rin[1][ipat][j]-(float) mean[j])/
3422  (float) sigma[j];
3423  }
3424  }
3425  returnCode = 0; // everything went fine
3426  }
3427 
3428  free(mean);
3429  free(sigma);
3430  free(minimum);
3431  free(maximum);
3432  if(NET.Debug>=1) printf("\n");
3433  return returnCode;
3434 }
#define STAT
Definition: mlp_gen.h:60
#define PAT
Definition: mlp_gen.h:45
#define NET
Definition: mlp_gen.h:25
int MLP_StatInputs(int Nexamples, int Ninputs, type_pat **inputs, dbl *mean, dbl *sigma, dbl *minimum, dbl *maximum)
Definition: mlp_gen.cc:3278
double dbl
Definition: mlp_gen.h:12
void PrintWeights ( )

Definition at line 2168 of file mlp_gen.cc.

References mps_fire::i, and NET.

Referenced by MLP_Epoch().

2169 {
2170  int ilayer,ineur,i;
2171 
2172  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2173  {
2174  if(MessLang==1)
2175  {
2176  printf("Couche %d\n",ilayer);
2177  }
2178  else
2179  {
2180  printf("Layer %d\n",ilayer);
2181  }
2182  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2183  {
2184  if(MessLang==1)
2185  {
2186  printf("Neurone %d",ineur);
2187  }
2188  else
2189  {
2190  printf("Neuron %d",ineur);
2191  }
2192  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2193  {
2194  printf(" %f",
2195  (double) NET.Weights[ilayer][ineur][i]);
2196  }
2197  printf("\n");
2198  }
2199  printf("\n");
2200  }
2201 }
int MessLang
Definition: mlp_gen.cc:22
#define NET
Definition: mlp_gen.h:25
int ReadPatterns ( char *  filename,
int  ifile,
int *  inet,
int *  ilearn,
int *  iexamples 
)

Definition at line 2229 of file mlp_gen.cc.

References AllocPatterns(), CLEN, CountLexemes(), DIVERS, GetNetStructure(), getnLexemes(), mps_fire::i, compare_using_db::ifile, checklumidiff::l, LEARN, mps_splice::line, MLP_SetNet(), NET, nin, NLMAX, nout, np, AlCaHLTBitMon_ParallelJobs::p, PAT, ReadPatterns(), alignCSCRings::s, and indexGen::s2.

Referenced by ReadPatterns().

2231 {
2232 char s[CLEN], s2[CLEN], cc[6], cc2[6];
2233 char otherfile[CLEN];
2234 double p;
2235 //int line,i,j;
2236 int line,i;
2237 //int l,ll,ipat,nmax,il,in,tf;
2238 int l,ll,ipat,nmax;
2239 int np=0; /* nombre d'exemples */
2240 int nin=0; /* nombre d'entrees */
2241 int nout=0; /* nombre de sorties */
2242 int npon=0;
2243 int ntot, ierr;
2244 //char **ss;
2245 char **ss=nullptr;
2246 FILE *LVQpat;
2247 int nlayer, nneur[NLMAX];
2248 
2249 printf("\nLoading file %s\n",filename);
2250 LVQpat=fopen(filename,"r");
2251 if(LVQpat == nullptr) return -1;
2252 
2253 line=0;
2254 
2255 while(fgets(s,CLEN,LVQpat))
2256  {
2257  if(*s=='N')
2258  {
2259  if(*(s+1)=='N') /* NNEU */
2260  {
2261  printf("Number of neurons %s",s);
2262  *inet = 1;
2263  sscanf(s,"%s %s",cc,s2);
2264  ierr = GetNetStructure(s2,&nlayer,nneur);
2265  if(ierr != 0) return ierr;
2266  ierr = MLP_SetNet(&nlayer,nneur);
2267  if(ierr != 0) return ierr;
2268  }
2269  else
2270  {
2271  sscanf(s,"%s %d",cc,&l);
2272  if(*(cc+1)=='P') /* NPAT */
2273  {
2274  np=l;
2275  printf("Number of patterns %d\n",np);
2276  }
2277  else if(*(cc+1)=='I') /* NINP */
2278  {
2279  nin=l;
2280  PAT.Nin = nin;
2281  printf("Number of inputs %d\n",nin);
2282  }
2283  else if(*(cc+1)=='O' && *(cc+2)=='U') /* NOUT */
2284  {
2285  nout=l;
2286  PAT.Nout = nout;
2287  printf("Number of outputs %d\n",nout);
2288  }
2289  else if(*(cc+1)=='O' && *(cc+2)=='R') /* NORM */
2290  {
2291  DIVERS.Norm=l;
2292  if(l==1) printf("Normalize inputs\n");
2293  }
2294 /* obsolete datacard NLAY */
2295  else if(*(cc+1)=='L')
2296  {
2297  printf("NLAY datacard is no longer needed\n");
2298  }
2299  else if(*(cc+1)=='E') /* NEPO */
2300  {
2301  LEARN.Nepoch=l;
2302  printf("Number of epochs %d\n",l);
2303  }
2304  else if(*(cc+1)=='R') /* NRES */
2305  {
2306  LEARN.Nreset=l;
2307  printf(
2308  "Reset to steepest descent every %d epochs\n",
2309  l);
2310  }
2311  }
2312  }
2313  else if(*s=='L')
2314  {
2315  if(*(s+1)=='P') /* LPAR */
2316  {
2317  sscanf(s,"%s %le",cc,&p);
2318  printf("Learning parameter %f\n",p);
2319  LEARN.eta = (dbl) p;
2320  }
2321  else if(*(s+1)=='M') /* LMET */
2322  {
2323  *ilearn = 1;
2324  sscanf(s,"%s %d",cc,&(LEARN.Meth));
2325  printf("Learning method = ");
2326  switch(LEARN.Meth)
2327  {
2328  case 1: printf("Stochastic Minimization\n");
2329  break;
2330  case 2: printf("Steepest descent with fixed step\n");
2331  break;
2332  case 3: printf("Steepest descent with line search\n"); break;
2333  case 4: printf("Polak-Ribiere Conjugate Gradients\n"); break;
2334  case 5: printf("Fletcher-Reeves Conjugate Gradients\n");
2335  break;
2336  case 6: printf("BFGS\n");
2337  break;
2338  case 7: printf("Hybrid BFGS-linear\n");
2339  break;
2340  default: printf("Error: unknown method\n"); break;
2341  }
2342 
2343  }
2344  else if(*(s+1)=='T') /* LTAU */
2345  {
2346  sscanf(s,"%s %lf",cc,&p);
2347  printf("Tau %f\n",p);
2348  LEARN.Tau = (dbl) p;
2349  }
2350  else if(*(s+1)=='A') /* LAMB */
2351  {
2352  sscanf(s,"%s %lf",cc,&p);
2353  printf("Lambda %f\n",p);
2354  LEARN.Lambda = (dbl) p;
2355  }
2356  }
2357  else if(*s=='F')
2358  {
2359  if(*(s+1)=='S') /* FSPO */
2360  {
2361  sscanf(s,"%s %le",cc,&p);
2362  printf("Flat spot elimination parameter %f\n",p);
2363  LEARN.delta = (dbl) p;
2364  }
2365  else if(*(s+1)=='I') /* FILE */
2366  {
2367  sscanf(s,"%s %s",cc,otherfile);
2368  ierr = ReadPatterns(otherfile,ifile, inet, ilearn, iexamples);
2369  if(ierr != 0) return ierr;
2370  }
2371  }
2372  else if(*s=='M') /* momentum */
2373  {
2374  sscanf(s,"%s %le",cc,&p);
2375  printf("Momentum term %f\n",p);
2376  LEARN.epsilon = (dbl) p;
2377  }
2378  else if(*s=='O') /* OUTx */
2379  {
2380  if(*(s+3)=='W') /* OUTW */
2381  {
2382  sscanf(s,"%s %d",cc,&OutputWeights);
2383  if(OutputWeights == 0)
2384  {
2385  printf("Never write file weights.out\n");
2386  }
2387  else if(OutputWeights == -1)
2388  {
2389  printf("Write weights to output file at the end\n");
2390  }
2391  else
2392  {
2393  printf("Write weights to file every %d epochs\n",
2394  OutputWeights);
2395  }
2396  }
2397  else if(*(s+3)=='F') /* OUTF */
2398  {
2399  sscanf(s,"%s %s",cc,cc2);
2400  if(*cc2=='F' || *cc2=='C')
2401  {
2402  DIVERS.Outf = *cc2;
2403  }
2404  else
2405  {
2406  printf(" *** Error while loading file %s at line %s :",
2407  filename,s);
2408  printf(" unknown language\n");
2409  }
2410  }
2411  else
2412  {
2413  printf(" *** Error while loading file %s at line %s\n",
2414  filename,s);
2415  }
2416  }
2417  else if(*s=='R') /* RDWT */
2418  {
2419  sscanf(s,"%s %d",cc,&(NET.Rdwt));
2420  if(NET.Rdwt == 0)
2421  {
2422  printf("Random weights \n");
2423  }
2424  else
2425  {
2426  printf("Read weights from file weights.in\n");
2427  }
2428  }
2429  else if(*s=='S') /* STAT */
2430  {
2431  sscanf(s,"%s %d",cc,&(DIVERS.Stat));
2432  }
2433 /* else if(*s=='T') TFUN
2434  {
2435  sscanf(s,"%s %d %d %d",cc,&il,&in,&tf);
2436  SetTransFunc(il,in,tf);
2437  } */
2438  else if(*s=='H') /* HESS */
2439  {
2440  sscanf(s,"%s %d",cc,&(DIVERS.Ihess));
2441  }
2442  else if(*s=='D')
2443  {
2444  if(*(s+1)=='C') /* DCAY */
2445  {
2446  sscanf(s,"%s %le",cc,&p);
2447  LEARN.Decay = p;
2448  printf("Learning parameter decay %f\n",
2449  (double) LEARN.Decay);
2450  }
2451  if(*(s+1)=='B') /* DBIN */
2452  {
2453  sscanf(s,"%s %d",cc,&(DIVERS.Dbin));
2454  printf("Fill histogram every %d epochs\n",DIVERS.Dbin);
2455  }
2456  if(*(s+1)=='E') /* DEBU */
2457  {
2458  sscanf(s,"%s %d",cc,&(NET.Debug));
2459  printf("Debug mode %d\n",NET.Debug);
2460  }
2461  }
2462  else if(*s=='P') /* POND */
2463  {
2464  npon = CountLexemes(s);
2465  if(npon==2)
2466  {
2467  sscanf(s,"%s %d",cc,&(PAT.Iponde));
2468  }
2469  else
2470  {
2471  ss = (char**) malloc((npon+1)*sizeof(char*));
2472  for(i=0;i<=npon;i++)
2473  ss[i]=(char*) malloc(40*sizeof(char));
2474  getnLexemes(npon,s,ss);
2475  sscanf(ss[1],"%d",&(PAT.Iponde));
2476  for(i=2;i<npon;i++)
2477  {
2478  sscanf(ss[i],"%le",&(PAT.Ponds[i-2]));
2479  }
2480  }
2481  if(PAT.Iponde==0)
2482  {
2483  npon = 0;
2484  }
2485  else
2486  {
2487  npon = 1;
2488  }
2489  }
2490  else if(*s=='#') /* comments */
2491  {
2492  }
2493  else /* exemple itself */
2494  {
2495  if(np==0) return 1;
2496  if(nin==0) return 2;
2497  if(nout==0) return 3;
2498 
2499 
2500 /* store number of exemples and allocate memory*/
2501  if(line==0)
2502  {
2503  PAT.Npat[ifile] = np;
2504  ierr = AllocPatterns(ifile,np,nin,nout,0);
2505  if(ierr != 0) return ierr;
2506  *iexamples = 1;
2507  }
2508 
2509 /* now get exemple */
2510 
2511  line++;
2512  ll = (line-1)%2;
2513  ipat = (line-1)/2;
2514  /* printf("Loading event \t %d\r",ipat);*/
2515 /* if(ipat>NPMAX)
2516  {
2517  printf("Too many examples in file\n");
2518  printf("Loading %d examples\n",NPMAX);
2519  PAT.Npat[ifile] = NPMAX;
2520  break;
2521  }
2522 */
2523 
2524 /* allocate the number of lines */
2525 
2526  if(line==1)
2527  {
2528 
2529  nmax = nin;
2530  if(nout>nin) nmax=nout;
2531  ss = (char**) malloc((nmax+1)*sizeof(char*));
2532  if(ss == nullptr) return -111;
2533  for(i=0;i<=nmax;i++)
2534  {
2535  ss[i]=(char*) malloc(40*sizeof(char));
2536  if(ss[i] == nullptr) return -111;
2537  }
2538  }
2539 
2540  if(ll==0) /* inputs */
2541  {
2542  getnLexemes(nin,s,ss);
2543  for(i=0;i<nin;i++)
2544  {
2545  sscanf(ss[i],"%le",&p);
2546  PAT.Rin[ifile][ipat][i] = (type_pat) p;
2547  }
2548  }
2549  else /* answers */
2550  {
2551  ntot=nout+npon;
2552  getnLexemes(ntot,s,ss);
2553  for(i=0;i<ntot;i++)
2554  {
2555  sscanf(ss[i],"%le",&p);
2556  if(i<nout)
2557  {
2558  PAT.Rans[ifile][ipat][i] = (type_pat) p;
2559  }
2560  else
2561  {
2562  if(PAT.Iponde==1)
2563  {
2564  PAT.Pond[ifile][ipat] =
2565  (type_pat) p;
2566  }
2567  else
2568  {
2569  PAT.Pond[ifile][ipat] =
2570  (type_pat) PAT.Ponds[(int) p -1];
2571  }
2572  }
2573  }
2574  }
2575  }
2576  }
2577  printf("%d examples loaded \n\n",PAT.Npat[ifile]);
2578  fclose(LVQpat);
2579  return 0;
2580 }
void getnLexemes(int n, char *s, char **ss)
Definition: mlp_gen.cc:2602
int ReadPatterns(char *filename, int ifile, int *inet, int *ilearn, int *iexamples)
Definition: mlp_gen.cc:2229
int OutputWeights
Definition: mlp_gen.cc:23
#define NLMAX
Definition: mlp_gen.cc:14
int MLP_SetNet(int *nl, int *nn)
Definition: mlp_gen.cc:3642
#define PAT
Definition: mlp_gen.h:45
int nin
#define NET
Definition: mlp_gen.h:25
int np
Definition: AMPTWrapper.h:33
int GetNetStructure(char *s, int *Nlayer, int *Nneur)
Definition: mlp_gen.cc:3594
double type_pat
Definition: mlp_gen.h:13
int CountLexemes(char *s)
Definition: mlp_gen.cc:2585
int AllocPatterns(int ifile, int npat, int nin, int nout, int iadd)
Definition: mlp_gen.cc:3080
#define DIVERS
Definition: mlp_gen.h:54
#define LEARN
Definition: mlp_gen.h:36
int nout
double dbl
Definition: mlp_gen.h:12
#define CLEN
Definition: mlp_gen.cc:2226
int SaveWeights ( char *  filename,
int  iepoch 
)

Definition at line 2975 of file mlp_gen.cc.

References mps_fire::i, and NET.

Referenced by PhysicsTools::MLP::save().

2976 {
2977  FILE *W;
2978  int ilayer,ineur,i;
2979 
2980  W=fopen(filename,"w");
2981  if(W==nullptr) return -1;
2982 
2983  fprintf(W,"# network structure ");
2984  for(ilayer=0; ilayer<NET.Nlayer; ilayer++)
2985  {
2986  fprintf(W,"%d ",NET.Nneur[ilayer]);
2987  }
2988 
2989  fprintf(W,"\n %d\n",iepoch);
2990  for(ilayer=1; ilayer<NET.Nlayer; ilayer++)
2991  {
2992  for(ineur=0; ineur<NET.Nneur[ilayer]; ineur++)
2993  {
2994  for(i=0; i<=NET.Nneur[ilayer-1]; i++)
2995  {
2996  fprintf(W," %1.15e\n",
2997  (double) NET.Weights[ilayer][ineur][i]);
2998  }
2999  }
3000  }
3001  fclose(W);
3002  return 0;
3003 }
#define NET
Definition: mlp_gen.h:25
void SetDefaultFuncs ( )

Definition at line 1223 of file mlp_gen.cc.

References recoMuon::in, and NET.

Referenced by MLP_SetNet().

1224 {
1225  int il,in;
1226  for(il=0; il<NET.Nlayer; il++) {
1227  for(in=0; in<NET.Nneur[il]; in++) {
1228  NET.T_func[il][in] = 2;
1229  if(il==NET.Nlayer-1) NET.T_func[il][in] = 1;
1230  }
1231  }
1232 
1233 }
#define NET
Definition: mlp_gen.h:25
void SetLambda ( double  Wmax)

Definition at line 1949 of file mlp_gen.cc.

References LEARN, MLP_Test(), and NET.

Referenced by MLP_Epoch().

1950 {
1951  dbl err;
1952  err = MLP_Test(0,0);
1953  LEARN.Alambda =
1954  LEARN.Lambda*err/(Wmax*Wmax*(dbl)(NET.Nneur[NET.Nlayer-2]+1));
1955 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
dbl MLP_Test(int ifile, int regul)
Definition: mlp_gen.cc:446
int SetTransFunc ( int  layer,
int  neuron,
int  func 
)

Definition at line 1200 of file mlp_gen.cc.

References patCandidates_cff::func, NET, and NLMAX.

1202 {
1203  if(layer>NLMAX) return(1);
1204 /* if(neuron>NMAX) return(2);*/
1205 
1206  NET.T_func[layer-1][neuron-1] = func;
1207 
1208  return(0);
1209 }
#define NLMAX
Definition: mlp_gen.cc:14
#define NET
Definition: mlp_gen.h:25
int ShuffleExamples ( int  n,
int *  index 
)

Definition at line 2102 of file mlp_gen.cc.

References a, mps_fire::i, cuy::ii, createfilelist::int, MLP_Rand(), and gen::n.

Referenced by MLP_Epoch(), and MLP_Stochastic().

2103 {
2104  int i,ii,itmp;
2105  dbl a = (dbl) (n-1);
2106 
2107  for(i=0;i<n;i++)
2108  {
2109  ii = (int) MLP_Rand(0.,a);
2110  itmp = index[ii];
2111  index[ii] = index[i];
2112  index[i] = itmp;
2113  }
2114  return 0;
2115 }
double MLP_Rand(dbl mini, dbl maxi)
Definition: mlp_gen.cc:2131
ii
Definition: cuy.py:590
double a
Definition: hdecay.h:121
double dbl
Definition: mlp_gen.h:12
void SteepestDir ( )

Definition at line 1244 of file mlp_gen.cc.

References recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

1245 {
1246  int il,in,jn;
1247  for(il=1; il<NET.Nlayer; il++)
1248  for(in=0; in<NET.Nneur[il]; in++)
1249  for(jn=0; jn<=NET.Nneur[il-1]; jn++)
1250  dir[il][in][jn] = -LEARN.DeDw[il][in][jn];
1251 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
dbl *** dir
Definition: mlp_gen.cc:35
int StochStep ( )

Definition at line 965 of file mlp_gen.cc.

References PVValHelper::eta, recoMuon::in, LEARN, and NET.

Referenced by MLP_Epoch().

966 {
967  int il, in1, in;
968  dbl eta, eps, epseta;
969 
970  eta = -LEARN.eta;
971  eps = LEARN.epsilon;
972  epseta = eps/eta;
973  for(il=NET.Nlayer-1; il>0; il--) {
974  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
975 
976  /* compute delta weights */
977  for(in=0; in<NET.Nneur[il]; in++) {
978  LEARN.Odw[il][in][in1] = eta * (LEARN.DeDw[il][in][in1]
979  + epseta * LEARN.Odw[il][in][in1]);
980  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
981  }
982 
983  }
984  }
985 
986  return(0);
987 }
#define NET
Definition: mlp_gen.h:25
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12
int StochStepHyb ( )

Definition at line 926 of file mlp_gen.cc.

References PVValHelper::eta, recoMuon::in, LEARN, MLP_ResLin(), and NET.

927 {
928  int il, in1, in;
929  dbl eta, eps;
930 
931  eta = LEARN.eta;
932  eps = LEARN.epsilon;
933  for(il=NET.Nlayer-2; il>0; il--) {
934 
935  for(in=0; in<NET.Nneur[il]; in++) {
936 
937  /* compute delta weights */
938  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
939  LEARN.Odw[il][in][in1] = -eta * LEARN.DeDw[il][in][in1]
940  + eps * LEARN.Odw[il][in][in1];
941  }
942 
943  /* update weights */
944  for(in1=0; in1<=NET.Nneur[il-1]; in1++) {
945  NET.Weights[il][in][in1] += LEARN.Odw[il][in][in1];
946  }
947  }
948  }
949  MLP_ResLin();
950  return(0);
951 }
#define NET
Definition: mlp_gen.h:25
void MLP_ResLin()
Definition: mlp_gen.cc:1971
#define LEARN
Definition: mlp_gen.h:36
double dbl
Definition: mlp_gen.h:12

Variable Documentation

dbl** Hessian MLP_HIDDEN

Definition at line 62 of file mlp_gen.h.