CMS 3D CMS Logo

 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Pages
ProcMLP.cc
Go to the documentation of this file.
#include <algorithm>
#include <iostream>
#include <sstream>
#include <fstream>
#include <cstddef>
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>
#include <memory>
#include <cmath>

#include <TRandom.h>

#include <xercesc/dom/DOM.hpp>
14 
16 
18 
24 
25 #include "MLP.h"
26 
27 XERCES_CPP_NAMESPACE_USE
28 
29 using namespace PhysicsTools;
30 
31 namespace { // anonymous
32 
33 class ProcMLP : public TrainProcessor {
34  public:
36 
37  ProcMLP(const char *name, const AtomicId *id,
38  MVATrainer *trainer);
39  virtual ~ProcMLP();
40 
41  virtual void configure(DOMElement *elem) override;
42  virtual Calibration::VarProcessor *getCalibration() const override;
43 
44  virtual void trainBegin() override;
45  virtual void trainData(const std::vector<double> *values,
46  bool target, double weight) override;
47  virtual void trainEnd() override;
48 
49  virtual bool load() override;
50  virtual void cleanup() override;
51 
52  private:
53  void runMLPTrainer();
54 
55  enum Iteration {
56  ITER_COUNT,
57  ITER_TRAIN,
58  ITER_WAIT,
59  ITER_DONE
60  } iteration;
61 
63  unsigned int steps;
64  unsigned int count, row;
65  double weightSum;
66  std::auto_ptr<MLP> mlp;
67  std::vector<double> vars;
68  std::vector<double> targets;
69  bool needCleanup;
70  int boost;
71  TRandom rand;
72  double limiter;
73 };
74 
// Self-registration: makes this trainer instantiable under the XML name
// "ProcMLP" via the MVATrainer processor registry.
static ProcMLP::Registry registry("ProcMLP");
76 
77 ProcMLP::ProcMLP(const char *name, const AtomicId *id,
78  MVATrainer *trainer) :
79  TrainProcessor(name, id, trainer),
80  iteration(ITER_COUNT),
81  count(0),
82  weightSum(0.0),
83  needCleanup(false),
84  boost(-1),
85  limiter(0.0)
86 {
87 }
88 
// Destructor: nothing to release explicitly; the smart pointer and
// containers clean up after themselves.
ProcMLP::~ProcMLP()
{
}
92 
93 void ProcMLP::configure(DOMElement *elem)
94 {
95  DOMNode *node = elem->getFirstChild();
96  while(node && node->getNodeType() != DOMNode::ELEMENT_NODE)
97  node = node->getNextSibling();
98 
99  if (!node)
100  throw cms::Exception("ProcMLP")
101  << "Expected MLP config in config section."
102  << std::endl;
103 
104  if (std::strcmp(XMLSimpleStr(node->getNodeName()), "config") != 0)
105  throw cms::Exception("ProcMLP")
106  << "Expected config tag in config section."
107  << std::endl;
108 
109  elem = static_cast<DOMElement*>(node);
110 
111  boost = XMLDocument::readAttribute<int>(elem, "boost", -1);
112  limiter = XMLDocument::readAttribute<double>(elem, "limiter", 0);
113  steps = XMLDocument::readAttribute<unsigned int>(elem, "steps");
114 
115  layout = (const char*)XMLSimpleStr(node->getTextContent());
116 
117  node = node->getNextSibling();
118  while(node && node->getNodeType() != DOMNode::ELEMENT_NODE)
119  node = node->getNextSibling();
120 
121  if (node)
122  throw cms::Exception("ProcMLP")
123  << "Superfluous tags in config section."
124  << std::endl;
125 
126  vars.resize(getInputs().size() - (boost >= 0 ? 1 : 0));
127  targets.resize(getOutputs().size());
128 }
129 
130 bool ProcMLP::load()
131 {
132  bool ok = false;
133  /* test for weights file */ {
134  std::ifstream in(trainer->trainFileName(this, "txt").c_str());
135  ok = in.good();
136  }
137 
138  if (!ok)
139  return false;
140 
141 // mlp->load(trainer->trainFileName(this, "txt"));
142  iteration = ITER_DONE;
143  trained = true;
144  return true;
145 }
146 
// Build the persistent calibration object by parsing the weights file
// written during training and copying the per-neuron biases and weights
// into Calibration::ProcMLP layer structures.
Calibration::VarProcessor *ProcMLP::getCalibration() const
{
	// NOTE(review): the declaration of "calib" (original line 149) is
	// missing from this view of the file -- presumably
	// "Calibration::ProcMLP *calib = new Calibration::ProcMLP;".
	// Confirm against the original source.

	// Instantiate an MLP with the trained topology so the layer/neuron
	// layout can be queried below.  When boosting is enabled, one input
	// column is the boost variable and is not part of the network.
	std::auto_ptr<MLP> mlp(new MLP(getInputs().size() - (boost >= 0 ? 1 : 0),
	                getOutputs().size(), layout));
	mlp->load(trainer->trainFileName(this, "txt"));

	std::string fileName = trainer->trainFileName(this, "txt");
	std::ifstream in(fileName.c_str(), std::ios::binary | std::ios::in);
	if (!in.good())
		// NOTE(review): message is missing a space after the file name.
		throw cms::Exception("ProcMLP")
			<< "Weights file " << fileName
			<< "cannot be opened for reading." << std::endl;

	// Skip the leading '#'-prefixed comment lines of the weights file.
	char linebuf[128];
	linebuf[127] = 0;
	do
		in.getline(linebuf, 127);
	while(linebuf[0] == '#');

	int layers = mlp->getLayers();
	const int *neurons = mlp->getLayout();

	// Layer 0 is the input layer and carries no weights; copy weights
	// for layers 1..layers-1.  For each neuron, the file stores the bias
	// first (j == 0), then one weight per neuron of the previous layer.
	for(int layer = 1; layer < layers; layer++) {
		Calibration::ProcMLP::Layer layerConf;

		for(int i = 0; i < neurons[layer]; i++) {
			// NOTE(review): the declaration of "neuron" (original
			// line 175) is missing from this view -- presumably
			// "Calibration::ProcMLP::Neuron neuron;". Confirm.

			for(int j = 0; j <= neurons[layer - 1]; j++) {
				in.getline(linebuf, 127);
				std::istringstream ss(linebuf);
				double weight;
				ss >> weight;

				if (j == 0)
					neuron.first = weight;  // bias term
				else
					neuron.second.push_back(weight);
			}
			layerConf.first.push_back(neuron);
		}
		// Intermediate layers apply the sigmoid; the last layer does not.
		layerConf.second = layer < layers - 1;

		calib->layers.push_back(layerConf);
	}

	in.close();

	return calib;
}
199 
200 void ProcMLP::trainBegin()
201 {
202  rand.SetSeed(65539);
203  switch(iteration) {
204  case ITER_COUNT:
205  count = 0;
206  weightSum = 0.0;
207  break;
208  case ITER_TRAIN:
209  try {
210  mlp = std::auto_ptr<MLP>(
211  new MLP(getInputs().size() - (boost >= 0 ? 1 : 0),
212  getOutputs().size(), layout));
213  mlp->init(count);
214  row = 0;
215  } catch(cms::Exception e) {
216  // MLP probably busy (or layout invalid, aaaack)
217  iteration = ITER_WAIT;
218  }
219  break;
220  default:
221  /* shut up */;
222  }
223 }
224 
// Feed one training event to the processor.  "values" points to an
// array with one std::vector<double> per input variable; "target" is
// the signal/background truth flag and "weight" the event weight.
void ProcMLP::trainData(const std::vector<double> *values,
                        bool target, double weight)
{
	// Boosting: reweight events using the boost variable (presumably the
	// discriminator output of a previous training iteration) -- signal
	// events are up-weighted where x is small, background where x is
	// large.  The constants 0.02/0.1/5.0 are empirical tuning values.
	if (boost >= 0) {
		double x = values[boost][0];
		if (target)
			weight *= 1.0 + 0.02 * std::exp(5.0 * (1.0 - x));
		else
			weight *= 1.0 + 0.1 * std::exp(5.0 * x);
	}

	// Weight limiter: events below the threshold are randomly dropped
	// with probability (1 - weight/limiter); survivors are promoted to
	// exactly the limiter weight so the expected total weight is kept.
	if (weight < limiter) {
		if (rand.Uniform(limiter) > weight)
			return;
		weight = limiter;
	}

	if (iteration == ITER_COUNT)
		count++;
	// NOTE(review): weightSum accumulates in BOTH the counting and the
	// training pass (it is only reset in trainBegin() for ITER_COUNT),
	// and it normalizes the chi^2 printout in runMLPTrainer() -- confirm
	// this double accumulation is intended.
	weightSum += weight;

	if (iteration != ITER_TRAIN)
		return;

	// Copy one value per network input, skipping the boost column.
	// Careful: "values" is advanced in the loop header AND once more
	// when the boost column is hit -- the pointer stepping is the point.
	for(unsigned int i = 0; i < vars.size(); i++, values++) {
		if ((int)i == boost)
			values++;
		vars[i] = values->front();
	}

	// All outputs share the same binary target.
	for(unsigned int i = 0; i < targets.size(); i++)
		targets[i] = target;

	mlp->set(row++, &vars.front(), &targets.front(), weight);
}
260 
261 void ProcMLP::runMLPTrainer()
262 {
263  for(unsigned int i = 0; i < steps; i++) {
264  double error = mlp->train();
265  if ((i % 10) == 0)
266  std::cout << "Training MLP epoch " << mlp->getEpoch()
267  << ", rel chi^2: " << (error / weightSum)
268  << std::endl;
269  }
270 }
271 
272 void ProcMLP::trainEnd()
273 {
274  switch(iteration) {
275  case ITER_COUNT:
276  case ITER_WAIT:
277  iteration = ITER_TRAIN;
278  std::cout << "Training with " << count << " events. "
279  "(weighted " << weightSum << ")" << std::endl;
280  break;
281  case ITER_TRAIN:
282  runMLPTrainer();
283  mlp->save(trainer->trainFileName(this, "txt"));
284  mlp->clear();
285  mlp.reset();
286  needCleanup = true;
287  iteration = ITER_DONE;
288  trained = true;
289  break;
290  default:
291  /* shut up */;
292  }
293 }
294 
295 void ProcMLP::cleanup()
296 {
297  if (!needCleanup)
298  return;
299 
300  std::remove(trainer->trainFileName(this, "txt").c_str());
301 }
302 
304 
305 } // anonymous namespace
int i
Definition: DBlmapReader.cc:9
std::vector< LayerSetAndLayers > layers(const SeedingLayerSetsHits &sets)
Definition: LayerTriplets.cc:4
static void cleanup(const Factory::MakerMap::value_type &v)
Definition: Factory.cc:12
template to generate a registry singleton for a type.
std::pair< std::vector< Neuron >, bool > Layer
Definition: MVAComputer.h:209
tuple node
Definition: Node.py:50
Cheap generic unique keyword identifier class.
Definition: AtomicId.h:31
T x() const
Cartesian x coordinate.
MVATrainerComputer * calib
Definition: MVATrainer.cc:64
std::pair< double, std::vector< double > > Neuron
Definition: MVAComputer.h:208
tuple iteration
Definition: align_cfg.py:5
def load
Definition: svgfig.py:546
int j
Definition: DBlmapReader.cc:9
Signal rand(Signal arg)
Definition: vlib.cc:442
tuple cout
Definition: gather_cfg.py:121
volatile std::atomic< bool > shutdown_flag false
static Interceptor::Registry registry("Interceptor")
tuple size
Write out results.
#define MVA_TRAINER_DEFINE_PLUGIN(T)