using namespace PhysicsTools;

class ProcMLP : public VarProcessor {
    public:
        ProcMLP(const char *name,
                const Calibration::ProcMLP *calib,
                const MVAComputer *computer);

        virtual void configure(ConfIterator iter, unsigned int n) override;
        virtual void eval(ValueIterator iter, unsigned int n) const override;
        virtual std::vector<double> deriv(ValueIterator iter,
                                          unsigned int n) const override;
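        // The three overrides above form the processor interface:
        // configure() declares the input/output variables once at setup,
        // eval() computes the network response for one set of input values,
        // and deriv() additionally propagates the derivatives of every
        // output with respect to every input (see below).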
    private:
        struct Layer {
            Layer(const Calibration::ProcMLP::Layer &calib);
            Layer(const Layer &orig) :
                inputs(orig.inputs), neurons(orig.neurons),
                coeffs(orig.coeffs), sigmoid(orig.sigmoid) {}

            unsigned int        inputs;
            unsigned int        neurons;
            std::vector<double> coeffs;
            bool                sigmoid;
        };

        std::vector<Layer>  layers;
        unsigned int        maxTmp;
};
ProcMLP::Layer::Layer(const Calibration::ProcMLP::Layer &calib) :
    inputs(calib.first.front().second.size()),
    neurons(calib.first.size()),
    sigmoid(calib.second)
{
    coeffs.resize(neurons * (inputs + 1));
    std::vector<double>::iterator inserter = coeffs.begin();

    for(std::vector<Neuron>::const_iterator iter = calib.first.begin();
        iter != calib.first.end(); iter++) {
        // first: the neuron's bias, second: its weight vector
        *inserter++ = iter->first;

        if (iter->second.size() != inputs)
            throw cms::Exception("ProcMLPInput")
                << "ProcMLP neuron layer inconsistent."
                << std::endl;

        inserter = std::copy(iter->second.begin(), iter->second.end(),
                             inserter);
    }
}
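// Layout note: after construction, coeffs holds neurons * (inputs + 1)
// values, flattened per neuron as [bias, w_0, w_1, ..., w_{inputs-1}].
// eval() and deriv() below walk this vector with a single iterator in
// exactly this order.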
ProcMLP::ProcMLP(const char *name,
                 const Calibration::ProcMLP *calib,
                 const MVAComputer *computer) :
    VarProcessor(name, calib, computer),
    maxTmp(0)
{
    std::copy(calib->layers.begin(), calib->layers.end(),
              std::back_inserter(layers));

    for(unsigned int i = 0; i < layers.size(); i++) {
        maxTmp = std::max<unsigned int>(maxTmp, layers[i].neurons);
        if (i > 0 && layers[i - 1].neurons != layers[i].inputs)
            throw cms::Exception("ProcMLPInput")
                << "ProcMLP neuron layers do not connect "
                   "properly." << std::endl;
    }
}
void ProcMLP::configure(ConfIterator iter, unsigned int n)
{
    if (n != layers.front().inputs)
        return;

    for(unsigned int i = 0; i < n; i++)
        iter++(Variable::FLAG_NONE);

    for(unsigned int i = 0; i < layers.back().neurons; i++)
        iter << Variable::FLAG_NONE;
}
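// configure() registers one plain (FLAG_NONE) input variable per network
// input and one output variable per neuron of the last layer; if the
// number of supplied variables does not match the first layer, nothing is
// declared and the processor is left unconfigured.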
void ProcMLP::eval(ValueIterator iter, unsigned int n) const
{
    // two scratch buffers of maxTmp doubles each, used alternately
    double *tmp = (double*)alloca(2 * maxTmp * sizeof(double));
    bool flip = false;

    for(double *pos = tmp; iter; iter++, pos++)
        *pos = *iter;

    double *output = 0;
    for(std::vector<Layer>::const_iterator layer = layers.begin();
        layer != layers.end(); layer++, flip = !flip) {
        const double *input = &tmp[flip ? maxTmp : 0];
        output = &tmp[flip ? 0 : maxTmp];
        std::vector<double>::const_iterator coeff =
                                        layer->coeffs.begin();
        for(unsigned int i = 0; i < layer->neurons; i++) {
            double sum = *coeff++;  // bias
            for(unsigned int j = 0; j < layer->inputs; j++)
                sum += input[j] * *coeff++;
            if (layer->sigmoid)
                sum = 1.0 / (std::exp(-sum) + 1.0);
            *output++ = sum;
        }
    }

    for(const double *pos = &tmp[flip ? maxTmp : 0]; pos < output; pos++)
        iter(*pos);
}
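// eval() ping-pongs between the two halves of tmp: each layer reads the
// previous layer's output from one half and writes its own output to the
// other, so only 2 * maxTmp doubles of stack space are needed regardless
// of the number of layers.  When a layer is marked sigmoid, the linear
// response x is squashed with the logistic function
//     sigma(x) = 1 / (1 + exp(-x))
// which is what the expression 1.0 / (std::exp(-sum) + 1.0) computes.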
std::vector<double> ProcMLP::deriv(ValueIterator iter, unsigned int n) const
{
    std::vector<double> prevValues, nextValues;
    std::vector<double> prevMatrix, nextMatrix;

    while(iter)
        nextValues.push_back(*iter++);

    unsigned int size = nextValues.size();
    nextMatrix.resize(size * size);
    // start from the identity: d(input_i) / d(input_i) = 1
    for(unsigned int i = 0; i < size; i++)
        nextMatrix[i * size + i] = 1.;

    for(std::vector<Layer>::const_iterator layer = layers.begin();
        layer != layers.end(); layer++) {
        prevValues.clear();
        std::swap(prevValues, nextValues);
        prevMatrix.clear();
        std::swap(prevMatrix, nextMatrix);

        std::vector<double>::const_iterator coeff =
                                        layer->coeffs.begin();
        for(unsigned int i = 0; i < layer->neurons; i++) {
            double sum = *coeff++;  // bias
            for(unsigned int j = 0; j < layer->inputs; j++)
                sum += prevValues[j] * *coeff++;

            double deriv;
            if (layer->sigmoid) {
                double e = std::exp(-sum);
                sum = 1.0 / (e + 1.0);
                deriv = 1.0 / (e + 1.0 / e + 2.0);
            } else
                deriv = 1.0;

            nextValues.push_back(sum);

            // chain rule: fold this neuron's weight row into the
            // accumulated Jacobian of the previous layers
            for(unsigned int k = 0; k < size; k++) {
                sum = 0.0;
                coeff -= layer->inputs;
                for(unsigned int j = 0; j < layer->inputs; j++)
                    sum += prevMatrix[j * size + k] *
                           *coeff++;
                nextMatrix.push_back(sum * deriv);
            }
        }
    }

    return nextMatrix;
}
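// deriv() returns the Jacobian of the network response, flattened
// row-wise: entry [i * size + k] is the derivative of output neuron i of
// the last layer with respect to network input k.  It is built layer by
// layer via the chain rule, seeded with the identity matrix.  For a
// sigmoid neuron with pre-activation x and e = exp(-x),
//     sigma(x)  = 1 / (e + 1)
//     sigma'(x) = sigma(x) * (1 - sigma(x))
//               = e / (1 + e)^2
//               = 1 / (e + 1/e + 2)
// which explains the factor 1.0 / (e + 1.0 / e + 2.0) above.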