28 using namespace PhysicsTools;
// NOTE(review): this extract is fragmentary -- the leading integers ("37",
// "39", ...) are original source line numbers fused in by the extraction, and
// many lines of the original file are missing. Code left byte-identical.
// Trivial virtual destructor; the class overrides a virtual interface
// (presumably a VarProcessor-style base -- TODO confirm against full file).
37 ~ProcMLP()
override {}
// Variable-configuration hook: consumes/declares variables for n inputs.
39 void configure(ConfIterator iter,
unsigned int n)
override;
// Forward evaluation of the MLP over n input values.
40 void eval(ValueIterator iter,
unsigned int n)
const override;
// Derivatives (Jacobian entries) of the outputs w.r.t. the n inputs.
41 std::vector<double> deriv(ValueIterator iter,
unsigned int n)
const override;
// Copy-constructor initializer list (signature is outside this extract --
// presumably Layer::Layer(const Layer &orig)): copies the input count, neuron
// count, flattened coefficient vector, and the sigmoid-activation flag.
47 :
inputs(orig.
inputs), neurons(orig.neurons), coeffs(orig.coeffs), sigmoid(orig.sigmoid) {}
// Flattened weight storage: per neuron, one bias term followed by `inputs`
// weights (see the resize to neurons * (inputs + 1) in the calibration ctor).
51 std::vector<double> coeffs;
// Calibration-constructor body fragment: flattens the per-neuron calibration
// data into `coeffs`. Each neuron contributes 1 bias + `inputs` weights.
65 coeffs.resize(neurons * (
inputs + 1));
66 std::vector<double>::iterator inserter = coeffs.begin();
// calib.first is a vector of Neuron entries: pair-like with .first = bias and
// .second = weight vector (inferred from usage below -- TODO confirm typedef).
68 for (std::vector<Neuron>::const_iterator iter = calib.first.begin(); iter != calib.first.end(); iter++) {
// Bias term first.
69 *inserter++ = iter->first;
// Every neuron must carry exactly one weight per layer input.
71 if (iter->second.size() !=
inputs)
72 throw cms::Exception(
"ProcMLPInput") <<
"ProcMLP neuron layer inconsistent." << std::endl;
// Then the weight vector, appended contiguously after the bias.
74 inserter =
std::copy(iter->second.begin(), iter->second.end(), inserter);
// Constructor fragment: scan all layers to find the widest one; maxTmp sizes
// the ping-pong scratch buffer used later in eval() (2 * maxTmp doubles).
82 for (
unsigned int i = 0;
i <
layers.size();
i++) {
83 maxTmp = std::max<unsigned int>(maxTmp,
layers[
i].neurons);
// NOTE(review): the guarding condition (original line 84) is missing from
// this extract -- presumably it checks that layer i's input count equals the
// previous layer's neuron count; confirm against the full source.
85 throw cms::Exception(
"ProcMLPInput") <<
"ProcMLP neuron layers do not connect "
// Declares the processor's inputs/outputs to the framework.
// n must match the first layer's expected input count; the consequence of a
// mismatch (original lines 93-94) is missing from this extract -- presumably
// an early return or throw; confirm against the full source.
91 void ProcMLP::configure(ConfIterator iter,
unsigned int n) {
92 if (n !=
layers.front().inputs)
// Consume the n input variables (no special flags requested).
95 for (
unsigned int i = 0;
i <
n;
i++)
96 iter++(Variable::FLAG_NONE);
// Declare one output variable per neuron of the final layer.
98 for (
unsigned int i = 0;
i <
layers.back().neurons;
i++)
99 iter << Variable::FLAG_NONE;
// Forward pass: feeds the input values through each layer in turn.
102 void ProcMLP::eval(ValueIterator iter,
unsigned int n)
const {
// Stack-allocated ping-pong scratch: two halves of maxTmp doubles each; one
// half holds the current layer's inputs, the other receives its outputs.
// (alloca: freed automatically on function return; no heap traffic.)
103 double *
tmp = (
double *)alloca(2 * maxTmp *
sizeof(
double));
// Copy the incoming values into the first half of the buffer.
106 for (
double *pos = tmp; iter; iter++, pos++)
// `flip` selects which half is input vs. output this iteration (its
// declaration and the layer loop header are missing from this extract).
111 const double *
input = &tmp[flip ? maxTmp : 0];
112 output = &tmp[flip ? 0 : maxTmp];
113 std::vector<double>::const_iterator coeff =
layer->coeffs.begin();
// Per neuron: bias first, then the dot product with the layer's inputs
// (matches the bias-then-weights packing of Layer::coeffs).
114 for (
unsigned int i = 0;
i <
layer->neurons;
i++) {
115 double sum = *coeff++;
116 for (
unsigned int j = 0;
j <
layer->inputs;
j++)
117 sum += input[
j] * *coeff++;
// Emit the final layer's outputs (the loop body, original lines 125+, is
// missing from this extract -- presumably writes each value via the iterator).
124 for (
const double *pos = &tmp[flip ? maxTmp : 0]; pos <
output; pos++)
// Computes the Jacobian of the network outputs w.r.t. its inputs by
// propagating both values and derivative matrices layer by layer.
128 std::vector<double> ProcMLP::deriv(ValueIterator iter,
unsigned int n)
const {
// prev*/next* hold the previous layer's activations and d(activation)/d(input)
// matrix, and the ones being built for the current layer.
129 std::vector<double> prevValues, nextValues;
130 std::vector<double> prevMatrix, nextMatrix;
// Seed with the raw input values...
133 nextValues.push_back(*iter++);
// ...and the identity matrix (d input_i / d input_j = delta_ij).
135 unsigned int size = nextValues.size();
136 nextMatrix.resize(size * size);
137 for (
unsigned int i = 0;
i <
size;
i++)
138 nextMatrix[
i * size +
i] = 1.;
// Per-layer loop (header missing from this extract): same bias-then-weights
// accumulation as in eval().
146 std::vector<double>::const_iterator coeff =
layer->coeffs.begin();
147 for (
unsigned int i = 0;
i <
layer->neurons;
i++) {
148 double sum = *coeff++;
149 for (
unsigned int j = 0;
j <
layer->inputs;
j++)
150 sum += prevValues[
j] * *coeff++;
// Sigmoid activation. NOTE(review): the definition of `e` (original line
// 154) is missing -- presumably e = exp(-sum), giving value 1/(1+e) and
// derivative e/(1+e)^2 == 1/(e + 1/e + 2); confirm against the full source.
153 if (
layer->sigmoid) {
155 sum = 1.0 / (e + 1.0);
156 deriv = 1.0 / (e + 1.0 / e + 2.0);
160 nextValues.push_back(sum);
// Chain rule: for each original input k, d(neuron_i)/d(input_k) =
// deriv * sum_j w_ij * d(prev_j)/d(input_k). The coeff iterator is rewound
// by `inputs` each pass so the same weight row is reused for every k.
162 for (
unsigned int k = 0;
k <
size;
k++) {
164 coeff -=
layer->inputs;
165 for (
unsigned int j = 0;
j <
layer->inputs;
j++)
166 sum += prevMatrix[
j * size +
k] * *coeff++;
167 nextMatrix.push_back(sum * deriv);
// NOTE(review): the lines below are Doxygen tooltip/cross-reference residue
// from the HTML extraction, not part of this source file; commented out so
// they are not mistaken for code. No content deleted.
// std::vector< LayerSetAndLayers > layers(const SeedingLayerSetsHits &sets)
// Exp< T >::type exp(const T &t)
// constexpr std::array< uint8_t, layerIndexSize > layer
// static std::string const input
// SeedingLayerSetsHits::SeedingLayer Layer
// U second(std::pair< T, U > const &p)
// void swap(edm::DataFrameContainer &lhs, edm::DataFrameContainer &rhs)
// tuple size
// Write out results.