static bool compareMuons(const std::shared_ptr<MicroGMTConfiguration::InterMuon>& mu1,
                         const std::shared_ptr<MicroGMTConfiguration::InterMuon>& mu2);
std::unique_ptr<MuonBxCollection>& out,
: m_debugOut("test/debug/iso_debug.dat"),
  produces<MuonBxCollection>();
  produces<MuonBxCollection>("imdMuonsBMTF");
  produces<MuonBxCollection>("imdMuonsEMTFPos");
  produces<MuonBxCollection>("imdMuonsEMTFNeg");
  produces<MuonBxCollection>("imdMuonsOMTFPos");
  produces<MuonBxCollection>("imdMuonsOMTFNeg");
  splitAndConvertMuons(emtfMuons, internMuonsEmtfPos, internMuonsEmtfNeg, emtfPosWedges, emtfNegWedges, bx);
  splitAndConvertMuons(omtfMuons, internMuonsOmtfPos, internMuonsOmtfNeg, omtfPosWedges, omtfNegWedges, bx);
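  // Descriptive note: splitAndConvertMuons (defined further down) separates the EMTF and OMTF
  // inputs by the sign of hwEta into positive- and negative-side lists and groups them into
  // per-processor wedges used by the later processing steps.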
  for (const auto& mu : internalMuons) {
    if (mu->hwPt() > 0) {
      // ... (start of the four-vector construction elided; the arguments below are pT in GeV,
      // eta, phi and mass, converted from hardware units: pT in 0.5 GeV steps,
      // eta LSB 0.010875, phi LSB 2*pi/576 = 0.010908)
      (mu->hwPt() - 1) * 0.5, mu->hwEta() * 0.010875, mu->hwGlobalPhi() * 0.010908, 0.0};
      // isolation word: absolute isolation in bit 0, relative isolation in bit 1
      int iso = mu->hwAbsIso() + (mu->hwRelIso() << 1);
      int outMuQual = MicroGMTConfiguration::setOutputMuonQuality(mu->hwQual(), mu->trackFinderType(), mu->hwHF());
      // ...
      // eta/phi extrapolated back to the vertex
      outMu.setHwEtaAtVtx(MicroGMTConfiguration::calcMuonHwEtaExtra(outMu));
      outMu.setHwPhiAtVtx(MicroGMTConfiguration::calcMuonHwPhiExtra(outMu));
      outMu.setEtaAtVtx(MicroGMTConfiguration::calcMuonEtaExtra(outMu));
      outMu.setPhiAtVtx(MicroGMTConfiguration::calcMuonPhiExtra(outMu));
      // ...
      // displaced-muon quantities: unconstrained pT and transverse impact parameter
      int hwPtUnconstrained{mu->hwPtUnconstrained()};
      outMu.setPtUnconstrained(hwPtUnconstrained == 0 ? 0 : (hwPtUnconstrained - 1) * 0.5);
      outMu.setHwPtUnconstrained(hwPtUnconstrained);
      outMu.setHwDXY(mu->hwDXY());
      // ...
      if (mu->hwSignValid()) {
        // hwSign: 0 -> charge +1, 1 -> charge -1
        outMu.setCharge(1 - 2 * mu->hwSign());
      // ...
      m_debugOut << mu->hwCaloPhi() << " " << mu->hwCaloEta() << std::endl;
      outMuons->push_back(bx, outMu);
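      // Worked example of the conversions above (values are illustrative only):
      //   hwPt = 41                  -> pT     = (41 - 1) * 0.5 = 20 GeV
      //   hwAbsIso = 1, hwRelIso = 1 -> iso    = 1 + (1 << 1) = 3
      //   hwSign = 1 (valid)         -> charge = 1 - 2 * 1 = -1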
const std::shared_ptr<MicroGMTConfiguration::InterMuon>& mu2) {
  return (mu1->hwWins() >= mu2->hwWins());
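// Comparator ordering muons by descending number of pairwise wins accumulated in the
// win-counting loop below; with ">=", a tie keeps mu1 ahead of mu2.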
  MicroGMTConfiguration::InterMuonList::iterator mu1;
  // ...
  for (mu1 = muons.begin(); mu1 != muons.end(); ++mu1) {
    (*mu1)->setHwWins(0);
  }
  // ...
  for (mu1 = muons.begin(); mu1 != muons.end(); ++mu1) {
    int mu1CancelBit = (*mu1)->hwCancelBit();
    nCancelled += mu1CancelBit;
    // ... (mu2 starts at the muon following mu1)
    for (; mu2 != muons.end(); ++mu2) {
      if (mu1CancelBit != 1 && (*mu2)->hwCancelBit() != 1) {
        // neither muon is cancelled: the higher-rank muon gets the win, ties go to mu1
        if ((*mu1)->hwRank() >= (*mu2)->hwRank()) {
          (*mu1)->increaseWins();
        } else {
          (*mu2)->increaseWins();
        }
      } else if (mu1CancelBit != 1) {
        (*mu1)->increaseWins();
      } else if ((*mu2)->hwCancelBit() != 1) {
        (*mu2)->increaseWins();
      }
  // ...
  size_t nMuonsBefore = muons.size();
  int minWins = nMuonsBefore - nSurvivors;
  // ...
  // keep only muons that were not cancelled and collected at least minWins wins
  muons.remove_if([&minWins](auto muon) { return ((muon->hwWins() < minWins) || (muon->hwCancelBit() == 1)); });
  // ...
  for (auto& mu1 : muons) {
    // ...
    mu1->setHwRank(rank);
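// Worked example of the win counting (illustrative): three non-cancelled muons with
// hwRank = {12, 7, 7} collect wins = {2, 1, 0} (the tie goes to the earlier muon). With
// nSurvivors = 2, minWins = 3 - 2 = 1, so only the two muons with at least one win survive
// the remove_if above.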
std::unique_ptr<MuonBxCollection>& out,
  // ...
  for (auto& mu : coll) {
    interout.push_back(mu);
    // ...
    int outMuQual = MicroGMTConfiguration::setOutputMuonQuality(mu->hwQual(), mu->trackFinderType(), mu->hwHF());
    // ...
    int hwPtUnconstrained{mu->hwPtUnconstrained()};
    outMu.setPtUnconstrained(hwPtUnconstrained == 0 ? 0 : (hwPtUnconstrained - 1) * 0.5);
    outMu.setHwPtUnconstrained(hwPtUnconstrained);
    outMu.setHwDXY(mu->hwDXY());
    // ...
    if (mu->hwSignValid()) {
      outMu.setCharge(1 - 2 * mu->hwSign());
    // ...
    out->push_back(bx, outMu);
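    // Note: unlike the conversion loop further up, this overload first copies each input muon
    // into "interout", so it fills both the internal working list and, via "out", one of the
    // labeled intermediate collections declared with produces above (the association with the
    // "imdMuons*" labels is inferred from context).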
  for (int i = 0; i < 6; ++i) {
    wedges_pos[i] = std::vector<std::shared_ptr<GMTInternalMuon>>();
    wedges_pos[i].reserve(3);
    wedges_neg[i] = std::vector<std::shared_ptr<GMTInternalMuon>>();
    wedges_neg[i].reserve(3);
  }
  // skip bunch crossings outside the range of the input collection
  if (bx < in->getFirstBX() || bx > in->getLastBX())
    return;
  // ...
  for (size_t i = 0; i < in->size(bx); ++i, ++muIdx) {
    if (in->at(bx, i).hwPt() > 0) {
      // ...
      if (currentLink != link) {
        // ...
      }
      int gPhi = MicroGMTConfiguration::calcGlobalPhi(
          in->at(bx, i).hwPhi(), in->at(bx, i).trackFinderType(), in->at(bx, i).processor());
      int tfMuonIdx = 3 * (currentLink - 36) + muIdx;
      std::shared_ptr<GMTInternalMuon> out = std::make_shared<GMTInternalMuon>(in->at(bx, i), gPhi, tfMuonIdx);
      if (in->at(bx, i).hwEta() > 0) {
        out_pos.push_back(out);
        wedges_pos[in->at(bx, i).processor()].push_back(out);
      } else {
        out_neg.emplace_back(out);
        wedges_neg[in->at(bx, i).processor()].push_back(out);
      }
  // ...
  for (int i = 0; i < 6; ++i) {
    if (wedges_pos[i].size() > 3)
      edm::LogWarning("Input Mismatch") << " too many inputs per processor for emtf+ / omtf+. Wedge " << i
                                        << ": Size " << wedges_pos[i].size() << std::endl;
    if (wedges_neg[i].size() > 3)
      edm::LogWarning("Input Mismatch") << " too many inputs per processor for emtf- / omtf-. Wedge " << i
                                        << ": Size " << wedges_neg[i].size() << std::endl;
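  // Worked example for the index arithmetic above (values illustrative): the link numbers are
  // offset by 36 in this formula, so the second candidate (muIdx = 1) arriving on
  // currentLink = 40 gets tfMuonIdx = 3 * (40 - 36) + 1 = 13. Each of the six wedges per
  // detector side is expected to hold at most three muons, hence the LogWarning above.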
  for (int i = 0; i < 12; ++i) {
    wedges[i] = std::vector<std::shared_ptr<GMTInternalMuon>>();
    wedges[i].reserve(3);
  }
  // skip bunch crossings outside the range of the input collection
  if (bx < in->getFirstBX() || bx > in->getLastBX())
    return;
  // ...
  for (size_t i = 0; i < in->size(bx); ++i, ++muIdx) {
    if (in->at(bx, i).hwPt() > 0) {
      // ...
      if (currentLink != link) {
        // ...
      }
      int gPhi = MicroGMTConfiguration::calcGlobalPhi(
          in->at(bx, i).hwPhi(), in->at(bx, i).trackFinderType(), in->at(bx, i).processor());
      int tfMuonIdx = 3 * (currentLink - 36) + muIdx;
      std::shared_ptr<GMTInternalMuon> outMu = std::make_shared<GMTInternalMuon>(in->at(bx, i), gPhi, tfMuonIdx);
      out.emplace_back(outMu);
      wedges[in->at(bx, i).processor()].push_back(outMu);
  // ...
  for (int i = 0; i < 12; ++i) {
    if (wedges[i].size() > 3)
      edm::LogWarning("Input Mismatch") << " too many inputs per processor for barrel. Wedge " << i
                                        << ": Size " << wedges[i].size() << std::endl;
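  // Note: this variant handles the barrel (BMTF) input, which uses twelve processor wedges
  // and no split by detector side, in contrast to the six positive/negative wedges used for
  // EMTF/OMTF above.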
  microGMTParamsRcd.get(microGMTParamsHandle);
  // ...
  std::unique_ptr<L1TMuonGlobalParams_PUBLIC> microGMTParams(
      // ...
  if (microGMTParams->pnodes_.empty()) {
  // ...
      << "\n EMTF-|OMTF-| BMTF |OMTF+|EMTF+| CALO | res 0";
  // ...
      << "\n EMTF-|OMTF-| BMTF |OMTF+|EMTF+| CALO | res 0";