#ifndef RecoTracker_MkFitCore_src_Matriplex_MatriplexVector_h
#define RecoTracker_MkFitCore_src_Matriplex_MatriplexVector_h

#include "Matriplex.h"
#include <cassert>

namespace Matriplex {

  // MatriplexVector<MP> holds a contiguous array of Matriplexes; MPlexVec is the
  // alias used by the batched wrappers below. Its class body is elided in this
  // extract apart from an element-wise loop over a plex's backing array:
  //
  //   for (idx_t i = 0; i < kTotSize; ++i) { ... }

  template <class MP>
  class MatriplexVector;

  template <class MP>
  using MPlexVec = MatriplexVector<MP>;
  template <typename T, idx_t D1, idx_t D2, idx_t D3, idx_t N>
  void multiply(const MPlexVec<MPlex<T, D1, D2, N>>& A,
                const MPlexVec<MPlex<T, D2, D3, N>>& B,
                MPlexVec<MPlex<T, D1, D3, N>>& C,
                int n_to_process = 0) {
    assert(A.size() == B.size());
    assert(A.size() == C.size());

    // Process the whole vector unless a smaller batch is requested.
    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      multiply(A[i], B[i], C[i]);
    }
  }
  template <typename T, idx_t D1, idx_t D2, idx_t D3, idx_t N>
  void multiplyGeneral(const MPlexVec<MPlex<T, D1, D2, N>>& A,
                       const MPlexVec<MPlex<T, D2, D3, N>>& B,
                       MPlexVec<MPlex<T, D1, D3, N>>& C,
                       int n_to_process = 0) {
    assert(A.size() == B.size());
    assert(A.size() == C.size());

    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      multiplyGeneral(A[i], B[i], C[i]);
    }
  }
  template <typename T, idx_t D1, idx_t D2, idx_t D3, idx_t N>
  void multiply3in(MPlexVec<MPlex<T, D1, D2, N>>& A,
                   MPlexVec<MPlex<T, D2, D3, N>>& B,
                   MPlexVec<MPlex<T, D1, D3, N>>& C,
                   int n_to_process = 0) {
    assert(A.size() == B.size());
    assert(A.size() == C.size());

    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      // Per-element multiplications over the three (non-const) inputs; the
      // exact calls are not preserved in this extract.
    }
  }
  template <typename T, idx_t D, idx_t N>
  void multiply(const MPlexVec<MPlex<T, D, D, N>>& A,
                const MPlexVec<MPlex<T, D, D, N>>& B,
                MPlexVec<MPlex<T, D, D, N>>& C,
                int n_to_process = 0) {
    assert(A.size() == B.size());
    assert(A.size() == C.size());

    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      multiply(A[i], B[i], C[i]);
    }
  }
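  // Usage sketch (illustrative, not part of the original header): each wrapper
  // above applies the matching per-plex kernel to every element of its vectors,
  // or only to the first n_to_process of them when that argument is non-zero.
  // The float payload, the 3x3 dimensions, the plex width 8, the vector length 4
  // and the count-taking MPlexVec constructor are assumptions for this example.
  inline void exampleBatchedMultiply() {
    MPlexVec<MPlex<float, 3, 3, 8>> A(4), B(4), C(4);  // 4 plexes of 3x3 matrices
    // ... fill A and B, e.g. via copyIn ...
    multiply(A, B, C);     // process all 4 plexes
    multiply(A, B, C, 2);  // process only the first 2 (n_to_process = 2)
  }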
  template <typename T, idx_t D, idx_t N>
  void invertCramer(MPlexVec<MPlex<T, D, D, N>>& A, int n_to_process = 0) {
    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      invertCramer(A[i]);
    }
  }
  template <typename T, idx_t D, idx_t N>
  void invertCholesky(MPlexVec<MPlex<T, D, D, N>>& A, int n_to_process = 0) {
    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      invertCholesky(A[i]);
    }
  }
  template <typename T, idx_t D, idx_t N>
  void invertCramerSym(MPlexVec<MPlexSym<T, D, N>>& A, int n_to_process = 0) {
    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      invertCramerSym(A[i]);
    }
  }
  template <typename T, idx_t D, idx_t N>
  void invertCholeskySym(MPlexVec<MPlexSym<T, D, N>>& A, int n_to_process = 0) {
    const int np = n_to_process ? n_to_process : A.size();

    for (int i = 0; i < np; ++i) {
      invertCholeskySym(A[i]);
    }
  }

}  // namespace Matriplex

#endif
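A usage sketch for the batched inversion wrappers, under the same caveats as the multiply example above: the float payload, the 6x6 dimensions, the plex width 8, the vector length 16 and the count-taking MPlexVec constructor are assumptions made for illustration.

#include "MatriplexVector.h"

using namespace Matriplex;

void exampleBatchedInversion() {
  MPlexVec<MPlex<float, 6, 6, 8>> M(16);  // 16 plexes of general 6x6 matrices
  MPlexVec<MPlexSym<float, 6, 8>> S(16);  // 16 plexes of symmetric 6x6 matrices
  // ... fill M and S ...
  invertCramer(M);          // invert every element of M in place
  invertCholeskySym(S, 8);  // invert only the first 8 plexes of S
}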
Members and free functions indexed for this page:

  Per-plex kernels dispatched by the batched wrappers:
    void multiply(const MPlex<T, D, D, N>& A, const MPlex<T, D, D, N>& B, MPlex<T, D, D, N>& C)
    void multiplyGeneral(const MPlex<T, D1, D2, N>& A, const MPlex<T, D2, D3, N>& B, MPlex<T, D1, D3, N>& C)
    void invertCramer(MPlex<T, D, D, N>& A, double* determ = nullptr)
    void invertCholesky(MPlex<T, D, D, N>& A)
    void invertCramerSym(MPlexSym<T, D, N>& A, double* determ = nullptr)
    void invertCholeskySym(MPlexSym<T, D, N>& A)

  Batched wrapper over MPlexVec:
    void multiply3in(MPlexVec<MPlex<T, D1, D2, N>>& A, MPlexVec<MPlex<T, D2, D3, N>>& B, MPlexVec<MPlex<T, D1, D3, N>>& C, int n_to_process = 0)

  MatriplexVector members:
    Container::value_type value_type
    const MP& mplex(int i) const
    const MP& operator[](int i) const
    T& At(idx_t n, idx_t i, idx_t j)
    T& operator()(idx_t n, idx_t i, idx_t j)
    void copyIn(idx_t n, T* arr)
    void copyOut(idx_t n, T* arr)

  Memory helper:
    void* aligned_alloc64(std::size_t size)
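The element-wise accessors listed above take a global element index n in addition to the in-matrix indices i, j. Below is a minimal sketch of how such a container can route n onto a (plex, slot) pair, assuming N slots per plex; the n / N and n % N routing, the field names, and the MP::kN constant are illustrative assumptions, not a quote of the CMSSW implementation.

#include <cstddef>

template <class MP>
class MatriplexVectorSketch {
  MP* fV = nullptr;    // contiguous array of plexes (allocation not shown)
  std::size_t fN = 0;  // number of plexes held

public:
  using T = typename MP::value_type;
  static constexpr std::size_t N = MP::kN;  // assumed: slots per plex

  // Route global element n to plex n / N, slot n % N within that plex.
  T& At(std::size_t n, int i, int j) { return fV[n / N].At(n % N, i, j); }
  T& operator()(std::size_t n, int i, int j) { return At(n, i, j); }
  void copyIn(std::size_t n, T* arr) { fV[n / N].copyIn(n % N, arr); }
  void copyOut(std::size_t n, T* arr) { fV[n / N].copyOut(n % N, arr); }
};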