#ifndef HeterogeneousCore_SonicTriton_TritonData
#define HeterogeneousCore_SonicTriton_TritonData

// ...
#include <unordered_map>
// ...

#include "grpc_client.h"
#include "grpc_service.pb.h"
// ...

template <typename DT>
using TritonInput = std::vector<std::vector<DT>>;
template <typename DT>
using TritonOutput = std::vector<edm::Span<const DT*>>;
template <typename IO>
class TritonData {
public:
  using Result = nvidia::inferenceserver::client::InferResult;
  // ...
  template <typename DT>
  void toServer(std::shared_ptr<TritonInput<DT>> ptr);
  template <typename DT>
  TritonOutput<DT> fromServer() const;
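  // toServer() hands a shared_ptr to the typed payload over to the library;
  // fromServer() returns per-batch-entry spans that presumably alias the
  // buffer kept alive by the stored Result (see result_ in the member list
  // below). The aliasing is an assumption from the types alone, not stated
  // in this excerpt.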
  // ...

  bool setShape(unsigned loc, int64_t val, bool canThrow);
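  // Only this overload carries a canThrow flag; the member index below also
  // lists two-argument public setShape counterparts. A plausible reading,
  // assumed rather than confirmed by this excerpt, is that the public
  // wrappers forward here with canThrow = true, e.g.:
  //   bool setShape(unsigned loc, int64_t val) { return setShape(loc, val, true); }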
  // ...

  bool anyNeg(const ShapeView& vec) const {
    return std::any_of(vec.begin(), vec.end(), [](int64_t i) { return i < 0; });
  }
  int64_t dimProduct(const ShapeView& vec) const {
    return std::accumulate(vec.begin(), vec.end(), 1, std::multiplies<int64_t>());
  }
  // ...
};

// ...

template <>
template <typename DT>
void TritonInputData::toServer(std::shared_ptr<TritonInput<DT>> ptr);
template <>
template <typename DT>
TritonOutput<DT> TritonOutputData::fromServer() const;

// ...
#endif
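The shape helpers above are small enough to demonstrate in isolation. The following self-contained sketch (hypothetical shapes; a plain std::vector stands in for the header's ShapeView span) shows the two idioms: a negative entry marks a variable axis, and the dimension product gives the element count of a fully specified shape.

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  // hypothetical shapes; std::vector stands in for edm::Span here
  const std::vector<int64_t> fixedShape{2, 3, 4};
  const std::vector<int64_t> variableShape{-1, 10};

  // anyNeg: a negative entry marks a variable (not-yet-set) dimension
  const bool hasVariable =
      std::any_of(variableShape.begin(), variableShape.end(), [](int64_t i) { return i < 0; });
  assert(hasVariable);

  // dimProduct: element count of a fully specified shape (2*3*4 = 24)
  const int64_t nElements = std::accumulate(
      fixedShape.begin(), fixedShape.end(), int64_t(1), std::multiplies<int64_t>());
  assert(nElements == 24);
}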
Symbols defined in this file:

// type aliases
using Result = nvidia::inferenceserver::client::InferResult;
using TensorMetadata = inference::ModelMetadataResponse_TensorMetadata;
using ShapeType = std::vector<int64_t>;
template <typename DT> using TritonInput = std::vector<std::vector<DT>>;
template <typename DT> using TritonOutput = std::vector<edm::Span<const DT*>>;
using TritonInputMap = std::unordered_map<std::string, TritonInputData>;
using TritonOutputMap = std::unordered_map<std::string, TritonOutputData>;

// construction and shape control
TritonData(const std::string& name, const TensorMetadata& model_info, bool noBatch);
bool setShape(const ShapeType& newShape);
bool setShape(unsigned loc, int64_t val);
void setBatchSize(unsigned bsize);

// accessors
const ShapeView& shape() const;
const std::string& dname() const;
unsigned batchSize() const;
bool variableDims() const;
int64_t sizeShape() const;

// data transfer
template <typename DT> void toServer(std::shared_ptr<TritonInput<DT>> ptr);
template <typename DT> TritonOutput<DT> fromServer() const;
void setResult(std::shared_ptr<Result> result);

// helpers
bool anyNeg(const ShapeView& vec) const;
int64_t dimProduct(const ShapeView& vec) const;
void createObject(IO** ioptr) const;
template <typename T> bool any(const std::vector<T>& v, const T& what);

// data members
std::shared_ptr<Result> result_;
std::shared_ptr<IO> data_;
inference::DataType dtype_;
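Putting the pieces together, a hedged usage sketch built only from the types in this header: the tensor names, the include path, and the way the maps are obtained (in practice from a TritonClient) are assumptions, and the snippet is illustrative rather than the library's documented calling sequence.

#include "HeterogeneousCore/SonicTriton/interface/TritonData.h"  // assumed include path

#include <memory>

void fillAndRead(TritonInputMap& inputs, TritonOutputMap& outputs) {
  TritonInputData& in = inputs.at("input__0");  // hypothetical tensor name
  in.setShape(0, 10);  // fix a variable first dimension (assumed semantics)

  // one inner vector per batch entry, sized to the now fully specified shape
  auto values = std::make_shared<TritonInput<float>>(in.batchSize());
  for (auto& entry : *values)
    entry.assign(in.sizeShape(), 0.f);
  in.toServer(values);  // hand the payload to the library

  // ... after the client has run inference ...
  const TritonOutputData& out = outputs.at("output__0");  // hypothetical tensor name
  TritonOutput<float> result = out.fromServer<float>();   // non-owning views of the result
}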