// number of threads used for intra- and inter-operation parallelism
_options.config.set_intra_op_parallelism_threads(nThreads);
_options.config.set_inter_op_parallelism_threads(nThreads);
39 (*
_options.config.mutable_device_count())[
"GPU"] = 0;
40 _options.config.mutable_gpu_options()->set_visible_device_list(
"");
// cuda backend requested but no GPU is visible to TensorFlow
if ((*_options.config.mutable_device_count())["GPU"] == 0) {
  ex << "Cuda backend requested, NVIDIA GPU visible to cmssw, but not visible to TensorFlow in the job";
  ex.addContext("Calling tensorflow::setBackend()");
55 (*
_options.config.mutable_device_count())[
"GPU"] = 1;
56 _options.config.mutable_gpu_options()->set_visible_device_list(
"0");
58 _options.config.mutable_gpu_options()->set_allow_growth(
true);
ex << "Cuda backend requested, but no NVIDIA GPU available in the job";
ex.addContext("Calling tensorflow::setBackend()");
ex << "ROCm/Intel GPU backend requested, but TF is not compiled yet for this platform";
ex.addContext("Calling tensorflow::setBackend()");
78 (*
_options.config.mutable_device_count())[
"GPU"] = 1;
79 _options.config.mutable_gpu_options()->set_visible_device_list(
"0");
81 _options.config.mutable_gpu_options()->set_allow_growth(
true);
84 (*
_options.config.mutable_device_count())[
"GPU"] = 0;
85 _options.config.mutable_gpu_options()->set_visible_device_list(
"");
// load the SavedModel bundle and extract a copy of its MetaGraphDef
RunOptions runOptions;
SavedModelBundle bundle;
status = LoadSavedModel(options.getSessionOptions(), runOptions, exportDir, {tag}, &bundle);
// error message used when loading fails:
    << "error while loading metaGraphDef from '" << exportDir << "': " << status.ToString();
return new MetaGraphDef(bundle.meta_graph_def);
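// --- Illustrative usage sketch (not part of this file) ----------------------
// Loading the MetaGraphDef of a SavedModel exported to a directory; the export
// path is a placeholder and the default tag (kSavedModelTagServe) is used.
// Since loadMetaGraphDef() returns a newly allocated object, the caller owns it.
#include "PhysicsTools/TensorFlow/interface/TensorFlow.h"

void loadMetaGraphDefExample() {
  tensorflow::MetaGraphDef* metaGraphDef = tensorflow::loadMetaGraphDef("/path/to/saved_model");
  // ... pass it to the createSession() overload taking a MetaGraphDef and the
  // export directory, then delete it once the session has been created
  delete metaGraphDef;
}
// -----------------------------------------------------------------------------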
// deprecation notice emitted by loadMetaGraph():
    << "tensorflow::loadMetaGraph() is deprecated, use tensorflow::loadMetaGraphDef() instead";
// load the graph definition from the given protobuf file
GraphDef* graphDef = new GraphDef();
// error message used when reading the file fails:
    << "error while loading graphDef from '" << pbFile << "': " << status.ToString();
throw cms::Exception("InvalidSession") << "error while creating session: " << status.ToString();
if (metaGraphDef == nullptr) {
  throw cms::Exception("InvalidMetaGraphDef") << "error while creating session: metaGraphDef is nullptr";
}
if (metaGraphDef->graph_def().node_size() <= 0) {
  throw cms::Exception("InvalidMetaGraphDef") << "error while creating session: graphDef has no nodes";
}
// attach the graph contained in the MetaGraphDef to the session
status = session->Create(metaGraphDef->graph_def());
// error message used when attaching fails:
    << "error while attaching metaGraphDef to session: " << status.ToString();
// locate the variable files of the SavedModel and the restore op of its saver
std::string varFileTensorName = metaGraphDef->saver_def().filename_tensor_name();
std::string restoreOpName = metaGraphDef->saver_def().restore_op_name();
std::string varDir = io::JoinPath(exportDir, kSavedModelVariablesDirectory);
std::string indexFile = io::JoinPath(varDir, MetaFilename(kSavedModelVariablesFilename));
std::string varFile = io::JoinPath(varDir, kSavedModelVariablesFilename);
// pass the variable file name to the restore op via a scalar string tensor
Tensor varFileTensor(DT_STRING, TensorShape({}));
varFileTensor.scalar<tensorflow::tstring>()() = varFile;
// run the restore op to load the variable values into the session
status = session->Run({{varFileTensorName, varFileTensor}}, {}, {restoreOpName}, nullptr);
if (!status.ok()) {
  throw cms::Exception("InvalidSession") << "error while restoring variables in session: " << status.ToString();
}
if (graphDef == nullptr) {
  throw cms::Exception("InvalidGraphDef") << "error while creating session: graphDef is nullptr";
}
if (graphDef->node_size() <= 0) {
  throw cms::Exception("InvalidGraphDef") << "error while creating session: graphDef has no nodes";
}
// attach the graph to the session
status = session->Create(*graphDef);
if (!status.ok()) {
  throw cms::Exception("InvalidSession") << "error while attaching graphDef to session: " << status.ToString();
}
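// --- Illustrative usage sketch (not part of this file) ----------------------
// Minimal end-to-end flow for a frozen graph: load the protobuf, create a
// session, run inference and close the session again. The file path and the
// "input"/"output" node names are placeholders, and the single-argument
// createSession() call and the default-thread-pool run() overload are
// assumptions about the available defaults.
#include <vector>
#include "PhysicsTools/TensorFlow/interface/TensorFlow.h"

void runGraphExample() {
  // load the graph definition and create a session attached to it
  tensorflow::GraphDef* graphDef = tensorflow::loadGraphDef("/path/to/graph.pb");
  tensorflow::Session* session = tensorflow::createSession(graphDef);

  // prepare a dummy input tensor (batch size 1, 10 features)
  tensorflow::Tensor input(tensorflow::DT_FLOAT, {1, 10});
  input.flat<float>().setZero();

  // run inference and collect the outputs
  std::vector<tensorflow::Tensor> outputs;
  tensorflow::run(session, {{"input", input}}, {"output"}, &outputs);

  // cleanup: the session and the graph are owned by the caller
  tensorflow::closeSession(session);
  delete graphDef;
}
// -----------------------------------------------------------------------------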
if (session == nullptr) {
// cast away constness so the underlying session can be closed and deleted
auto s = const_cast<Session*>(session);
// check whether any of the inputs is an empty tensor
bool isEmpty = false;
if (input.second.shape().num_elements() == 0) {
         const thread::ThreadPoolOptions& threadPoolOptions) {
  if (session == nullptr) {
    throw cms::Exception("InvalidSession") << "cannot run empty session";
  }

  RunOptions runOptions;
         thread::ThreadPoolInterface* threadPool) {
  // use the given thread pool for both inter- and intra-op parallelism
  thread::ThreadPoolOptions threadPoolOptions;
  threadPoolOptions.inter_op_threadpool = threadPool;
  threadPoolOptions.intra_op_threadpool = threadPool;
// select the thread pool implementation by name
if (threadPoolName == "no_threads") {
  // ...
} else if (threadPoolName == "tbb") {
  // ...
} else if (threadPoolName == "tensorflow") {
  // ...
} else {
  // error message used for an unknown name:
      << "thread pool implementation '" << threadPoolName << "' unknown, use 'no_threads', 'tbb', or 'tensorflow'";
}
// delete the graph if set
if (graph.load() != nullptr) {
  delete graph.load();
  graph.store(nullptr);
}