_options.config.set_intra_op_parallelism_threads(nThreads);
_options.config.set_inter_op_parallelism_threads(nThreads);
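
// Hedged usage sketch: asking for single-threaded session execution. This assumes the
// Options wrapper exposes setThreading(int); in releases where setThreading is a free
// function it acts on a SessionOptions object instead. The thread count is illustrative.
tensorflow::Options opts;
opts.setThreading(1);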
39 (*
_options.config.mutable_device_count())[
"GPU"] = 0;
40 _options.config.mutable_gpu_options()->set_visible_device_list(
"");
if ((*_options.config.mutable_device_count())["GPU"] == 0) {
  // ...
  ex << "Cuda backend requested, NVIDIA GPU visible to cmssw, but not visible to TensorFlow in the job";
  ex.addContext("Calling tensorflow::setBackend()");
55 (*
_options.config.mutable_device_count())[
"GPU"] = 1;
56 _options.config.mutable_gpu_options()->set_visible_device_list(
"0");
58 _options.config.mutable_gpu_options()->set_allow_growth(
true);
ex << "Cuda backend requested, but no NVIDIA GPU available in the job";
ex.addContext("Calling tensorflow::setBackend()");
ex << "ROCm/Intel GPU backend requested, but TF is not compiled yet for this platform";
ex.addContext("Calling tensorflow::setBackend()");
78 (*
_options.config.mutable_device_count())[
"GPU"] = 1;
79 _options.config.mutable_gpu_options()->set_visible_device_list(
"0");
81 _options.config.mutable_gpu_options()->set_allow_growth(
true);
84 (*
_options.config.mutable_device_count())[
"GPU"] = 0;
85 _options.config.mutable_gpu_options()->set_visible_device_list(
"");
setenv("TF_CPP_MIN_LOG_LEVEL", level.c_str(), 0);
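
// Hedged usage sketch: TF_CPP_MIN_LOG_LEVEL semantics apply, so "0" prints everything and
// "3" silences everything except fatal errors; "3" is also the default level.
tensorflow::setLogging("3");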
RunOptions runOptions;
SavedModelBundle bundle;
status = LoadSavedModel(options.getSessionOptions(), runOptions, exportDir, {tag}, &bundle);
    << "error while loading metaGraphDef from '" << exportDir << "': " << status.ToString();
return new MetaGraphDef(bundle.meta_graph_def);
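
// Hedged usage sketch: loading the MetaGraphDef of a SavedModel export; the directory path
// is an illustrative assumption and the default "serve" tag is used.
tensorflow::MetaGraphDef* metaGraphDef = tensorflow::loadMetaGraphDef("/path/to/saved_model");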
    << "tensorflow::loadMetaGraph() is deprecated, use tensorflow::loadMetaGraphDef() instead";
GraphDef* graphDef = new GraphDef();
    << "error while loading graphDef from '" << pbFile << "': " << status.ToString();
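
// Hedged usage sketch: reading a frozen (constant) graph from a protobuf file; the file path
// is an illustrative assumption.
tensorflow::GraphDef* graphDef = tensorflow::loadGraphDef("/path/to/constantgraph.pb");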
throw cms::Exception("InvalidSession") << "error while creating session: " << status.ToString();
if (metaGraphDef == nullptr) {
  throw cms::Exception("InvalidMetaGraphDef") << "error while creating session: metaGraphDef is nullptr";
if (metaGraphDef->graph_def().node_size() <= 0) {
  throw cms::Exception("InvalidMetaGraphDef") << "error while creating session: graphDef has no nodes";
status = session->Create(metaGraphDef->graph_def());
    << "error while attaching metaGraphDef to session: " << status.ToString();
std::string varFileTensorName = metaGraphDef->saver_def().filename_tensor_name();
std::string restoreOpName = metaGraphDef->saver_def().restore_op_name();
std::string varDir = io::JoinPath(exportDir, kSavedModelVariablesDirectory);
std::string indexFile = io::JoinPath(varDir, MetaFilename(kSavedModelVariablesFilename));
std::string varFile = io::JoinPath(varDir, kSavedModelVariablesFilename);
Tensor varFileTensor(DT_STRING, TensorShape({}));
varFileTensor.scalar<tensorflow::tstring>()() = varFile;
status = session->Run({{varFileTensorName, varFileTensor}}, {}, {restoreOpName}, nullptr);
throw cms::Exception("InvalidSession") << "error while restoring variables in session: " << status.ToString();
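
// Hedged usage sketch: creating a session from a MetaGraphDef. The exportDir is passed again
// so that the restore op above can locate the variables directory; depending on the release,
// an additional threading/Options argument may be required.
tensorflow::Session* session = tensorflow::createSession(metaGraphDef, "/path/to/saved_model");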
if (graphDef == nullptr) {
  throw cms::Exception("InvalidGraphDef") << "error while creating session: graphDef is nullptr";
if (graphDef->node_size() <= 0) {
  throw cms::Exception("InvalidGraphDef") << "error while creating session: graphDef has no nodes";
status = session->Create(*graphDef);
throw cms::Exception("InvalidSession") << "error while attaching graphDef to session: " << status.ToString();
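
// Hedged usage sketch: attaching a previously loaded GraphDef to a fresh session; the
// GraphDef remains owned by the caller. Some releases also accept a threading/Options argument.
tensorflow::Session* session = tensorflow::createSession(graphDef);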
if (session == nullptr) {
auto s = const_cast<Session*>(session);
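
// Hedged usage sketch: closeSession() closes and deletes the session and resets the pointer;
// the GraphDef is not owned by the session and is cleaned up separately. session and graphDef
// are assumed to come from the sketches above.
tensorflow::closeSession(session);
delete graphDef;
graphDef = nullptr;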
bool isEmpty = false;
if (input.second.shape().num_elements() == 0) {
         const thread::ThreadPoolOptions& threadPoolOptions) {
if (session == nullptr) {
  throw cms::Exception("InvalidSession") << "cannot run empty session";
RunOptions runOptions;
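
// Hedged usage sketch for the ThreadPoolOptions overload above: route both inter-op and
// intra-op work through one pool (here the interface's NoThreadPool singleton). session and
// the input tensor are assumed to be prepared as in the plain run() sketch further below.
tensorflow::thread::ThreadPoolOptions tpOptions;
tpOptions.inter_op_threadpool = &tensorflow::NoThreadPool::instance();
tpOptions.intra_op_threadpool = &tensorflow::NoThreadPool::instance();
std::vector<tensorflow::Tensor> outputs;
tensorflow::run(session, {{"input", input}}, {"output"}, &outputs, tpOptions);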
         thread::ThreadPoolInterface* threadPool) {
thread::ThreadPoolOptions threadPoolOptions;
threadPoolOptions.inter_op_threadpool = threadPool;
threadPoolOptions.intra_op_threadpool = threadPool;
if (threadPoolName == "no_threads") {
  // ...
} else if (threadPoolName == "tbb") {
  // ...
} else if (threadPoolName == "tensorflow") {
    << "thread pool implementation '" << threadPoolName << "' unknown, use 'no_threads', 'tbb', or 'tensorflow'";
if (graph.load() != nullptr) {
  // ...
  graph.store(nullptr);