CMS 3D CMS Logo

List of all members | Public Member Functions | Public Attributes
tensorflow::Options Struct Reference

#include <TensorFlow.h>

Public Member Functions

Backend getBackend () const
 
int getNThreads () const
 
SessionOptions & getSessionOptions ()
 
 Options (Backend backend)
 
 Options ()
 
void setBackend (Backend backend=Backend::cpu)
 
void setThreading (int nThreads=1)
 

Public Attributes

Backend _backend
 
int _nThreads
 
SessionOptions _options
 

Detailed Description

Definition at line 33 of file TensorFlow.h.

Constructor & Destructor Documentation

◆ Options() [1/2]

tensorflow::Options::Options ( Backend  backend)
inline

Definition at line 38 of file TensorFlow.h.

38  : _nThreads{1}, _backend{backend} {
41  };
void setBackend(Backend backend=Backend::cpu)
Definition: TensorFlow.cc:22
void setThreading(int nThreads=1)
Definition: TensorFlow.cc:15

◆ Options() [2/2]

tensorflow::Options::Options ( )
inline

Definition at line 43 of file TensorFlow.h.

46  };
void setBackend(Backend backend=Backend::cpu)
Definition: TensorFlow.cc:22
void setThreading(int nThreads=1)
Definition: TensorFlow.cc:15

Member Function Documentation

◆ getBackend()

Backend tensorflow::Options::getBackend ( ) const
inline

Definition at line 57 of file TensorFlow.h.

References _backend.

57 { return _backend; };

◆ getNThreads()

int tensorflow::Options::getNThreads ( ) const
inline

Definition at line 56 of file TensorFlow.h.

References _nThreads.

56 { return _nThreads; };

◆ getSessionOptions()

SessionOptions& tensorflow::Options::getSessionOptions ( )
inline

Definition at line 55 of file TensorFlow.h.

References _options.

55 { return _options; };
SessionOptions _options
Definition: TensorFlow.h:36

◆ setBackend()

void tensorflow::Options::setBackend ( Backend  backend = Backend::cpu)

Definition at line 22 of file TensorFlow.cc.

References _options, cms::Exception::addContext(), HLT_2024v12_cff::backend, tensorflow::best, tensorflow::cpu, tensorflow::cuda, tensorflow::intel, edm::ResourceInformation::nvidiaDriverVersion(), tensorflow::rocm, and edm::errors::UnavailableAccelerator.

22  {
23  /*
24  * The TensorFlow backend configures the available devices using options provided in the sessionOptions proto.
25  * // Options from https://github.com/tensorflow/tensorflow/blob/c53dab9fbc9de4ea8b1df59041a5ffd3987328c3/tensorflow/core/protobuf/config.proto
26  *
27  * If the device_count["GPU"] = 0 GPUs are not used.
28  * The visible_device_list configuration is used to map the `visible` devices (from CUDA_VISIBLE_DEVICES) to `virtual` devices.
 29  * If Backend::cpu is requested, the GPU device is disallowed by device_count configuration.
 30  * If Backend::cuda is requested:
31  * - if ResourceInformation shows an available Nvidia GPU device:
32  * the device is used with memory_growth configuration (not allocating all cuda memory at once).
33  * - if no device is present: an exception is raised.
34  */
35 
37  if (backend == Backend::cpu) {
38  // disable GPU usage
39  (*_options.config.mutable_device_count())["GPU"] = 0;
40  _options.config.mutable_gpu_options()->set_visible_device_list("");
41  }
42  // NVidia GPU
43  else if (backend == Backend::cuda) {
44  if (not ri->nvidiaDriverVersion().empty()) {
 45  // Take only the first GPU in the CUDA_VISIBLE_DEVICES list
46  (*_options.config.mutable_device_count())["GPU"] = 1;
47  _options.config.mutable_gpu_options()->set_visible_device_list("0");
48  // Do not allocate all the memory on the GPU at the beginning.
49  _options.config.mutable_gpu_options()->set_allow_growth(true);
50  } else {
52  ex << "Cuda backend requested, but no NVIDIA GPU available in the job";
53  ex.addContext("Calling tensorflow::setBackend()");
54  throw ex;
55  }
56  }
57  // ROCm and Intel GPU are still not supported
58  else if ((backend == Backend::rocm) || (backend == Backend::intel)) {
60  ex << "ROCm/Intel GPU backend requested, but TF is not compiled yet for this platform";
61  ex.addContext("Calling tensorflow::setBackend()");
62  throw ex;
63  }
64  // Get NVidia GPU if possible or fallback to CPU
65  else if (backend == Backend::best) {
 66  // Check if an Nvidia GPU is available
67  if (not ri->nvidiaDriverVersion().empty()) {
 68  // Take only the first GPU in the CUDA_VISIBLE_DEVICES list
69  (*_options.config.mutable_device_count())["GPU"] = 1;
70  _options.config.mutable_gpu_options()->set_visible_device_list("0");
71  // Do not allocate all the memory on the GPU at the beginning.
72  _options.config.mutable_gpu_options()->set_allow_growth(true);
73  } else {
74  // Just CPU support
75  (*_options.config.mutable_device_count())["GPU"] = 0;
76  _options.config.mutable_gpu_options()->set_visible_device_list("");
77  }
78  }
79  }
virtual std::string const & nvidiaDriverVersion() const =0
SessionOptions _options
Definition: TensorFlow.h:36

◆ setThreading()

void tensorflow::Options::setThreading ( int  nThreads = 1)

Definition at line 15 of file TensorFlow.cc.

References _nThreads, and _options.

15  {
16  _nThreads = nThreads;
17  // set number of threads used for intra and inter operation communication
18  _options.config.set_intra_op_parallelism_threads(nThreads);
19  _options.config.set_inter_op_parallelism_threads(nThreads);
20  }
SessionOptions _options
Definition: TensorFlow.h:36

Member Data Documentation

◆ _backend

Backend tensorflow::Options::_backend

Definition at line 35 of file TensorFlow.h.

Referenced by getBackend().

◆ _nThreads

int tensorflow::Options::_nThreads

Definition at line 34 of file TensorFlow.h.

Referenced by getNThreads(), and setThreading().

◆ _options

SessionOptions tensorflow::Options::_options

Definition at line 36 of file TensorFlow.h.

Referenced by getSessionOptions(), setBackend(), and setThreading().