json_batchallocator.h
#ifndef JSONCPP_BATCHALLOCATOR_H_INCLUDED
# define JSONCPP_BATCHALLOCATOR_H_INCLUDED

# include <stdlib.h>
# include <assert.h>

# ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION

namespace Json {

/* Fast memory allocator.
 *
 * This memory allocator allocates memory for a batch of objects (specified by
 * the page size, the number of objects in each page).
 *
 * It does not allow the destruction of a single object. All the allocated objects
 * can be destroyed at once. The memory can be either released or reused for future
 * allocation.
 *
 * The in-place new operator must be used to construct the object using the pointer
 * returned by allocate.
 */
template<typename AllocatedType
        ,const unsigned int objectPerAllocation>
class BatchAllocator
{
public:
   typedef AllocatedType Type;

   BatchAllocator( unsigned int objectsPerPage = 255 )
      : freeHead_( 0 )
      , objectsPerPage_( objectsPerPage )
   {
//      printf( "Size: %d => %s\n", sizeof(AllocatedType), typeid(AllocatedType).name() );
      assert( sizeof(AllocatedType) * objectPerAllocation >= sizeof(AllocatedType *) ); // We must be able to store a slist in the object free space.
      assert( objectsPerPage >= 16 );
      batches_ = allocateBatch( 0 );   // allocate a dummy page
      currentBatch_ = batches_;
   }

   ~BatchAllocator()
   {
      for ( BatchInfo *batch = batches_; batch; )
      {
         BatchInfo *nextBatch = batch->next_;
         free( batch );
         batch = nextBatch;
      }
   }

   /// allocate space for an array of objectPerAllocation objects.
   /// @warning it is the responsibility of the caller to call the objects' constructors.
   AllocatedType *allocate()
   {
      if ( freeHead_ ) // returns node from free list.
      {
         AllocatedType *object = freeHead_;
         freeHead_ = *(AllocatedType **)object;
         return object;
      }
      if ( currentBatch_->used_ == currentBatch_->end_ )
      {
         // current batch is full: look for a batch that still has room
         currentBatch_ = currentBatch_->next_;
         while ( currentBatch_  &&  currentBatch_->used_ == currentBatch_->end_ )
            currentBatch_ = currentBatch_->next_;

         if ( !currentBatch_ ) // no free batch found, allocate a new one
         {
            currentBatch_ = allocateBatch( objectsPerPage_ );
            currentBatch_->next_ = batches_; // insert at the head of the list
            batches_ = currentBatch_;
         }
      }
      AllocatedType *allocated = currentBatch_->used_;
      currentBatch_->used_ += objectPerAllocation;
      return allocated;
   }

   /// Release the object.
   /// @warning it is the responsibility of the caller to actually destruct the object.
   void release( AllocatedType *object )
   {
      assert( object != 0 );
      *(AllocatedType **)object = freeHead_;
      freeHead_ = object;
   }

private:
   struct BatchInfo
   {
      BatchInfo *next_;
      AllocatedType *used_;
      AllocatedType *end_;
      AllocatedType buffer_[objectPerAllocation];
   };

   // disabled copy constructor and assignment operator.
   BatchAllocator( const BatchAllocator & );
   void operator =( const BatchAllocator & );

   static BatchInfo *allocateBatch( unsigned int objectsPerPage )
   {
      const unsigned int mallocSize = sizeof(BatchInfo) - sizeof(AllocatedType) * objectPerAllocation
                                    + sizeof(AllocatedType) * objectPerAllocation * objectsPerPage;
      BatchInfo *batch = static_cast<BatchInfo*>( malloc( mallocSize ) );
      batch->next_ = 0;
      batch->used_ = batch->buffer_;
      batch->end_ = batch->buffer_ + objectsPerPage;
      return batch;
   }

   BatchInfo *batches_;
   BatchInfo *currentBatch_;
   /// Head of a single linked list within the allocated space of freed objects.
   AllocatedType *freeHead_;
   unsigned int objectsPerPage_;
};


} // namespace Json

# endif // ifndef JSONCPP_DOC_EXCLUDE_IMPLEMENTATION

#endif // JSONCPP_BATCHALLOCATOR_H_INCLUDED
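A minimal usage sketch, not part of the header: because allocate() hands back raw, uninitialized storage and release() only threads the slot back onto the free list, the caller must construct with placement new and run the destructor explicitly, as the class comment requires. The Point type, page size of 32, and objectPerAllocation of 1 below are illustrative assumptions, not values taken from this file.

#include <new>       // placement new
#include <cstdio>
#include "json_batchallocator.h"

struct Point
{
   double x, y;
   Point( double x_, double y_ ) : x( x_ ), y( y_ ) {}
};

int main()
{
   // One Point per allocation, 32 objects per page (the constructor asserts >= 16).
   Json::BatchAllocator<Point, 1> allocator( 32 );

   // allocate() returns uninitialized storage; construct in place.
   Point *p = new ( allocator.allocate() ) Point( 1.0, 2.0 );
   std::printf( "%f %f\n", p->x, p->y );

   // release() does not call the destructor; the caller destructs first,
   // then the slot is linked into the free list for reuse.
   p->~Point();
   allocator.release( p );

   // A later allocate() may hand the released slot back.
   Point *q = new ( allocator.allocate() ) Point( 3.0, 4.0 );
   q->~Point();
   allocator.release( q );

   return 0;   // ~BatchAllocator frees every page at once.
}

The design relies on storing the free list inside the released objects themselves, which is why the constructor asserts that sizeof(AllocatedType) * objectPerAllocation is at least the size of a pointer.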