DBCopy.py
import coral
import CommonUtils, TagTree, tagInventory

class DBCopy(object):

    def __init__( self, sourcesession, destsession, rowcachesize=1024 ):
        self.__sourcesession=sourcesession
        self.__destsession=destsession
        self.__rowcachesize=rowcachesize

    def resetrowcachesize( self, newrowcachesize):
        self.__rowcachesize=newrowcachesize

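    # Note: __rowcachesize is used below both as the CORAL query row cache
    # (source_query.setRowCacheSize) and as the bulk-insert buffer size
    # (dataEditor().bulkInsert), so it bounds how many rows are moved per
    # round trip. Illustrative tuning only (the values are examples):
    #
    #   dbcp = DBCopy(sourcesession, destsession, rowcachesize=1024)
    #   dbcp.resetrowcachesize(4096)   # larger buffer for a big inventory
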
    def copyInventory( self ):
        """copy entire inventory. The existing inventory in the destination db will be wiped.
        """
        inv=tagInventory.tagInventory(self.__destsession)
        inv.createInventoryTable()
        dest_transaction=self.__destsession.transaction()
        source_transaction=self.__sourcesession.transaction()
        try:
            dest_transaction.start(False)
            #copy inventory table
            data=coral.AttributeList()
            my_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.inventoryTableName()).dataEditor()
            source_transaction.start(True)
            source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.inventoryTableName()).newQuery()
            conditionData=coral.AttributeList()
            source_query.setCondition('',conditionData)
            source_query.setRowCacheSize(self.__rowcachesize)
            my_editor.rowBuffer(data)
            source_query.defineOutput(data)
            bulkOperation=my_editor.bulkInsert(data,self.__rowcachesize)
            cursor=source_query.execute()
            while cursor.next():
                bulkOperation.processNextIteration()
            bulkOperation.flush()
            del bulkOperation
            del source_query

            #copy inventory id table
            source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.inventoryIDTableName()).newQuery()
            my_ideditor=self.__destsession.nominalSchema().tableHandle(CommonUtils.inventoryIDTableName()).dataEditor()
            iddata=coral.AttributeList()
            source_query.setCondition('',conditionData)
            source_query.setRowCacheSize(self.__rowcachesize)
            my_ideditor.rowBuffer(iddata)
            source_query.defineOutput(iddata)
            bulkOperation=my_ideditor.bulkInsert(iddata,self.__rowcachesize)
            cursor=source_query.execute()
            while cursor.next():
                bulkOperation.processNextIteration()
            bulkOperation.flush()
            del bulkOperation
            del source_query

            #copy comment table if it exists
            if self.__sourcesession.nominalSchema().existsTable(CommonUtils.commentTableName()):
                source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.commentTableName()).newQuery()
                my_commenteditor=self.__destsession.nominalSchema().tableHandle(CommonUtils.commentTableName()).dataEditor()
                commentdata=coral.AttributeList()
                qcondition=coral.AttributeList()
                qcondition.extend('tablename','string')
                qcondition['tablename'].setData(CommonUtils.commentTableName())
                source_query.setCondition('tablename = :tablename',qcondition)
                source_query.setRowCacheSize(self.__rowcachesize)
                my_commenteditor.rowBuffer(commentdata)
                source_query.defineOutput(commentdata)
                bulkOperation=my_commenteditor.bulkInsert(commentdata,self.__rowcachesize)
                cursor=source_query.execute()
                while cursor.next():
                    bulkOperation.processNextIteration()
                bulkOperation.flush()
                del bulkOperation
                del source_query

            source_transaction.commit()
            dest_transaction.commit()
        except Exception as e:
            source_transaction.rollback()
            dest_transaction.rollback()
            raise Exception(str(e))

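    # The three blocks above repeat one CORAL idiom: open a read-only query on
    # the source table, bind its output to the destination editor's row buffer,
    # and stream the rows through a bulk-insert operation. A minimal sketch of
    # that idiom as a stand-alone helper (the name copy_table is illustrative,
    # not part of this module; only CORAL calls already used above appear):
    #
    #   def copy_table(sourceschema, destschema, tablename, rowcachesize):
    #       data=coral.AttributeList()
    #       editor=destschema.tableHandle(tablename).dataEditor()
    #       query=sourceschema.tableHandle(tablename).newQuery()
    #       query.setCondition('',coral.AttributeList())
    #       query.setRowCacheSize(rowcachesize)
    #       editor.rowBuffer(data)
    #       query.defineOutput(data)
    #       bulkOperation=editor.bulkInsert(data,rowcachesize)
    #       cursor=query.execute()
    #       while cursor.next():
    #           bulkOperation.processNextIteration()
    #       bulkOperation.flush()
    #       del bulkOperation
    #       del query
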
    def copyTrees( self, treenames ):
        """copy the named trees from an external source.
        Merge their tags into the inventory if one already exists in the destination.
        """
        allleafs=[]
        #collect the leaves of each tree in the source
        for treename in treenames:
            t=TagTree.tagTree(self.__sourcesession,treename)
            allleafs.append(t.getAllLeaves())
        #create a unique tag list
        merged={}
        for s in allleafs:
            for x in s:
                merged[x.tagid]=1
        #look up the corresponding entries in the source inventory
        sourceinv=tagInventory.tagInventory(self.__sourcesession)
        sourcetags=sourceinv.getAllEntries()
        entries=[]
        for i in merged.keys():
            for n in sourcetags:
                if n.tagid==i:
                    entry={}
                    entry['tagid']=i
                    entry['tagname']=n.tagname
                    entry['pfn']=n.pfn
                    entry['recordname']=n.recordname
                    entry['objectname']=n.objectname
                    entry['labelname']=n.labelname
                    entries.append(entry)
        inv=tagInventory.tagInventory(self.__destsession)
        tagiddict=inv.bulkInsertEntries(entries)
        dest_transaction=self.__destsession.transaction()
        source_transaction=self.__sourcesession.transaction()
        #copy table contents
        try:
            for treename in treenames:
                desttree=TagTree.tagTree(self.__destsession,treename)
                desttree.createTagTreeTable()
                dest_transaction.start(False)
                source_transaction.start(True)
                #copy tree tables
                data=coral.AttributeList()
                dest_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.treeTableName(treename)).dataEditor()
                source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.treeTableName(treename)).newQuery()
                conditionData=coral.AttributeList()
                source_query.setCondition('',conditionData)
                source_query.setRowCacheSize(self.__rowcachesize)
                dest_editor.rowBuffer(data)
                source_query.defineOutput(data)
                bulkOperation=dest_editor.bulkInsert(data,self.__rowcachesize)
                cursor=source_query.execute()
                while cursor.next():
                    bulkOperation.processNextIteration()
                bulkOperation.flush()
                del bulkOperation
                del source_query
                #copy id tables
                iddata=coral.AttributeList()
                dest_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.treeIDTableName(treename)).dataEditor()
                source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.treeIDTableName(treename)).newQuery()
                conditionData=coral.AttributeList()
                source_query.setCondition('',conditionData)
                source_query.setRowCacheSize(self.__rowcachesize)
                dest_editor.rowBuffer(iddata)
                source_query.defineOutput(iddata)
                bulkOperation=dest_editor.bulkInsert(iddata,self.__rowcachesize)
                cursor=source_query.execute()
                while cursor.next():
                    bulkOperation.processNextIteration()
                bulkOperation.flush()
                del bulkOperation
                del source_query
                #copy comment table if it exists
                if self.__sourcesession.nominalSchema().existsTable(CommonUtils.commentTableName()):
                    data=coral.AttributeList()
                    dest_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.commentTableName()).dataEditor()
                    source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.commentTableName()).newQuery()
                    conditionData=coral.AttributeList()
                    conditionData.extend('tablename','string')
                    conditionData['tablename'].setData(CommonUtils.treeTableName(treename))
                    source_query.setCondition('tablename = :tablename',conditionData)
                    source_query.setRowCacheSize(self.__rowcachesize)
                    dest_editor.rowBuffer(data)
                    source_query.defineOutput(data)
                    bulkOperation=dest_editor.bulkInsert(data,self.__rowcachesize)
                    cursor=source_query.execute()
                    while cursor.next():
                        bulkOperation.processNextIteration()
                    bulkOperation.flush()
                    del bulkOperation
                    del source_query

                source_transaction.commit()
                dest_transaction.commit()
                #fix leaf node links
                desttree.replaceLeafLinks(tagiddict)
        except Exception as e:
            source_transaction.rollback()
            dest_transaction.rollback()
            raise Exception(str(e))

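    # Unlike the full-table copies, the comment-table copy above selects only
    # the rows belonging to the tree being copied, using a bound variable in
    # the CORAL query condition. The same idiom in isolation (the tree name is
    # an example taken from the self-test below):
    #
    #   cond=coral.AttributeList()
    #   cond.extend('tablename','string')
    #   cond['tablename'].setData(CommonUtils.treeTableName('CRUZET3_V2H'))
    #   source_query.setCondition('tablename = :tablename',cond)
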
    def copyDB( self ):
        """copy all globaltag related tables from an external source.
        The destination database must be empty; if it is not, it will be cleaned implicitly. The inventory is implicitly copied as well.
        """
        dest_transaction=self.__destsession.transaction()
        source_transaction=self.__sourcesession.transaction()
        tablelist=[]
        alltablelist=[]
        trees=[]
        try:
            source_transaction.start(True)
            tablelist=list(self.__sourcesession.nominalSchema().listTables())
            source_transaction.commit()
        except Exception as e:
            source_transaction.rollback()
            raise Exception(str(e))
        try:
            tablelist.index(CommonUtils.inventoryTableName())
            alltablelist.append(CommonUtils.inventoryTableName())
        except ValueError:
            raise Exception('Error: '+CommonUtils.inventoryTableName()+' does not exist in the source')
        try:
            tablelist.index(CommonUtils.inventoryIDTableName())
            alltablelist.append(CommonUtils.inventoryIDTableName())
        except ValueError:
            raise Exception('Error: '+CommonUtils.inventoryIDTableName()+' does not exist')

        try:
            tablelist.index(CommonUtils.commentTableName())
            alltablelist.append(CommonUtils.commentTableName())
        except ValueError:
            pass

        #collect tree names from the TAGTREE_TABLE_ tables in the source
        for tablename in tablelist:
            posbeg=tablename.find('TAGTREE_TABLE_')
            if posbeg != -1:
                treename=tablename[posbeg+len('TAGTREE_TABLE_'):]
                trees.append(treename)
        for tree in trees:
            try:
                tablelist.index(CommonUtils.treeIDTableName(tree))
            except ValueError:
                print 'non-existing id table for tree ',tree
                continue
            alltablelist.append(CommonUtils.treeTableName(tree))
            alltablelist.append(CommonUtils.treeIDTableName(tree))
        #schema preparation
        inv=tagInventory.tagInventory(self.__destsession)
        inv.createInventoryTable()
        for treename in trees:
            t=TagTree.tagTree(self.__destsession,treename)
            t.createTagTreeTable()
        #copy table contents
        try:
            for mytable in alltablelist:
                dest_transaction.start(False)
                source_transaction.start(True)
                data=coral.AttributeList()
                my_editor=self.__destsession.nominalSchema().tableHandle(mytable).dataEditor()
                source_query=self.__sourcesession.nominalSchema().tableHandle(mytable).newQuery()
                conditionData=coral.AttributeList()
                source_query.setCondition('',conditionData)
                source_query.setRowCacheSize(self.__rowcachesize)
                my_editor.rowBuffer(data)
                source_query.defineOutput(data)
                bulkOperation=my_editor.bulkInsert(data,self.__rowcachesize)
                cursor=source_query.execute()
                while cursor.next():
                    bulkOperation.processNextIteration()
                bulkOperation.flush()
                del bulkOperation
                del source_query
                source_transaction.commit()
                dest_transaction.commit()
        except Exception as e:
            source_transaction.rollback()
            dest_transaction.rollback()
            raise Exception(str(e))

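# copyDB discovers which trees to copy by scanning the source schema for
# tables carrying the TAGTREE_TABLE_ prefix. Illustration of the name parsing
# used above (the table name is an example):
#
#   tablename='TAGTREE_TABLE_CRUZET3_V2H'
#   posbeg=tablename.find('TAGTREE_TABLE_')
#   treename=tablename[posbeg+len('TAGTREE_TABLE_'):]   # -> 'CRUZET3_V2H'
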
if __name__ == "__main__":
    #context = coral.Context()
    #context.setVerbosityLevel( 'ERROR' )
    svc = coral.ConnectionService()

    sourcesession = svc.connect( 'sqlite_file:source.db',
                                 accessMode = coral.access_Update )
    destsession = svc.connect( 'sqlite_file:dest.db',
                               accessMode = coral.access_Update )
    try:
        dbcp=DBCopy(sourcesession,destsession,1024)
        print "TEST copyInventory"
        dbcp.copyInventory()
        print "TEST copytrees"
        treenames=['CRUZET3_V2H']
        dbcp.copyTrees(treenames)
        del sourcesession
        del destsession
    except Exception as e:
        print "Failed in unit test"
        print str(e)
        del sourcesession
        del destsession

    sourcesession = svc.connect( 'sqlite_file:source.db',
                                 accessMode = coral.access_Update )
    destsession = svc.connect( 'sqlite_file:dest2.db',
                               accessMode = coral.access_Update )
    try:
        dbcp=DBCopy(sourcesession,destsession,1024)
        print "TEST full dbCopy"
        dbcp.copyDB()
        del sourcesession
        del destsession
    except Exception as e:
        print "Failed in unit test"
        print str(e)
        del sourcesession
        del destsession
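# Running this self-test requires a populated 'source.db' SQLite file in the
# working directory (an assumption based on the connection strings above);
# 'dest.db' and 'dest2.db' receive the copied inventory, trees, and full DB.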