List of all members.
Detailed Description
Definition at line 4 of file DBCopy.py.
Constructor & Destructor Documentation
def python::DBCopy::DBCopy::__init__ ( self, sourcesession, destsession, rowcachesize = 1024 )
Definition at line 6 of file DBCopy.py.
00007 :
00008 self.__sourcesession=sourcesession
00009 self.__destsession=destsession
00010 self.__rowcachesize=rowcachesize
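The constructor only stores the two CORAL session handles and the bulk-insert row cache size. Below is a minimal construction sketch, assuming PyCoral is available as the coral module and that DBCopy.py is importable; the sqlite connection strings are placeholders, not real databases.

# Hedged sketch: open a read-only source session and an updatable destination
# session through the standard PyCoral ConnectionService, then build the copier.
import coral
from DBCopy import DBCopy   # assumes DBCopy.py is on the python path

svc = coral.ConnectionService()
sourcesession = svc.connect('sqlite_file:source.db', accessMode=coral.access_ReadOnly)
destsession = svc.connect('sqlite_file:dest.db', accessMode=coral.access_Update)

copier = DBCopy(sourcesession, destsession, rowcachesize=1024)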
Member Function Documentation
def python::DBCopy::DBCopy::copyDB ( self )
Copy all globaltag-related tables from an external source.
The destination database must be empty; if it is not, it will be cleaned implicitly. The inventory is copied implicitly as well.
Definition at line 184 of file DBCopy.py.
00185 :
00186 """copy all globaltag related tables from an external source.
00187 The destination database must be empty. If not so, it will be cleaned implicitly. Inventory are implicitly copied as well.
00188 """
00189 dest_transaction=self.__destsession.transaction()
00190 source_transaction=self.__sourcesession.transaction()
00191 tablelist=[]
00192 alltablelist=[]
00193 trees=[]
00194 try:
00195 source_transaction.start(True)
00196 tablelist=list(self.__sourcesession.nominalSchema().listTables())
00197 source_transaction.commit()
00198 except Exception, e:
00199 source_transaction.rollback()
00200 raise Exception, str(e)
00201 try:
00202 i = tablelist.index(CommonUtils.inventoryTableName())
00203 alltablelist.append(CommonUtils.inventoryTableName())
00204 except ValueError:
00205             raise Exception('Error: '+CommonUtils.inventoryTableName()+' does not exist in the source')
00206 try:
00207 i = tablelist.index(CommonUtils.inventoryIDTableName())
00208 alltablelist.append(CommonUtils.inventoryIDTableName())
00209 except ValueError:
00210             raise Exception('Error: '+CommonUtils.inventoryIDTableName()+' does not exist')
00211
00212 try:
00213 i = tablelist.index(CommonUtils.commentTableName())
00214 alltablelist.append(CommonUtils.commentTableName())
00215 except ValueError:
00216 pass
00217
00218 for tablename in tablelist:
00219 posbeg=tablename.find('TAGTREE_TABLE_')
00220 if posbeg != -1:
00221 treename=tablename[posbeg+len('TAGTREE_TABLE_'):]
00222 trees.append(treename)
00223 for tree in trees:
00224 try:
00225 tablelist.index(CommonUtils.treeIDTableName(tree))
00226 except ValueError:
00227 print 'non-existing id table for tree ',tree
00228 continue
00229 alltablelist.append(CommonUtils.treeTableName(tree))
00230 alltablelist.append(CommonUtils.treeIDTableName(tree))
00231
00232 inv=tagInventory.tagInventory(self.__destsession)
00233 inv.createInventoryTable()
00234 for treename in trees:
00235 t=TagTree.tagTree(self.__destsession,treename)
00236 t.createTagTreeTable()
00237
00238 try:
00239 for mytable in alltablelist:
00240 dest_transaction.start(False)
00241 source_transaction.start(True)
00242 data=coral.AttributeList()
00243 my_editor=self.__destsession.nominalSchema().tableHandle(mytable).dataEditor()
00244 source_query=self.__sourcesession.nominalSchema().tableHandle(mytable).newQuery()
00245 conditionData=coral.AttributeList()
00246 source_query.setCondition('',conditionData)
00247 source_query.setRowCacheSize(self.__rowcachesize)
00248 my_editor.rowBuffer(data)
00249 source_query.defineOutput(data)
00250 bulkOperation=my_editor.bulkInsert(data,self.__rowcachesize)
00251 cursor=source_query.execute()
00252 while cursor.next():
00253 bulkOperation.processNextIteration()
00254 bulkOperation.flush()
00255 del bulkOperation
00256 del source_query
00257 source_transaction.commit()
00258 dest_transaction.commit()
00259 except Exception, e:
00260 source_transaction.rollback()
00261 dest_transaction.rollback()
00262 raise Exception, str(e)
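A hedged usage sketch for copyDB(), reusing the hypothetical sourcesession and destsession handles from the constructor example above; note that a non-empty destination is cleaned implicitly.

# Sketch only: copy every globaltag-related table (inventory, ID tables,
# comment table if present, and all TAGTREE_TABLE_* trees) into the destination.
copier = DBCopy(sourcesession, destsession)
copier.copyDB()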
def python::DBCopy::DBCopy::copyInventory ( self )
Copy the entire inventory. Any pre-existing inventory in the destination db will be wiped.
Definition at line 14 of file DBCopy.py.
00015 :
00016 """copy entire inventory. The original inventory in the source db will be wiped.
00017 """
00018 inv=tagInventory.tagInventory(self.__destsession)
00019 inv.createInventoryTable()
00020 dest_transaction=self.__destsession.transaction()
00021 source_transaction=self.__sourcesession.transaction()
00022 try:
00023 dest_transaction.start(False)
00024
00025 data=coral.AttributeList()
00026 my_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.inventoryTableName()).dataEditor()
00027 source_transaction.start(True)
00028 source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.inventoryTableName()).newQuery()
00029 conditionData=coral.AttributeList()
00030 source_query.setCondition('',conditionData)
00031 source_query.setRowCacheSize(self.__rowcachesize)
00032 my_editor.rowBuffer(data)
00033 source_query.defineOutput(data)
00034 bulkOperation=my_editor.bulkInsert(data,self.__rowcachesize)
00035 cursor=source_query.execute()
00036 while (cursor.next() ):
00037 bulkOperation.processNextIteration()
00038 bulkOperation.flush()
00039 del bulkOperation
00040 del source_query
00041
00042
00043 source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.inventoryIDTableName()).newQuery()
00044 my_ideditor=self.__destsession.nominalSchema().tableHandle(CommonUtils.inventoryIDTableName()).dataEditor()
00045 iddata=coral.AttributeList()
00046 source_query.setCondition('',conditionData)
00047 source_query.setRowCacheSize(self.__rowcachesize)
00048 my_ideditor.rowBuffer(iddata)
00049 source_query.defineOutput(iddata)
00050 bulkOperation=my_ideditor.bulkInsert(iddata,self.__rowcachesize)
00051 cursor=source_query.execute()
00052 while cursor.next():
00053 bulkOperation.processNextIteration()
00054 bulkOperation.flush()
00055 del bulkOperation
00056 del source_query
00057
00058
00059 if self.__sourcesession.nominalSchema().existsTable(CommonUtils.commentTableName()):
00060 source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.commentTableName()).newQuery()
00061 my_commenteditor=self.__destsession.nominalSchema().tableHandle(CommonUtils.commentTableName()).dataEditor()
00062 commentdata=coral.AttributeList()
00063 qcondition=coral.AttributeList()
00064 qcondition.extend('tablename','string')
00065 qcondition['tablename'].setData(CommonUtils.commentTableName())
00066 source_query.setCondition('tablename = :tablename',qcondition)
00067 source_query.setRowCacheSize(self.__rowcachesize)
00068 my_commenteditor.rowBuffer(commentdata)
00069 source_query.defineOutput(commentdata)
00070 bulkOperation=my_commenteditor.bulkInsert(commentdata,self.__rowcachesize)
00071 cursor=source_query.execute()
00072 while cursor.next():
00073 bulkOperation.processNextIteration()
00074 bulkOperation.flush()
00075 del bulkOperation
00076 del source_query
00077
00078 source_transaction.commit()
00079 dest_transaction.commit()
00080 except Exception, e:
00081 source_transaction.rollback()
00082 dest_transaction.rollback()
00083 raise Exception, str(e)
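A hedged usage sketch for copyInventory(), again assuming the sessions from the constructor example; only the inventory table, its ID table and, if it exists in the source, the comment table are transferred.

# Sketch only: copy the tag inventory by itself.
copier = DBCopy(sourcesession, destsession)
copier.copyInventory()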
def python::DBCopy::DBCopy::copyTrees ( self, treenames )
Copy trees from an external source.
If an inventory already exists in the destination, the copied tags are merged into it.
Definition at line 84 of file DBCopy.py.
00085 :
00086 """copy tree from an external source.
00087 Merge inventory if existing in the destination
00088 """
00089 allleafs=[]
00090 for treename in treenames:
00091 t=TagTree.tagTree(self.__sourcesession,treename)
00092 allleafs.append(t.getAllLeaves())
00093
00094 merged={}
00095 for s in allleafs:
00096 for x in s:
00097 merged[x.tagid]=1
00098 sourceinv=tagInventory.tagInventory(self.__sourcesession)
00099 sourcetags=sourceinv.getAllEntries()
00100 entries=[]
00101 for i in merged.keys():
00102 for n in sourcetags:
00103 if n.tagid==i:
00104 entry={}
00105 entry['tagid']=i
00106 entry['tagname']=n.tagname
00107 entry['pfn']=n.pfn
00108 entry['recordname']=n.recordname
00109 entry['objectname']=n.objectname
00110 entry['labelname']=n.labelname
00111 entries.append(entry)
00112 inv=tagInventory.tagInventory(self.__destsession)
00113 tagiddict=inv.bulkInsertEntries(entries)
00114 dest_transaction=self.__destsession.transaction()
00115 source_transaction=self.__sourcesession.transaction()
00116
00117 try:
00118 for treename in treenames:
00119 desttree=TagTree.tagTree(self.__destsession,treename)
00120 desttree.createTagTreeTable()
00121 dest_transaction.start(False)
00122 source_transaction.start(True)
00123
00124 data=coral.AttributeList()
00125 dest_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.treeTableName(treename)).dataEditor()
00126 source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.treeTableName(treename)).newQuery()
00127 conditionData=coral.AttributeList()
00128 source_query.setCondition('',conditionData)
00129 source_query.setRowCacheSize(self.__rowcachesize)
00130 dest_editor.rowBuffer(data)
00131 source_query.defineOutput(data)
00132 bulkOperation=dest_editor.bulkInsert(data,self.__rowcachesize)
00133 cursor=source_query.execute()
00134 while cursor.next():
00135 bulkOperation.processNextIteration()
00136 bulkOperation.flush()
00137 del bulkOperation
00138 del source_query
00139
00140 iddata=coral.AttributeList()
00141 dest_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.treeIDTableName(treename)).dataEditor()
00142 source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.treeIDTableName(treename)).newQuery()
00143 conditionData=coral.AttributeList()
00144 source_query.setCondition('',conditionData)
00145 source_query.setRowCacheSize(self.__rowcachesize)
00146 dest_editor.rowBuffer(iddata)
00147 source_query.defineOutput(iddata)
00148 bulkOperation=dest_editor.bulkInsert(iddata,self.__rowcachesize)
00149 cursor=source_query.execute()
00150 while cursor.next():
00151 bulkOperation.processNextIteration()
00152 bulkOperation.flush()
00153 del bulkOperation
00154 del source_query
00155
00156 if self.__sourcesession.nominalSchema().existsTable(CommonUtils.commentTableName()):
00157 data=coral.AttributeList()
00158 dest_editor=self.__destsession.nominalSchema().tableHandle(CommonUtils.commentTableName()).dataEditor()
00159 source_query=self.__sourcesession.nominalSchema().tableHandle(CommonUtils.commentTableName()).newQuery()
00160 conditionData=coral.AttributeList()
00161 source_query.setCondition('tablename = :tablename',conditionData)
00162 conditionData.extend('tablename','string')
00163 conditionData['tablename'].setData(CommonUtils.treeTableName(treename))
00164 source_query.setRowCacheSize(self.__rowcachesize)
00165 dest_editor.rowBuffer(data)
00166 source_query.defineOutput(data)
00167 bulkOperation=dest_editor.bulkInsert(data,self.__rowcachesize)
00168 cursor=source_query.execute()
00169 while cursor.next():
00170 bulkOperation.processNextIteration()
00171 bulkOperation.flush()
00172 del bulkOperation
00173 del source_query
00174
00175 source_transaction.commit()
00176 dest_transaction.commit()
00177
00178 desttree.replaceLeafLinks(tagiddict)
00179 except Exception, e:
00180 source_transaction.rollback()
00181 dest_transaction.rollback()
00182 raise Exception, str(e)
00183
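A hedged usage sketch for copyTrees(); the tree names below are purely illustrative, and the sessions are those from the constructor example. The tags referenced by the selected trees are bulk-inserted into the destination inventory first, and the copied leaf links are then remapped with replaceLeafLinks().

# Sketch only: copy two hypothetical tag trees and merge their tags
# into the destination inventory.
copier = DBCopy(sourcesession, destsession)
copier.copyTrees(['TREE1', 'TREE2'])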
def python::DBCopy::DBCopy::resetrowcachesize ( self, newrowcachesize )
Definition at line 11 of file DBCopy.py.
00012 :
00013 self.__rowcachesize=newrowcachesize
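resetrowcachesize() simply overrides the row cache size chosen at construction time. A small sketch, reusing the copier from the examples above; 4096 is an arbitrary illustrative value.

# Sketch only: enlarge the bulk-insert row cache before copying a large database.
copier.resetrowcachesize(4096)
copier.copyDB()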
Member Data Documentation