00001
00002 """
00003 Usage: ./cmsBenchmark.py [options]
00004
00005 Options:
00006 --cpu=... specify the core on which to run the performance suite
00007 --cores=... specify the number of cores of the machine (can be used with 0 to stop cmsScimark from running on the other cores)
00008 -n ..., --numevts specify the number of events for each tests/each candle/each step
00009 --candle=... specify the candle to run instead of all the 7 candles of the suite
00010 --step=... specify the step to run instead of all steps of the suite
00011 --repeat=... specify the number of times to re-run the whole suite
00012 -h, --help show this help
00013 -d show debugging information
00014
00015 Legal entries for individual candles (--candle option):
00016 HiggsZZ4LM190
00017 MinBias
00018 SingleElectronE1000
00019 SingleMuMinusPt10
00020 SinglePiMinusE1000
00021 TTbar
00022 QCD_80_120
00023
00024 Legal entries for specific tests (--step option):
00025 GEN
00026 SIM
00027 DIGI
00028 L1
00029 DIGI2RAW
00030 HLT
00031 RAW2DIGI
00032 RECO
00033 and combinations of steps like:
00034 GEN-SIM
00035 L1-DIGI2RAW-HLT
00036 DIGI2RAW-RAW2DIGI
00037 and sequences of steps or combinations of steps like:
00038 GEN-SIM,DIGI,L1-DIGI2RAW-RAW2DIGI,RECO
00039 Note: when the necessary pre-steps are omitted, cmsPerfSuite.py will take care of it.
00040
00041 Examples:
00042 ./cmsBenchmark.py
00043 This will run with the default options --cpu=1, --cores=4, --numevts=100, --step=GEN-SIM,DIGI,RECO --repeat=1 (Note: all results will be reported in a directory called Run1).
00044 OR
00045 ./cmsBenchmark.py --cpu=2
00046 This will run the test on core cpu2.
00047 OR
00048 ./cmsBenchmark.py --cpu=0,1 --cores=8 -n 200
This will run the suite with 200 events for all tests/candles/step, on cores cpu0 and cpu1 simultaneously, while running the cmsScimark benchmarks on the other 6 cores.
00050 OR
00051 ./cmsBenchmark.py --cores=8 --repeat=10 --candle QCD_80_120
00052 This will run the performance tests only on candle QCD_80_120, running 100 evts for all steps, and it will repeat these tests 10 times, saving the results in 10 separate directories (each called RunN, with N=1,..,10) to check for systematic/statistical uncertainties. Note that by default --repeat=1, so all results will be in a directory called Run1.
00053 OR
00054 ./cmsBenchmark.py --step=GEN-SIM,DIGI,RECO
00055 This will run the performance tests only for the steps "GEN,SIM" (at once), "DIGI" and "RECO" taking care of running the necessary intermediate steps to make sure all steps can be run.
00056
00057 """
import os

# Snapshot of the CMSSW runtime environment used in the report banner.
# NOTE(review): these lookups raise KeyError when the corresponding
# variables are unset — presumably this script is only meant to run inside
# a configured CMSSW shell; confirm that a hard failure is the intended
# behavior outside of it.
cmssw_base=os.environ["CMSSW_BASE"]
cmssw_release_base=os.environ["CMSSW_RELEASE_BASE"]
cmssw_version=os.environ["CMSSW_VERSION"]
host=os.environ["HOST"]
user=os.environ["USER"]


# Name of the performance-suite driver this benchmark wraps.
# NOTE(review): this constant is not referenced in the visible code (main
# hard-codes the same name) — verify before removing.
Script="cmsPerfSuite.py"
00068
00069
import getopt
import subprocess
import sys
00072
def usage():
    """Write the module documentation (the command-line help) to stdout."""
    sys.stdout.write("%s\n" % __doc__)
00075
00076 def main(argv):
00077
00078
00079 coresOption="4"
00080 cores=" --cores=4"
00081
00082 cpuOption=(1)
00083 cpu=" --cpu=1"
00084
00085 numevtsOption="100"
00086 numevts=" --timesize=100"
00087
00088 igprofevts=" --igprof=0"
00089 valgrindevts=" --valgrind=0"
00090
00091 candleOption=""
00092 candle=""
00093
00094 stepOption="GEN-SIM,DIGI,RECO"
00095 step=" --step="+stepOption
00096
00097 repeatOption=1
00098
00099 try:
00100 opts, args = getopt.getopt(argv, "n:hd", ["cpu=","cores=","numevts=","candle=","step=","repeat=","help"])
00101 except getopt.GetoptError:
00102 print "This argument option is not accepted"
00103 usage()
00104 sys.exit(2)
00105 for opt, arg in opts:
00106 if opt in ("-h", "--help"):
00107 usage()
00108 sys.exit()
00109 elif opt == '-d':
00110 global _debug
00111 _debug = 1
00112 elif opt == "--cpu":
00113 cpuOption=arg
00114 cpus=cpuOption.split(",")
00115 cpu=" --cpu="+cpuOption
00116 elif opt == "--cores":
00117 coresOption = arg
00118 elif opt in ("-n", "--numevts"):
00119 numevtsOption = arg
00120 numevts=" --timesize="+arg
00121 elif opt == "--candle":
00122 candleOption = arg
00123 candle=" --candle="+arg
00124 elif opt == "--step":
00125 stepOption = arg
00126 steps=stepOption.split(",")
00127 elif opt == "--repeat":
00128 repeatOption = int(arg)
00129
00130 if opts == []:
00131 print "No arguments given, so DEFAULT test will be run:"
00132
00133 import time
00134 date=time.ctime()
00135 path=os.path.abspath(".")
00136 print "CMS Benchmarking started running at %s on %s in directory %s, run by user %s" % (date,host,path,user)
00137
00138
00139
00140 print "This machine (%s) is assumed to have %s cores, and the suite will be run on cpu(s) %s" %(host,coresOption,cpuOption)
00141 print "%s events per test will be run" % numevtsOption
00142 if candleOption !="":
00143 print "Running only %s candle, instead of all the candles in the performance suite" % candleOption
00144 if stepOption != "":
00145 print "Profiling only the following steps: %s" % stepOption
00146 step=" --step="+stepOption
00147
00148
00149
00150
00151
00152
00153 if repeatOption !=1:
00154 print "The benchmarking will be repeated %s times" % repeatOption
00155
00156 for repetition in range(repeatOption):
00157 mkdircdcmd="mkdir Run"+str(repetition+1)+";cd Run"+str(repetition+1)
00158
00159
00160
00161
00162 PerfSuitecmd="cmsPerfSuite.py" + cpu + cores + numevts + igprofevts + valgrindevts + candle + step + ">& cmsPerfSuiteRun" + str(repetition + 1) + ".log"
00163 launchcmd=mkdircdcmd+";"+PerfSuitecmd
00164 print launchcmd
00165 sys.stdout.flush()
00166
00167
00168 launchcmdstdout=Popen(launchcmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read()
00169 print launchcmdstdout
00170
# Script entry point: forward the command line (minus the program name) to
# main() only when executed directly, not when imported.
if __name__ == "__main__":
    main(sys.argv[1:])