Package madgraph :: Package interface :: Module amcatnlo_run_interface
[hide private]
[frames] | [no frames]

Source Code for Module madgraph.interface.amcatnlo_run_interface

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """A user friendly command line interface to access MadGraph5_aMC@NLO features. 
  16     Uses the cmd package for command interpretation and tab completion. 
  17  """ 
  18  from __future__ import division 
  19   
  20  import atexit 
  21  import glob 
  22  import logging 
  23  import math 
  24  import optparse 
  25  import os 
  26  import pydoc 
  27  import random 
  28  import re 
  29  import shutil 
  30  import subprocess 
  31  import sys 
  32  import traceback 
  33  import time 
  34  import signal 
  35  import tarfile 
  36  import copy 
  37  import datetime 
  38  import tarfile 
  39  import traceback 
  40  import StringIO 
  41  try: 
  42      import cpickle as pickle 
  43  except: 
  44      import pickle 
  45   
  46  try: 
  47      import readline 
  48      GNU_SPLITTING = ('GNU' in readline.__doc__) 
  49  except: 
  50      GNU_SPLITTING = True 
  51   
  52  root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0] 
  53  root_path = os.path.split(root_path)[0] 
  54  sys.path.insert(0, os.path.join(root_path,'bin')) 
  55   
   56  # useful shortcut 
  57  pjoin = os.path.join 
  58  # Special logger for the Cmd Interface 
  59  logger = logging.getLogger('madgraph.stdout') # -> stdout 
  60  logger_stderr = logging.getLogger('madgraph.stderr') # ->stderr 
  61    
  62  try: 
  63      import madgraph 
  64  except ImportError:  
  65      aMCatNLO = True  
  66      import internal.extended_cmd as cmd 
  67      import internal.common_run_interface as common_run 
  68      import internal.banner as banner_mod 
  69      import internal.misc as misc     
  70      from internal import InvalidCmd, MadGraph5Error 
  71      import internal.files as files 
  72      import internal.cluster as cluster 
  73      import internal.save_load_object as save_load_object 
  74      import internal.gen_crossxhtml as gen_crossxhtml 
  75      import internal.sum_html as sum_html 
  76      import internal.shower_card as shower_card 
  77      import internal.FO_analyse_card as analyse_card  
  78      import internal.lhe_parser as lhe_parser 
  79  else: 
  80      # import from madgraph directory 
  81      aMCatNLO = False 
  82      import madgraph.interface.extended_cmd as cmd 
  83      import madgraph.interface.common_run_interface as common_run 
  84      import madgraph.iolibs.files as files 
  85      import madgraph.iolibs.save_load_object as save_load_object 
  86      import madgraph.madevent.gen_crossxhtml as gen_crossxhtml 
  87      import madgraph.madevent.sum_html as sum_html 
  88      import madgraph.various.banner as banner_mod 
  89      import madgraph.various.cluster as cluster 
  90      import madgraph.various.misc as misc 
  91      import madgraph.various.shower_card as shower_card 
  92      import madgraph.various.FO_analyse_card as analyse_card 
  93      import madgraph.various.lhe_parser as lhe_parser 
  94      from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error,MG5DIR 
class aMCatNLOError(Exception):
    """Generic error raised by the aMC@NLO run interface.

    NOTE(review): this shadows the aMCatNLOError imported from the madgraph
    package in the else-branch above — presumably intentional so both import
    paths expose the same name; confirm before unifying."""
    pass
def compile_dir(*arguments):
    """Compile the directory p_dir.

    ``arguments`` is either a single tuple, or seven positional arguments:
    (me_dir, p_dir, mode, options, tests, exe, run_mode).  This must stay a
    module-level function (not a class method) so that it can be dispatched
    to worker processes for multicore compilation.

    Returns 0 on success, or the MadGraph5Error message on failure.
    Raises aMCatNLOError on a wrong number of arguments.
    """
    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments) == 7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        # py2-only 'raise E, msg' syntax replaced by the callable form
        raise aMCatNLOError('not correct number of argument')
    logger.info(' Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        # compile and run the requested sanity tests
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            # test_ME and test_MC share the same executable
            if test == 'test_ME' or test == 'test_MC':
                test_exe = 'test_soft_col_limits'
            else:
                test_exe = test
            misc.compile([test_exe], cwd=this_dir, job_specs=False)
            # renamed from 'input' to avoid shadowing the builtin
            input_path = pjoin(me_dir, '%s_input.txt' % test)
            # this can be improved/better written to handle the output
            misc.call(['./%s' % (test_exe)], cwd=this_dir,
                      stdin=open(input_path),
                      stdout=open(pjoin(this_dir, '%s.log' % test), 'w'),
                      close_fds=True)
            # archive the MadLoop resources produced by check_poles
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'MadLoop5_resources')):
                tf = tarfile.open(pjoin(this_dir, 'MadLoop5_resources.tar.gz'), 'w:gz',
                                  dereference=True)
                tf.add(pjoin(this_dir, 'MadLoop5_resources'), arcname='MadLoop5_resources')
                tf.close()

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs=False)
            misc.call(['./gensym'], cwd=this_dir,
                      stdout=open(pjoin(this_dir, 'gensym.log'), 'w'),
                      close_fds=True)
        # compile madevent_mintMC/mintFO
        misc.compile([exe], cwd=this_dir, job_specs=False)
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs=False)

        logger.info('    %s done.' % p_dir)
        return 0
    except MadGraph5Error as msg:
        return msg
def gfortran_version_ok(version, minimal=(4, 6)):
    """Return True if the gfortran version string ``version`` (e.g. '4.9.2')
    is at least ``minimal``.

    The comparison is numeric, field by field, so that e.g. '10.2' is
    correctly newer than '4.6' (a plain string compare would say otherwise).
    Unparsable versions return False (treated as too old/unknown)."""
    try:
        parts = tuple(int(v) for v in version.split('.')[:len(minimal)])
    except ValueError:
        return False
    return parts >= minimal


def check_compiler(options, block=False):
    """check that the current fortran compiler is gfortran 4.6 or later.
    If block, stops the execution, otherwise just print a warning"""

    msg = 'In order to be able to run at NLO MadGraph5_aMC@NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected\n'+\
          'Note that You can still run all MadEvent run without any problem!'
    #first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        # BUGFIX: the previous check joined the version digits into a string
        # and compared lexicographically ("''.join(v.split('.')) >= '46'"),
        # which misclassified e.g. gfortran 10.2 ('102' < '46') as too old.
        if not gfortran_version_ok(curr_version):
            if block:
                raise aMCatNLOError(msg % (compiler + ' ' + curr_version))
            else:
                logger.warning(msg % (compiler + ' ' + curr_version))
#===============================================================================
# CmdExtended
#===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    #suggested list of command
    next_possibility = {
        'start': [],
    }

    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/mg5amcnlo\n'

    keyboard_stop_msg = """stopping all operation
            in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError

    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation"""

        # Tag allowing/forbiding question
        self.force = False

        # If possible, build an info line with current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        # dict.has_key is deprecated (removed in python3); use the 'in' test
        if info and 'version' in info and 'date' in info:
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                info_line = "#*         VERSION %s %s %s         *\n" % \
                            (info['version'],
                            (30 - len_version - len_date) * ' ',
                            info['date'])
        else:
            version = open(pjoin(root_path, 'MGMEVersion.txt')).readline().strip()
            info_line = "#*         VERSION %s %s                *\n" % \
                            (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in time at writeout time!
        self.history_header = \
        '#************************************************************\n' + \
        '#*                    MadGraph5_aMC@NLO                     *\n' + \
        '#*                                                          *\n' + \
        "#*                *                       *                 *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                    * * * * 5 * * * *                     *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                *                       *                 *\n" + \
        "#*                                                          *\n" + \
        "#*                                                          *\n" + \
        info_line + \
        "#*                                                          *\n" + \
        "#*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "#*    https://server06.fynu.ucl.ac.be/projects/madgraph     *\n" + \
        "#*                            and                           *\n" + \
        "#*                 http://amcatnlo.cern.ch                  *\n" + \
        '#*                                                          *\n' + \
        '#************************************************************\n' + \
        '#*                                                          *\n' + \
        '#*               Command File for aMCatNLO                  *\n' + \
        '#*                                                          *\n' + \
        '#*     run as ./bin/aMCatNLO.py filename                    *\n' + \
        '#*                                                          *\n' + \
        '#************************************************************\n'

        if info_line:
            info_line = info_line[1:]

        # NOTE(review): banner column alignment reconstructed — the original
        # whitespace was collapsed in the dump this was recovered from.
        logger.info(\
        "************************************************************\n" + \
        "*                                                          *\n" + \
        "*           W E L C O M E  to  M A D G R A P H 5           *\n" + \
        "*                      a M C @ N L O                       *\n" + \
        "*                                                          *\n" + \
        "*                 *                       *                *\n" + \
        "*                   *        * *        *                  *\n" + \
        "*                     * * * * 5 * * * *                    *\n" + \
        "*                   *        * *        *                  *\n" + \
        "*                 *                       *                *\n" + \
        "*                                                          *\n" + \
        info_line + \
        "*                                                          *\n" + \
        "*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "*                 http://amcatnlo.cern.ch                  *\n" + \
        "*                                                          *\n" + \
        "*               Type 'help' for in-line help.              *\n" + \
        "*                                                          *\n" + \
        "************************************************************")
        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)

    def get_history_header(self):
        """return the history header"""
        return self.history_header % misc.get_time_info()

    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interupt"""
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except Exception:
            # best-effort cleanup: never raise while already shutting down
            # (was a bare 'except:', narrowed so SystemExit is not swallowed)
            pass

    def postcmd(self, stop, line):
        """ Update the status of the run for finishing interactive command """

        # relaxing the tag forbidding question
        self.force = False

        if not self.use_rawinput:
            return stop

        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit', 'quit', 'EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            misc.sprint('self.update_status fails', log=logger)
            pass

    def nice_user_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)

    def nice_config_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)

    def nice_error_handling(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)
#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """ The Series of help routine for the aMCatNLOCmd"""

    def help_launch(self):
        """Display the usage of the launch command."""
        _launch_parser.print_help()

    def help_banner_run(self):
        """Display the usage of the banner_run command."""
        logger.info("syntax: banner_run Path|RUN [--run_options]")
        logger.info("-- Reproduce a run following a given banner")
        logger.info("   One of the following argument is require:")
        logger.info("   Path should be the path of a valid banner.")
        logger.info("   RUN should be the name of a run of the current directory")
        self.run_options_help([('-f', 'answer all question by default'),
                               ('--name=X', 'Define the name associated with the new run')])

    def help_compile(self):
        """Display the usage of the compile command."""
        _compile_parser.print_help()

    def help_generate_events(self):
        """Display the usage of the generate_events command
        (same options as launch)."""
        _generate_events_parser.print_help()

    def help_calculate_xsect(self):
        """Display the usage of the calculate_xsect command."""
        _calculate_xsect_parser.print_help()

    def help_shower(self):
        """Display the usage of the shower command."""
        _shower_parser.print_help()

    def help_open(self):
        """Display the usage of the open command."""
        logger.info("syntax: open FILE  ")
        logger.info("-- open a file with the appropriate editor.")
        logger.info('   If FILE belongs to index.html, param_card.dat, run_card.dat')
        logger.info('   the path to the last created/used directory is used')

    def run_options_help(self, data):
        """Print the local options listed in *data*, then the session options."""
        if data:
            logger.info('-- local options:')
            for opt_name, opt_descr in data:
                logger.info('      %s : %s' % (opt_name, opt_descr))

        logger.info("-- session options:")
        logger.info("      Note that those options will be kept for the current session")
        logger.info("      --cluster : Submit to the cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info("      --multicore : Run in multi-core configuration")
        logger.info("      --nb_core=X : limit the number of core to use to X.")
#===============================================================================
# CheckValidForCmd
#===============================================================================
class CheckValidForCmd(object):
    """ The Series of check routine for the aMCatNLOCmd"""

    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory"""

        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            raise self.InvalidCmd('Invalid syntax, please specify the run name')
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd('Directory %s does not exists' % \
                            pjoin(os.getcwd(), 'Events', args[0]))

        self.set_run_name(args[0], level='shower')
        args[0] = pjoin(self.me_dir, 'Events', args[0])

    def check_plot(self, args):
        """Check the argument for the plot command
        plot run_name modes"""

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir or not td:
            logger.info('Retry to read configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No Madanalysis path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No path to td directory correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name currently define. Please add this information.')
            args.append('all')
            return

        # first argument may be a run name rather than a plot mode
        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name currently define. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown options %s' % arg)

    def check_pgs(self, arg):
        """Check the argument for pythia command
        syntax: pgs [NAME]
        Note that other option are already remove at this point
        """

        # If not pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retry to read configuration file to find pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
            os.path.exists(pjoin(self.options['pythia-pgs_path'], 'src')):
            error_msg = 'No pythia-pgs path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))

            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s. '% (self.run_name, prev_tag))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                # decompress asynchronously; the returned lock lets the caller wait
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock

    def check_delphes(self, arg):
        """Check the argument for delphes command
        syntax: delphes [NAME]
        Note that other option are already remove at this point
        """

        # If no delphes path
        if not self.options['delphes_path']:
            logger.info('Retry to read configuration file to find delphes path')
            self.set_configuration()

        if not self.options['delphes_path']:
            error_msg = 'No delphes path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        # BUGFIX: 'lock' used to be assigned only in one branch and never
        # returned; initialise it and return it, as check_pgs does.
        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'delphes')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events'))

            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s '\
                    % (self.run_name, prev_tag,
                       pjoin(self.me_dir, 'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'delphes')

        return lock

    def check_calculate_xsect(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_calculate_xsect()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                # BUGFIX: this error path used args[1], which is out of range
                # when len(args) == 1
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.')

    def check_generate_events(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_generate_events()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                # BUGFIX: this error path used args[1], which is out of range
                # when len(args) == 1
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.')

    def check_banner_run(self, args):
        """check the validity of line"""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        tag = [a[6:] for a in args if a.startswith('--tag=')]

        if os.path.exists(args[0]):
            type = 'banner'
            format = self.detect_card_type(args[0])
            if format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            # BUGFIX: 'tag' is a list here; the first (only) entry must be
            # used when building the banner path, not the list itself.
            args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                            (args[0], tag[0]))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associates to this name and tag.')
        else:
            name = args[0]
            type = 'run'
            banners = misc.glob('*_banner.txt', pjoin(self.me_dir, 'Events', args[0]))
            if not banners:
                raise self.InvalidCmd('No banner associates to this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                #list the tag and propose those to the user
                tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                                (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            # BUGFIX: the list itself used to be formatted into the command,
            # producing "remove ['x'] ..." which always failed silently.
            run_name = run_name[0]
            try:
                self.exec_cmd('remove %s all banner -f' % run_name)
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' % \
                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    # BUGFIX: previously formatted the (empty) run_name list;
                    # the run to clean here is 'name'
                    self.exec_cmd('remove %s all banner -f' % name)
                except Exception:
                    pass
                self.set_run_name(name)

    def check_launch(self, args, options):
        """check the validity of the line. args is MODE
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'auto']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.')
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd('option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"')

    def check_compile(self, args, options):
        """check the validity of the line. args is MODE
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "FO" or "MC"' % args[0])
            mode = args[0]
#===============================================================================
# CompleteForCmd
#===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """ The Series of help routine for the MadGraphCmd"""

    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text, ['LO', 'NLO', 'aMC@NLO', 'aMC@LO'], line)
        elif len(args) == 2 and line[begidx-1] == '@':
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_banner_run(self, text, line, begidx, endidx, formatting=True):
        "Complete the banner run command"
        try:
            args = self.split_arg(line[0:begidx], error=False)

            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                            os.path.join('.', *[a for a in args \
                                                if a.endswith(os.path.sep)]))

            if len(args) > 1:
                # only options are possible
                tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events', args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1])+1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags + ['--name=', '-f'], line)

            # First argument
            possibilites = {}

            comp = self.path_completion(text, os.path.join('.', *[a for a in args \
                                                if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilites['Path from ./'] = comp

            run_list = misc.glob(pjoin('*', '*_banner.txt'), pjoin(self.me_dir, 'Events'))
            run_list = [n.rsplit('/', 2)[1] for n in run_list]
            possibilites['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilites, formatting)

        except Exception as error:
            # completion must never raise back into readline
            print(error)

    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text, ['FO', 'MC'], line)
        else:
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_generate_events(self, text, line, begidx, endidx):
        """auto-completion for generate_events command
        just call the completion for launch"""
        # BUGFIX: the completion list was computed but never returned
        return self.complete_launch(text, line, begidx, endidx)

    def complete_shower(self, text, line, begidx, endidx):
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return valid run_name
            # BUGFIX: the search directory was accidentally nested inside the
            # pattern pjoin(); it must be the second argument of misc.glob
            data = misc.glob(pjoin('*', 'events.lhe.gz'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

    def complete_plot(self, text, line, begidx, endidx):
        """ Complete the plot command """

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            #return valid run_name
            # BUGFIX: same misplaced parenthesis as complete_shower
            data = misc.glob(pjoin('*', 'events.lhe*'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)

    def complete_pgs(self, text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            #return valid run_name
            data = misc.glob(pjoin('*', 'events_*.hep.gz'),
                             pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                            '--tag=', '--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                        '--tag=', '--no_default'], line)

    complete_delphes = complete_pgs
class aMCatNLOAlreadyRunning(InvalidCmd):
    """InvalidCmd variant signalling that an aMC@NLO run is already in
    progress (judging by the name; raised/handled outside this view —
    confirm against the callers)."""
    pass
910 -class AskRunNLO(cmd.ControlSwitch):
911 912 to_control = [('order', 'Type of perturbative computation'), 913 ('fixed_order', 'No MC@[N]LO matching / event generation'), 914 ('shower', 'Shower the generated events'), 915 ('madspin', 'Decay onshell particles'), 916 ('reweight', 'Add weights to events for new hypp.'), 917 ('madanalysis','Run MadAnalysis5 on the events generated')] 918 919 quit_on = cmd.ControlSwitch.quit_on + ['onlyshower'] 920
921 - def __init__(self, question, line_args=[], mode=None, force=False, 922 *args, **opt):
923 924 self.me_dir = opt['mother_interface'].me_dir 925 self.check_available_module(opt['mother_interface'].options) 926 self.last_mode = opt['mother_interface'].last_mode 927 self.proc_characteristics = opt['mother_interface'].proc_characteristics 928 self.run_card = banner_mod.RunCard(pjoin(self.me_dir,'Cards', 'run_card.dat'), 929 consistency='warning') 930 super(AskRunNLO,self).__init__(self.to_control, opt['mother_interface'], 931 *args, **opt)
932 933 @property
934 - def answer(self):
935 936 out = super(AskRunNLO, self).answer 937 if out['shower'] == 'HERWIG7': 938 out['shower'] = 'HERWIGPP' 939 940 if out['shower'] not in self.get_allowed('shower') or out['shower'] =='OFF': 941 out['runshower'] = False 942 else: 943 out['runshower'] = True 944 return out
945 946
947 - def check_available_module(self, options):
948 949 self.available_module = set() 950 if options['madanalysis5_path']: 951 self.available_module.add('MA5') 952 if not aMCatNLO or ('mg5_path' in options and options['mg5_path']): 953 954 self.available_module.add('MadSpin') 955 if misc.has_f2py() or options['f2py_compiler']: 956 self.available_module.add('reweight') 957 if options['pythia8_path']: 958 self.available_module.add('PY8') 959 if options['hwpp_path'] and options['thepeg_path'] and options['hepmc_path']: 960 self.available_module.add('HW7') 961 962 MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib') 963 if os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))): 964 self.available_module.add('StdHEP')
965 # 966 # shorcut 967 #
968 - def ans_lo(self, value):
969 """ function called if the user type lo=value. or lo (then value is None)""" 970 971 if value is None: 972 self.switch['order'] = 'LO' 973 self.switch['fixed_order'] = 'ON' 974 self.set_switch('shower', 'OFF') 975 else: 976 logger.warning('Invalid command: lo=%s' % value)
977
978 - def ans_nlo(self, value):
979 if value is None: 980 self.switch['order'] = 'NLO' 981 self.switch['fixed_order'] = 'ON' 982 self.set_switch('shower', 'OFF') 983 else: 984 logger.warning('Invalid command: nlo=%s' % value)
985
986 - def ans_amc__at__nlo(self, value):
987 if value is None: 988 self.switch['order'] = 'NLO' 989 self.switch['fixed_order'] = 'OFF' 990 self.set_switch('shower', 'ON') 991 else: 992 logger.warning('Invalid command: aMC@NLO=%s' % value)
993
994 - def ans_amc__at__lo(self, value):
995 if value is None: 996 self.switch['order'] = 'LO' 997 self.switch['fixed_order'] = 'OFF' 998 self.set_switch('shower', 'ON') 999 else: 1000 logger.warning('Invalid command: aMC@LO=%s' % value)
1001
1002 - def ans_noshower(self, value):
1003 if value is None: 1004 self.switch['order'] = 'NLO' 1005 self.switch['fixed_order'] = 'OFF' 1006 self.set_switch('shower', 'OFF') 1007 else: 1008 logger.warning('Invalid command: noshower=%s' % value)
1009
1010 - def ans_onlyshower(self, value):
1011 if value is None: 1012 self.switch['mode'] = 'onlyshower' 1013 self.switch['madspin'] = 'OFF' 1014 self.switch['reweight'] = 'OFF' 1015 else: 1016 logger.warning('Invalid command: onlyshower=%s' % value)
1017
1018 - def ans_noshowerlo(self, value):
1019 if value is None: 1020 self.switch['order'] = 'LO' 1021 self.switch['fixed_order'] = 'OFF' 1022 self.set_switch('shower', 'OFF') 1023 else: 1024 logger.warning('Invalid command: noshowerlo=%s' % value)
1025
1026 - def ans_madanalysis5(self, value):
1027 """ shortcut madanalysis5 -> madanalysis """ 1028 1029 if value is None: 1030 return self.onecmd('madanalysis') 1031 else: 1032 self.set_switch('madanalysis', value)
1033 # 1034 # ORDER 1035 #
1036 - def get_allowed_order(self):
1037 return ["LO", "NLO"]
1038
1039 - def set_default_order(self):
1040 1041 if self.last_mode in ['LO', 'aMC@L0', 'noshowerLO']: 1042 self.switch['order'] = 'LO' 1043 self.switch['order'] = 'NLO'
1044
    def set_switch_off_order(self):
        """Turning 'order' OFF is not meaningful: intentionally a no-op,
        so the switch keeps its current value."""
        return
1047 # 1048 # Fix order 1049 #
1050 - def get_allowed_fixed_order(self):
1051 """ """ 1052 if self.proc_characteristics['ninitial'] == 1: 1053 return ['ON'] 1054 else: 1055 return ['ON', 'OFF']
1056
1057 - def set_default_fixed_order(self):
1058 1059 if self.last_mode in ['LO', 'NLO']: 1060 self.switch['fixed_order'] = 'ON' 1061 if self.proc_characteristics['ninitial'] == 1: 1062 self.switch['fixed_order'] = 'ON' 1063 else: 1064 self.switch['fixed_order'] = 'OFF' 1065
1066 - def color_for_fixed_order(self, switch_value):
1067 1068 if switch_value in ['OFF']: 1069 return self.green % switch_value 1070 else: 1071 return self.red % switch_value
1072
1073 - def color_for_shower(self, switch_value):
1074 1075 if switch_value in ['ON']: 1076 return self.green % switch_value 1077 elif switch_value in self.get_allowed('shower'): 1078 return self.green % switch_value 1079 else: 1080 return self.red % switch_value
1081
1082 - def consistency_fixed_order_shower(self, vfix, vshower):
1083 """ consistency_XX_YY(val_XX, val_YY) 1084 -> XX is the new key set by the user to a new value val_XX 1085 -> YY is another key set by the user. 1086 -> return value should be None or "replace_YY" 1087 """ 1088 1089 if vfix == 'ON' and vshower != 'OFF' : 1090 return 'OFF' 1091 return None
1092 1093 consistency_fixed_order_madspin = consistency_fixed_order_shower 1094 consistency_fixed_order_reweight = consistency_fixed_order_shower 1095
1096 - def consistency_fixed_order_madanalysis(self, vfix, vma5):
1097 1098 if vfix == 'ON' and vma5 == 'ON' : 1099 return 'OFF' 1100 return None
1101 1102
1103 - def consistency_shower_fixed_order(self, vshower, vfix):
1104 """ consistency_XX_YY(val_XX, val_YY) 1105 -> XX is the new key set by the user to a new value val_XX 1106 -> YY is another key set by the user. 1107 -> return value should be None or "replace_YY" 1108 """ 1109 1110 if vshower != 'OFF' and vfix == 'ON': 1111 return 'OFF' 1112 return None
1113 1114 consistency_madspin_fixed_order = consistency_shower_fixed_order 1115 consistency_reweight_fixed_order = consistency_shower_fixed_order 1116 consistency_madanalysis_fixed_order = consistency_shower_fixed_order 1117 1118 1119 # 1120 # Shower 1121 #
1122 - def get_allowed_shower(self):
1123 """ """ 1124 1125 if hasattr(self, 'allowed_shower'): 1126 return self.allowed_shower 1127 1128 if not misc.which('bc'): 1129 return ['OFF'] 1130 1131 if self.proc_characteristics['ninitial'] == 1: 1132 self.allowed_shower = ['OFF'] 1133 return ['OFF'] 1134 else: 1135 if 'StdHEP' in self.available_module: 1136 allowed = ['HERWIG6','OFF', 'PYTHIA6Q', 'PYTHIA6PT', ] 1137 else: 1138 allowed = ['OFF'] 1139 if 'PY8' in self.available_module: 1140 allowed.append('PYTHIA8') 1141 if 'HW7' in self.available_module: 1142 allowed.append('HERWIGPP') 1143 1144 1145 self.allowed_shower = allowed 1146 1147 return allowed
1148
1149 - def check_value_shower(self, value):
1150 """ """ 1151 1152 if value.upper() in self.get_allowed_shower(): 1153 return True 1154 if value.upper() in ['PYTHIA8', 'HERWIGPP']: 1155 return True 1156 if value.upper() == 'ON': 1157 return self.run_card['parton_shower'] 1158 if value.upper() in ['P8','PY8','PYTHIA_8']: 1159 return 'PYTHIA8' 1160 if value.upper() in ['PY6','P6','PY6PT', 'PYTHIA_6', 'PYTHIA_6PT','PYTHIA6PT','PYTHIA6_PT']: 1161 return 'PYTHIA6PT' 1162 if value.upper() in ['PY6Q', 'PYTHIA_6Q','PYTHIA6Q', 'PYTHIA6_Q']: 1163 return 'PYTHIA6Q' 1164 if value.upper() in ['HW7', 'HERWIG7']: 1165 return 'HERWIG7' 1166 if value.upper() in ['HW++', 'HWPP', 'HERWIG++']: 1167 return 'HERWIGPP' 1168 if value.upper() in ['HW6', 'HERWIG_6']: 1169 return 'HERWIG6'
1170
    def set_default_shower(self):
        """Initialise the 'shower' switch; may also force 'fixed_order' OFF
        when a shower_card is present."""

        # previous run was fixed-order or explicitly shower-less
        if self.last_mode in ['LO', 'NLO', 'noshower', 'noshowerLO']:
            self.switch['shower'] = 'OFF'
            return

        # decay process: no shower possible
        if self.proc_characteristics['ninitial'] == 1:
            self.switch['shower'] = 'OFF'
            return

        # the shower scripts rely on the external 'bc' tool
        if not misc.which('bc'):
            logger.warning('bc command not available. Forbids to run the shower. please install it if you want to run the shower. (sudo apt-get install bc)')
            self.switch['shower'] = 'OFF'
            return

        if os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            # default to the shower configured in the run_card
            self.switch['shower'] = self.run_card['parton_shower']
            #self.switch['shower'] = 'ON'
            # a shower implies event generation, i.e. no fixed-order run
            self.switch['fixed_order'] = "OFF"
        else:
            self.switch['shower'] = 'OFF'
1193 - def consistency_shower_madanalysis(self, vshower, vma5):
1194 """ MA5 only possible with (N)LO+PS if shower is run""" 1195 1196 if vshower == 'OFF' and vma5 == 'ON': 1197 return 'OFF' 1198 return None
1199
1200 - def consistency_madanalysis_shower(self, vma5, vshower):
1201 1202 if vma5=='ON' and vshower == 'OFF': 1203 return 'ON' 1204 return None
1205
1206 - def get_cardcmd_for_shower(self, value):
1207 """ adpat run_card according to this setup. return list of cmd to run""" 1208 1209 if value != 'OFF': 1210 return ['set parton_shower %s' % self.switch['shower']] 1211 return []
1212 1213 # 1214 # madspin 1215 #
1216 - def get_allowed_madspin(self):
1217 """ """ 1218 1219 if hasattr(self, 'allowed_madspin'): 1220 return self.allowed_madspin 1221 1222 self.allowed_madspin = [] 1223 1224 1225 if 'MadSpin' not in self.available_module: 1226 return self.allowed_madspin 1227 if self.proc_characteristics['ninitial'] == 1: 1228 self.available_module.remove('MadSpin') 1229 self.allowed_madspin = ['OFF'] 1230 return self.allowed_madspin 1231 else: 1232 self.allowed_madspin = ['OFF', 'ON', 'onshell'] 1233 return self.allowed_madspin
1234
1235 - def check_value_madspin(self, value):
1236 """handle alias and valid option not present in get_allowed_madspin 1237 remember that this mode should always be OFF for 1>N. (ON not in allowed value)""" 1238 1239 if value.upper() in self.get_allowed_madspin(): 1240 if value == value.upper(): 1241 return True 1242 else: 1243 return value.upper() 1244 elif value.lower() in self.get_allowed_madspin(): 1245 if value == value.lower(): 1246 return True 1247 else: 1248 return value.lower() 1249 1250 if 'MadSpin' not in self.available_module or \ 1251 'ON' not in self.get_allowed_madspin(): 1252 return False 1253 1254 if value.lower() in ['madspin', 'full']: 1255 return 'full' 1256 elif value.lower() in ['none']: 1257 return 'none'
1258
1259 - def set_default_madspin(self):
1260 1261 if 'MadSpin' in self.available_module: 1262 if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): 1263 self.switch['madspin'] = 'ON' 1264 else: 1265 self.switch['madspin'] = 'OFF' 1266 else: 1267 self.switch['madspin'] = 'Not Avail.'
1268
1269 - def get_cardcmd_for_madspin(self, value):
1270 """set some command to run before allowing the user to modify the cards.""" 1271 1272 if value == 'onshell': 1273 return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"] 1274 elif value in ['full', 'madspin']: 1275 return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode madspin"] 1276 elif value == 'none': 1277 return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"] 1278 else: 1279 return []
1280 1281 # 1282 # reweight 1283 #
1284 - def get_allowed_reweight(self):
1285 """set the valid (visible) options for reweight""" 1286 1287 if hasattr(self, 'allowed_reweight'): 1288 return getattr(self, 'allowed_reweight') 1289 1290 self.allowed_reweight = [] 1291 if 'reweight' not in self.available_module: 1292 return self.allowed_reweight 1293 if self.proc_characteristics['ninitial'] == 1: 1294 self.available_module.remove('reweight') 1295 self.allowed_reweight.append('OFF') 1296 return self.allowed_reweight 1297 else: 1298 self.allowed_reweight = [ 'OFF', 'ON', 'NLO', 'NLO_TREE','LO'] 1299 return self.allowed_reweight
1300
1301 - def set_default_reweight(self):
1302 """initialise the switch for reweight""" 1303 1304 if 'reweight' in self.available_module: 1305 if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')): 1306 self.switch['reweight'] = 'ON' 1307 else: 1308 self.switch['reweight'] = 'OFF' 1309 else: 1310 self.switch['reweight'] = 'Not Avail.'
1311
1312 - def get_cardcmd_for_reweight(self, value):
1313 """ adpat run_card according to this setup. return list of cmd to run""" 1314 1315 if value == 'LO': 1316 return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode LO"] 1317 elif value == 'NLO': 1318 return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO", 1319 "set store_rwgt_info T"] 1320 elif value == 'NLO_TREE': 1321 return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO_tree", 1322 "set store_rwgt_info T"] 1323 return []
1324 1325 # 1326 # MadAnalysis5 1327 #
1328 - def get_allowed_madanalysis(self):
1329 1330 if hasattr(self, 'allowed_madanalysis'): 1331 return self.allowed_madanalysis 1332 1333 self.allowed_madanalysis = [] 1334 1335 1336 if 'MA5' not in self.available_module: 1337 return self.allowed_madanalysis 1338 1339 if self.proc_characteristics['ninitial'] == 1: 1340 self.available_module.remove('MA5') 1341 self.allowed_madanalysis = ['OFF'] 1342 return self.allowed_madanalysis 1343 else: 1344 self.allowed_madanalysis = ['OFF', 'ON'] 1345 return self.allowed_madanalysis
1346
1347 - def set_default_madanalysis(self):
1348 """initialise the switch for reweight""" 1349 1350 if 'MA5' not in self.available_module: 1351 self.switch['madanalysis'] = 'Not Avail.' 1352 elif os.path.exists(pjoin(self.me_dir,'Cards', 'madanalysis5_hadron_card.dat')): 1353 self.switch['madanalysis'] = 'ON' 1354 else: 1355 self.switch['madanalysis'] = 'OFF'
1356
1357 - def check_value_madanalysis(self, value):
1358 """check an entry is valid. return the valid entry in case of shortcut""" 1359 1360 if value.upper() in self.get_allowed('madanalysis'): 1361 return True 1362 value = value.lower() 1363 if value == 'hadron': 1364 return 'ON' if 'ON' in self.get_allowed_madanalysis5 else False 1365 else: 1366 return False
1367
#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
    """The command line processor of MadGraph"""

    # Truth values
    true = ['T','.true.',True,'true']
    # Options and formats available
    _run_options = ['--cluster','--multicore','--nb_core=','--nb_core=2', '-c', '-m']
    _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes']
    _calculate_decay_options = ['-f', '--accuracy=0.']
    _set_options = ['stdout_level','fortran_compiler','cpp_compiler','timeout']
    _plot_mode = ['all', 'parton','shower','pgs','delphes']
    _clean_mode = _plot_mode + ['channel', 'banner']
    _display_opts = ['run_name', 'options', 'variable']
    # survey options, dict from name to type, default value, and help text
    # Variables to store object information
    web = False
    # 0: single machine, 1: cluster, 2: multicore (see do_launch)
    cluster_mode = 0
    queue = 'madgraph'
    nb_core = None
    make_opts_var = {}

    # suggestions displayed to the user after each command
    next_possibility = {
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
                  'help generate_events'],
        'generate_events': ['generate_events [OPTIONS]', 'shower'],
        'launch': ['launch [OPTIONS]', 'shower'],
        'shower' : ['generate_events [OPTIONS]']
    }


    ############################################################################
    def __init__(self, me_dir = None, options = {}, *completekey, **stdin):
        """ add information to the cmd

        NOTE(review): 'options = {}' is a mutable default argument; confirm
        it is never mutated before relying on it.
        """

        self.start_time = 0
        CmdExtended.__init__(self, me_dir, options, *completekey, **stdin)
        #common_run.CommonRunCmd.__init__(self, me_dir, options)

        self.mode = 'aMCatNLO'
        self.nb_core = 0
        # prompt shows the process-directory name
        self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir))


        self.load_results_db()
        self.results.def_web_mode(self.web)
        # check that compiler is gfortran 4.6 or later if virtuals have been exported
        # NOTE(review): file handle is not closed explicitly (relies on GC)
        proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read()

        if not '[real=QCD]' in proc_card:
            check_compiler(self.options, block=True)
1422 1423 1424 ############################################################################
1425 - def do_shower(self, line):
1426 """ run the shower on a given parton level file """ 1427 argss = self.split_arg(line) 1428 (options, argss) = _launch_parser.parse_args(argss) 1429 # check argument validity and normalise argument 1430 options = options.__dict__ 1431 options['reweightonly'] = False 1432 self.check_shower(argss, options) 1433 evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe') 1434 self.ask_run_configuration('onlyshower', options) 1435 self.run_mcatnlo(evt_file, options) 1436 1437 self.update_status('', level='all', update_results=True)
1438 1439 ################################################################################
1440 - def do_plot(self, line):
1441 """Create the plot for a given run""" 1442 1443 # Since in principle, all plot are already done automaticaly 1444 args = self.split_arg(line) 1445 # Check argument's validity 1446 self.check_plot(args) 1447 logger.info('plot for run %s' % self.run_name) 1448 1449 if not self.force: 1450 self.ask_edit_cards([], args, plot=True) 1451 1452 if any([arg in ['parton'] for arg in args]): 1453 filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe') 1454 if os.path.exists(filename+'.gz'): 1455 misc.gunzip(filename) 1456 if os.path.exists(filename): 1457 logger.info('Found events.lhe file for run %s' % self.run_name) 1458 shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe')) 1459 self.create_plot('parton') 1460 shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename) 1461 misc.gzip(filename) 1462 1463 if any([arg in ['all','parton'] for arg in args]): 1464 filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top') 1465 if os.path.exists(filename): 1466 logger.info('Found MADatNLO.top file for run %s' % \ 1467 self.run_name) 1468 output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') 1469 plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton') 1470 1471 if not os.path.isdir(plot_dir): 1472 os.makedirs(plot_dir) 1473 top_file = pjoin(plot_dir, 'plots.top') 1474 files.cp(filename, top_file) 1475 madir = self.options['madanalysis_path'] 1476 tag = self.run_card['run_tag'] 1477 td = self.options['td_path'] 1478 misc.call(['%s/plot' % self.dirbin, madir, td], 1479 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1480 stderr = subprocess.STDOUT, 1481 cwd=plot_dir) 1482 1483 misc.call(['%s/plot_page-pl' % self.dirbin, 1484 os.path.basename(plot_dir), 1485 'parton'], 1486 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1487 stderr = subprocess.STDOUT, 1488 cwd=pjoin(self.me_dir, 'HTML', self.run_name)) 1489 shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'), 1490 
output) 1491 1492 os.remove(pjoin(self.me_dir, 'Events', 'plots.top')) 1493 1494 if any([arg in ['all','shower'] for arg in args]): 1495 filenames = misc.glob('events_*.lhe.gz', pjoin(self.me_dir, 'Events', self.run_name)) 1496 if len(filenames) != 1: 1497 filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name)) 1498 if len(filenames) != 1: 1499 logger.info('No shower level file found for run %s' % \ 1500 self.run_name) 1501 return 1502 filename = filenames[0] 1503 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1504 1505 if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')): 1506 if aMCatNLO and not self.options['mg5_path']: 1507 raise "plotting NLO HEP file needs MG5 utilities" 1508 1509 files.cp(pjoin(self.options['mg5_path'], 'Template','LO', 'Cards', 'pythia_card_default.dat'), 1510 pjoin(self.me_dir, 'Cards', 'pythia_card.dat')) 1511 self.run_hep2lhe() 1512 else: 1513 filename = filenames[0] 1514 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1515 1516 self.create_plot('shower') 1517 lhe_file_name = filename.replace('.hep.gz', '.lhe') 1518 shutil.move(pjoin(self.me_dir, 'Events','pythia_events.lhe'), 1519 lhe_file_name) 1520 misc.gzip(lhe_file_name) 1521 1522 if any([arg in ['all','pgs'] for arg in args]): 1523 filename = pjoin(self.me_dir, 'Events', self.run_name, 1524 '%s_pgs_events.lhco' % self.run_tag) 1525 if os.path.exists(filename+'.gz'): 1526 misc.gunzip(filename) 1527 if os.path.exists(filename): 1528 self.create_plot('PGS') 1529 misc.gzip(filename) 1530 else: 1531 logger.info('No valid files for pgs plot') 1532 1533 if any([arg in ['all','delphes'] for arg in args]): 1534 filename = pjoin(self.me_dir, 'Events', self.run_name, 1535 '%s_delphes_events.lhco' % self.run_tag) 1536 if os.path.exists(filename+'.gz'): 1537 misc.gunzip(filename) 1538 if os.path.exists(filename): 1539 #shutil.move(filename, pjoin(self.me_dir, 
'Events','delphes_events.lhco')) 1540 self.create_plot('Delphes') 1541 #shutil.move(pjoin(self.me_dir, 'Events','delphes_events.lhco'), filename) 1542 misc.gzip(filename) 1543 else: 1544 logger.info('No valid files for delphes plot')
1545 1546 1547 ############################################################################
1548 - def do_calculate_xsect(self, line):
1549 """Main commands: calculates LO/NLO cross-section, using madevent_mintFO 1550 this function wraps the do_launch one""" 1551 1552 self.start_time = time.time() 1553 argss = self.split_arg(line) 1554 # check argument validity and normalise argument 1555 (options, argss) = _calculate_xsect_parser.parse_args(argss) 1556 options = options.__dict__ 1557 options['reweightonly'] = False 1558 options['parton'] = True 1559 self.check_calculate_xsect(argss, options) 1560 self.do_launch(line, options, argss)
1561 1562 ############################################################################
    def do_banner_run(self, line):
        """Make a run from the banner file: split the banner back into
        cards, optionally let the user edit them, then call do_launch with
        the run mode extracted from the banner."""

        args = self.split_arg(line)
        #check the validity of the arguments
        self.check_banner_run(args)

        # Remove previous cards
        for name in ['shower_card.dat', 'madspin_card.dat']:
            try:
                os.remove(pjoin(self.me_dir, 'Cards', name))
            except Exception:
                pass

        banner_mod.split_banner(args[0], self.me_dir, proc_card=False)

        # Check if we want to modify the run
        if not self.force:
            ans = self.ask('Do you want to modify the Cards/Run Type?', 'n', ['y','n'])
            if ans == 'n':
                self.force = True

        # Compute run mode:
        if self.force:
            mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin':False, 'shower':True}
            banner = banner_mod.Banner(args[0])
            # NOTE(review): the loop variable shadows the 'line' parameter,
            # which is not used afterwards, so this is harmless here
            for line in banner['run_settings']:
                if '=' in line:
                    mode, value = [t.strip() for t in line.split('=')]
                    mode_status[mode] = value
        else:
            # empty switch: do_launch will ask the user interactively
            mode_status = {}

        # Call Generate events
        self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''),
                       switch=mode_status)
1599 1600 ############################################################################
    def do_generate_events(self, line):
        """Main commands: generate events
        this function just wraps the do_launch one"""
        # delegate; the return value is deliberately not propagated (the
        # cmd framework treats a truthy do_* return as a stop flag)
        self.do_launch(line)
1605 1606 1607 ############################################################################
    def do_treatcards(self, line, amcatnlo=True,mode=''):
        """Advanced commands: this is for creating the correct run_card.inc from the nlo format"""
        #check if no 'Auto' are present in the file
        self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat'))

        # propagate the FO_card entry FO_LHE_weight_ratio to the run_card.
        # this variable is system only in the run_card
        # can not be done in EditCard since this parameter is not written in the
        # run_card directly.
        if mode in ['LO', 'NLO']:
            name = 'fo_lhe_weight_ratio'
            FO_card = analyse_card.FOAnalyseCard(pjoin(self.me_dir,'Cards', 'FO_analyse_card.dat'))
            if name in FO_card:
                # user=False: mark the value as system-set, not user-set
                self.run_card.set(name, FO_card[name], user=False)
            name = 'fo_lhe_postprocessing'
            if name in FO_card:
                self.run_card.set(name, FO_card[name], user=False)

        return super(aMCatNLOCmd,self).do_treatcards(line, amcatnlo)
1627 1628 ############################################################################
    def set_configuration(self, amcatnlo=True, **opt):
        """Assign all configuration variables from file.
        Loop over the different config files if config_file is not defined;
        simply delegates to the parent class with amcatnlo=True by default."""
        return super(aMCatNLOCmd,self).set_configuration(amcatnlo=amcatnlo, **opt)
1633 1634 ############################################################################
    def do_launch(self, line, options={}, argss=[], switch={}):
        """Main commands: launch the full chain
        options and args are relevant if the function is called from other
        functions, such as generate_events or calculate_xsect
        mode gives the list of switch needed for the computation (usefull for banner_run)
        """

        # parse 'line' only when called directly (not from a wrapper that
        # already supplies options/argss)
        if not argss and not options:
            self.start_time = time.time()
            argss = self.split_arg(line)
            # check argument validity and normalise argument
            (options, argss) = _launch_parser.parse_args(argss)
            options = options.__dict__
            self.check_launch(argss, options)


        if 'run_name' in options.keys() and options['run_name']:
            self.run_name = options['run_name']
            # if a dir with the given run_name already exists
            # remove it and warn the user
            if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)):
                logger.warning('Removing old run information in \n'+
                                pjoin(self.me_dir, 'Events', self.run_name))
                files.rm(pjoin(self.me_dir, 'Events', self.run_name))
                self.results.delete_run(self.run_name)
        else:
            self.run_name = '' # will be set later

        # cluster_mode: 0 single machine, 2 multicore, 1 cluster
        if options['multicore']:
            self.cluster_mode = 2
        elif options['cluster']:
            self.cluster_mode = 1

        # determine the run mode, asking the user if needed
        if not switch:
            mode = argss[0]

            if mode in ['LO', 'NLO']:
                options['parton'] = True
            mode = self.ask_run_configuration(mode, options)
        else:
            mode = self.ask_run_configuration('auto', options, switch)

        self.results.add_detail('run_mode', mode)

        self.update_status('Starting run', level=None, update_results=True)

        if self.options['automatic_html_opening']:
            misc.open_file(os.path.join(self.me_dir, 'crossx.html'))
            self.options['automatic_html_opening'] = False

        # strip any '+...' suffix from the mode before compiling/running
        if '+' in mode:
            mode = mode.split('+')[0]
        self.compile(mode, options)
        evt_file = self.run(mode, options)

        if self.run_card['nevents'] == 0 and not mode in ['LO', 'NLO']:
            logger.info('No event file generated: grids have been set-up with a '\
                        'relative precision of %s' % self.run_card['req_acc'])
            return

        # event-generation modes: run systematics/reweight/madspin on the
        # produced Les Houches file
        if not mode in ['LO', 'NLO']:
            assert evt_file == pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'), '%s != %s' %(evt_file, pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz'))

            if self.run_card['systematics_program'] == 'systematics':
                self.exec_cmd('systematics %s %s ' % (self.run_name, ' '.join(self.run_card['systematics_arguments'])))

            self.exec_cmd('reweight -from_cards', postcmd=False)
            self.exec_cmd('decay_events -from_cards', postcmd=False)
            evt_file = pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')

        if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \
           and not options['parton']:
            self.run_mcatnlo(evt_file, options)
            self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False)

        elif mode == 'noshower':
            logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical.
Please, shower the Les Houches events before using them for physics analyses.""")


        self.update_status('', level='all', update_results=True)
        # ickkw == 3: FxFx merging — the user must merge samples by hand
        if self.run_card['ickkw'] == 3 and \
           (mode in ['noshower'] or \
            (('PYTHIA8' not in self.run_card['parton_shower'].upper()) and (mode in ['aMC@NLO']))):
            logger.warning("""You are running with FxFx merging enabled.
To be able to merge samples of various multiplicities without double counting,
you have to remove some events after showering 'by hand'.
Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")

        self.store_result()
        #check if the param_card defines a scan.
        if self.param_card_iterator:
            cpath = pjoin(self.me_dir,'Cards','param_card.dat')
            param_card_iterator = self.param_card_iterator
            self.param_card_iterator = [] #avoid to next generate go trough here
            param_card_iterator.store_entry(self.run_name, self.results.current['cross'],
                                            error=self.results.current['error'],
                                            param_card_path=cpath)
            orig_name = self.run_name
            #go trough the scal
            with misc.TMP_variable(self, 'allow_notification_center', False):
                # re-launch once per scan point, recursively
                for i,card in enumerate(param_card_iterator):
                    card.write(cpath)
                    self.check_param_card(cpath, dependent=True)
                    if not options['force']:
                        options['force'] = True
                    if options['run_name']:
                        options['run_name'] = '%s_%s' % (orig_name, i+1)
                    if not argss:
                        argss = [mode, "-f"]
                    elif argss[0] == "auto":
                        argss[0] = mode
                    self.do_launch("", options=options, argss=argss, switch=switch)
                    #self.exec_cmd("launch -f ",precmd=True, postcmd=True,errorhandling=False)
                    param_card_iterator.store_entry(self.run_name, self.results.current['cross'],
                                                    error=self.results.current['error'],
                                                    param_card_path=cpath)
            #restore original param_card
            param_card_iterator.write(pjoin(self.me_dir,'Cards','param_card.dat'))
            name = misc.get_scan_name(orig_name, self.run_name)
            path = pjoin(self.me_dir, 'Events','scan_%s.txt' % name)
            logger.info("write all cross-section results in %s" % path, '$MG:BOLD')
            param_card_iterator.write_summary(path)

        if self.allow_notification_center:
            misc.apple_notify('Run %s finished' % os.path.basename(self.me_dir),
                              '%s: %s +- %s ' % (self.results.current['run_name'],
                                                 self.results.current['cross'],
                                                 self.results.current['error']))
1764 1765 1766 ############################################################################
1767 - def do_compile(self, line):
1768 """Advanced commands: just compile the executables """ 1769 argss = self.split_arg(line) 1770 # check argument validity and normalise argument 1771 (options, argss) = _compile_parser.parse_args(argss) 1772 options = options.__dict__ 1773 options['reweightonly'] = False 1774 options['nocompile'] = False 1775 self.check_compile(argss, options) 1776 1777 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1778 self.ask_run_configuration(mode, options) 1779 self.compile(mode, options) 1780 1781 1782 self.update_status('', level='all', update_results=True)
1783 1784
1785 - def update_random_seed(self):
1786 """Update random number seed with the value from the run_card. 1787 If this is 0, update the number according to a fresh one""" 1788 iseed = self.run_card['iseed'] 1789 if iseed == 0: 1790 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1791 iseed = int(randinit.read()[2:]) + 1 1792 randinit.close() 1793 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1794 randinit.write('r=%d' % iseed) 1795 randinit.close()
1796 1797
def run(self, mode, options):
    """runs aMC@NLO. Returns the name of the event file created.

    mode: one of 'LO'/'NLO' (fixed order) or
        'aMC@NLO'/'aMC@LO'/'noshower'/'noshowerLO' (event generation).
    options: dict of run options; 'only_generation' is added here if
        missing, and 'reweightonly' short-circuits to event collection.

    Fixed order: iterate integration steps until the required accuracy
    is reached, then finalise and return None. Event generation: run
    the three MINT steps (grid setup, upper envelope, event generation)
    and return the result of reweight_and_collect_events.
    """
    logger.info('Starting run')

    if not 'only_generation' in options.keys():
        options['only_generation'] = False

    # for second step in applgrid mode, do only the event generation step
    if mode in ['LO', 'NLO'] and self.run_card['iappl'] == 2 and not options['only_generation']:
        options['only_generation'] = True
    self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics'))
    self.setup_cluster_or_multicore()
    self.update_random_seed()
    # find and keep track of all the jobs: glob patterns of the G*
    # working directories used for each run mode
    folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'],
                    'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']}
    folder_names['noshower'] = folder_names['aMC@NLO']
    folder_names['noshowerLO'] = folder_names['aMC@LO']
    # list of P* subprocess directories, one per non-empty line of subproc.mg
    p_dirs = [d for d in \
              open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d]
    # Clean previous results
    self.clean_previous_results(options, p_dirs, folder_names[mode])

    # status messages for the three MINT steps (index == mint_step)
    mcatnlo_status = ['Setting up grids', 'Computing upper envelope', 'Generating events']

    if options['reweightonly']:
        # skip the integration entirely: only reweight/collect existing events
        event_norm = self.run_card['event_norm']
        nevents = self.run_card['nevents']
        return self.reweight_and_collect_events(options, mode, nevents, event_norm)

    if mode in ['LO', 'NLO']:
        # this is for fixed order runs
        mode_dict = {'NLO': 'all', 'LO': 'born'}
        logger.info('Doing fixed order %s' % mode)
        req_acc = self.run_card['req_acc_FO']

        # Re-distribute the grids for the 2nd step of the applgrid
        # running
        if self.run_card['iappl'] == 2:
            self.applgrid_distribute(options, mode_dict[mode], p_dirs)

        # create a list of dictionaries "jobs_to_run" with all the
        # jobs that need to be run
        integration_step = -1
        jobs_to_run, jobs_to_collect, integration_step = self.create_jobs_to_run(options, p_dirs, \
                    req_acc, mode_dict[mode], integration_step, mode, fixed_order=True)
        self.prepare_directories(jobs_to_run, mode)

        # loop over the integration steps. After every step, check
        # if we have the required accuracy. If this is the case,
        # stop running, else do another step.
        while True:
            integration_step = integration_step + 1
            self.run_all_jobs(jobs_to_run, integration_step)
            self.collect_log_files(jobs_to_run, integration_step)
            jobs_to_run, jobs_to_collect = self.collect_the_results(options, req_acc, jobs_to_run, \
                    jobs_to_collect, integration_step, mode, mode_dict[mode])
            if not jobs_to_run:
                # there are no more jobs to run (jobs_to_run is empty)
                break
        # We are done.
        self.finalise_run_FO(folder_names[mode], jobs_to_collect)
        self.update_status('Run complete', level='parton', update_results=True)
        return

    elif mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
        if self.ninitial == 1:
            raise aMCatNLOError('Decay processes can only be run at fixed order.')
        mode_dict = {'aMC@NLO': 'all', 'aMC@LO': 'born', \
                     'noshower': 'all', 'noshowerLO': 'born'}
        shower = self.run_card['parton_shower'].upper()
        nevents = self.run_card['nevents']
        req_acc = self.run_card['req_acc']
        # validate the nevents/req_acc combination before doing anything
        if nevents == 0 and req_acc < 0:
            raise aMCatNLOError('Cannot determine the required accuracy from the number '\
                                'of events, because 0 events requested. Please set '\
                                'the "req_acc" parameter in the run_card to a value '\
                                'between 0 and 1')
        elif req_acc > 1 or req_acc == 0:
            raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\
                                'be between larger than 0 and smaller than 1, '\
                                'or set to -1 for automatic determination. Current '\
                                'value is %f' % req_acc)
        # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card)
        elif req_acc < 0 and nevents > 1000000:
            req_acc = 0.001

        shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8']

        if not shower in shower_list:
            raise aMCatNLOError('%s is not a valid parton shower. '\
                                'Please use one of the following: %s' \
                                % (shower, ', '.join(shower_list)))

        # check that PYTHIA6PT is not used for processes with FSR
        if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']:
            raise aMCatNLOError('PYTHIA6PT does not support processes with FSR')

        if mode in ['aMC@NLO', 'aMC@LO']:
            logger.info('Doing %s matched to parton shower' % mode[4:])
        elif mode in ['noshower', 'noshowerLO']:
            logger.info('Generating events without running the shower.')
        elif options['only_generation']:
            # NOTE(review): unreachable — the two branches above cover all
            # modes accepted by this elif arm; kept as in the original.
            logger.info('Generating events starting from existing results')

        jobs_to_run, jobs_to_collect, integration_step = self.create_jobs_to_run(options, p_dirs, \
                    req_acc, mode_dict[mode], 1, mode, fixed_order=False)
        # Make sure to update all the jobs to be ready for the event generation step
        if options['only_generation']:
            jobs_to_run, jobs_to_collect = self.collect_the_results(options, req_acc, jobs_to_run, \
                        jobs_to_collect, 1, mode, mode_dict[mode], fixed_order=False)
        else:
            self.prepare_directories(jobs_to_run, mode, fixed_order=False)

        # Main loop over the three MINT generation steps:
        for mint_step, status in enumerate(mcatnlo_status):
            if options['only_generation'] and mint_step < 2:
                # restart runs go straight to the event-generation step
                continue
            self.update_status(status, level='parton')
            self.run_all_jobs(jobs_to_run, mint_step, fixed_order=False)
            self.collect_log_files(jobs_to_run, mint_step)
            jobs_to_run, jobs_to_collect = self.collect_the_results(options, req_acc, jobs_to_run, \
                    jobs_to_collect, mint_step, mode, mode_dict[mode], fixed_order=False)
            if mint_step + 1 == 2 and nevents == 0:
                # only the cross section was requested: stop before generating events
                self.print_summary(options, 2, mode)
                return

        # Sanity check on the event files. If error the jobs are resubmitted
        self.check_event_files(jobs_to_collect)

    if self.cluster_mode == 1:
        # if cluster run, wait 10 sec so that event files are transferred back
        self.update_status(
                'Waiting while files are transferred back from the cluster nodes',
                level='parton')
        time.sleep(10)

    event_norm = self.run_card['event_norm']
    return self.reweight_and_collect_events(options, mode, nevents, event_norm)
1939
def create_jobs_to_run(self, options, p_dirs, req_acc, run_mode, \
                       integration_step, mode, fixed_order=True):
    """Creates a list of dictionaries with all the jobs to be run.

    Returns (jobs_to_run, jobs_to_collect, integration_step). For a
    fresh run, the jobs are built from the P*/channels.txt files; when
    options['only_generation'] is True they are instead restored from
    the pickled SubProcesses/job_status.pkl of a previous run.
    """
    jobs_to_run = []
    if not options['only_generation']:
        # Fresh, new run. Check all the P*/channels.txt files
        # (created by the 'gensym' executable) to set-up all the
        # jobs using the default inputs.
        npoints = self.run_card['npoints_FO_grid']
        niters = self.run_card['niters_FO_grid']
        for p_dir in p_dirs:
            try:
                with open(pjoin(self.me_dir, 'SubProcesses', p_dir, 'channels.txt')) as chan_file:
                    channels = chan_file.readline().split()
            except IOError:
                logger.warning('No integration channels found for contribution %s' % p_dir)
                continue
            if fixed_order:
                # Fixed order: group several channels into one job.
                lch = len(channels)
                maxchannels = 20  # combine up to 20 channels in a single job
                if self.run_card['iappl'] != 0: maxchannels = 1
                njobs = (int(lch/maxchannels)+1 if lch % maxchannels != 0 \
                         else int(lch/maxchannels))
                for nj in range(1, njobs+1):
                    job = {}
                    job['p_dir'] = p_dir
                    job['channel'] = str(nj)
                    # distribute the channels as evenly as possible over the jobs
                    job['nchans'] = (int(lch/njobs)+1 if nj <= lch % njobs else int(lch/njobs))
                    job['configs'] = ' '.join(channels[:job['nchans']])
                    del channels[:job['nchans']]
                    job['split'] = 0
                    if req_acc == -1:
                        # fixed statistics from the run_card
                        job['accuracy'] = 0
                        job['niters'] = niters
                        job['npoints'] = npoints
                    elif req_acc > 0:
                        # accuracy-driven: start with a loose default
                        job['accuracy'] = 0.05
                        job['niters'] = 6
                        job['npoints'] = -1
                    else:
                        raise aMCatNLOError('No consistent "req_acc_FO" set. Use a value ' +
                                            'between 0 and 1 or set it equal to -1.')
                    job['mint_mode'] = 0
                    job['run_mode'] = run_mode
                    job['wgt_frac'] = 1.0
                    job['wgt_mult'] = 1.0
                    jobs_to_run.append(job)
                if channels:
                    # all channels should have been consumed by the jobs above
                    raise aMCatNLOError('channels is not empty %s' % channels)
            else:
                # Event generation: one job per channel.
                for channel in channels:
                    job = {}
                    job['p_dir'] = p_dir
                    job['channel'] = channel
                    job['split'] = 0
                    job['accuracy'] = 0.03
                    job['niters'] = 12
                    job['npoints'] = -1
                    job['mint_mode'] = 0
                    job['run_mode'] = run_mode
                    job['wgt_frac'] = 1.0
                    jobs_to_run.append(job)
        jobs_to_collect = copy.copy(jobs_to_run)  # These are all jobs
    else:
        # if options['only_generation'] is true, just read the current jobs from file
        try:
            with open(pjoin(self.me_dir, "SubProcesses", "job_status.pkl"), 'rb') as f:
                jobs_to_collect = pickle.load(f)
                for job in jobs_to_collect:
                    # re-anchor the saved (possibly absolute) dirnames to the
                    # current process directory
                    job['dirname'] = pjoin(self.me_dir, 'SubProcesses', job['dirname'].rsplit('/SubProcesses/', 1)[1])
            jobs_to_run = copy.copy(jobs_to_collect)
        # NOTE(review): bare except also swallows KeyboardInterrupt; kept as is.
        except:
            raise aMCatNLOError('Cannot reconstruct saved job status in %s' % \
                                pjoin(self.me_dir, 'SubProcesses', 'job_status.pkl'))
        # Update cross sections and determine which jobs to run next
        if fixed_order:
            jobs_to_run, jobs_to_collect = self.collect_the_results(options, req_acc, jobs_to_run,
                                               jobs_to_collect, integration_step, mode, run_mode)
            # Update the integration_step to make sure that nothing will be overwritten
            integration_step = 1
            for job in jobs_to_run:
                while os.path.exists(pjoin(job['dirname'], 'res_%s.dat' % integration_step)):
                    integration_step = integration_step + 1
            integration_step = integration_step - 1
        else:
            self.append_the_results(jobs_to_collect, integration_step)
    return jobs_to_run, jobs_to_collect, integration_step
2027
def prepare_directories(self, jobs_to_run, mode, fixed_order=True):
    """Set up the G* working directory of every job in 'jobs_to_run'.

    Stores the directory path in job['dirname'], creates it if needed,
    writes the executable's input file in it and, for split jobs, seeds
    it with the grids of the corresponding parent (unsplit) directory.
    """
    name_suffix = {'born': 'B', 'all': 'F'}
    for job in jobs_to_run:
        # Directory name: '<run_mode>_G<channel>' for fixed order,
        # 'G<B|F><channel>' for event generation; split jobs carry an
        # extra '_<split>' suffix.
        if fixed_order:
            subdir = job['run_mode'] + '_G' + job['channel']
        else:
            subdir = 'G' + name_suffix[job['run_mode']] + job['channel']
        if job['split'] != 0:
            subdir = subdir + '_' + str(job['split'])
        dirname = pjoin(self.me_dir, 'SubProcesses', job['p_dir'], subdir)
        job['dirname'] = dirname
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        self.write_input_file(job, fixed_order)
        # Seed a split directory with the grids of its parent directory:
        # real copies for fixed order, symbolic links (only when not
        # already present) for event generation.
        if job['split'] != 0:
            parent = job['dirname'].rsplit("_", 1)[0]
            if fixed_order:
                for gridfile in ['grid.MC_integer', 'mint_grids']:
                    files.cp(pjoin(parent, gridfile), job['dirname'])
            else:
                for gridfile in ['grid.MC_integer', 'mint_grids', 'res_1']:
                    if not os.path.isfile(pjoin(job['dirname'], gridfile)):
                        files.ln(pjoin(parent, gridfile), job['dirname'])
2060 2061
def write_input_file(self, job, fixed_order):
    """write the input file for the madevent_mint* executable in the appropriate directory.

    The template is filled with %-style substitution from the 'job'
    dictionary and written as 'input_app.txt' in job['dirname'].
    """
    if fixed_order:
        # keyword-style input read by the fixed-order executable
        content = \
"""NPOINTS = %(npoints)s
NITERATIONS = %(niters)s
ACCURACY = %(accuracy)s
ADAPT_GRID = 2
MULTICHANNEL = 1
SUM_HELICITY = 1
NCHANS = %(nchans)s
CHANNEL = %(configs)s
SPLIT = %(split)s
WGT_MULT= %(wgt_mult)s
RUN_MODE = %(run_mode)s
RESTART = %(mint_mode)s
""" \
            % job
    else:
        # positional input (one value per line) read by the MINT executable
        content = \
"""-1 12 ! points, iterations
%(accuracy)s ! desired fractional accuracy
1 -0.1 ! alpha, beta for Gsoft
-1 -0.1 ! alpha, beta for Gazi
1 ! Suppress amplitude (0 no, 1 yes)?
1 ! Exact helicity sum (0 yes, n = number/event)?
%(channel)s ! Enter Configuration Number:
%(mint_mode)s ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events
1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij
%(run_mode)s ! all, born, real, virt
""" \
            % job
    with open(pjoin(job['dirname'], 'input_app.txt'), 'w') as input_file:
        input_file.write(content)
2096 2097
def run_all_jobs(self, jobs_to_run, integration_step, fixed_order=True):
    """Submit every job in 'jobs_to_run' through 'run_exe' and wait for
    all of them to complete."""
    name_suffix = {'born': 'B', 'all': 'F'}
    if fixed_order:
        # status line is only shown for fixed-order runs
        if integration_step == 0:
            self.update_status('Setting up grids', level=None)
        else:
            self.update_status('Refining results, step %i' % integration_step, level=None)
        run_type = "Fixed order integration step %s" % integration_step
    else:
        run_type = "MINT step %s" % integration_step
    self.ijob = 0
    self.njobs = len(jobs_to_run)
    for job in jobs_to_run:
        # second argument: raw run_mode for fixed order, B/F letter otherwise
        if fixed_order:
            mode_arg = job['run_mode']
        else:
            mode_arg = name_suffix[job['run_mode']]
        arguments = [job['channel'], mode_arg,
                     str(job['split']), str(integration_step)]
        self.run_exe('ajob1', arguments, run_type,
                     cwd=pjoin(self.me_dir, 'SubProcesses', job['p_dir']))

    if self.cluster_mode == 2:
        time.sleep(1)  # security to allow all jobs to be launched
    self.wait_for_complete(run_type)
2126 2127
def collect_the_results(self, options, req_acc, jobs_to_run, jobs_to_collect, \
                        integration_step, mode, run_mode, fixed_order=True):
    """Collect the results, make HTML pages, print the summary and
       determine if there are more jobs to run. Returns the list
       of the jobs that still need to be run, as well as the
       complete list of jobs that need to be collected to get the
       final answer.
    """
    # Get the results of the current integration/MINT step
    self.append_the_results(jobs_to_run, integration_step)
    self.cross_sect_dict = self.write_res_txt_file(jobs_to_collect, integration_step)
    # Update HTML pages
    if fixed_order:
        cross, error = self.make_make_all_html_results(folder_names=['%s*' % run_mode],
                                                       jobs=jobs_to_collect)
    else:
        name_suffix = {'born': 'B', 'all': 'F'}
        cross, error = self.make_make_all_html_results(folder_names=['G%s*' % name_suffix[run_mode]])
    self.results.add_detail('cross', cross)
    self.results.add_detail('error', error)
    # Combine grids from split fixed order jobs
    if fixed_order:
        jobs_to_run = self.combine_split_order_run(jobs_to_run)
    # Set-up jobs for the next iteration/MINT step
    jobs_to_run_new = self.update_jobs_to_run(req_acc, integration_step, jobs_to_run, fixed_order)
    # IF THERE ARE NO MORE JOBS, WE ARE DONE!!!
    if fixed_order:
        # Write the jobs_to_collect directory to file so that we
        # can restart them later (with only-generation option)
        with open(pjoin(self.me_dir, "SubProcesses", "job_status.pkl"), 'wb') as f:
            pickle.dump(jobs_to_collect, f)
    # Print summary
    if (not jobs_to_run_new) and fixed_order:
        # print final summary of results (for fixed order)
        scale_pdf_info = self.collect_scale_pdf_info(options, jobs_to_collect)
        self.print_summary(options, integration_step, mode, scale_pdf_info, done=True)
        return jobs_to_run_new, jobs_to_collect
    elif jobs_to_run_new:
        # print intermediate summary of results
        scale_pdf_info = []
        self.print_summary(options, integration_step, mode, scale_pdf_info, done=False)
    else:
        # When we are done for (N)LO+PS runs, do not print
        # anything yet. This will be done after the reweighting
        # and collection of the events
        scale_pdf_info = []
    # Prepare for the next integration/MINT step
    if (not fixed_order) and integration_step + 1 == 2:
        # Write the jobs_to_collect directory to file so that we
        # can restart them later (with only-generation option)
        with open(pjoin(self.me_dir, "SubProcesses", "job_status.pkl"), 'wb') as f:
            pickle.dump(jobs_to_collect, f)
        # next step is event generation (mint_step 2): split the jobs in
        # event batches if needed and write the event-count bookkeeping files
        jobs_to_run_new, jobs_to_collect_new = \
            self.check_the_need_to_split(jobs_to_run_new, jobs_to_collect)
        self.prepare_directories(jobs_to_run_new, mode, fixed_order)
        self.write_nevents_unweighted_file(jobs_to_collect_new, jobs_to_collect)
        self.write_nevts_files(jobs_to_run_new)
    else:
        # for accuracy-driven fixed-order runs (no applgrid), long jobs
        # may be split over several smaller ones before the next step
        if fixed_order and self.run_card['iappl'] == 0 \
           and self.run_card['req_acc_FO'] > 0:
            jobs_to_run_new, jobs_to_collect = \
                self.split_jobs_fixed_order(jobs_to_run_new, jobs_to_collect)
        self.prepare_directories(jobs_to_run_new, mode, fixed_order)
        jobs_to_collect_new = jobs_to_collect
    return jobs_to_run_new, jobs_to_collect_new
2194 2195
def write_nevents_unweighted_file(self, jobs, jobs0events):
    """writes the nevents_unweighted file in the SubProcesses directory.
    We also need to write the jobs that will generate 0 events,
    because that makes sure that the cross section from those channels
    is taken into account in the event weights (by collect_events.f).
    """
    def one_line(job, xsec, frac):
        # one record per channel:
        # <P*/G*/events.lhe> <#events> <ABS cross section> <weight fraction>
        pieces = job['dirname'].split('/')
        lhefile = pjoin(pieces[-2], pieces[-1], 'events.lhe')
        return ' %s %d %9e %9e' % (lhefile.ljust(40), job['nevents'], xsec, frac)

    content = [one_line(job, job['resultABS'] * job['wgt_frac'], job['wgt_frac'])
               for job in jobs]
    # channels without events still contribute their cross section to the
    # event weights, so list them as well (with weight fraction 1)
    content += [one_line(job, job['resultABS'], 1.)
                for job in jobs0events if job['nevents'] == 0]
    with open(pjoin(self.me_dir, 'SubProcesses', "nevents_unweighted"), 'w') as f:
        f.write('\n'.join(content) + '\n')
2216
def write_nevts_files(self, jobs):
    """write the nevts files in the SubProcesses/P*/G*/ directories"""
    for job in jobs:
        # with 'bias' event normalisation the total ABS cross section is
        # written next to the number of events; otherwise just the count
        if self.run_card['event_norm'].lower() == 'bias':
            line = '%i %f\n' % (job['nevents'], self.cross_sect_dict['xseca'])
        else:
            line = '%i\n' % job['nevents']
        with open(pjoin(job['dirname'], 'nevts'), 'w') as nevts_file:
            nevts_file.write(line)
2225
def combine_split_order_run(self, jobs_to_run):
    """Combines jobs and grids from split jobs that have been run.

    Jobs sharing the same 'p_dir' and 'channel' (i.e. the splits of one
    original job) are merged back into a single job: their mint/MC-integer
    grids are combined on disk and one summed job replaces them in the
    returned list. Unsplit jobs (split==0) are kept unchanged.
    """
    jobgroups_to_combine = []
    jobs_to_run_new = []
    for job in jobs_to_run:
        if job['split'] == 0:
            # this job wasn't split; keep it as is
            job['combined'] = 1
            jobs_to_run_new.append(job)
        elif job['split'] == 1:
            # The first split triggers the grouping of all its siblings.
            # Use an explicit list comprehension rather than filter():
            # the group is indexed and iterated more than once below,
            # which would fail with Python 3's lazy filter iterator
            # (identical behaviour under Python 2, where filter returns
            # a list).
            jobgroups_to_combine.append(
                [j for j in jobs_to_run
                 if j['p_dir'] == job['p_dir'] and
                    j['channel'] == job['channel']])
        # splits > 1 are skipped: they are picked up by their group above
    for job_group in jobgroups_to_combine:
        # Combine the grids (mint-grids & MC-integer grids) first
        self.combine_split_order_grids(job_group)
        jobs_to_run_new.append(self.combine_split_order_jobs(job_group))
    return jobs_to_run_new
2247
def combine_split_order_jobs(self, job_group):
    """combine the jobs in job_group and return a single summed job"""
    # start from a shallow copy of the first split ...
    merged = copy.copy(job_group[0])
    # ... and strip the split-specific information
    merged['dirname'] = merged['dirname'].rsplit('_', 1)[0]
    merged['split'] = 0
    merged['wgt_mult'] = 1.0
    merged['combined'] = len(job_group)
    # quantities summed linearly over the splits:
    add_keys = ['niters_done', 'npoints_done', 'niters', 'npoints',
                'result', 'resultABS', 'time_spend']
    # quantities summed in quadrature (statistical errors):
    quad_keys = ['error', 'errorABS']
    for key in quad_keys:
        merged[key] = merged[key] ** 2
    for job in job_group[1:]:
        for key in add_keys:
            merged[key] += job[key]
        for key in quad_keys:
            merged[key] += job[key] ** 2
    for key in quad_keys:
        merged[key] = math.sqrt(merged[key])
    merged['err_percABS'] = merged['errorABS'] / merged['resultABS'] * 100.
    merged['err_perc'] = merged['error'] / merged['result'] * 100.
    # average the number of iterations over the splits
    merged['niters'] = int(merged['niters_done'] / len(job_group))
    merged['niters_done'] = int(merged['niters_done'] / len(job_group))
    return merged
2278 2279
def combine_split_order_grids(self, job_group):
    """Combines the mint_grids and MC-integer grids from the split order
    jobs (fixed order only).

    The combined grids are written over the files in the parent
    (unsplit) directory, which all splits must share.
    """
    files_mint_grids = []
    files_MC_integer = []
    location = None
    for job in job_group:
        files_mint_grids.append(pjoin(job['dirname'], 'mint_grids'))
        files_MC_integer.append(pjoin(job['dirname'], 'grid.MC_integer'))
        if not location:
            # parent directory: split dirname with the '_<split>' tail removed
            location = pjoin(job['dirname'].rsplit('_', 1)[0])
        else:
            if location != pjoin(job['dirname'].rsplit('_', 1)[0]):
                raise aMCatNLOError('Not all jobs have the same location. '\
                                    + 'Cannot combine them.')
    # Needed to average the grids (both xgrids, ave_virt and
    # MC_integer grids), but sum the cross section info. The
    # latter is only the only line that contains integers.
    for j, fs in enumerate([files_mint_grids, files_MC_integer]):
        # read the corresponding grid file of every split
        linesoffiles = []
        for f in fs:
            with open(f, 'r+') as fi:
                linesoffiles.append(fi.readlines())
        to_write = []
        # process the files line-by-line in lockstep (same row from each split)
        for rowgrp in zip(*linesoffiles):
            try:
                # check that last element on the line is an
                # integer (will raise ValueError if not the
                # case). If integer, this is the line that
                # contains information that needs to be
                # summed. All other lines can be averaged.
                is_integer = [[int(row.strip().split()[-1])] for row in rowgrp]
                floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp]
                floatgrps = zip(*floatsbyfile)
                # column-by-column combination of the cross-section line
                special = []
                for i, floatgrp in enumerate(floatgrps):
                    if i == 0:  # sum X-sec
                        special.append(sum(floatgrp))
                    elif i == 1:  # sum unc in quadrature
                        special.append(math.sqrt(sum([err ** 2 for err in floatgrp])))
                    elif i == 2:  # average number of PS per iteration
                        special.append(int(sum(floatgrp) / len(floatgrp)))
                    elif i == 3:  # sum the number of iterations
                        special.append(int(sum(floatgrp)))
                    elif i == 4:  # average the nhits_in_grids
                        special.append(int(sum(floatgrp) / len(floatgrp)))
                    else:
                        raise aMCatNLOError('"mint_grids" files not in correct format. ' +\
                                            'Cannot combine them.')
                to_write.append(" ".join(str(s) for s in special) + "\n")
            except ValueError:
                # just average all
                floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp]
                floatgrps = zip(*floatsbyfile)
                averages = [sum(floatgrp) / len(floatgrp) for floatgrp in floatgrps]
                to_write.append(" ".join(str(a) for a in averages) + "\n")
        # write the data over the master location
        if j == 0:
            with open(pjoin(location, 'mint_grids'), 'w') as f:
                f.writelines(to_write)
        elif j == 1:
            with open(pjoin(location, 'grid.MC_integer'), 'w') as f:
                f.writelines(to_write)
2344 2345
def split_jobs_fixed_order(self, jobs_to_run, jobs_to_collect):
    """Looks in the jobs_to_run to see if there is the need to split the
    jobs, depending on the expected time they take. Updates
    jobs_to_run and jobs_to_collect to replace the split-job by
    its splits.
    """
    # determine the number jobs we should have (this is per p_dir)
    if self.options['run_mode'] == 2:
        nb_submit = int(self.options['nb_core'])
    elif self.options['run_mode'] == 1:
        nb_submit = int(self.options['cluster_size'])
    else:
        nb_submit = 1
    # total expected aggregated running time, extrapolated from the
    # time spent and statistics of the previous iteration
    time_expected = 0
    for job in jobs_to_run:
        time_expected += job['time_spend'] * (job['niters'] * job['npoints']) / \
                         (job['niters_done'] * job['npoints_done'])
    # this means that we must expect the following per job (in
    # ideal conditions)
    time_per_job = time_expected / (nb_submit * (1 + len(jobs_to_run) / 2))
    jobs_to_run_new = []
    jobs_to_collect_new = copy.copy(jobs_to_collect)
    for job in jobs_to_run:
        # remove current job from jobs_to_collect. Make sure
        # to remove all the split ones in case the original
        # job had been a split one (before it was re-combined)
        # NOTE(review): relies on Python-2 filter() returning a list
        # snapshot, so removing from jobs_to_collect_new while looping
        # is safe here.
        for j in filter(lambda j: j['p_dir'] == job['p_dir'] and \
                        j['channel'] == job['channel'], jobs_to_collect_new):
            jobs_to_collect_new.remove(j)
        time_expected = job['time_spend'] * (job['niters'] * job['npoints']) / \
                        (job['niters_done'] * job['npoints_done'])
        # if the time expected for this job is (much) larger than
        # the time spend in the previous iteration, and larger
        # than the expected time per job, split it
        if time_expected > max(2 * job['time_spend'] / job['combined'], time_per_job):
            # determine the number of splits needed
            nsplit = min(max(int(time_expected / max(2 * job['time_spend'] / job['combined'], time_per_job)), 2), nb_submit)
            for i in range(1, nsplit + 1):
                job_new = copy.copy(job)
                job_new['split'] = i
                # each split carries 1/nsplit of the weight
                job_new['wgt_mult'] = 1. / float(nsplit)
                job_new['dirname'] = job['dirname'] + '_%i' % job_new['split']
                # accuracy can be relaxed per split, capped at 0.1
                job_new['accuracy'] = min(job['accuracy'] * math.sqrt(float(nsplit)), 0.1)
                # redistribute iterations/points over the splits
                if nsplit >= job['niters']:
                    job_new['npoints'] = int(job['npoints'] * job['niters'] / nsplit)
                    job_new['niters'] = 1
                else:
                    job_new['npoints'] = int(job['npoints'] / nsplit)
                jobs_to_collect_new.append(job_new)
                jobs_to_run_new.append(job_new)
        else:
            jobs_to_collect_new.append(job)
            jobs_to_run_new.append(job)
    return jobs_to_run_new, jobs_to_collect_new
2401 2402
def check_the_need_to_split(self, jobs_to_run, jobs_to_collect):
    """Looks in the jobs_to_run to see if there is the need to split the
    event generation step. Updates jobs_to_run and
    jobs_to_collect to replace the split-job by its
    splits. Also removes jobs that do not need any events.
    """
    max_evts = self.run_card['nevt_job']
    if max_evts <= 0:
        # no splitting requested: simply drop the zero-event jobs
        jobs_to_run_new = copy.copy(jobs_to_collect)
        for job in jobs_to_collect:
            if job['nevents'] == 0:
                jobs_to_run_new.remove(job)
        jobs_to_collect_new = copy.copy(jobs_to_run_new)
        return jobs_to_run_new, jobs_to_collect_new

    jobs_to_collect_new = copy.copy(jobs_to_collect)
    for job in jobs_to_run:
        nevents = job['nevents']
        if nevents == 0:
            jobs_to_collect_new.remove(job)
        elif nevents > max_evts:
            # replace this job by nsplit smaller ones, distributing the
            # events (and corresponding weight fractions) evenly
            jobs_to_collect_new.remove(job)
            nsplit = int(nevents / max_evts)
            if nevents % max_evts != 0:
                nsplit = nsplit + 1
            base_evts = int(nevents / nsplit)
            left_over = nevents % nsplit
            for i in range(1, nsplit + 1):
                job_new = copy.copy(job)
                # the first 'left_over' splits take one extra event
                if i <= left_over:
                    job_new['nevents'] = base_evts + 1
                else:
                    job_new['nevents'] = base_evts
                job_new['wgt_frac'] = float(job_new['nevents']) / float(nevents)
                job_new['split'] = i
                job_new['dirname'] = job['dirname'] + '_%i' % i
                jobs_to_collect_new.append(job_new)
    jobs_to_run_new = copy.copy(jobs_to_collect_new)
    return jobs_to_run_new, jobs_to_collect_new
2443 2444
def update_jobs_to_run(self, req_acc, step, jobs, fixed_order=True):
    """
    For (N)LO+PS: determines the number of events and/or the required
    accuracy per job.
    For fixed order: determines which jobs need higher precision and
    returns those with the newly requested precision.
    """
    # totals of the current step, filled by write_res_txt_file
    err = self.cross_sect_dict['errt']
    tot = self.cross_sect_dict['xsect']
    errABS = self.cross_sect_dict['erra']
    totABS = self.cross_sect_dict['xseca']
    jobs_new = []
    if fixed_order:
        if req_acc == -1:
            # fixed statistics: one refinement step with run_card values
            if step + 1 == 1:
                npoints = self.run_card['npoints_FO']
                niters = self.run_card['niters_FO']
                for job in jobs:
                    job['mint_mode'] = -1
                    job['niters'] = niters
                    job['npoints'] = npoints
                    jobs_new.append(job)
            elif step + 1 == 2:
                # done after the refinement step: nothing more to run
                pass
            elif step + 1 > 2:
                raise aMCatNLOError('Cannot determine number of iterations and PS points ' +
                                    'for integration step %i' % step)
        elif (req_acc > 0 and err / abs(tot) > req_acc * 1.2) or step <= 0:
            # accuracy-driven: keep refining while above 1.2*req_acc
            req_accABS = req_acc * abs(tot) / totABS  # overal relative required accuracy on ABS Xsec.
            for job in jobs:
                job['mint_mode'] = -1
                # Determine relative required accuracy on the ABS for this job
                job['accuracy'] = req_accABS * math.sqrt(totABS / job['resultABS'])
                # If already accurate enough, skip the job (except when doing the first
                # step for the iappl=2 run: we need to fill all the applgrid grids!)
                if (job['accuracy'] > job['errorABS'] / job['resultABS'] and step != 0) \
                   and not (step == -1 and self.run_card['iappl'] == 2):
                    continue
                # Update the number of PS points based on errorABS, ncall and accuracy
                itmax_fl = job['niters_done'] * math.pow(job['errorABS'] /
                                                         (job['accuracy'] * job['resultABS']), 2)
                # map the (fractional) ideal iteration count onto a sensible
                # niters/npoints combination
                if itmax_fl <= 4.0:
                    job['niters'] = max(int(round(itmax_fl)), 2)
                    job['npoints'] = job['npoints_done'] * 2
                elif itmax_fl > 4.0 and itmax_fl <= 16.0:
                    job['niters'] = 4
                    job['npoints'] = int(round(job['npoints_done'] * itmax_fl / 4.0)) * 2
                else:
                    if itmax_fl > 100.0: itmax_fl = 50.0
                    job['niters'] = int(round(math.sqrt(itmax_fl)))
                    job['npoints'] = int(round(job['npoints_done'] * itmax_fl /
                                               round(math.sqrt(itmax_fl)))) * 2
                # Add the job to the list of jobs that need to be run
                jobs_new.append(job)
            return jobs_new
    elif step + 1 <= 2:
        nevents = self.run_card['nevents']
        # Total required accuracy for the upper bounding envelope
        if req_acc < 0:
            req_acc2_inv = nevents
        else:
            req_acc2_inv = 1 / (req_acc * req_acc)
        if step + 1 == 1 or step + 1 == 2:
            # determine the req. accuracy for each of the jobs for Mint-step = 1
            for job in jobs:
                accuracy = min(math.sqrt(totABS / (req_acc2_inv * job['resultABS'])), 0.2)
                job['accuracy'] = accuracy
        if step + 1 == 2:
            # Randomly (based on the relative ABS Xsec of the job) determine the
            # number of events each job needs to generate for MINT-step = 2.
            r = self.get_randinit_seed()
            random.seed(r)
            totevts = nevents
            for job in jobs:
                job['nevents'] = 0
            # distribute the events one at a time, with probability
            # proportional to each job's ABS cross section
            while totevts:
                target = random.random() * totABS
                crosssum = 0.
                i = 0
                while i < len(jobs) and crosssum < target:
                    job = jobs[i]
                    crosssum += job['resultABS']
                    i += 1
                totevts -= 1
                i -= 1
                jobs[i]['nevents'] += 1
        for job in jobs:
            job['mint_mode'] = step + 1  # next step
        return jobs
    else:
        # event generation already done: no further MINT steps
        return []
2536 2537
def get_randinit_seed(self):
    """ Get the random number seed from the randinit file """
    # the file holds a single entry of the form "r=<integer>"
    randinit_path = pjoin(self.me_dir, "SubProcesses", "randinit")
    with open(randinit_path) as randinit_file:
        seed_txt = randinit_file.read()
    return int(seed_txt[2:])
2544 2545
def append_the_results(self, jobs, integration_step):
    """Appends the results for each of the jobs in the job list.

    Reads the one-line res_<step>.dat (or res.dat) file of every job
    directory and stores cross section, error and statistics in the
    job dictionary; raises at the end if any file was missing.
    """
    failed_logs = []
    for job in jobs:
        # res.dat (without step label) is read when restarting a fixed
        # order run with 'only_generation': it holds the final results
        if integration_step >= 0:
            res_path = pjoin(job['dirname'], 'res_%s.dat' % integration_step)
        else:
            res_path = pjoin(job['dirname'], 'res.dat')
        try:
            with open(res_path) as res_file:
                results = res_file.readline().split()
        except IOError:
            # keep collecting so that all failing directories are reported
            failed_logs.append(pjoin(job['dirname'], 'log.txt'))
            continue
        job['resultABS'] = float(results[0])
        job['errorABS'] = float(results[1])
        job['result'] = float(results[2])
        job['error'] = float(results[3])
        job['niters_done'] = int(results[4])
        job['npoints_done'] = int(results[5])
        job['time_spend'] = float(results[6])
        job['err_percABS'] = job['errorABS'] / job['resultABS'] * 100.
        job['err_perc'] = job['error'] / job['result'] * 100.
    if failed_logs:
        raise aMCatNLOError('An error occurred during the collection of results.\n' +
                            'Please check the .log files inside the directories which failed:\n' +
                            '\n'.join(failed_logs) + '\n')
2578 2579 2580
def write_res_txt_file(self, jobs, integration_step):
    """Writes the res_<step>.txt summary file in the SubProcesses dir and
    returns a dict with the total (ABS) cross section, errors and the
    random seed."""
    # list the most uncertain channels first (sorts 'jobs' in place)
    jobs.sort(key=lambda job: -job['errorABS'])
    lines = []
    lines.append('\n\nCross section per integration channel:')
    for job in jobs:
        lines.append('%(p_dir)20s %(channel)15s %(result)10.8e %(error)6.4e %(err_perc)6.4f%% ' % job)
    lines.append('\n\nABS cross section per integration channel:')
    for job in jobs:
        lines.append('%(p_dir)20s %(channel)15s %(resultABS)10.8e %(errorABS)6.4e %(err_percABS)6.4f%% ' % job)
    # totals: cross sections summed linearly (weighted by wgt_frac),
    # errors summed in quadrature
    totABS = 0
    errABS = 0
    tot = 0
    err = 0
    for job in jobs:
        totABS += job['resultABS'] * job['wgt_frac']
        errABS += job['errorABS'] ** 2 * job['wgt_frac']
        tot += job['result'] * job['wgt_frac']
        err += job['error'] ** 2 * job['wgt_frac']
    if jobs:
        lines.append('\nTotal ABS and \nTotal: \n %10.8e +- %6.4e (%6.4e%%)\n %10.8e +- %6.4e (%6.4e%%) \n' %
                     (totABS, math.sqrt(errABS), math.sqrt(errABS) / totABS * 100.,
                      tot, math.sqrt(err), math.sqrt(err) / tot * 100.))
    with open(pjoin(self.me_dir, 'SubProcesses', 'res_%s.txt' % integration_step), 'w') as res_file:
        res_file.write('\n'.join(lines))
    return {'xsect': tot, 'xseca': totABS, 'errt': math.sqrt(err),
            'erra': math.sqrt(errABS), 'randinit': self.get_randinit_seed()}
2609 2610
def collect_scale_pdf_info(self, options, jobs):
    """read the scale_pdf_dependence.dat files and collects there results.

    Returns the collected scale/PDF information, or an empty list when
    no scale/PDF reweighting was requested in the run_card."""
    reweighting_requested = (any(self.run_card['reweight_scale']) or
                             any(self.run_card['reweight_PDF']) or
                             len(self.run_card['dynamical_scale_choice']) > 1 or
                             len(self.run_card['lhaid']) > 1)
    if not reweighting_requested:
        return []
    # one dependence file per job, weighted by the job's event fraction
    evt_files = [pjoin(job['dirname'], 'scale_pdf_dependence.dat') for job in jobs]
    evt_wghts = [job['wgt_frac'] for job in jobs]
    return self.pdf_scale_from_reweighting(evt_files, evt_wghts)
2623 2624
2625 - def combine_plots_FO(self,folder_name,jobs):
2626 """combines the plots and puts then in the Events/run* directory""" 2627 devnull = open(os.devnull, 'w') 2628 2629 if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer': 2630 topfiles = [] 2631 for job in jobs: 2632 if job['dirname'].endswith('.top'): 2633 topfiles.append(job['dirname']) 2634 else: 2635 topfiles.append(pjoin(job['dirname'],'MADatNLO.top')) 2636 misc.call(['./combine_plots_FO.sh'] + topfiles, \ 2637 stdout=devnull, 2638 cwd=pjoin(self.me_dir, 'SubProcesses')) 2639 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'), 2640 pjoin(self.me_dir, 'Events', self.run_name)) 2641 logger.info('The results of this run and the TopDrawer file with the plots' + \ 2642 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2643 elif self.analyse_card['fo_analysis_format'].lower() == 'hwu': 2644 out=pjoin(self.me_dir,'Events',self.run_name,'MADatNLO') 2645 self.combine_plots_HwU(jobs,out) 2646 try: 2647 misc.call(['gnuplot','MADatNLO.gnuplot'],\ 2648 stdout=devnull,stderr=devnull,\ 2649 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2650 except Exception: 2651 pass 2652 logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \ 2653 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2654 elif self.analyse_card['fo_analysis_format'].lower() == 'root': 2655 rootfiles = [] 2656 for job in jobs: 2657 if job['dirname'].endswith('.root'): 2658 rootfiles.append(job['dirname']) 2659 else: 2660 rootfiles.append(pjoin(job['dirname'],'MADatNLO.root')) 2661 misc.call(['./combine_root.sh'] + folder_name + rootfiles, \ 2662 stdout=devnull, 2663 cwd=pjoin(self.me_dir, 'SubProcesses')) 2664 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'), 2665 pjoin(self.me_dir, 'Events', self.run_name)) 2666 logger.info('The results of this run and the ROOT file with the plots' + \ 2667 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2668 elif 
self.analyse_card['fo_analysis_format'].lower() == 'lhe': 2669 self.combine_FO_lhe(jobs) 2670 logger.info('The results of this run and the LHE File (to be used for plotting only)' + \ 2671 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2672 else: 2673 logger.info('The results of this run' + \ 2674 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
2675
    def combine_FO_lhe(self,jobs):
        """combine the various lhe file generated in each directory.
        They are two steps:
        1) banner
        2) reweight each sample by the factor written at the end of each file
        3) concatenate each of the new files (gzip those).
        """

        logger.info('Combining lhe events for plotting analysis')
        start = time.time()
        # normalise the post-processing flags to lower case once, up front
        self.run_card['fo_lhe_postprocessing'] = [i.lower() for i in self.run_card['fo_lhe_postprocessing']]
        output = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')
        # remove any leftover combined file from a previous run
        if os.path.exists(output):
            os.remove(output)

        # 1. write the banner
        # the <initrwgt> block is copied verbatim from the first job's header
        text = open(pjoin(jobs[0]['dirname'],'header.txt'),'r').read()
        i1, i2 = text.find('<initrwgt>'),text.find('</initrwgt>')
        # 10 == len('<initrwgt>'): keep only the tag's inner content
        self.banner['initrwgt'] = text[10+i1:i2]
        #
        # <init>
        # 2212 2212 6.500000e+03 6.500000e+03 0 0 247000 247000 -4 1
        # 8.430000e+02 2.132160e+00 8.430000e+02 1
        # <generator name='MadGraph5_aMC@NLO' version='2.5.2'>please cite 1405.0301 </generator>
        # </init>

        cross = sum(j['result'] for j in jobs)
        # NOTE(review): this sums the plain errors, not their squares, before
        # taking the square root — quadrature would use j['error']**2; confirm
        # whether this is intentional (the result is only used for plotting).
        error = math.sqrt(sum(j['error'] for j in jobs))
        self.banner['init'] = "0 0 0e0 0e0 0 0 0 0 -4 1\n %s %s %s 1" % (cross, error, cross)
        # write the uncompressed banner (output[:-3] strips '.gz'), then gzip it
        self.banner.write(output[:-3], close_tag=False)
        misc.gzip(output[:-3])

        # append the events to the gzipped banner file
        fsock = lhe_parser.EventFile(output,'a')
        if 'nogrouping' in self.run_card['fo_lhe_postprocessing']:
            fsock.eventgroup = False
        else:
            fsock.eventgroup = True

        if 'norandom' in self.run_card['fo_lhe_postprocessing']:
            # deterministic path: process each job's file sequentially
            for job in jobs:
                dirname = job['dirname']
                #read last line
                # the trailer line holds: nb_event  sum-of-weights  cross-section
                lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline()
                nb_event, sumwgt, cross = [float(i) for i in lastline.split()]
                # get normalisation ratio
                ratio = cross/sumwgt
                lhe = lhe_parser.EventFile(pjoin(dirname,'events.lhe'))
                lhe.eventgroup = True # read the events by eventgroup
                for eventsgroup in lhe:
                    neweventsgroup = []
                    for i,event in enumerate(eventsgroup):
                        event.rescale_weights(ratio)
                        # merge identical consecutive events (counter-events)
                        # by summing their weights, unless disabled
                        if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \
                           and event == neweventsgroup[-1]:
                            neweventsgroup[-1].wgt += event.wgt
                            for key in event.reweight_data:
                                neweventsgroup[-1].reweight_data[key] += event.reweight_data[key]
                        else:
                            neweventsgroup.append(event)
                    fsock.write_events(neweventsgroup)
                lhe.close()
                os.remove(pjoin(dirname,'events.lhe'))
        else:
            # randomised path: interleave all files via a MultiEventFile
            lhe = []
            lenlhe = []
            misc.sprint('need to combine %s event file' % len(jobs))
            globallhe = lhe_parser.MultiEventFile()
            globallhe.eventgroup = True
            for job in jobs:
                dirname = job['dirname']
                # trailer line as above: nb_event  sum-of-weights  cross-section
                lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline()
                nb_event, sumwgt, cross = [float(i) for i in lastline.split()]
                lastlhe = globallhe.add(pjoin(dirname,'events.lhe'),cross, 0, cross,
                                        nb_event=int(nb_event), scale=cross/sumwgt)
            for eventsgroup in globallhe:
                neweventsgroup = []
                for i,event in enumerate(eventsgroup):
                    # each event carries the normalisation of its source sample
                    event.rescale_weights(event.sample_scale)
                    if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \
                       and event == neweventsgroup[-1]:
                        neweventsgroup[-1].wgt += event.wgt
                        for key in event.reweight_data:
                            neweventsgroup[-1].reweight_data[key] += event.reweight_data[key]
                    else:
                        neweventsgroup.append(event)
                fsock.write_events(neweventsgroup)
            globallhe.close()
            fsock.write('</LesHouchesEvents>\n')
            fsock.close()
            misc.sprint('combining lhe file done in ', time.time()-start)
            for job in jobs:
                dirname = job['dirname']
                os.remove(pjoin(dirname,'events.lhe'))

        # NOTE(review): the closing tag and fsock.close() only happen in the
        # else-branch above; confirm the 'norandom' path closes fsock elsewhere.
        # The duplicated timing printout below also looks like leftover debug.
        misc.sprint('combining lhe file done in ', time.time()-start)
2778 2779 2780 2781 2782 2783
2784 - def combine_plots_HwU(self,jobs,out,normalisation=None):
2785 """Sums all the plots in the HwU format.""" 2786 logger.debug('Combining HwU plots.') 2787 2788 command = [] 2789 command.append(pjoin(self.me_dir, 'bin', 'internal','histograms.py')) 2790 for job in jobs: 2791 if job['dirname'].endswith('.HwU'): 2792 command.append(job['dirname']) 2793 else: 2794 command.append(pjoin(job['dirname'],'MADatNLO.HwU')) 2795 command.append("--out="+out) 2796 command.append("--gnuplot") 2797 command.append("--band=[]") 2798 command.append("--lhapdf-config="+self.options['lhapdf']) 2799 if normalisation: 2800 command.append("--multiply="+(','.join([str(n) for n in normalisation]))) 2801 command.append("--sum") 2802 command.append("--keep_all_weights") 2803 command.append("--no_open") 2804 2805 p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=self.me_dir) 2806 2807 while p.poll() is None: 2808 line = p.stdout.readline() 2809 if any(t in line for t in ['INFO:','WARNING:','CRITICAL:','ERROR:','KEEP:']): 2810 print line[:-1] 2811 elif __debug__ and line: 2812 logger.debug(line[:-1])
2813 2814
2815 - def applgrid_combine(self,cross,error,jobs):
2816 """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories""" 2817 logger.debug('Combining APPLgrids \n') 2818 applcomb=pjoin(self.options['applgrid'].rstrip('applgrid-config'), 2819 'applgrid-combine') 2820 all_jobs=[] 2821 for job in jobs: 2822 all_jobs.append(job['dirname']) 2823 ngrids=len(all_jobs) 2824 nobs =len([name for name in os.listdir(all_jobs[0]) if name.endswith("_out.root")]) 2825 for obs in range(0,nobs): 2826 gdir = [pjoin(job,"grid_obs_"+str(obs)+"_out.root") for job in all_jobs] 2827 # combine APPLgrids from different channels for observable 'obs' 2828 if self.run_card["iappl"] == 1: 2829 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",self.run_name, 2830 "aMCfast_obs_"+str(obs)+"_starting_grid.root"), '--optimise']+ gdir) 2831 elif self.run_card["iappl"] == 2: 2832 unc2_inv=pow(cross/error,2) 2833 unc2_inv_ngrids=pow(cross/error,2)*ngrids 2834 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events", 2835 self.run_name,"aMCfast_obs_"+str(obs)+".root"),'-s', 2836 str(unc2_inv),'--weight',str(unc2_inv)]+ gdir) 2837 for job in all_jobs: 2838 os.remove(pjoin(job,"grid_obs_"+str(obs)+"_in.root")) 2839 else: 2840 raise aMCatNLOError('iappl parameter can only be 0, 1 or 2') 2841 # after combining, delete the original grids 2842 for ggdir in gdir: 2843 os.remove(ggdir)
2844 2845
    def applgrid_distribute(self,options,mode,p_dirs):
        """Distributes the APPLgrids ready to be filled by a second run of the code

        Copies the aMCfast_obs_*_starting_grid.root files of a previous run
        (the one named by options['appl_start_grid'], or the most recent one)
        into every <mode>_G* channel directory as grid_obs_<n>_in.root.
        Note: mutates options['appl_start_grid'] in place when guessing."""
        # if no appl_start_grid argument given, guess it from the time stamps
        # of the starting grid files
        if not('appl_start_grid' in options.keys() and options['appl_start_grid']):
            gfiles = misc.glob(pjoin('*', 'aMCfast_obs_0_starting_grid.root'),
                               pjoin(self.me_dir,'Events'))

            time_stamps={}
            for root_file in gfiles:
                time_stamps[root_file]=os.path.getmtime(root_file)
            # pick the newest grid file; its parent directory is the run name
            # NOTE(review): iterkeys is Python-2 only, and splitting on '/'
            # assumes POSIX path separators — confirm portability is not needed.
            options['appl_start_grid']= \
                max(time_stamps.iterkeys(), key=(lambda key:
                time_stamps[key])).split('/')[-2]
            logger.info('No --appl_start_grid option given. '+\
                        'Guessing that start grid from run "%s" should be used.' \
                        % options['appl_start_grid'])

        if 'appl_start_grid' in options.keys() and options['appl_start_grid']:
            self.appl_start_grid = options['appl_start_grid']
            start_grid_dir=pjoin(self.me_dir, 'Events', self.appl_start_grid)
            # check that this dir exists and at least one grid file is there
            if not os.path.exists(pjoin(start_grid_dir,
                                        'aMCfast_obs_0_starting_grid.root')):
                raise self.InvalidCmd('APPLgrid file not found: %s' % \
                        pjoin(start_grid_dir,'aMCfast_obs_0_starting_grid.root'))
            else:
                all_grids=[pjoin(start_grid_dir,name) for name in os.listdir( \
                    start_grid_dir) if name.endswith("_starting_grid.root")]
                nobs =len(all_grids)
                gstring=" ".join(all_grids)
        if not hasattr(self, 'appl_start_grid') or not self.appl_start_grid:
            raise self.InvalidCmd('No APPLgrid name currently defined.'+
                                  'Please provide this information.')
        #copy the grid to all relevant directories
        for pdir in p_dirs:
            # channel directories of the requested mode (e.g. all_G*, born_G*)
            g_dirs = [file for file in os.listdir(pjoin(self.me_dir,
                      "SubProcesses",pdir)) if file.startswith(mode+'_G') and
                      os.path.isdir(pjoin(self.me_dir,"SubProcesses",pdir, file))]
            for g_dir in g_dirs:
                for grid in all_grids:
                    # observable index is the third-to-last '_'-separated token
                    # of the file name aMCfast_obs_<n>_starting_grid.root
                    obs=grid.split('_')[-3]
                    files.cp(grid,pjoin(self.me_dir,"SubProcesses",pdir,g_dir,
                             'grid_obs_'+obs+'_in.root'))
2890 2891 2892 2893
2894 - def collect_log_files(self, jobs, integration_step):
2895 """collect the log files and put them in a single, html-friendly file 2896 inside the Events/run_.../ directory""" 2897 log_file = pjoin(self.me_dir, 'Events', self.run_name, 2898 'alllogs_%d.html' % integration_step) 2899 outfile = open(log_file, 'w') 2900 2901 content = '' 2902 content += '<HTML><BODY>\n<font face="courier" size=2>' 2903 for job in jobs: 2904 # put an anchor 2905 log=pjoin(job['dirname'],'log_MINT%s.txt' % integration_step) 2906 content += '<a name=%s></a>\n' % (os.path.dirname(log).replace( 2907 pjoin(self.me_dir,'SubProcesses'),'')) 2908 # and put some nice header 2909 content += '<font color="red">\n' 2910 content += '<br>LOG file for integration channel %s, %s <br>' % \ 2911 (os.path.dirname(log).replace(pjoin(self.me_dir, 2912 'SubProcesses'), ''), 2913 integration_step) 2914 content += '</font>\n' 2915 #then just flush the content of the small log inside the big log 2916 #the PRE tag prints everything verbatim 2917 with open(log) as l: 2918 content += '<PRE>\n' + l.read() + '\n</PRE>' 2919 content +='<br>\n' 2920 outfile.write(content) 2921 content='' 2922 2923 outfile.write('</font>\n</BODY></HTML>\n') 2924 outfile.close()
2925 2926
2927 - def finalise_run_FO(self,folder_name,jobs):
2928 """Combine the plots and put the res*.txt files in the Events/run.../ folder.""" 2929 # Copy the res_*.txt files to the Events/run* folder 2930 res_files = misc.glob('res_*.txt', pjoin(self.me_dir, 'SubProcesses')) 2931 for res_file in res_files: 2932 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 2933 # Collect the plots and put them in the Events/run* folder 2934 self.combine_plots_FO(folder_name,jobs) 2935 # If doing the applgrid-stuff, also combine those grids 2936 # and put those in the Events/run* folder 2937 if self.run_card['iappl'] != 0: 2938 cross=self.cross_sect_dict['xsect'] 2939 error=self.cross_sect_dict['errt'] 2940 self.applgrid_combine(cross,error,jobs)
2941 2942
2943 - def setup_cluster_or_multicore(self):
2944 """setup the number of cores for multicore, and the cluster-type for cluster runs""" 2945 if self.cluster_mode == 1: 2946 cluster_name = self.options['cluster_type'] 2947 try: 2948 self.cluster = cluster.from_name[cluster_name](**self.options) 2949 except KeyError: 2950 # Check if a plugin define this type of cluster 2951 # check for PLUGIN format 2952 cluster_class = misc.from_plugin_import(self.plugin_path, 2953 'new_cluster', cluster_name, 2954 info = 'cluster handling will be done with PLUGIN: %{plug}s' ) 2955 if cluster_class: 2956 self.cluster = cluster_class(**self.options) 2957 2958 if self.cluster_mode == 2: 2959 try: 2960 import multiprocessing 2961 if not self.nb_core: 2962 try: 2963 self.nb_core = int(self.options['nb_core']) 2964 except TypeError: 2965 self.nb_core = multiprocessing.cpu_count() 2966 logger.info('Using %d cores' % self.nb_core) 2967 except ImportError: 2968 self.nb_core = 1 2969 logger.warning('Impossible to detect the number of cores => Using One.\n'+ 2970 'Use set nb_core X in order to set this number and be able to'+ 2971 'run in multicore.') 2972 2973 self.cluster = cluster.MultiCore(**self.options)
2974 2975
2976 - def clean_previous_results(self,options,p_dirs,folder_name):
2977 """Clean previous results. 2978 o. If doing only the reweighting step, do not delete anything and return directlty. 2979 o. Always remove all the G*_* files (from split event generation). 2980 o. Remove the G* (or born_G* or all_G*) only when NOT doing only_generation or reweight_only.""" 2981 if options['reweightonly']: 2982 return 2983 if not options['only_generation']: 2984 self.update_status('Cleaning previous results', level=None) 2985 for dir in p_dirs: 2986 #find old folders to be removed 2987 for obj in folder_name: 2988 # list all the G* (or all_G* or born_G*) directories 2989 to_rm = [file for file in \ 2990 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \ 2991 if file.startswith(obj[:-1]) and \ 2992 (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \ 2993 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))] 2994 # list all the G*_* directories (from split event generation) 2995 to_always_rm = [file for file in \ 2996 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \ 2997 if file.startswith(obj[:-1]) and 2998 '_' in file and not '_G' in file and \ 2999 (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \ 3000 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))] 3001 3002 if not options['only_generation']: 3003 to_always_rm.extend(to_rm) 3004 if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')): 3005 to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')) 3006 files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm]) 3007 return
3008 3009
    def print_summary(self, options, step, mode, scale_pdf_info=[], done=True):
        """print a summary of the results contained in self.cross_sect_dict.
        step corresponds to the mintMC step, if =2 (i.e. after event generation)
        some additional infos are printed

        NOTE(review): scale_pdf_info uses a mutable default argument; it is
        only read here, never mutated, so the shared default is harmless.
        Also note 'status'/'computed' are only bound for the known modes: an
        unexpected 'mode' value would hit the explicit raise further down, but
        only after the message-building section — confirm callers never pass
        other modes at step != 2."""
        # find process name
        proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n')
        process = ''
        for line in proc_card_lines:
            if line.startswith('generate') or line.startswith('add process'):
                # accumulate all processes as a ';'-separated list
                process = process+(line.replace('generate ', '')).replace('add process ','')+' ; '
        # human-readable beam labels keyed by the run card's lpp codes
        lpp = {0:'l', 1:'p', -1:'pbar', 2:'elastic photon from p', 3:'elastic photon from e'}
        if self.ninitial == 1:
            # decay process: no collider line; [:-3] strips the trailing ' ; '
            proc_info = '\n Process %s' % process[:-3]
        else:
            proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \
                (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']],
                 self.run_card['ebeam1'], self.run_card['ebeam2'])

        # choose units/wording: 1->1 processes report widths, otherwise xsecs
        if self.ninitial == 1:
            self.cross_sect_dict['unit']='GeV'
            self.cross_sect_dict['xsec_string']='(Partial) decay width'
            self.cross_sect_dict['axsec_string']='(Partial) abs(decay width)'
        else:
            self.cross_sect_dict['unit']='pb'
            self.cross_sect_dict['xsec_string']='Total cross section'
            self.cross_sect_dict['axsec_string']='Total abs(cross section)'
        if self.run_card['event_norm'].lower()=='bias':
            self.cross_sect_dict['xsec_string']+=', incl. bias (DO NOT USE)'

        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            status = ['Determining the number of unweighted events per channel',
                      'Updating the number of unweighted events per channel',
                      'Summary:']
            computed='(computed from LHE events)'
        elif mode in ['NLO', 'LO']:
            status = ['Results after grid setup:','Current results:',
                      'Final results and run summary:']
            computed='(computed from histogram information)'

        # intermediate printout for event-generation modes (steps 0 and 1)
        if step != 2 and mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            message = status[step] + '\n\n Intermediate results:' + \
                ('\n Random seed: %(randinit)d' + \
                 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' + \
                 '\n %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \
                 % self.cross_sect_dict
        # intermediate printout for fixed-order modes while still running
        elif mode in ['NLO','LO'] and not done:
            if step == 0:
                message = '\n ' + status[0] + \
                          '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                          self.cross_sect_dict
            else:
                message = '\n ' + status[1] + \
                          '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                          self.cross_sect_dict

        else:
            # final summary, including scale/PDF variations when available
            message = '\n --------------------------------------------------------------'
            message = message + \
                      '\n ' + status[2] + proc_info
            if mode not in ['LO', 'NLO']:
                message = message + \
                          '\n Number of events generated: %s' % self.run_card['nevents']
            message = message + \
                      '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \
                      self.cross_sect_dict
            message = message + \
                      '\n --------------------------------------------------------------'
            # variations are statistically meaningless for very few events
            if scale_pdf_info and (self.run_card['nevents']>=10000 or mode in ['NLO', 'LO']):
                if scale_pdf_info[0]:
                    # scale uncertainties
                    message = message + '\n Scale variation %s:' % computed
                    for s in scale_pdf_info[0]:
                        if s['unc']:
                            if self.run_card['ickkw'] != -1:
                                message = message + \
                                    ('\n Dynamical_scale_choice %(label)i (envelope of %(size)s values): '\
                                     '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % s
                            else:
                                message = message + \
                                    ('\n Soft and hard scale dependence (added in quadrature): '\
                                     '\n %(cen)8.3e pb +%(max_q)0.1f%% -%(min_q)0.1f%%') % s

                        else:
                            message = message + \
                                ('\n Dynamical_scale_choice %(label)i: '\
                                 '\n %(cen)8.3e pb') % s

                if scale_pdf_info[1]:
                    message = message + '\n PDF variation %s:' % computed
                    for p in scale_pdf_info[1]:
                        if p['unc']=='none':
                            message = message + \
                                ('\n %(name)s (central value only): '\
                                 '\n %(cen)8.3e pb') % p

                        elif p['unc']=='unknown':
                            message = message + \
                                ('\n %(name)s (%(size)s members; combination method unknown): '\
                                 '\n %(cen)8.3e pb') % p
                        else:
                            message = message + \
                                ('\n %(name)s (%(size)s members; using %(unc)s method): '\
                                 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % p
                # pdf uncertainties
                message = message + \
                          '\n --------------------------------------------------------------'


        # intermediate summaries stop here; only the final one gets statistics
        if (mode in ['NLO', 'LO'] and not done) or \
           (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2):
            logger.info(message+'\n')
            return

        # Some advanced general statistics are shown in the debug message at the
        # end of the run
        # Make sure it never stops a run
        # Gather some basic statistics for the run and extracted from the log files.
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            log_GV_files = misc.glob(pjoin('P*','G*','log_MINT*.txt'),
                                     pjoin(self.me_dir, 'SubProcesses'))
            all_log_files = log_GV_files
        elif mode == 'NLO':
            log_GV_files = misc.glob(pjoin('P*','all_G*','log_MINT*.txt'),
                                     pjoin(self.me_dir, 'SubProcesses'))
            all_log_files = log_GV_files

        elif mode == 'LO':
            # LO has no virtuals, hence no MadLoop (GV) logs
            log_GV_files = ''
            all_log_files = misc.glob(pjoin('P*','born_G*','log_MINT*.txt'),
                                      pjoin(self.me_dir, 'SubProcesses'))
        else:
            # NOTE(review): Python-2-only raise syntax
            raise aMCatNLOError, 'Running mode %s not supported.'%mode

        try:
            message, debug_msg = \
                self.compile_advanced_stats(log_GV_files, all_log_files, message)
        except Exception as e:
            # statistics are best-effort: report the failure, never abort
            debug_msg = 'Advanced statistics collection failed with error "%s"\n'%str(e)
            err_string = StringIO.StringIO()
            traceback.print_exc(limit=4, file=err_string)
            debug_msg += 'Please report this backtrace to a MadGraph developer:\n%s'\
                         %err_string.getvalue()

        logger.debug(debug_msg+'\n')
        logger.info(message+'\n')

        # Now copy relevant information in the Events/Run_<xxx> directory
        evt_path = pjoin(self.me_dir, 'Events', self.run_name)
        open(pjoin(evt_path, 'summary.txt'),'w').write(message+'\n')
        open(pjoin(evt_path, '.full_summary.txt'),
             'w').write(message+'\n\n'+debug_msg+'\n')

        self.archive_files(evt_path,mode)
3163
3164 - def archive_files(self, evt_path, mode):
3165 """ Copies in the Events/Run_<xxx> directory relevant files characterizing 3166 the run.""" 3167 3168 files_to_arxiv = [pjoin('Cards','param_card.dat'), 3169 pjoin('Cards','MadLoopParams.dat'), 3170 pjoin('Cards','FKS_params.dat'), 3171 pjoin('Cards','run_card.dat'), 3172 pjoin('Subprocesses','setscales.f'), 3173 pjoin('Subprocesses','cuts.f')] 3174 3175 if mode in ['NLO', 'LO']: 3176 files_to_arxiv.append(pjoin('Cards','FO_analyse_card.dat')) 3177 3178 if not os.path.exists(pjoin(evt_path,'RunMaterial')): 3179 os.mkdir(pjoin(evt_path,'RunMaterial')) 3180 3181 for path in files_to_arxiv: 3182 if os.path.isfile(pjoin(self.me_dir,path)): 3183 files.cp(pjoin(self.me_dir,path),pjoin(evt_path,'RunMaterial')) 3184 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],cwd=evt_path) 3185 shutil.rmtree(pjoin(evt_path,'RunMaterial'))
3186
3187 - def compile_advanced_stats(self,log_GV_files,all_log_files,message):
3188 """ This functions goes through the log files given in arguments and 3189 compiles statistics about MadLoop stability, virtual integration 3190 optimization and detection of potential error messages into a nice 3191 debug message to printed at the end of the run """ 3192 3193 def safe_float(str_float): 3194 try: 3195 return float(str_float) 3196 except ValueError: 3197 logger.debug('Could not convert the following float during'+ 3198 ' advanced statistics printout: %s'%str(str_float)) 3199 return -1.0
3200 3201 3202 # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]} 3203 # > Errors is a list of tuples with this format (log_file,nErrors) 3204 stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}} 3205 mint_search = re.compile(r"MINT(?P<ID>\d*).txt") 3206 3207 # ================================== 3208 # == MadLoop stability statistics == 3209 # ================================== 3210 3211 # Recuperate the fraction of unstable PS points found in the runs for 3212 # the virtuals 3213 UPS_stat_finder = re.compile( 3214 r"Satistics from MadLoop:.*"+\ 3215 r"Total points tried\:\s+(?P<ntot>\d+).*"+\ 3216 r"Stability unknown\:\s+(?P<nsun>\d+).*"+\ 3217 r"Stable PS point\:\s+(?P<nsps>\d+).*"+\ 3218 r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*"+\ 3219 r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*"+\ 3220 r"Double precision used\:\s+(?P<nddp>\d+).*"+\ 3221 r"Quadruple precision used\:\s+(?P<nqdp>\d+).*"+\ 3222 r"Initialization phase\-space points\:\s+(?P<nini>\d+).*"+\ 3223 r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*"+\ 3224 r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*",re.DOTALL) 3225 3226 unit_code_meaning = { 0 : 'Not identified (CTModeRun != -1)', 3227 1 : 'CutTools (double precision)', 3228 2 : 'PJFry++', 3229 3 : 'IREGI', 3230 4 : 'Golem95', 3231 5 : 'Samurai', 3232 6 : 'Ninja (double precision)', 3233 7 : 'COLLIER', 3234 8 : 'Ninja (quadruple precision)', 3235 9 : 'CutTools (quadruple precision)'} 3236 RetUnit_finder =re.compile( 3237 r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)") 3238 #Unit 3239 3240 for gv_log in log_GV_files: 3241 channel_name = '/'.join(gv_log.split('/')[-5:-1]) 3242 log=open(gv_log,'r').read() 3243 UPS_stats = re.search(UPS_stat_finder,log) 3244 for retunit_stats in re.finditer(RetUnit_finder, log): 3245 if channel_name not in stats['UPS'].keys(): 3246 stats['UPS'][channel_name] = [0]*10+[[0]*10] 3247 
stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \ 3248 += int(retunit_stats.group('n_occurences')) 3249 if not UPS_stats is None: 3250 try: 3251 stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot')) 3252 stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun')) 3253 stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps')) 3254 stats['UPS'][channel_name][3] += int(UPS_stats.group('nups')) 3255 stats['UPS'][channel_name][4] += int(UPS_stats.group('neps')) 3256 stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp')) 3257 stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp')) 3258 stats['UPS'][channel_name][7] += int(UPS_stats.group('nini')) 3259 stats['UPS'][channel_name][8] += int(UPS_stats.group('n100')) 3260 stats['UPS'][channel_name][9] += int(UPS_stats.group('n10')) 3261 except KeyError: 3262 stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')), 3263 int(UPS_stats.group('nsun')),int(UPS_stats.group('nsps')), 3264 int(UPS_stats.group('nups')),int(UPS_stats.group('neps')), 3265 int(UPS_stats.group('nddp')),int(UPS_stats.group('nqdp')), 3266 int(UPS_stats.group('nini')),int(UPS_stats.group('n100')), 3267 int(UPS_stats.group('n10')),[0]*10] 3268 debug_msg = "" 3269 if len(stats['UPS'].keys())>0: 3270 nTotPS = sum([chan[0] for chan in stats['UPS'].values()],0) 3271 nTotsun = sum([chan[1] for chan in stats['UPS'].values()],0) 3272 nTotsps = sum([chan[2] for chan in stats['UPS'].values()],0) 3273 nTotups = sum([chan[3] for chan in stats['UPS'].values()],0) 3274 nToteps = sum([chan[4] for chan in stats['UPS'].values()],0) 3275 nTotddp = sum([chan[5] for chan in stats['UPS'].values()],0) 3276 nTotqdp = sum([chan[6] for chan in stats['UPS'].values()],0) 3277 nTotini = sum([chan[7] for chan in stats['UPS'].values()],0) 3278 nTot100 = sum([chan[8] for chan in stats['UPS'].values()],0) 3279 nTot10 = sum([chan[9] for chan in stats['UPS'].values()],0) 3280 nTot1 = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \ 3281 
for i in range(10)] 3282 UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \ 3283 safe_float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()] 3284 maxUPS = max(UPSfracs, key = lambda w: w[1]) 3285 3286 tmpStr = "" 3287 tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d'%nTotPS 3288 tmpStr += '\n Stability unknown: %d'%nTotsun 3289 tmpStr += '\n Stable PS point: %d'%nTotsps 3290 tmpStr += '\n Unstable PS point (and rescued): %d'%nTotups 3291 tmpStr += '\n Unstable PS point (and not rescued): %d'%nToteps 3292 tmpStr += '\n Only double precision used: %d'%nTotddp 3293 tmpStr += '\n Quadruple precision used: %d'%nTotqdp 3294 tmpStr += '\n Initialization phase-space points: %d'%nTotini 3295 tmpStr += '\n Reduction methods used:' 3296 red_methods = [(unit_code_meaning[i],nTot1[i]) for i in \ 3297 unit_code_meaning.keys() if nTot1[i]>0] 3298 for method, n in sorted(red_methods, key= lambda l: l[1], reverse=True): 3299 tmpStr += '\n > %s%s%s'%(method,' '*(33-len(method)),n) 3300 if nTot100 != 0: 3301 debug_msg += '\n Unknown return code (100): %d'%nTot100 3302 if nTot10 != 0: 3303 debug_msg += '\n Unknown return code (10): %d'%nTot10 3304 nUnknownUnit = sum(nTot1[u] for u in range(10) if u \ 3305 not in unit_code_meaning.keys()) 3306 if nUnknownUnit != 0: 3307 debug_msg += '\n Unknown return code (1): %d'\ 3308 %nUnknownUnit 3309 3310 if maxUPS[1]>0.001: 3311 message += tmpStr 3312 message += '\n Total number of unstable PS point detected:'+\ 3313 ' %d (%4.2f%%)'%(nToteps,safe_float(100*nToteps)/nTotPS) 3314 message += '\n Maximum fraction of UPS points in '+\ 3315 'channel %s (%4.2f%%)'%maxUPS 3316 message += '\n Please report this to the authors while '+\ 3317 'providing the file' 3318 message += '\n %s'%str(pjoin(os.path.dirname(self.me_dir), 3319 maxUPS[0],'UPS.log')) 3320 else: 3321 debug_msg += tmpStr 3322 3323 3324 # ==================================================== 3325 # == aMC@NLO virtual integration optimization stats == 3326 # 
==================================================== 3327 3328 virt_tricks_finder = re.compile( 3329 r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)"+\ 3330 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n"+\ 3331 r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)"+\ 3332 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)") 3333 3334 virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*"+\ 3335 "-?(?P<v_frac>[\d\+-Eed\.]*)\s*-?(?P<v_average>[\d\+-Eed\.]*)") 3336 3337 channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)") 3338 3339 channel_contr_list = {} 3340 for gv_log in log_GV_files: 3341 logfile=open(gv_log,'r') 3342 log = logfile.read() 3343 logfile.close() 3344 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 3345 vf_stats = None 3346 for vf_stats in re.finditer(virt_frac_finder, log): 3347 pass 3348 if not vf_stats is None: 3349 v_frac = safe_float(vf_stats.group('v_frac')) 3350 v_average = safe_float(vf_stats.group('v_average')) 3351 try: 3352 if v_frac < stats['virt_stats']['v_frac_min'][0]: 3353 stats['virt_stats']['v_frac_min']=(v_frac,channel_name) 3354 if v_frac > stats['virt_stats']['v_frac_max'][0]: 3355 stats['virt_stats']['v_frac_max']=(v_frac,channel_name) 3356 stats['virt_stats']['v_frac_avg'][0] += v_frac 3357 stats['virt_stats']['v_frac_avg'][1] += 1 3358 except KeyError: 3359 stats['virt_stats']['v_frac_min']=[v_frac,channel_name] 3360 stats['virt_stats']['v_frac_max']=[v_frac,channel_name] 3361 stats['virt_stats']['v_frac_avg']=[v_frac,1] 3362 3363 3364 ccontr_stats = None 3365 for ccontr_stats in re.finditer(channel_contr_finder, log): 3366 pass 3367 if not ccontr_stats is None: 3368 contrib = safe_float(ccontr_stats.group('v_contr')) 3369 try: 3370 if contrib>channel_contr_list[channel_name]: 3371 channel_contr_list[channel_name]=contrib 3372 except KeyError: 3373 
channel_contr_list[channel_name]=contrib 3374 3375 3376 # Now build the list of relevant virt log files to look for the maxima 3377 # of virt fractions and such. 3378 average_contrib = 0.0 3379 for value in channel_contr_list.values(): 3380 average_contrib += value 3381 if len(channel_contr_list.values()) !=0: 3382 average_contrib = average_contrib / len(channel_contr_list.values()) 3383 3384 relevant_log_GV_files = [] 3385 excluded_channels = set([]) 3386 all_channels = set([]) 3387 for log_file in log_GV_files: 3388 channel_name = '/'.join(log_file.split('/')[-3:-1]) 3389 all_channels.add(channel_name) 3390 try: 3391 if channel_contr_list[channel_name] > (0.1*average_contrib): 3392 relevant_log_GV_files.append(log_file) 3393 else: 3394 excluded_channels.add(channel_name) 3395 except KeyError: 3396 relevant_log_GV_files.append(log_file) 3397 3398 # Now we want to use the latest occurence of accumulated result in the log file 3399 for gv_log in relevant_log_GV_files: 3400 logfile=open(gv_log,'r') 3401 log = logfile.read() 3402 logfile.close() 3403 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 3404 3405 vt_stats = None 3406 for vt_stats in re.finditer(virt_tricks_finder, log): 3407 pass 3408 if not vt_stats is None: 3409 vt_stats_group = vt_stats.groupdict() 3410 v_ratio = safe_float(vt_stats.group('v_ratio')) 3411 v_ratio_err = safe_float(vt_stats.group('v_ratio_err')) 3412 v_contr = safe_float(vt_stats.group('v_abs_contr')) 3413 v_contr_err = safe_float(vt_stats.group('v_abs_contr_err')) 3414 try: 3415 if v_ratio < stats['virt_stats']['v_ratio_min'][0]: 3416 stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name) 3417 if v_ratio > stats['virt_stats']['v_ratio_max'][0]: 3418 stats['virt_stats']['v_ratio_max']=(v_ratio,channel_name) 3419 if v_ratio < stats['virt_stats']['v_ratio_err_min'][0]: 3420 stats['virt_stats']['v_ratio_err_min']=(v_ratio_err,channel_name) 3421 if v_ratio > stats['virt_stats']['v_ratio_err_max'][0]: 3422 
stats['virt_stats']['v_ratio_err_max']=(v_ratio_err,channel_name) 3423 if v_contr < stats['virt_stats']['v_contr_min'][0]: 3424 stats['virt_stats']['v_contr_min']=(v_contr,channel_name) 3425 if v_contr > stats['virt_stats']['v_contr_max'][0]: 3426 stats['virt_stats']['v_contr_max']=(v_contr,channel_name) 3427 if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]: 3428 stats['virt_stats']['v_contr_err_min']=(v_contr_err,channel_name) 3429 if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]: 3430 stats['virt_stats']['v_contr_err_max']=(v_contr_err,channel_name) 3431 except KeyError: 3432 stats['virt_stats']['v_ratio_min']=[v_ratio,channel_name] 3433 stats['virt_stats']['v_ratio_max']=[v_ratio,channel_name] 3434 stats['virt_stats']['v_ratio_err_min']=[v_ratio_err,channel_name] 3435 stats['virt_stats']['v_ratio_err_max']=[v_ratio_err,channel_name] 3436 stats['virt_stats']['v_contr_min']=[v_contr,channel_name] 3437 stats['virt_stats']['v_contr_max']=[v_contr,channel_name] 3438 stats['virt_stats']['v_contr_err_min']=[v_contr_err,channel_name] 3439 stats['virt_stats']['v_contr_err_max']=[v_contr_err,channel_name] 3440 3441 vf_stats = None 3442 for vf_stats in re.finditer(virt_frac_finder, log): 3443 pass 3444 if not vf_stats is None: 3445 v_frac = safe_float(vf_stats.group('v_frac')) 3446 v_average = safe_float(vf_stats.group('v_average')) 3447 try: 3448 if v_average < stats['virt_stats']['v_average_min'][0]: 3449 stats['virt_stats']['v_average_min']=(v_average,channel_name) 3450 if v_average > stats['virt_stats']['v_average_max'][0]: 3451 stats['virt_stats']['v_average_max']=(v_average,channel_name) 3452 stats['virt_stats']['v_average_avg'][0] += v_average 3453 stats['virt_stats']['v_average_avg'][1] += 1 3454 except KeyError: 3455 stats['virt_stats']['v_average_min']=[v_average,channel_name] 3456 stats['virt_stats']['v_average_max']=[v_average,channel_name] 3457 stats['virt_stats']['v_average_avg']=[v_average,1] 3458 3459 try: 3460 debug_msg += '\n\n 
Statistics on virtual integration optimization : ' 3461 3462 debug_msg += '\n Maximum virt fraction computed %.3f (%s)'\ 3463 %tuple(stats['virt_stats']['v_frac_max']) 3464 debug_msg += '\n Minimum virt fraction computed %.3f (%s)'\ 3465 %tuple(stats['virt_stats']['v_frac_min']) 3466 debug_msg += '\n Average virt fraction computed %.3f'\ 3467 %safe_float(stats['virt_stats']['v_frac_avg'][0]/safe_float(stats['virt_stats']['v_frac_avg'][1])) 3468 debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)'%\ 3469 (len(excluded_channels),len(all_channels)) 3470 debug_msg += '\n Maximum virt ratio used %.2f (%s)'\ 3471 %tuple(stats['virt_stats']['v_average_max']) 3472 debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)'\ 3473 %tuple(stats['virt_stats']['v_ratio_max']) 3474 tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)'\ 3475 %tuple(stats['virt_stats']['v_ratio_err_max']) 3476 debug_msg += tmpStr 3477 # After all it was decided that it is better not to alarm the user unecessarily 3478 # with such printout of the statistics. 3479 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0 or \ 3480 # stats['virt_stats']['v_ratio_err_max'][0]>100.0: 3481 # message += "\n Suspiciously large MC error in :" 3482 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0: 3483 # message += tmpStr 3484 3485 tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)'\ 3486 %tuple(stats['virt_stats']['v_contr_err_max']) 3487 debug_msg += tmpStr 3488 # if stats['virt_stats']['v_contr_err_max'][0]>100.0: 3489 # message += tmpStr 3490 3491 3492 except KeyError: 3493 debug_msg += '\n Could not find statistics on the integration optimization. 
' 3494 3495 # ======================================= 3496 # == aMC@NLO timing profile statistics == 3497 # ======================================= 3498 3499 timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*"+\ 3500 "(?P<time>[\d\+-Eed\.]*)\s*") 3501 3502 for logf in log_GV_files: 3503 logfile=open(logf,'r') 3504 log = logfile.read() 3505 logfile.close() 3506 channel_name = '/'.join(logf.split('/')[-3:-1]) 3507 mint = re.search(mint_search,logf) 3508 if not mint is None: 3509 channel_name = channel_name+' [step %s]'%mint.group('ID') 3510 3511 for time_stats in re.finditer(timing_stat_finder, log): 3512 try: 3513 stats['timings'][time_stats.group('name')][channel_name]+=\ 3514 safe_float(time_stats.group('time')) 3515 except KeyError: 3516 if time_stats.group('name') not in stats['timings'].keys(): 3517 stats['timings'][time_stats.group('name')] = {} 3518 stats['timings'][time_stats.group('name')][channel_name]=\ 3519 safe_float(time_stats.group('time')) 3520 3521 # useful inline function 3522 Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs))) 3523 try: 3524 totTimeList = [(time, chan) for chan, time in \ 3525 stats['timings']['Total'].items()] 3526 except KeyError: 3527 totTimeList = [] 3528 3529 totTimeList.sort() 3530 if len(totTimeList)>0: 3531 debug_msg += '\n\n Inclusive timing profile :' 3532 debug_msg += '\n Overall slowest channel %s (%s)'%\ 3533 (Tstr(totTimeList[-1][0]),totTimeList[-1][1]) 3534 debug_msg += '\n Average channel running time %s'%\ 3535 Tstr(sum([el[0] for el in totTimeList])/len(totTimeList)) 3536 debug_msg += '\n Aggregated total running time %s'%\ 3537 Tstr(sum([el[0] for el in totTimeList])) 3538 else: 3539 debug_msg += '\n\n Inclusive timing profile non available.' 
3540 3541 sorted_keys = sorted(stats['timings'].keys(), key= lambda stat: \ 3542 sum(stats['timings'][stat].values()), reverse=True) 3543 for name in sorted_keys: 3544 if name=='Total': 3545 continue 3546 if sum(stats['timings'][name].values())<=0.0: 3547 debug_msg += '\n Zero time record for %s.'%name 3548 continue 3549 try: 3550 TimeList = [((100.0*time/stats['timings']['Total'][chan]), 3551 chan) for chan, time in stats['timings'][name].items()] 3552 except KeyError, ZeroDivisionError: 3553 debug_msg += '\n\n Timing profile for %s unavailable.'%name 3554 continue 3555 TimeList.sort() 3556 debug_msg += '\n Timing profile for <%s> :'%name 3557 try: 3558 debug_msg += '\n Overall fraction of time %.3f %%'%\ 3559 safe_float((100.0*(sum(stats['timings'][name].values())/ 3560 sum(stats['timings']['Total'].values())))) 3561 except KeyError, ZeroDivisionError: 3562 debug_msg += '\n Overall fraction of time unavailable.' 3563 debug_msg += '\n Largest fraction of time %.3f %% (%s)'%\ 3564 (TimeList[-1][0],TimeList[-1][1]) 3565 debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\ 3566 (TimeList[0][0],TimeList[0][1]) 3567 3568 # ============================= 3569 # == log file eror detection == 3570 # ============================= 3571 3572 # Find the number of potential errors found in all log files 3573 # This re is a simple match on a case-insensitve 'error' but there is 3574 # also some veto added for excluding the sentence 3575 # "See Section 6 of paper for error calculation." 3576 # which appear in the header of lhapdf in the logs. 
3577 err_finder = re.compile(\ 3578 r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE) 3579 for log in all_log_files: 3580 logfile=open(log,'r') 3581 nErrors = len(re.findall(err_finder, logfile.read())) 3582 logfile.close() 3583 if nErrors != 0: 3584 stats['Errors'].append((str(log),nErrors)) 3585 3586 nErrors = sum([err[1] for err in stats['Errors']],0) 3587 if nErrors != 0: 3588 debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\ 3589 %(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\ 3590 'found in the following log file%s:'%('s' if \ 3591 len(stats['Errors'])>1 else '') 3592 for error in stats['Errors'][:3]: 3593 log_name = '/'.join(error[0].split('/')[-5:]) 3594 debug_msg += '\n > %d error%s in %s'%\ 3595 (error[1],'s' if error[1]>1 else '',log_name) 3596 if len(stats['Errors'])>3: 3597 nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0) 3598 nRemainingLogs = len(stats['Errors'])-3 3599 debug_msg += '\n And another %d error%s in %d other log file%s'%\ 3600 (nRemainingErrors, 's' if nRemainingErrors>1 else '', 3601 nRemainingLogs, 's ' if nRemainingLogs>1 else '') 3602 3603 return message, debug_msg 3604 3605
3606 - def reweight_and_collect_events(self, options, mode, nevents, event_norm):
3607 """this function calls the reweighting routines and creates the event file in the 3608 Event dir. Return the name of the event file created 3609 """ 3610 scale_pdf_info=[] 3611 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 3612 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1\ 3613 or self.run_card['store_rwgt_info']: 3614 scale_pdf_info = self.run_reweight(options['reweightonly']) 3615 self.update_status('Collecting events', level='parton', update_results=True) 3616 misc.compile(['collect_events'], 3617 cwd=pjoin(self.me_dir, 'SubProcesses'), nocompile=options['nocompile']) 3618 p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'), 3619 stdin=subprocess.PIPE, 3620 stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w')) 3621 if event_norm.lower() == 'sum': 3622 p.communicate(input = '1\n') 3623 elif event_norm.lower() == 'unity': 3624 p.communicate(input = '3\n') 3625 elif event_norm.lower() == 'bias': 3626 p.communicate(input = '0\n') 3627 else: 3628 p.communicate(input = '2\n') 3629 3630 #get filename from collect events 3631 filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1] 3632 3633 if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)): 3634 raise aMCatNLOError('An error occurred during event generation. ' + \ 3635 'The event file has not been created. 
Check collect_events.log') 3636 evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 3637 misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file) 3638 if not options['reweightonly']: 3639 self.print_summary(options, 2, mode, scale_pdf_info) 3640 res_files = misc.glob('res*.txt', pjoin(self.me_dir, 'SubProcesses')) 3641 for res_file in res_files: 3642 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 3643 3644 logger.info('The %s file has been generated.\n' % (evt_file)) 3645 self.results.add_detail('nb_event', nevents) 3646 self.update_status('Events generated', level='parton', update_results=True) 3647 return evt_file[:-3]
3648 3649
    def run_mcatnlo(self, evt_file, options):
        """Shower the generated parton-level event file with the MCatNLO
        machinery, producing showered events (HEP/HEPMC) or histogram files
        (TopDrawer/HwU), depending on the shower_card settings.

        evt_file: path to the (possibly gzipped) parton-level LHE file.
        options: run options dict; only 'nocompile' is read here.

        Side effects: compiles and runs the shower in a fresh
        MCatNLO/RUN_<shower>_<n> directory, moves the output back into the
        Events/<run_name> directory, gzips the input event file, and updates
        the run status.
        """
        logger.info('Preparing MCatNLO run')
        # best-effort: evt_file may already be uncompressed
        try:
            misc.gunzip(evt_file)
        except Exception:
            pass

        self.banner = banner_mod.Banner(evt_file)
        shower = self.banner.get_detail('run_card', 'parton_shower').upper()

        #check that the number of split event files divides the number of
        # events, otherwise set it to 1
        if int(self.banner.get_detail('run_card', 'nevents') / \
                self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \
                != self.banner.get_detail('run_card', 'nevents'):
            logger.warning(\
                'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \
                'Setting it to 1.')
            self.shower_card['nsplit_jobs'] = 1

        # don't split jobs if the user asks to shower only a part of the events
        if self.shower_card['nevents'] > 0 and \
           self.shower_card['nevents'] < self.banner.get_detail('run_card', 'nevents') and \
           self.shower_card['nsplit_jobs'] != 1:
            logger.warning(\
                'Only a part of the events will be showered.\n' + \
                'Setting nsplit_jobs in the shower_card to 1.')
            self.shower_card['nsplit_jobs'] = 1

        self.banner_to_mcatnlo(evt_file)

        # if fastjet has to be linked (in extralibs) then
        # add lib /include dirs for fastjet if fastjet-config is present on the
        # system, otherwise add fjcore to the files to combine
        if 'fastjet' in self.shower_card['extralibs']:
            #first, check that stdc++ is also linked
            if not 'stdc++' in self.shower_card['extralibs']:
                logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS')
                self.shower_card['extralibs'] += ' stdc++'
            # then check if options[fastjet] corresponds to a valid fj installation
            try:
                #this is for a complete fj installation
                p = subprocess.Popen([self.options['fastjet'], '--prefix'], \
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                output, error = p.communicate()
                #remove the line break from output (last character)
                output = output[:-1]
                # add lib/include paths
                if not pjoin(output, 'lib') in self.shower_card['extrapaths']:
                    logger.warning('Linking FastJet: updating EXTRAPATHS')
                    self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib')
                if not pjoin(output, 'include') in self.shower_card['includepaths']:
                    logger.warning('Linking FastJet: updating INCLUDEPATHS')
                    self.shower_card['includepaths'] += ' ' + pjoin(output, 'include')
                # to be changed in the fortran wrapper
                include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ'
                namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ'
            except Exception:
                logger.warning('Linking FastJet: using fjcore')
                # this is for FJcore, so no FJ library has to be linked
                self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '')
                if not 'fjcore.o' in self.shower_card['analyse']:
                    self.shower_card['analyse'] += ' fjcore.o'
                # to be changed in the fortran wrapper
                include_line = '#include "fjcore.hh"//INCLUDE_FJ'
                namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ'
            # change the fortran wrapper with the correct namespaces/include
            # NOTE(review): list.index(line) replaces the FIRST line equal to
            # `line`; fine if the marker lines are unique in the wrapper file
            fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n')
            for line in fjwrapper_lines:
                if '//INCLUDE_FJ' in line:
                    fjwrapper_lines[fjwrapper_lines.index(line)] = include_line
                if '//NAMESPACE_FJ' in line:
                    fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line
            with open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w') as fsock:
                fsock.write('\n'.join(fjwrapper_lines) + '\n')

        extrapaths = self.shower_card['extrapaths'].split()

        # check that the path needed by HW++ and PY8 are set if one uses these shower
        if shower in ['HERWIGPP', 'PYTHIA8']:
            path_dict = {'HERWIGPP': ['hepmc_path',
                                      'thepeg_path',
                                      'hwpp_path'],
                         'PYTHIA8': ['pythia8_path']}

            if not all([self.options[ppath] and os.path.exists(self.options[ppath]) for ppath in path_dict[shower]]):
                raise aMCatNLOError('Some paths are missing or invalid in the configuration file.\n' + \
                        ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower])))

        if shower == 'HERWIGPP':
            extrapaths.append(pjoin(self.options['hepmc_path'], 'lib'))
            self.shower_card['extrapaths'] += ' %s' % pjoin(self.options['hepmc_path'], 'lib')

        # add the HEPMC path of the pythia8 installation
        if shower == 'PYTHIA8':
            hepmc = subprocess.Popen([pjoin(self.options['pythia8_path'], 'bin', 'pythia8-config'), '--hepmc2'],
                         stdout = subprocess.PIPE).stdout.read().strip()
            #this gives all the flags, i.e.
            #-I/Path/to/HepMC/include -L/Path/to/HepMC/lib -lHepMC
            # we just need the path to the HepMC libraries
            extrapaths.append(hepmc.split()[1].replace('-L', ''))

        # PY8.2xx layout (no xmldoc at top level) needs its lib dir too
        if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):
            extrapaths.append(pjoin(self.options['pythia8_path'], 'lib'))

        # set the PATH for the dynamic libraries
        # NOTE(review): os.putenv changes the env seen by child processes but
        # does NOT update os.environ for later Python-side reads — confirm
        # this is intended
        if sys.platform == 'darwin':
            ld_library_path = 'DYLD_LIBRARY_PATH'
        else:
            ld_library_path = 'LD_LIBRARY_PATH'
        if ld_library_path in os.environ.keys():
            paths = os.environ[ld_library_path]
        else:
            paths = ''
        paths += ':' + ':'.join(extrapaths)
        os.putenv(ld_library_path, paths)

        shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')
        self.shower_card.write_card(shower, shower_card_path)

        # overwrite if shower_card_set.dat exists in MCatNLO
        if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')):
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'),
                     pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat'))

        mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log')
        self.update_status('Compiling MCatNLO for %s...' % shower, level='shower')


        # libdl may be needded for pythia 82xx
        #if shower == 'PYTHIA8' and not \
        #   os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')) and \
        #   'dl' not in self.shower_card['extralibs'].split():
        #    # 'dl' has to be linked with the extralibs
        #    self.shower_card['extralibs'] += ' dl'
        #    logger.warning("'dl' was added to extralibs from the shower_card.dat.\n" + \
        #          "It is needed for the correct running of PY8.2xx.\n" + \
        #          "If this library cannot be found on your system, a crash will occur.")

        # NOTE(review): the same log file is opened twice (stdout and stderr
        # get independent handles, never closed here); interleaving of the
        # two streams in the log is therefore not guaranteed
        misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'),
                  stderr=open(mcatnlo_log, 'w'),
                  cwd=pjoin(self.me_dir, 'MCatNLO'),
                  close_fds=True)

        exe = 'MCATNLO_%s_EXE' % shower
        if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \
           not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')):
            print open(mcatnlo_log).read()
            raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log)
        logger.info('          ... done')

        # create an empty dir where to run
        count = 1
        while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \
                        (shower, count))):
            count += 1
        rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \
                        (shower, count))
        os.mkdir(rundir)
        files.cp(shower_card_path, rundir)

        #look for the event files (don't resplit if one asks for the
        # same number of event files as in the previous run)
        event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name))
        if max(len(event_files), 1) != self.shower_card['nsplit_jobs']:
            logger.info('Cleaning old files and splitting the event file...')
            #clean the old files
            files.rm([f for f in event_files if 'events.lhe' not in f])
            if self.shower_card['nsplit_jobs'] > 1:
                misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities'), nocompile=options['nocompile'])
                p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')],
                               stdin=subprocess.PIPE,
                               stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'),
                               cwd=pjoin(self.me_dir, 'Events', self.run_name))
                p.communicate(input = 'events.lhe\n%d\n' % self.shower_card['nsplit_jobs'])
                logger.info('Splitting done.')
            event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name))

        event_files.sort()

        self.update_status('Showering events...', level='shower')
        logger.info('(Running in %s)' % rundir)
        if shower != 'PYTHIA8':
            files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir)
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir)
        else:
            # special treatment for pythia8
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir)
            files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir)
            if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): # this is PY8.1xxx
                files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir)
                files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir)
            else: # this is PY8.2xxx
                files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir)
        #link the hwpp exe in the rundir
        if shower == 'HERWIGPP':
            try:
                if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')):
                    files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir)
                if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')):
                    files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig'), rundir)
            except Exception:
                raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.')

            if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')):
                files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir)

        files.ln(evt_file, rundir, 'events.lhe')
        for i, f in enumerate(event_files):
            files.ln(f, rundir,'events_%d.lhe' % (i + 1))

        # decide the kind of output the shower run produces
        if not self.shower_card['analyse']:
            # an hep/hepmc file as output
            out_id = 'HEP'
        else:
            # one or more .top file(s) as output
            if "HwU" in self.shower_card['analyse']:
                out_id = 'HWU'
            else:
                out_id = 'TOP'

        # write the executable
        with open(pjoin(rundir, 'shower.sh'), 'w') as fsock:
            # set the PATH for the dynamic libraries
            if sys.platform == 'darwin':
                ld_library_path = 'DYLD_LIBRARY_PATH'
            else:
                ld_library_path = 'LD_LIBRARY_PATH'
            fsock.write(open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read() \
                        % {'ld_library_path': ld_library_path,
                           'extralibs': ':'.join(extrapaths)})
        subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')])

        if event_files:
            arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)] \
                            for i in range(len(event_files))]
        else:
            arg_list = [[shower, out_id, self.run_name]]

        self.run_all({rundir: 'shower.sh'}, arg_list, 'shower')
        self.njobs = 1
        self.wait_for_complete('shower')

        # now collect the results
        message = ''
        warning = ''
        to_gzip = [evt_file]
        if out_id == 'HEP':
            #copy the showered stdhep/hepmc file back in events
            if shower in ['PYTHIA8', 'HERWIGPP']:
                hep_format = 'HEPMC'
                ext = 'hepmc'
            else:
                hep_format = 'StdHEP'
                ext = 'hep'

            hep_file = '%s_%s_0.%s.gz' % \
                    (pjoin(os.path.dirname(evt_file), 'events'), shower, ext)
            count = 0

            # find the first available name for the output:
            # check existing results with or without event splitting
            while os.path.exists(hep_file) or \
                  os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)) :
                count +=1
                hep_file = '%s_%s_%d.%s.gz' % \
                        (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext)

            # NOTE(review): Python 2 'except OSError, IOError' catches ONLY
            # OSError and binds it to the name IOError — IOError itself is
            # NOT caught; likely intended 'except (OSError, IOError):'
            try:
                if self.shower_card['nsplit_jobs'] == 1:
                    files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file)
                    message = ('The file %s has been generated. \nIt contains showered' + \
                               ' and hadronized events in the %s format obtained' + \
                               ' showering the parton-level event file %s.gz with %s') % \
                               (hep_file, hep_format, evt_file, shower)
                else:
                    hep_list = []
                    for i in range(self.shower_card['nsplit_jobs']):
                        hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext)))
                        files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1])
                    message = ('The following files have been generated:\n   %s\nThey contain showered' + \
                               ' and hadronized events in the %s format obtained' + \
                               ' showering the (split) parton-level event file %s.gz with %s') % \
                               ('\n   '.join(hep_list), hep_format, evt_file, shower)

            except OSError, IOError:
                raise aMCatNLOError('No file has been generated, an error occurred.'+\
                        ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log'))

            # run the plot creation in a secure way
            if hep_format == 'StdHEP':
                try:
                    self.do_plot('%s -f' % self.run_name)
                except Exception, error:
                    logger.info("Fail to make the plot. Continue...")
                    pass

        elif out_id == 'TOP' or out_id == 'HWU':
            #copy the topdrawer or HwU file(s) back in events
            if out_id=='TOP':
                ext='top'
            elif out_id=='HWU':
                ext='HwU'
            topfiles = []
            top_tars = [tarfile.TarFile(f) for f in misc.glob('histfile*.tar', rundir)]
            for top_tar in top_tars:
                topfiles.extend(top_tar.getnames())

            # safety check
            if len(top_tars) != self.shower_card['nsplit_jobs']:
                raise aMCatNLOError('%d job(s) expected, %d file(s) found' % \
                                     (self.shower_card['nsplit_jobs'], len(top_tars)))

            # find the first available name for the output:
            # check existing results with or without event splitting
            filename = 'plot_%s_%d_' % (shower, 1)
            count = 1
            while os.path.exists(pjoin(self.me_dir, 'Events',
                      self.run_name, '%s0.%s' % (filename,ext))) or \
                  os.path.exists(pjoin(self.me_dir, 'Events',
                      self.run_name, '%s0__1.%s' % (filename,ext))):
                count += 1
                filename = 'plot_%s_%d_' % (shower, count)

            if out_id=='TOP':
                hist_format='TopDrawer format'
            elif out_id=='HWU':
                hist_format='HwU and GnuPlot formats'

            if not topfiles:
                # if no topfiles are found just warn the user
                warning = 'No .top file has been generated. For the results of your ' +\
                                'run, please check inside %s' % rundir
            elif self.shower_card['nsplit_jobs'] == 1:
                # only one job for the shower
                top_tars[0].extractall(path = rundir)
                plotfiles = []
                # NOTE(review): loop variable 'file' shadows the Python 2
                # builtin of the same name (harmless here)
                for i, file in enumerate(topfiles):
                    if out_id=='TOP':
                        plotfile = pjoin(self.me_dir, 'Events', self.run_name,
                                         '%s%d.top' % (filename, i))
                        files.mv(pjoin(rundir, file), plotfile)
                    elif out_id=='HWU':
                        out=pjoin(self.me_dir,'Events',
                                  self.run_name,'%s%d'% (filename,i))
                        histos=[{'dirname':pjoin(rundir,file)}]
                        self.combine_plots_HwU(histos,out)
                        # gnuplot rendering is best-effort: ignore failures
                        try:
                            misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\
                                      stdout=os.open(os.devnull, os.O_RDWR),\
                                      stderr=os.open(os.devnull, os.O_RDWR),\
                                      cwd=pjoin(self.me_dir, 'Events', self.run_name))
                        except Exception:
                            pass
                        plotfile=pjoin(self.me_dir,'Events',self.run_name,
                                       '%s%d.HwU'% (filename,i))
                    plotfiles.append(plotfile)

                # singular/plural wording for the user message
                ffiles = 'files'
                have = 'have'
                if len(plotfiles) == 1:
                    ffiles = 'file'
                    have = 'has'

                message = ('The %s %s %s been generated, with histograms in the' + \
                           ' %s, obtained by showering the parton-level' + \
                           ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have, \
                           hist_format, evt_file, shower)
            else:
                # many jobs for the shower have been run
                topfiles_set = set(topfiles)
                plotfiles = []
                for j, top_tar in enumerate(top_tars):
                    top_tar.extractall(path = rundir)
                    for i, file in enumerate(topfiles_set):
                        plotfile = pjoin(self.me_dir, 'Events', self.run_name,
                                         '%s%d__%d.%s' % (filename, i, j + 1,ext))
                        files.mv(pjoin(rundir, file), plotfile)
                        plotfiles.append(plotfile)

                # check if the user asked to combine the .top into a single file
                if self.shower_card['combine_td']:
                    misc.compile(['sum_plots'], cwd = pjoin(self.me_dir, 'Utilities'))

                    # with 'sum' normalization the partial histograms add up
                    # directly; otherwise each job carries 1/nsplit_jobs
                    if self.banner.get('run_card', 'event_norm').lower() == 'sum':
                        norm = 1.
                    else:
                        norm = 1./float(self.shower_card['nsplit_jobs'])

                    plotfiles2 = []
                    for i, file in enumerate(topfiles_set):
                        filelist = ['%s%d__%d.%s' % (filename, i, j + 1,ext) \
                                    for j in range(self.shower_card['nsplit_jobs'])]
                        if out_id=='TOP':
                            # stdin protocol of sum_plots: count, file names,
                            # then one normalization per file
                            infile="%d\n%s\n%s\n" % \
                                (self.shower_card['nsplit_jobs'],
                                 '\n'.join(filelist),
                                 '\n'.join([str(norm)] * self.shower_card['nsplit_jobs']))
                            p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')],
                                           stdin=subprocess.PIPE,
                                           stdout=os.open(os.devnull, os.O_RDWR),
                                           cwd=pjoin(self.me_dir, 'Events', self.run_name))
                            p.communicate(input = infile)
                            files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'),
                                     pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i)))
                        elif out_id=='HWU':
                            out=pjoin(self.me_dir,'Events',
                                      self.run_name,'%s%d'% (filename,i))
                            histos=[]
                            norms=[]
                            for plotfile in plotfiles:
                                histos.append({'dirname':plotfile})
                                norms.append(norm)
                            self.combine_plots_HwU(histos,out,normalisation=norms)
                            try:
                                misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\
                                          stdout=os.open(os.devnull, os.O_RDWR),\
                                          stderr=os.open(os.devnull, os.O_RDWR),\
                                          cwd=pjoin(self.me_dir, 'Events',self.run_name))
                            except Exception:
                                pass

                        # archive the per-job partial files and remove them
                        plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i,ext)))
                        tar = tarfile.open(
                                pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz')
                        for f in filelist:
                            tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f)
                        files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist])

                        tar.close()

                    ffiles = 'files'
                    have = 'have'
                    if len(plotfiles2) == 1:
                        ffiles = 'file'
                        have = 'has'

                    message = ('The %s %s %s been generated, with histograms in the' + \
                               ' %s, obtained by showering the parton-level' + \
                               ' file %s.gz with %s.\n' + \
                               'The files from the different shower ' + \
                               'jobs (before combining them) can be found inside %s.') % \
                               (ffiles, ', '.join(plotfiles2), have, hist_format,\
                                evt_file, shower,
                                ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2]))

                else:
                    message = ('The following files have been generated:\n   %s\n' + \
                               'They contain histograms in the' + \
                               ' %s, obtained by showering the parton-level' + \
                               ' file %s.gz with %s.') % ('\n   '.join(plotfiles), \
                               hist_format, evt_file, shower)

        # Now arxiv the shower card used if RunMaterial is present
        run_dir_path = pjoin(rundir, self.run_name)
        if os.path.exists(pjoin(run_dir_path,'RunMaterial.tar.gz')):
            misc.call(['tar','-xzpf','RunMaterial.tar.gz'],cwd=run_dir_path)
            files.cp(pjoin(self.me_dir,'Cards','shower_card.dat'),
                     pjoin(run_dir_path,'RunMaterial','shower_card_for_%s_%d.dat'\
                           %(shower, count)))
            misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],
                      cwd=run_dir_path)
            shutil.rmtree(pjoin(run_dir_path,'RunMaterial'))
        # end of the run, gzip files and print out the message/warning
        for f in to_gzip:
            misc.gzip(f)
        if message:
            logger.info(message)
        if warning:
            logger.warning(warning)

        self.update_status('Run complete', level='shower', update_results=True)
4124 4125 ############################################################################
    def set_run_name(self, name, tag=None, level='parton', reload_card=False,**opts):
        """define the run name, the run_tag, the banner and the results.

        name: run name to switch to (if equal to the current one, only the
              tag may change).
        tag: explicit run_tag to use; when None a tag is chosen/kept
             automatically.
        level: stage requesting the change ('parton', 'shower', 'delphes',
               'madanalysis5_hadron', 'plot'); drives whether a new tag is
               needed.
        reload_card: when True and the run name is unchanged, re-read
                     Cards/run_card.dat.

        Returns None for the 'parton' level; for other levels, the tag of the
        previous run that carries the data this level needs (may fall through
        and return None when no such tag exists).
        """

        # when are we force to change the tag new_run:previous run requiring changes
        # (key = level being run, value = tags that, if already present in the
        # last results entry, force a fresh tag)
        upgrade_tag = {'parton': ['parton','delphes','shower','madanalysis5_hadron'],
                       'shower': ['shower','delphes','madanalysis5_hadron'],
                       'delphes':['delphes'],
                       'madanalysis5_hadron':['madanalysis5_hadron'],
                       'plot':[]}

        if name == self.run_name:
            if reload_card:
                run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
                self.run_card = banner_mod.RunCardNLO(run_card)

            #check if we need to change the tag
            if tag:
                self.run_card['run_tag'] = tag
                self.run_tag = tag
                self.results.add_run(self.run_name, self.run_card)
            else:
                # note: the loop deliberately reuses the name 'tag' — first as
                # the candidate level name, then as the newly generated tag
                for tag in upgrade_tag[level]:
                    if getattr(self.results[self.run_name][-1], tag):
                        tag = self.get_available_tag()
                        self.run_card['run_tag'] = tag
                        self.run_tag = tag
                        self.results.add_run(self.run_name, self.run_card)
                        break
            return # Nothing to do anymore

        # save/clean previous run
        if self.run_name:
            self.store_result()
        # store new name
        self.run_name = name

        # Read run_card
        run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
        self.run_card = banner_mod.RunCardNLO(run_card)

        new_tag = False
        # First call for this run -> set the banner
        self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag)
        if 'mgruncard' in self.banner:
            # prefer the run_card stored in the recovered banner
            self.run_card = self.banner.charge_card('run_card')
        if tag:
            self.run_card['run_tag'] = tag
            new_tag = True
        elif not self.run_name in self.results and level =='parton':
            pass # No results yet, so current tag is fine
        elif not self.run_name in self.results:
            #This is only for case when you want to trick the interface
            logger.warning('Trying to run data on unknown run.')
            self.results.add_run(name, self.run_card)
            self.results.update('add run %s' % name, 'all', makehtml=True)
        else:
            for tag in upgrade_tag[level]:

                if getattr(self.results[self.run_name][-1], tag):
                    # LEVEL is already define in the last tag -> need to switch tag
                    tag = self.get_available_tag()
                    self.run_card['run_tag'] = tag
                    new_tag = True
                    break
            if not new_tag:
                # We can add the results to the current run
                tag = self.results[self.run_name][-1]['tag']
                self.run_card['run_tag'] = tag # ensure that run_tag is correct


        if name in self.results and not new_tag:
            self.results.def_current(self.run_name)
        else:
            self.results.add_run(self.run_name, self.run_card)

        self.run_tag = self.run_card['run_tag']

        # Return the tag of the previous run having the required data for this
        # tag/run to working wel.
        # NOTE(review): 'pythia' is handled here but is not a key of
        # upgrade_tag above — confirm which levels can actually reach this
        # method
        if level == 'parton':
            return
        elif level == 'pythia':
            return self.results[self.run_name][0]['tag']
        else:
            # scan the run's tags from most recent to oldest; falls through
            # (returning None) if none of them has pythia data
            for i in range(-1,-len(self.results[self.run_name])-1,-1):
                tagRun = self.results[self.run_name][i]
                if tagRun.pythia:
                    return tagRun['tag']
4214 4215
4216 - def store_result(self):
4217 """ tar the pythia results. This is done when we are quite sure that 4218 the pythia output will not be use anymore """ 4219 4220 if not self.run_name: 4221 return 4222 4223 self.results.save() 4224 4225 if not self.to_store: 4226 return 4227 4228 if 'event' in self.to_store: 4229 if os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')): 4230 if not os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')): 4231 self.update_status('gzipping output file: events.lhe', level='parton', error=True) 4232 misc.gzip(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4233 else: 4234 os.remove(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4235 if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): 4236 os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) 4237 4238 4239 tag = self.run_card['run_tag'] 4240 4241 self.to_store = []
4242 4243 4244 ############################################################################
4245 - def get_Gdir(self, Pdir=None):
4246 """get the list of Gdirectory if not yet saved.""" 4247 4248 if hasattr(self, "Gdirs"): 4249 if self.me_dir in self.Gdirs: 4250 if Pdir is None: 4251 return sum(self.Gdirs.values()) 4252 else: 4253 return self.Gdirs[Pdir] 4254 4255 Pdirs = self.get_Pdir() 4256 Gdirs = {self.me_dir:[]} 4257 for P in Pdirs: 4258 Gdirs[P] = [pjoin(P,G) for G in os.listdir(P) if G.startswith('G') and 4259 os.path.isdir(pjoin(P,G))] 4260 4261 self.Gdirs = Gdirs 4262 return self.getGdir(Pdir)
4263 4264
4265 - def get_init_dict(self, evt_file):
4266 """reads the info in the init block and returns them in a dictionary""" 4267 ev_file = open(evt_file) 4268 init = "" 4269 found = False 4270 while True: 4271 line = ev_file.readline() 4272 if "<init>" in line: 4273 found = True 4274 elif found and not line.startswith('#'): 4275 init += line 4276 if "</init>" in line or "<event>" in line: 4277 break 4278 ev_file.close() 4279 4280 # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2), 4281 # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP 4282 # these are not included (so far) in the init_dict 4283 # XSECUP(1),XERRUP(1),XMAXUP(1),LPRUP(1) 4284 4285 init_dict = {} 4286 init_dict['idbmup1'] = int(init.split()[0]) 4287 init_dict['idbmup2'] = int(init.split()[1]) 4288 init_dict['ebmup1'] = float(init.split()[2]) 4289 init_dict['ebmup2'] = float(init.split()[3]) 4290 init_dict['pdfgup1'] = int(init.split()[4]) 4291 init_dict['pdfgup2'] = int(init.split()[5]) 4292 init_dict['pdfsup1'] = int(init.split()[6]) 4293 init_dict['pdfsup2'] = int(init.split()[7]) 4294 init_dict['idwtup'] = int(init.split()[8]) 4295 init_dict['nprup'] = int(init.split()[9]) 4296 4297 return init_dict
4298 4299
4300 - def banner_to_mcatnlo(self, evt_file):
4301 """creates the mcatnlo input script using the values set in the header of the event_file. 4302 It also checks if the lhapdf library is used""" 4303 4304 shower = self.banner.get('run_card', 'parton_shower').upper() 4305 pdlabel = self.banner.get('run_card', 'pdlabel') 4306 itry = 0 4307 nevents = self.shower_card['nevents'] 4308 init_dict = self.get_init_dict(evt_file) 4309 4310 if nevents < 0 or \ 4311 nevents > self.banner.get_detail('run_card', 'nevents'): 4312 nevents = self.banner.get_detail('run_card', 'nevents') 4313 4314 nevents = nevents / self.shower_card['nsplit_jobs'] 4315 4316 mcmass_dict = {} 4317 for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]: 4318 pdg = int(line.split()[0]) 4319 mass = float(line.split()[1]) 4320 mcmass_dict[pdg] = mass 4321 4322 content = 'EVPREFIX=%s\n' % pjoin(os.path.split(evt_file)[1]) 4323 content += 'NEVENTS=%d\n' % nevents 4324 content += 'NEVENTS_TOT=%d\n' % (self.banner.get_detail('run_card', 'nevents') /\ 4325 self.shower_card['nsplit_jobs']) 4326 content += 'MCMODE=%s\n' % shower 4327 content += 'PDLABEL=%s\n' % pdlabel 4328 4329 try: 4330 aewm1 = self.banner.get_detail('param_card', 'sminputs', 1).value 4331 raise KeyError 4332 except KeyError: 4333 mod = self.get_model() 4334 if not hasattr(mod, 'parameter_dict'): 4335 from models import model_reader 4336 mod = model_reader.ModelReader(mod) 4337 mod.set_parameters_and_couplings(self.banner.param_card) 4338 aewm1 = 0 4339 for key in ['aEWM1', 'AEWM1', 'aEWm1', 'aewm1']: 4340 if key in mod['parameter_dict']: 4341 aewm1 = mod['parameter_dict'][key] 4342 break 4343 elif 'mdl_%s' % key in mod['parameter_dict']: 4344 aewm1 = mod['parameter_dict']['mod_%s' % key] 4345 break 4346 else: 4347 for key in ['aEW', 'AEW', 'aEw', 'aew']: 4348 if key in mod['parameter_dict']: 4349 aewm1 = 1./mod['parameter_dict'][key] 4350 break 4351 elif 'mdl_%s' % key in mod['parameter_dict']: 4352 aewm1 = 1./mod['parameter_dict']['mod_%s' % key] 4353 break 4354 4355 
content += 'ALPHAEW=%s\n' % aewm1 4356 #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid') 4357 #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4358 content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value 4359 content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value 4360 content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value 4361 content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value 4362 content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value 4363 content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value 4364 try: 4365 content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value 4366 content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value 4367 except KeyError: 4368 content += 'HGGMASS=120.\n' 4369 content += 'HGGWIDTH=0.00575308848\n' 4370 content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1') 4371 content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2') 4372 content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1') 4373 content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2') 4374 content += 'DMASS=%s\n' % mcmass_dict[1] 4375 content += 'UMASS=%s\n' % mcmass_dict[2] 4376 content += 'SMASS=%s\n' % mcmass_dict[3] 4377 content += 'CMASS=%s\n' % mcmass_dict[4] 4378 content += 'BMASS=%s\n' % mcmass_dict[5] 4379 try: 4380 content += 'EMASS=%s\n' % mcmass_dict[11] 4381 content += 'MUMASS=%s\n' % mcmass_dict[13] 4382 content += 'TAUMASS=%s\n' % mcmass_dict[15] 4383 except KeyError: 4384 # this is for backward compatibility 4385 mcmass_lines = [l for l in \ 4386 open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper()) 4387 ).read().split('\n') if l] 4388 new_mcmass_dict = {} 4389 for l in mcmass_lines: 4390 key, val = l.split('=') 4391 
new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip() 4392 content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)'] 4393 content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)'] 4394 content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)'] 4395 4396 content += 'GMASS=%s\n' % mcmass_dict[21] 4397 content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower() 4398 # check if need to link lhapdf 4399 if int(self.shower_card['pdfcode']) > 1 or \ 4400 (pdlabel=='lhapdf' and int(self.shower_card['pdfcode'])==1) or \ 4401 shower=='HERWIGPP' : 4402 # Use LHAPDF (should be correctly installed, because 4403 # either events were already generated with them, or the 4404 # user explicitly gives an LHAPDF number in the 4405 # shower_card). 4406 self.link_lhapdf(pjoin(self.me_dir, 'lib')) 4407 lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'], 4408 stdout = subprocess.PIPE).stdout.read().strip() 4409 content += 'LHAPDFPATH=%s\n' % lhapdfpath 4410 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 4411 if self.shower_card['pdfcode']==0: 4412 lhaid_list = '' 4413 content += '' 4414 elif self.shower_card['pdfcode']==1: 4415 lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])] 4416 content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4417 else: 4418 lhaid_list = [abs(int(self.shower_card['pdfcode']))] 4419 content += 'PDFCODE=%s\n' % self.shower_card['pdfcode'] 4420 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 4421 elif int(self.shower_card['pdfcode'])==1 or \ 4422 int(self.shower_card['pdfcode'])==-1 and True: 4423 # Try to use LHAPDF because user wants to use the same PDF 4424 # as was used for the event generation. However, for the 4425 # event generation, LHAPDF was not used, so non-trivial to 4426 # see if if LHAPDF is available with the corresponding PDF 4427 # set. If not found, give a warning and use build-in PDF 4428 # set instead. 
4429 try: 4430 lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'], 4431 stdout = subprocess.PIPE).stdout.read().strip() 4432 self.link_lhapdf(pjoin(self.me_dir, 'lib')) 4433 content += 'LHAPDFPATH=%s\n' % lhapdfpath 4434 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 4435 lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])] 4436 content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4437 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 4438 except Exception: 4439 logger.warning('Trying to shower events using the same PDF in the shower as used in the generation'+\ 4440 ' of the events using LHAPDF. However, no valid LHAPDF installation found with the'+\ 4441 ' needed PDF set. Will use default internal PDF for the shower instead. To use the'+\ 4442 ' same set as was used in the event generation install LHAPDF and set the path using'+\ 4443 ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell') 4444 content += 'LHAPDFPATH=\n' 4445 content += 'PDFCODE=0\n' 4446 else: 4447 content += 'LHAPDFPATH=\n' 4448 content += 'PDFCODE=0\n' 4449 4450 content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw') 4451 content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj') 4452 # add the pythia8/hwpp path(s) 4453 if self.options['pythia8_path']: 4454 content+='PY8PATH=%s\n' % self.options['pythia8_path'] 4455 if self.options['hwpp_path']: 4456 content+='HWPPPATH=%s\n' % self.options['hwpp_path'] 4457 if self.options['thepeg_path'] and self.options['thepeg_path'] != self.options['hwpp_path']: 4458 content+='THEPEGPATH=%s\n' % self.options['thepeg_path'] 4459 if self.options['hepmc_path'] and self.options['hepmc_path'] != self.options['hwpp_path']: 4460 content+='HEPMCPATH=%s\n' % self.options['hepmc_path'] 4461 4462 output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w') 4463 output.write(content) 4464 output.close() 4465 return shower
4466 4467
4468 - def run_reweight(self, only):
4469 """runs the reweight_xsec_events executables on each sub-event file generated 4470 to compute on the fly scale and/or PDF uncertainities""" 4471 logger.info(' Doing reweight') 4472 4473 nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted') 4474 # if only doing reweight, copy back the nevents_unweighted file 4475 if only: 4476 if os.path.exists(nev_unw + '.orig'): 4477 files.cp(nev_unw + '.orig', nev_unw) 4478 else: 4479 raise aMCatNLOError('Cannot find event file information') 4480 4481 #read the nevents_unweighted file to get the list of event files 4482 file = open(nev_unw) 4483 lines = file.read().split('\n') 4484 file.close() 4485 # make copy of the original nevent_unweighted file 4486 files.cp(nev_unw, nev_unw + '.orig') 4487 # loop over lines (all but the last one whith is empty) and check that the 4488 # number of events is not 0 4489 evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0'] 4490 evt_wghts = [float(line.split()[3]) for line in lines[:-1] if line.split()[1] != '0'] 4491 if self.run_card['event_norm'].lower()=='bias' and self.run_card['nevents'] != 0: 4492 evt_wghts[:]=[1./float(self.run_card['nevents']) for wgt in evt_wghts] 4493 #prepare the job_dict 4494 job_dict = {} 4495 exe = 'reweight_xsec_events.local' 4496 for i, evt_file in enumerate(evt_files): 4497 path, evt = os.path.split(evt_file) 4498 files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \ 4499 pjoin(self.me_dir, 'SubProcesses', path)) 4500 job_dict[path] = [exe] 4501 4502 self.run_all(job_dict, [[evt, '1']], 'Running reweight') 4503 4504 #check that the new event files are complete 4505 for evt_file in evt_files: 4506 last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \ 4507 pjoin(self.me_dir, 'SubProcesses', evt_file)], \ 4508 stdout = subprocess.PIPE).stdout.read().strip() 4509 if last_line != "</LesHouchesEvents>": 4510 raise aMCatNLOError('An error occurred during reweight. 
Check the' + \ 4511 '\'reweight_xsec_events.output\' files inside the ' + \ 4512 '\'SubProcesses/P*/G*/ directories for details') 4513 4514 #update file name in nevents_unweighted 4515 newfile = open(nev_unw, 'w') 4516 for line in lines: 4517 if line: 4518 newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n') 4519 newfile.close() 4520 4521 return self.pdf_scale_from_reweighting(evt_files,evt_wghts)
4522
    def pdf_scale_from_reweighting(self, evt_files,evt_wghts):
        """This function takes the files with the scale and pdf values
        written by the reweight_xsec_events.f code
        (P*/G*/pdf_scale_dependence.dat) and computes the overall
        scale and PDF uncertainty (the latter is computed using the
        Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000))
        and returns it in percents.  The expected format of the file
        is: n_scales xsec_scale_central xsec_scale1 ...  n_pdf
        xsec_pdf0 xsec_pdf1 ....

        Returns [scale_info, pdf_info]: two lists of dictionaries, one
        entry per dynamical scale choice / per PDF set.
        """
        # NOTE(review): the code below reads 'scale_pdf_dependence.dat',
        # not 'pdf_scale_dependence.dat' as stated above - confirm which
        # file name is actually produced by reweight_xsec_events.f.

        # accumulators: scales[j][k] (resp. pdfs[j][k]) is the k-th varied
        # cross section of the j-th scale choice (resp. PDF set), summed
        # over all channels with the per-channel weights evt_wghts
        scales=[]
        pdfs=[]
        for i,evt_file in enumerate(evt_files):
            path, evt=os.path.split(evt_file)
            with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'),'r') as f:
                data_line=f.readline()
                if "scale variations:" in data_line:
                    for j,scale in enumerate(self.run_card['dynamical_scale_choice']):
                        # first line: header/counts (unused); second line: values
                        data_line = f.readline().split()
                        # Fortran 'D' exponents -> python 'E'
                        scales_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()]
                        try:
                            scales[j] = [a + b for a, b in zip(scales[j], scales_this)]
                        except IndexError:
                            # first channel: initialise the accumulator
                            scales+=[scales_this]
                    data_line=f.readline()
                if "pdf variations:" in data_line:
                    for j,pdf in enumerate(self.run_card['lhaid']):
                        data_line = f.readline().split()
                        pdfs_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()]
                        try:
                            pdfs[j] = [a + b for a, b in zip(pdfs[j], pdfs_this)]
                        except IndexError:
                            pdfs+=[pdfs_this]

        # get the scale uncertainty in percent
        scale_info=[]
        for j,scale in enumerate(scales):
            # entry 0 is the central cross section for this scale choice
            s_cen=scale[0]
            if s_cen != 0.0 and self.run_card['reweight_scale'][j]:
                # max and min of the full envelope
                s_max=(max(scale)/s_cen-1)*100
                s_min=(1-min(scale)/s_cen)*100
                # ren and fac scale dependence added in quadrature; the
                # variations are laid out with the ren scale running fastest
                ren_var=[]
                fac_var=[]
                for i in range(len(self.run_card['rw_rscale'])):
                    ren_var.append(scale[i]-s_cen) # central fac scale
                for i in range(len(self.run_card['rw_fscale'])):
                    fac_var.append(scale[i*len(self.run_card['rw_rscale'])]-s_cen) # central ren scale
                s_max_q=((s_cen+math.sqrt(math.pow(max(ren_var),2)+math.pow(max(fac_var),2)))/s_cen-1)*100
                s_min_q=(1-(s_cen-math.sqrt(math.pow(min(ren_var),2)+math.pow(min(fac_var),2)))/s_cen)*100
                s_size=len(scale)
            else:
                # zero central value or variation disabled: report no spread
                s_max=0.0
                s_min=0.0
                s_max_q=0.0
                s_min_q=0.0
                s_size=len(scale)
            scale_info.append({'cen':s_cen, 'min':s_min, 'max':s_max, \
                               'min_q':s_min_q, 'max_q':s_max_q, 'size':s_size, \
                               'label':self.run_card['dynamical_scale_choice'][j], \
                               'unc':self.run_card['reweight_scale'][j]})

        # check if we can use LHAPDF to compute the PDF uncertainty
        if any(self.run_card['reweight_pdf']):
            use_lhapdf=False
            lhapdf_libdir=subprocess.Popen([self.options['lhapdf'],'--libdir'],\
                                           stdout=subprocess.PIPE).stdout.read().strip()

            # look for the python bindings in <libdir>/<python*>/site-packages
            try:
                candidates=[dirname for dirname in os.listdir(lhapdf_libdir) \
                            if os.path.isdir(pjoin(lhapdf_libdir,dirname))]
            except OSError:
                candidates=[]
            for candidate in candidates:
                if os.path.isfile(pjoin(lhapdf_libdir,candidate,'site-packages','lhapdf.so')):
                    sys.path.insert(0,pjoin(lhapdf_libdir,candidate,'site-packages'))
                    try:
                        import lhapdf
                        use_lhapdf=True
                        break
                    except ImportError:
                        # importable .so not usable: undo the path change
                        sys.path.pop(0)
                        continue

            if not use_lhapdf:
                # also try the lib64 variant of the library directory
                try:
                    candidates=[dirname for dirname in os.listdir(lhapdf_libdir+'64') \
                                if os.path.isdir(pjoin(lhapdf_libdir+'64',dirname))]
                except OSError:
                    candidates=[]
                for candidate in candidates:
                    if os.path.isfile(pjoin(lhapdf_libdir+'64',candidate,'site-packages','lhapdf.so')):
                        sys.path.insert(0,pjoin(lhapdf_libdir+'64',candidate,'site-packages'))
                        try:
                            import lhapdf
                            use_lhapdf=True
                            break
                        except ImportError:
                            sys.path.pop(0)
                            continue

            if not use_lhapdf:
                # last resort: maybe the bindings are already on sys.path
                try:
                    import lhapdf
                    use_lhapdf=True
                except ImportError:
                    logger.warning("Failed to access python version of LHAPDF: "\
                                   "cannot compute PDF uncertainty from the "\
                                   "weights in the events. The weights in the LHE " \
                                   "event files will still cover all PDF set members, "\
                                   "but there will be no PDF uncertainty printed in the run summary. \n "\
                                   "If the python interface to LHAPDF is available on your system, try "\
                                   "adding its location to the PYTHONPATH environment variable and the"\
                                   "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).")
                    use_lhapdf=False

        # turn off lhapdf printing any messages
        if any(self.run_card['reweight_pdf']) and use_lhapdf: lhapdf.setVerbosity(0)

        pdf_info=[]
        for j,pdfset in enumerate(pdfs):
            # entry 0 is the central member of this PDF set
            p_cen=pdfset[0]
            if p_cen != 0.0 and self.run_card['reweight_pdf'][j]:
                if use_lhapdf:
                    pdfsetname=self.run_card['lhapdfsetname'][j]
                    try:
                        # let LHAPDF combine the members according to the
                        # set's own error prescription (Hessian/Gaussian)
                        p=lhapdf.getPDFSet(pdfsetname)
                        ep=p.uncertainty(pdfset,-1)
                        p_cen=ep.central
                        p_min=abs(ep.errminus/p_cen)*100
                        p_max=abs(ep.errplus/p_cen)*100
                        p_type=p.errorType
                        p_size=p.size
                        p_conf=p.errorConfLevel
                    except:
                        logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname)
                        p_min=0.0
                        p_max=0.0
                        p_type='unknown'
                        p_conf='unknown'
                        p_size=len(pdfset)
                else:
                    p_min=0.0
                    p_max=0.0
                    p_type='unknown'
                    p_conf='unknown'
                    p_size=len(pdfset)
                    pdfsetname=self.run_card['lhaid'][j]
            else:
                p_min=0.0
                p_max=0.0
                p_type='none'
                p_conf='unknown'
                p_size=len(pdfset)
                pdfsetname=self.run_card['lhaid'][j]
            pdf_info.append({'cen':p_cen, 'min':p_min, 'max':p_max, \
                             'unc':p_type, 'name':pdfsetname, 'size':p_size, \
                             'label':self.run_card['lhaid'][j], 'conf':p_conf})

        scale_pdf_info=[scale_info,pdf_info]
        return scale_pdf_info
4685 4686
4687 - def wait_for_complete(self, run_type):
4688 """this function waits for jobs on cluster to complete their run.""" 4689 starttime = time.time() 4690 #logger.info(' Waiting for submitted jobs to complete') 4691 update_status = lambda i, r, f: self.update_status((i, r, f, run_type), 4692 starttime=starttime, level='parton', update_results=True) 4693 try: 4694 self.cluster.wait(self.me_dir, update_status) 4695 except: 4696 self.cluster.remove() 4697 raise
4698
4699 - def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False):
4700 """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" 4701 self.ijob = 0 4702 if run_type != 'shower': 4703 self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) 4704 for args in arg_list: 4705 for Pdir, jobs in job_dict.items(): 4706 for job in jobs: 4707 self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 4708 if self.cluster_mode == 2: 4709 time.sleep(1) # security to allow all jobs to be launched 4710 else: 4711 self.njobs = len(arg_list) 4712 for args in arg_list: 4713 [(cwd, exe)] = job_dict.items() 4714 self.run_exe(exe, args, run_type, cwd) 4715 4716 self.wait_for_complete(run_type)
4717 4718 4719
4720 - def check_event_files(self,jobs):
4721 """check the integrity of the event files after splitting, and resubmit 4722 those which are not nicely terminated""" 4723 jobs_to_resubmit = [] 4724 for job in jobs: 4725 last_line = '' 4726 try: 4727 last_line = subprocess.Popen( 4728 ['tail', '-n1', pjoin(job['dirname'], 'events.lhe')], \ 4729 stdout = subprocess.PIPE).stdout.read().strip() 4730 except IOError: 4731 pass 4732 if last_line != "</LesHouchesEvents>": 4733 jobs_to_resubmit.append(job) 4734 self.njobs = 0 4735 if jobs_to_resubmit: 4736 run_type = 'Resubmitting broken jobs' 4737 logger.info('Some event files are broken, corresponding jobs will be resubmitted.') 4738 for job in jobs_to_resubmit: 4739 logger.debug('Resubmitting ' + job['dirname'] + '\n') 4740 self.run_all_jobs(jobs_to_resubmit,2,fixed_order=False)
4741 4742
4743 - def find_jobs_to_split(self, pdir, job, arg):
4744 """looks into the nevents_unweighed_splitted file to check how many 4745 split jobs are needed for this (pdir, job). arg is F, B or V""" 4746 # find the number of the integration channel 4747 splittings = [] 4748 ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read() 4749 pattern = re.compile('for i in (\d+) ; do') 4750 match = re.search(pattern, ajob) 4751 channel = match.groups()[0] 4752 # then open the nevents_unweighted_splitted file and look for the 4753 # number of splittings to be done 4754 nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read() 4755 # This skips the channels with zero events, because they are 4756 # not of the form GFXX_YY, but simply GFXX 4757 pattern = re.compile(r"%s_(\d+)/events.lhe" % \ 4758 pjoin(pdir, 'G%s%s' % (arg,channel))) 4759 matches = re.findall(pattern, nevents_file) 4760 for m in matches: 4761 splittings.append(m) 4762 return splittings
4763 4764
    def run_exe(self, exe, args, run_type, cwd=None):
        """this basic function launch locally/on cluster exe with args as argument.

        Dispatch: cluster_mode 0 runs serially; otherwise the submission
        strategy depends on the executable name ('reweight', 'ajob',
        'shower', or a plain cluster submit for anything else).
        """
        # first test that exe exists:
        execpath = None
        if cwd and os.path.exists(pjoin(cwd, exe)):
            execpath = pjoin(cwd, exe)
        elif not cwd and os.path.exists(exe):
            execpath = exe
        else:
            raise aMCatNLOError('Cannot find executable %s in %s' \
                % (exe, os.getcwd()))
        # check that the executable has exec permissions
        if self.cluster_mode == 1 and not os.access(execpath, os.X_OK):
            subprocess.call(['chmod', '+x', exe], cwd=cwd)
        # finally run it
        if self.cluster_mode == 0:
            # this is for the serial run
            misc.call(['./'+exe] + args, cwd=cwd)
            self.ijob += 1
            self.update_status((max([self.njobs - self.ijob - 1, 0]),
                                min([1, self.njobs - self.ijob]),
                                self.ijob, run_type), level='parton')

        # this is for the cluster/multicore run
        elif 'reweight' in exe:
            # a reweight run
            # Find the correct PDF input file
            input_files, output_files = [], []
            pdfinput = self.get_pdf_input_filename()
            if os.path.exists(pdfinput):
                input_files.append(pdfinput)
            input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events'))
            input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat'))
            input_files.append(args[0])
            output_files.append('%s.rwgt' % os.path.basename(args[0]))
            output_files.append('reweight_xsec_events.output')
            output_files.append('scale_pdf_dependence.dat')

            return self.cluster.submit2(exe, args, cwd=cwd,
                             input_files=input_files, output_files=output_files,
                             required_output=output_files)

        elif 'ajob' in exe:
            # the 'standard' amcatnlo job
            # check if args is a list of string
            if type(args[0]) == str:
                input_files, output_files, required_output, args = self.getIO_ajob(exe,cwd,args)
                #submitting
                self.cluster.submit2(exe, args, cwd=cwd,
                             input_files=input_files, output_files=output_files,
                             required_output=required_output)

                # # keep track of folders and arguments for splitted evt gen
                # subfolder=output_files[-1].split('/')[0]
                # if len(args) == 4 and '_' in subfolder:
                #     self.split_folders[pjoin(cwd,subfolder)] = [exe] + args

        elif 'shower' in exe:
            # a shower job
            # args are [shower, output(HEP or TOP), run_name]
            # cwd is the shower rundir, where the executable are found
            input_files, output_files = [], []
            shower = args[0]
            # the input files
            if shower == 'PYTHIA8':
                input_files.append(pjoin(cwd, 'Pythia8.exe'))
                input_files.append(pjoin(cwd, 'Pythia8.cmd'))
                # xmldoc location differs between Pythia8 installations
                if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):
                    input_files.append(pjoin(cwd, 'config.sh'))
                    input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc'))
                else:
                    input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'))
            else:
                input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower))
                input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower))
                if shower == 'HERWIGPP':
                    # executable name depends on the Herwig++/Herwig7 version
                    if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')):
                        input_files.append(pjoin(cwd, 'Herwig++'))
                    if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')):
                        input_files.append(pjoin(cwd, 'Herwig'))
                    input_files.append(pjoin(cwd, 'HepMCFortran.so'))
            # len(args) == 3 -> unsplit run; otherwise args[3] is the split id
            if len(args) == 3:
                if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')):
                    input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'))
                elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')):
                    input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'))
                else:
                    raise aMCatNLOError, 'Event file not present in %s' % \
                            pjoin(self.me_dir, 'Events', self.run_name)
            else:
                input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3]))
            # the output files
            if len(args) == 3:
                output_files.append('mcatnlo_run.log')
            else:
                output_files.append('mcatnlo_run_%s.log' % args[3])
            if args[1] == 'HEP':
                if len(args) == 3:
                    fname = 'events'
                else:
                    fname = 'events_%s' % args[3]
                # PYTHIA8/HERWIGPP write HepMC, the others StdHEP
                if shower in ['PYTHIA8', 'HERWIGPP']:
                    output_files.append(fname + '.hepmc.gz')
                else:
                    output_files.append(fname + '.hep.gz')
            elif args[1] == 'TOP' or args[1] == 'HWU':
                if len(args) == 3:
                    fname = 'histfile'
                else:
                    fname = 'histfile_%s' % args[3]
                output_files.append(fname + '.tar')
            else:
                # NOTE(review): '%d' with a string args[1] will itself raise
                # TypeError before the intended error is shown - confirm
                raise aMCatNLOError, 'Not a valid output argument for shower job : %d' % args[1]
            #submitting
            self.cluster.submit2(exe, args, cwd=cwd,
                    input_files=input_files, output_files=output_files)

        else:
            # anything else: plain cluster submission without file staging
            return self.cluster.submit(exe, args, cwd=cwd)
4885
4886 - def getIO_ajob(self,exe,cwd, args):
4887 # use local disk if possible => need to stands what are the 4888 # input/output files 4889 4890 output_files = [] 4891 required_output = [] 4892 input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'), 4893 pjoin(cwd, 'symfact.dat'), 4894 pjoin(cwd, 'iproc.dat'), 4895 pjoin(cwd, 'initial_states_map.dat'), 4896 pjoin(cwd, 'configs_and_props_info.dat'), 4897 pjoin(cwd, 'leshouche_info.dat'), 4898 pjoin(cwd, 'FKS_params.dat')] 4899 4900 # For GoSam interface, we must copy the SLHA card as well 4901 if os.path.exists(pjoin(self.me_dir,'OLP_virtuals','gosam.rc')): 4902 input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat')) 4903 4904 if os.path.exists(pjoin(cwd,'nevents.tar')): 4905 input_files.append(pjoin(cwd,'nevents.tar')) 4906 4907 if os.path.exists(pjoin(self.me_dir,'SubProcesses','OLE_order.olc')): 4908 input_files.append(pjoin(cwd, 'OLE_order.olc')) 4909 4910 # File for the loop (might not be present if MadLoop is not used) 4911 if os.path.exists(pjoin(cwd,'MadLoop5_resources.tar.gz')) and \ 4912 cluster.need_transfer(self.options): 4913 input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz')) 4914 elif os.path.exists(pjoin(cwd,'MadLoop5_resources')) and \ 4915 cluster.need_transfer(self.options): 4916 tf=tarfile.open(pjoin(cwd,'MadLoop5_resources.tar.gz'),'w:gz', 4917 dereference=True) 4918 tf.add(pjoin(cwd,'MadLoop5_resources'),arcname='MadLoop5_resources') 4919 tf.close() 4920 input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz')) 4921 4922 if args[1] == 'born' or args[1] == 'all': 4923 # MADEVENT MINT FO MODE 4924 input_files.append(pjoin(cwd, 'madevent_mintFO')) 4925 if args[2] == '0': 4926 current = '%s_G%s' % (args[1],args[0]) 4927 else: 4928 current = '%s_G%s_%s' % (args[1],args[0],args[2]) 4929 if os.path.exists(pjoin(cwd,current)): 4930 input_files.append(pjoin(cwd, current)) 4931 output_files.append(current) 4932 4933 required_output.append('%s/results.dat' % current) 4934 required_output.append('%s/res_%s.dat' % 
(current,args[3])) 4935 required_output.append('%s/log_MINT%s.txt' % (current,args[3])) 4936 required_output.append('%s/mint_grids' % current) 4937 required_output.append('%s/grid.MC_integer' % current) 4938 if args[3] != '0': 4939 required_output.append('%s/scale_pdf_dependence.dat' % current) 4940 4941 elif args[1] == 'F' or args[1] == 'B': 4942 # MINTMC MODE 4943 input_files.append(pjoin(cwd, 'madevent_mintMC')) 4944 4945 if args[2] == '0': 4946 current = 'G%s%s' % (args[1],args[0]) 4947 else: 4948 current = 'G%s%s_%s' % (args[1],args[0],args[2]) 4949 if os.path.exists(pjoin(cwd,current)): 4950 input_files.append(pjoin(cwd, current)) 4951 output_files.append(current) 4952 if args[2] > '0': 4953 # this is for the split event generation 4954 output_files.append('G%s%s_%s' % (args[1], args[0], args[2])) 4955 required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1],args[0],args[2],args[3])) 4956 4957 else: 4958 required_output.append('%s/log_MINT%s.txt' % (current,args[3])) 4959 if args[3] in ['0','1']: 4960 required_output.append('%s/results.dat' % current) 4961 if args[3] == '1': 4962 output_files.append('%s/results.dat' % current) 4963 4964 else: 4965 raise aMCatNLOError, 'not valid arguments: %s' %(', '.join(args)) 4966 4967 #Find the correct PDF input file 4968 pdfinput = self.get_pdf_input_filename() 4969 if os.path.exists(pdfinput): 4970 input_files.append(pdfinput) 4971 return input_files, output_files, required_output, args
4972 4973
4974 - def compile(self, mode, options):
4975 """compiles aMC@NLO to compute either NLO or NLO matched to shower, as 4976 specified in mode""" 4977 4978 os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) 4979 4980 self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, 4981 '%s_%s_banner.txt' % (self.run_name, self.run_tag))) 4982 4983 self.get_characteristics(pjoin(self.me_dir, 4984 'SubProcesses', 'proc_characteristics')) 4985 4986 #define a bunch of log files 4987 amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log') 4988 madloop_log = pjoin(self.me_dir, 'compile_madloop.log') 4989 reweight_log = pjoin(self.me_dir, 'compile_reweight.log') 4990 test_log = pjoin(self.me_dir, 'test.log') 4991 4992 # environmental variables to be included in make_opts 4993 self.make_opts_var = {} 4994 if self.proc_characteristics['has_loops'] and \ 4995 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 4996 self.make_opts_var['madloop'] = 'true' 4997 4998 self.update_status('Compiling the code', level=None, update_results=True) 4999 5000 libdir = pjoin(self.me_dir, 'lib') 5001 sourcedir = pjoin(self.me_dir, 'Source') 5002 5003 #clean files 5004 files.rm([amcatnlo_log, madloop_log, reweight_log, test_log]) 5005 #define which executable/tests to compile 5006 if '+' in mode: 5007 mode = mode.split('+')[0] 5008 if mode in ['NLO', 'LO']: 5009 exe = 'madevent_mintFO' 5010 tests = ['test_ME'] 5011 self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts')) 5012 elif mode in ['aMC@NLO', 'aMC@LO','noshower','noshowerLO']: 5013 exe = 'madevent_mintMC' 5014 tests = ['test_ME', 'test_MC'] 5015 # write an analyse_opts with a dummy analysis so that compilation goes through 5016 with open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'),'w') as fsock: 5017 fsock.write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n') 5018 5019 #directory where to compile exe 5020 p_dirs = [d for d in \ 5021 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 
5022 # create param_card.inc and run_card.inc 5023 self.do_treatcards('', amcatnlo=True, mode=mode) 5024 # if --nocompile option is specified, check here that all exes exists. 5025 # If they exists, return 5026 if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \ 5027 for p_dir in p_dirs]) and options['nocompile']: 5028 return 5029 5030 # rm links to lhapdflib/ PDFsets if exist 5031 if os.path.exists(pjoin(libdir, 'PDFsets')): 5032 files.rm(pjoin(libdir, 'PDFsets')) 5033 5034 # read the run_card to find if lhapdf is used or not 5035 if self.run_card['pdlabel'] == 'lhapdf' and \ 5036 (self.banner.get_detail('run_card', 'lpp1') != 0 or \ 5037 self.banner.get_detail('run_card', 'lpp2') != 0): 5038 5039 self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs]) 5040 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 5041 lhaid_list = self.run_card['lhaid'] 5042 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 5043 5044 else: 5045 if self.run_card['lpp1'] == 1 == self.run_card['lpp2']: 5046 logger.info('Using built-in libraries for PDFs') 5047 5048 self.make_opts_var['lhapdf'] = "" 5049 5050 # read the run_card to find if applgrid is used or not 5051 if self.run_card['iappl'] != 0: 5052 self.make_opts_var['applgrid'] = 'True' 5053 # check versions of applgrid and amcfast 5054 for code in ['applgrid','amcfast']: 5055 try: 5056 p = subprocess.Popen([self.options[code], '--version'], \ 5057 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 5058 except OSError: 5059 raise aMCatNLOError(('No valid %s installation found. \n' + \ 5060 'Please set the path to %s-config by using \n' + \ 5061 'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code,code,code,code)) 5062 else: 5063 output, _ = p.communicate() 5064 if code is 'applgrid' and output < '1.4.63': 5065 raise aMCatNLOError('Version of APPLgrid is too old. 
Use 1.4.69 or later.'\ 5066 +' You are using %s',output) 5067 if code is 'amcfast' and output < '1.1.1': 5068 raise aMCatNLOError('Version of aMCfast is too old. Use 1.1.1 or later.'\ 5069 +' You are using %s',output) 5070 5071 # set-up the Source/make_opts with the correct applgrid-config file 5072 appllibs=" APPLLIBS=$(shell %s --ldflags) $(shell %s --ldcflags) \n" \ 5073 % (self.options['amcfast'],self.options['applgrid']) 5074 text=open(pjoin(self.me_dir,'Source','make_opts'),'r').readlines() 5075 text_out=[] 5076 for line in text: 5077 if line.strip().startswith('APPLLIBS=$'): 5078 line=appllibs 5079 text_out.append(line) 5080 with open(pjoin(self.me_dir,'Source','make_opts'),'w') as fsock: 5081 fsock.writelines(text_out) 5082 else: 5083 self.make_opts_var['applgrid'] = "" 5084 5085 if 'fastjet' in self.options.keys() and self.options['fastjet']: 5086 self.make_opts_var['fastjet_config'] = self.options['fastjet'] 5087 5088 # add the make_opts_var to make_opts 5089 self.update_make_opts() 5090 5091 # make Source 5092 self.update_status('Compiling source...', level=None) 5093 misc.compile(['clean4pdf'], cwd = sourcedir) 5094 misc.compile(cwd = sourcedir) 5095 if os.path.exists(pjoin(libdir, 'libdhelas.a')) \ 5096 and os.path.exists(pjoin(libdir, 'libgeneric.a')) \ 5097 and os.path.exists(pjoin(libdir, 'libmodel.a')) \ 5098 and os.path.exists(pjoin(libdir, 'libpdf.a')): 5099 logger.info(' ...done, continuing with P* directories') 5100 else: 5101 raise aMCatNLOError('Compilation failed') 5102 5103 # make StdHep (only necessary with MG option output_dependencies='internal') 5104 MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib') 5105 if not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \ 5106 not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))): 5107 if os.path.exists(pjoin(sourcedir,'StdHEP')): 5108 logger.info('Compiling StdHEP (can take a couple of minutes) ...') 5109 try: 5110 misc.compile(['StdHEP'], cwd = 
sourcedir) 5111 except Exception as error: 5112 logger.debug(str(error)) 5113 logger.warning("StdHep failed to compiled. This forbids to run NLO+PS with PY6 and Herwig6") 5114 logger.info("details on the compilation error are available if the code is run with --debug flag") 5115 else: 5116 logger.info(' ...done.') 5117 else: 5118 logger.warning('Could not compile StdHEP because its'+\ 5119 ' source directory could not be found in the SOURCE folder.\n'+\ 5120 " Check the MG5_aMC option 'output_dependencies'.\n"+\ 5121 " This will prevent the use of HERWIG6/Pythia6 shower.") 5122 5123 5124 # make CutTools (only necessary with MG option output_dependencies='internal') 5125 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 5126 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 5127 if os.path.exists(pjoin(sourcedir,'CutTools')): 5128 logger.info('Compiling CutTools (can take a couple of minutes) ...') 5129 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5130 logger.info(' ...done.') 5131 else: 5132 raise aMCatNLOError('Could not compile CutTools because its'+\ 5133 ' source directory could not be found in the SOURCE folder.\n'+\ 5134 " Check the MG5_aMC option 'output_dependencies.'") 5135 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 5136 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 5137 raise aMCatNLOError('CutTools compilation failed.') 5138 5139 # Verify compatibility between current compiler and the one which was 5140 # used when last compiling CutTools (if specified). 
5141 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5142 libdir, 'libcts.a')))),'compiler_version.log') 5143 if os.path.exists(compiler_log_path): 5144 compiler_version_used = open(compiler_log_path,'r').read() 5145 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5146 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5147 if os.path.exists(pjoin(sourcedir,'CutTools')): 5148 logger.info('CutTools was compiled with a different fortran'+\ 5149 ' compiler. Re-compiling it now...') 5150 misc.compile(['cleanCT'], cwd = sourcedir) 5151 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5152 logger.info(' ...done.') 5153 else: 5154 raise aMCatNLOError("CutTools installation in %s"\ 5155 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 5156 " seems to have been compiled with a different compiler than"+\ 5157 " the one specified in MG5_aMC. Please recompile CutTools.") 5158 5159 # make IREGI (only necessary with MG option output_dependencies='internal') 5160 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \ 5161 and os.path.exists(pjoin(sourcedir,'IREGI')): 5162 logger.info('Compiling IREGI (can take a couple of minutes) ...') 5163 misc.compile(['IREGI'], cwd = sourcedir) 5164 logger.info(' ...done.') 5165 5166 if os.path.exists(pjoin(libdir, 'libiregi.a')): 5167 # Verify compatibility between current compiler and the one which was 5168 # used when last compiling IREGI (if specified). 5169 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5170 libdir, 'libiregi.a')))),'compiler_version.log') 5171 if os.path.exists(compiler_log_path): 5172 compiler_version_used = open(compiler_log_path,'r').read() 5173 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5174 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5175 if os.path.exists(pjoin(sourcedir,'IREGI')): 5176 logger.info('IREGI was compiled with a different fortran'+\ 5177 ' compiler. 
Re-compiling it now...') 5178 misc.compile(['cleanIR'], cwd = sourcedir) 5179 misc.compile(['IREGI'], cwd = sourcedir) 5180 logger.info(' ...done.') 5181 else: 5182 raise aMCatNLOError("IREGI installation in %s"\ 5183 %os.path.realpath(pjoin(libdir, 'libiregi.a'))+\ 5184 " seems to have been compiled with a different compiler than"+\ 5185 " the one specified in MG5_aMC. Please recompile IREGI.") 5186 5187 # check if MadLoop virtuals have been generated 5188 if self.proc_characteristics['has_loops'] and \ 5189 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5190 if mode in ['NLO', 'aMC@NLO', 'noshower']: 5191 tests.append('check_poles') 5192 5193 # make and run tests (if asked for), gensym and make madevent in each dir 5194 self.update_status('Compiling directories...', level=None) 5195 5196 for test in tests: 5197 self.write_test_input(test) 5198 5199 try: 5200 import multiprocessing 5201 if not self.nb_core: 5202 try: 5203 self.nb_core = int(self.options['nb_core']) 5204 except TypeError: 5205 self.nb_core = multiprocessing.cpu_count() 5206 except ImportError: 5207 self.nb_core = 1 5208 5209 compile_options = copy.copy(self.options) 5210 compile_options['nb_core'] = self.nb_core 5211 compile_cluster = cluster.MultiCore(**compile_options) 5212 logger.info('Compiling on %d cores' % self.nb_core) 5213 5214 update_status = lambda i, r, f: self.donothing(i,r,f) 5215 for p_dir in p_dirs: 5216 compile_cluster.submit(prog = compile_dir, 5217 argument = [self.me_dir, p_dir, mode, options, 5218 tests, exe, self.options['run_mode']]) 5219 try: 5220 compile_cluster.wait(self.me_dir, update_status) 5221 except Exception, error: 5222 logger.warning("Fail to compile the Subprocesses") 5223 if __debug__: 5224 raise 5225 compile_cluster.remove() 5226 self.do_quit('') 5227 5228 logger.info('Checking test output:') 5229 for p_dir in p_dirs: 5230 logger.info(p_dir) 5231 for test in tests: 5232 logger.info(' Result for %s:' % test) 5233 5234 this_dir = pjoin(self.me_dir, 
'SubProcesses', p_dir) 5235 #check that none of the tests failed 5236 self.check_tests(test, this_dir)
5237 5238
5239 - def donothing(*args):
5240 pass
5241 5242
5243 - def check_tests(self, test, dir):
5244 """just call the correct parser for the test log. 5245 Skip check_poles for LOonly folders""" 5246 if test in ['test_ME', 'test_MC']: 5247 return self.parse_test_mx_log(pjoin(dir, '%s.log' % test)) 5248 elif test == 'check_poles' and not os.path.exists(pjoin(dir,'parton_lum_0.f')): 5249 return self.parse_check_poles_log(pjoin(dir, '%s.log' % test))
5250 5251
5252 - def parse_test_mx_log(self, log):
5253 """read and parse the test_ME/MC.log file""" 5254 content = open(log).read() 5255 if 'FAILED' in content: 5256 logger.info('Output of the failing test:\n'+content[:-1],'$MG:BOLD') 5257 raise aMCatNLOError('Some tests failed, run cannot continue.\n' + \ 5258 'Please check that widths of final state particles (e.g. top) have been' + \ 5259 ' set to 0 in the param_card.dat.') 5260 else: 5261 lines = [l for l in content.split('\n') if 'PASSED' in l] 5262 logger.info(' Passed.') 5263 logger.debug('\n'+'\n'.join(lines))
5264 5265
5266 - def parse_check_poles_log(self, log):
5267 """reads and parse the check_poles.log file""" 5268 content = open(log).read() 5269 npass = 0 5270 nfail = 0 5271 for line in content.split('\n'): 5272 if 'PASSED' in line: 5273 npass +=1 5274 tolerance = float(line.split()[1]) 5275 if 'FAILED' in line: 5276 nfail +=1 5277 tolerance = float(line.split()[1]) 5278 5279 if nfail + npass == 0: 5280 logger.warning('0 points have been tried') 5281 return 5282 5283 if float(nfail)/float(nfail+npass) > 0.1: 5284 raise aMCatNLOError('Poles do not cancel, run cannot continue') 5285 else: 5286 logger.info(' Poles successfully cancel for %d points over %d (tolerance=%2.1e)' \ 5287 %(npass, nfail+npass, tolerance))
5288 5289
5290 - def write_test_input(self, test):
5291 """write the input files to run test_ME/MC or check_poles""" 5292 if test in ['test_ME', 'test_MC']: 5293 content = "-2 -2\n" #generate randomly energy/angle 5294 content+= "100 100\n" #run 100 points for soft and collinear tests 5295 content+= "0\n" #all FKS configs 5296 content+= '\n'.join(["-1"] * 50) #random diagram (=first diagram) 5297 elif test == 'check_poles': 5298 content = '20 \n -1\n' 5299 5300 file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w') 5301 if test == 'test_MC': 5302 shower = self.run_card['parton_shower'] 5303 header = "1 \n %s\n 1 -0.1\n-1 -0.1\n" % shower 5304 file.write(header + content) 5305 elif test == 'test_ME': 5306 header = "2 \n" 5307 file.write(header + content) 5308 else: 5309 file.write(content) 5310 file.close()
    # Question class handed to self.ask (as ask_class) by
    # ask_run_configuration to drive the run-mode switch dialog.
    # AskRunNLO is presumably defined earlier in this module -- confirm.
    action_switcher = AskRunNLO
    ############################################################################
5315 - def ask_run_configuration(self, mode, options, switch={}):
5316 """Ask the question when launching generate_events/multi_run""" 5317 5318 if 'parton' not in options: 5319 options['parton'] = False 5320 if 'reweightonly' not in options: 5321 options['reweightonly'] = False 5322 5323 if mode == 'auto': 5324 mode = None 5325 if not mode and (options['parton'] or options['reweightonly']): 5326 mode = 'noshower' 5327 5328 passing_cmd = [] 5329 for key,value in switch.keys(): 5330 passing_cmd.append('%s=%s' % (key,value)) 5331 5332 if 'do_reweight' in options and options['do_reweight']: 5333 passing_cmd.append('reweight=ON') 5334 if 'do_madspin' in options and options['do_madspin']: 5335 passing_cmd.append('madspin=ON') 5336 5337 force = self.force 5338 if mode == 'onlyshower': 5339 passing_cmd.append('onlyshower') 5340 force = True 5341 elif mode: 5342 passing_cmd.append(mode) 5343 5344 switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, 5345 mode=mode, force=force, 5346 first_cmd=passing_cmd, 5347 return_instance=True) 5348 5349 if 'mode' in switch: 5350 mode = switch['mode'] 5351 5352 #assign the mode depending of the switch 5353 if not mode or mode == 'auto': 5354 if switch['order'] == 'LO': 5355 if switch['runshower']: 5356 mode = 'aMC@LO' 5357 elif switch['fixed_order'] == 'ON': 5358 mode = 'LO' 5359 else: 5360 mode = 'noshowerLO' 5361 elif switch['order'] == 'NLO': 5362 if switch['runshower']: 5363 mode = 'aMC@NLO' 5364 elif switch['fixed_order'] == 'ON': 5365 mode = 'NLO' 5366 else: 5367 mode = 'noshower' 5368 logger.info('will run in mode: %s' % mode) 5369 5370 if mode == 'noshower': 5371 if switch['shower'] == 'OFF': 5372 logger.warning("""You have chosen not to run a parton shower. 5373 NLO events without showering are NOT physical. 5374 Please, shower the LesHouches events before using them for physics analyses. 5375 You have to choose NOW which parton-shower you WILL use and specify it in the run_card.""") 5376 else: 5377 logger.info("""Your Parton-shower choice is not available for running. 
5378 The events will be generated for the associated Parton-Shower. 5379 Remember that NLO events without showering are NOT physical.""", '$MG:BOLD') 5380 5381 5382 # specify the cards which are needed for this run. 5383 cards = ['param_card.dat', 'run_card.dat'] 5384 ignore = [] 5385 if mode in ['LO', 'NLO']: 5386 options['parton'] = True 5387 ignore = ['shower_card.dat', 'madspin_card.dat'] 5388 cards.append('FO_analyse_card.dat') 5389 else: 5390 if switch['madspin'] != 'OFF': 5391 cards.append('madspin_card.dat') 5392 if switch['reweight'] != 'OFF': 5393 cards.append('reweight_card.dat') 5394 if switch['madanalysis'] in ['HADRON', 'ON']: 5395 cards.append('madanalysis5_hadron_card.dat') 5396 if 'aMC@' in mode: 5397 cards.append('shower_card.dat') 5398 if mode == 'onlyshower': 5399 cards = ['shower_card.dat'] 5400 if options['reweightonly']: 5401 cards = ['run_card.dat'] 5402 5403 self.keep_cards(cards, ignore) 5404 5405 if mode =='onlyshower': 5406 cards = ['shower_card.dat'] 5407 5408 5409 # automatically switch to keep_wgt option 5410 first_cmd = cmd_switch.get_cardcmd() 5411 5412 if not options['force'] and not self.force: 5413 self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) 5414 5415 self.banner = banner_mod.Banner() 5416 5417 # store the cards in the banner 5418 for card in cards: 5419 self.banner.add(pjoin(self.me_dir, 'Cards', card)) 5420 # and the run settings 5421 run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()]) 5422 self.banner.add_text('run_settings', run_settings) 5423 5424 if not mode =='onlyshower': 5425 self.run_card = self.banner.charge_card('run_card') 5426 self.run_tag = self.run_card['run_tag'] 5427 #this is if the user did not provide a name for the current run 5428 if not hasattr(self, 'run_name') or not self.run_name: 5429 self.run_name = self.find_available_run_name(self.me_dir) 5430 #add a tag in the run_name for distinguish run_type 5431 if self.run_name.startswith('run_'): 5432 if mode in 
['LO','aMC@LO','noshowerLO']: 5433 self.run_name += '_LO' 5434 self.set_run_name(self.run_name, self.run_tag, 'parton') 5435 if self.run_card['ickkw'] == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']: 5436 raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""") 5437 elif self.run_card['ickkw'] == 3 and mode in ['aMC@NLO', 'noshower']: 5438 logger.warning("""You are running with FxFx merging enabled. To be able to merge 5439 samples of various multiplicities without double counting, you 5440 have to remove some events after showering 'by hand'. Please 5441 read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""") 5442 if self.run_card['parton_shower'].upper() == 'PYTHIA6Q': 5443 raise self.InvalidCmd("""FxFx merging does not work with Q-squared ordered showers.""") 5444 elif self.run_card['parton_shower'].upper() != 'HERWIG6' and self.run_card['parton_shower'].upper() != 'PYTHIA8' and self.run_card['parton_shower'].upper() != 'HERWIGPP': 5445 question="FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \ 5446 "Type \'n\' to stop or \'y\' to continue" 5447 answers = ['n','y'] 5448 answer = self.ask(question, 'n', answers) 5449 if answer == 'n': 5450 error = '''Stop opertation''' 5451 self.ask_run_configuration(mode, options) 5452 # raise aMCatNLOError(error) 5453 elif self.run_card['ickkw'] == -1 and mode in ['aMC@NLO', 'noshower']: 5454 # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs. 5455 raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""") 5456 if 'aMC@' in mode or mode == 'onlyshower': 5457 self.shower_card = self.banner.charge_card('shower_card') 5458 5459 elif mode in ['LO', 'NLO']: 5460 analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat') 5461 self.analyse_card = self.banner.charge_card('FO_analyse_card') 5462 5463 return mode
5464
#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
    """The command line processor of MadGraph: interactive-shell flavour.

    Combines aMCatNLOCmd (the aMC@NLO command logic) with cmd.CmdShell,
    which presumably supplies the interactive-shell behaviour
    (readline/tab completion) -- confirm in the extended_cmd module.
    """
# ------------------------------------------------------------------------------
# Usage messages and optparse parsers for the user-level commands
# (compile, launch, generate_events, calculate_xsect, shower).
# NOTE(review): these appear at module level right before the __main__ guard;
# confirm they are consumed by the corresponding do_* handlers of the
# interface classes defined above.
# ------------------------------------------------------------------------------
_compile_usage = "compile [MODE] [options]\n" + \
                "-- compiles aMC@NLO \n" + \
                "   MODE can be either FO, for fixed-order computations, \n" + \
                "   or MC for matching with parton-shower monte-carlos. \n" + \
                "   (if omitted, it is set to MC)\n"
_compile_parser = misc.OptionParser(usage=_compile_usage)
_compile_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")

_launch_usage = "launch [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                "     If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                "     computation of the total cross section and the filling of parton-level histograms \n" + \
                "     specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                "     If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                "     event file is generated which will be showered with the MonteCarlo specified \n" + \
                "     in the run_card.dat\n"

_launch_parser = misc.OptionParser(usage=_launch_usage)
_launch_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")
_launch_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_launch_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_launch_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                            help="Skip integration and event generation, just run reweight on the" + \
                                 " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_launch_parser.add_option("-p", "--parton", default=False, action='store_true',
                            help="Stop the run after the parton level file generation (you need " + \
                                 "to shower the file in order to get physical results)")
_launch_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")
_launch_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")
_launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                            help="For use with APPLgrid only: start from existing grids")
_launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true',
                            help="Run the reweight module (reweighting by different model parameters)")
_launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true',
                            help="Run the madspin package")


_generate_events_usage = "generate_events [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                "     If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                "     computation of the total cross section and the filling of parton-level histograms \n" + \
                "     specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                "     If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                "     event file is generated which will be showered with the MonteCarlo specified \n" + \
                "     in the run_card.dat\n"

_generate_events_parser = misc.OptionParser(usage=_generate_events_usage)
_generate_events_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the generate_events, without editing them")
_generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                            help="Skip integration and event generation, just run reweight on the" + \
                                 " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_generate_events_parser.add_option("-p", "--parton", default=False, action='store_true',
                            help="Stop the run after the parton level file generation (you need " + \
                                 "to shower the file in order to get physical results)")
_generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")
_generate_events_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")


_calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \
                "-- calculate cross section up to ORDER.\n" + \
                "   ORDER can be either LO or NLO (if omitted, it is set to NLO). \n"

_calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage)
_calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")
_calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")
_calculate_xsect_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                            help="For use with APPLgrid only: start from existing grids")
_calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")

_shower_usage = 'shower run_name [options]\n' + \
                '-- do shower/hadronization on parton-level file generated for run run_name\n' + \
                '   all the information (e.g. number of events, MonteCarlo, ...\n' + \
                '   are directly read from the header of the event file\n'
_shower_parser = misc.OptionParser(usage=_shower_usage)
_shower_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the shower_card present in the directory for the launch, without editing")

if '__main__' == __name__:
    # Launch the interface without any check if one code is already running.
    # This can ONLY run a single command !!
    import sys
    # Hard requirement: Python 2.6+ (Python 3 not supported by this codebase).
    if not sys.version_info[0] == 2 or sys.version_info[1] < 6:
        sys.exit('MadGraph/MadEvent 5 works only with python 2.6 or later (but not python 3.X).\n'+\
                 'Please upgrate your version of python.')

    import os
    import optparse
    # Get the directory of the script real path (bin)
    # and add it to the current PYTHONPATH
    root_path = os.path.dirname(os.path.dirname(os.path.realpath( __file__ )))
    sys.path.insert(0, root_path)

    # optparse subclass that raises instead of calling sys.exit on a bad
    # option, so the argv-splitting loop below can retry with fewer tokens.
    class MyOptParser(optparse.OptionParser):
        class InvalidOption(Exception): pass
        def error(self, msg=''):
            raise MyOptParser.InvalidOption(msg)
    # Write out nice usage message if called with -h or --help
    usage = "usage: %prog [options] [FILE] "
    parser = MyOptParser(usage=usage)
    parser.add_option("-l", "--logging", default='INFO',
                      help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
    parser.add_option("","--web", action="store_true", default=False, dest='web', \
                     help='force toce to be in secure mode')
    parser.add_option("","--debug", action="store_true", default=False, dest='debug', \
                     help='force to launch debug mode')
    parser_error = ''
    done = False

    # Try to parse progressively shorter prefixes of argv, appending the
    # unparsed tail back onto args once a prefix parses.
    # NOTE(review): there is no break once parsing succeeds, so later
    # (shorter) prefixes keep re-parsing and re-appending tails -- confirm
    # this is the intended behaviour for multi-token commands.
    for i in range(len(sys.argv)-1):
        try:
            (options, args) = parser.parse_args(sys.argv[1:len(sys.argv)-i])
            done = True
        except MyOptParser.InvalidOption, error:
            pass
        else:
            args += sys.argv[len(sys.argv)-i:]
    if not done:
        # raise correct error:
        try:
            (options, args) = parser.parse_args()
        except MyOptParser.InvalidOption, error:
            print error
            sys.exit(2)

    if len(args) == 0:
        args = ''

    import subprocess
    import logging
    import logging.config
    # Set logging level according to the logging level given by options
    #logging.basicConfig(level=vars(logging)[options.logging])
    import internal.coloring_logging
    try:
        # under a non-optimized interpreter, promote INFO to DEBUG
        if __debug__ and options.logging == 'INFO':
            options.logging = 'DEBUG'
        if options.logging.isdigit():
            level = int(options.logging)
        else:
            # map a level name (e.g. 'WARNING') to the logging constant
            level = eval('logging.' + options.logging)
        print os.path.join(root_path, 'internal', 'me5_logging.conf')
        logging.config.fileConfig(os.path.join(root_path, 'internal', 'me5_logging.conf'))
        logging.root.setLevel(level)
        logging.getLogger('madgraph').setLevel(level)
    except:
        # NOTE(review): the bare except immediately re-raises, making the
        # trailing ``pass`` unreachable -- likely a leftover from a
        # swallow-errors version; confirm intent.
        raise
        pass

    # Call the cmd interface main loop
    try:
        if args:
            # a single command is provided
            if '--web' in args:
                # web/secure mode: use the non-interactive command processor
                i = args.index('--web')
                args.pop(i)
                cmd_line = aMCatNLOCmd(me_dir=os.path.dirname(root_path),force_run=True)
            else:
                cmd_line = aMCatNLOCmdShell(me_dir=os.path.dirname(root_path),force_run=True)

            # the first token must name an existing do_* command handler
            if not hasattr(cmd_line, 'do_%s' % args[0]):
                if parser_error:
                    print parser_error
                    print 'and %s can not be interpreted as a valid command.' % args[0]
                else:
                    print 'ERROR: %s  not a valid command. Please retry' % args[0]
            else:
                cmd_line.use_rawinput = False
                cmd_line.run_cmd(' '.join(args))
                cmd_line.run_cmd('quit')

    except KeyboardInterrupt:
        print 'quit on KeyboardInterrupt'
        pass