
Source Code for Module madgraph.interface.amcatnlo_run_interface

################################################################################
#
# Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""A user-friendly command line interface to access MadGraph5_aMC@NLO features.
   Uses the cmd package for command interpretation and tab completion.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function

import atexit
import glob
import logging
import math
import optparse
import os
import pydoc
import random
import re
import shutil
import subprocess
import sys
import traceback
import time
import signal
import tarfile
import copy
import datetime

import six
StringIO = six  # NOTE: aliases the six module itself, so StringIO.StringIO resolves to six.StringIO
from six.moves import range
from six.moves import zip

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    import readline
    GNU_SPLITTING = ('GNU' in readline.__doc__)
except Exception:
    GNU_SPLITTING = True

root_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
root_path = os.path.split(root_path)[0]
sys.path.insert(0, os.path.join(root_path, 'bin'))

# useful shortcut
pjoin = os.path.join
# Special loggers for the Cmd Interface
logger = logging.getLogger('madgraph.stdout')         # -> stdout
logger_stderr = logging.getLogger('madgraph.stderr')  # -> stderr

try:
    import madgraph
except ImportError:
    # running from within a standalone process directory
    aMCatNLO = True
    import internal.extended_cmd as cmd
    import internal.common_run_interface as common_run
    import internal.banner as banner_mod
    import internal.misc as misc
    from internal import InvalidCmd, MadGraph5Error
    import internal.files as files
    import internal.cluster as cluster
    import internal.save_load_object as save_load_object
    import internal.gen_crossxhtml as gen_crossxhtml
    import internal.sum_html as sum_html
    import internal.shower_card as shower_card
    import internal.FO_analyse_card as analyse_card
    import internal.lhe_parser as lhe_parser
else:
    # import from the madgraph directory
    aMCatNLO = False
    import madgraph.interface.extended_cmd as cmd
    import madgraph.interface.common_run_interface as common_run
    import madgraph.iolibs.files as files
    import madgraph.iolibs.save_load_object as save_load_object
    import madgraph.madevent.gen_crossxhtml as gen_crossxhtml
    import madgraph.madevent.sum_html as sum_html
    import madgraph.various.banner as banner_mod
    import madgraph.various.cluster as cluster
    import madgraph.various.misc as misc
    import madgraph.various.shower_card as shower_card
    import madgraph.various.FO_analyse_card as analyse_card
    import madgraph.various.lhe_parser as lhe_parser
    from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error, MG5DIR

class aMCatNLOError(Exception):
    pass


def compile_dir(*arguments):
    """compile the directory p_dir
    arguments is the tuple (me_dir, p_dir, mode, options, tests, exe, run_mode)
    this function must not be a class method in order to allow
    the compilation to run on multiple cores"""

    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments) == 7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        raise aMCatNLOError('incorrect number of arguments')
    logger.info(' Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        # compile everything
        # compile and run the tests
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            if test == 'test_ME' or test == 'test_MC':
                test_exe = 'test_soft_col_limits'
            else:
                test_exe = test
            misc.compile([test_exe], cwd=this_dir, job_specs=False)
            input = pjoin(me_dir, '%s_input.txt' % test)
            # this can be improved/better written to handle the output
            misc.call(['./%s' % (test_exe)], cwd=this_dir,
                      stdin=open(input), stdout=open(pjoin(this_dir, '%s.log' % test), 'w'),
                      close_fds=True)
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'MadLoop5_resources')):
                tf = tarfile.open(pjoin(this_dir, 'MadLoop5_resources.tar.gz'), 'w:gz',
                                  dereference=True)
                tf.add(pjoin(this_dir, 'MadLoop5_resources'), arcname='MadLoop5_resources')
                tf.close()

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs=False)
            misc.call(['./gensym'], cwd=this_dir,
                      stdout=open(pjoin(this_dir, 'gensym.log'), 'w'),
                      close_fds=True)
            # compile madevent_mintMC/mintFO
            misc.compile([exe], cwd=this_dir, job_specs=False)
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs=False)

        logger.info(' %s done.' % p_dir)
        return 0
    except MadGraph5Error as msg:
        return msg


def check_compiler(options, block=False):
    """check that the current fortran compiler is gfortran 4.6 or later.
    If block, stop the execution, otherwise just print a warning"""

    msg = 'In order to be able to run MadGraph5_aMC@NLO at NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected.\n' + \
          'Note that you can still run all MadEvent runs without any problem!'
    # first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        curr_version = curr_version.split('.')
        if len(curr_version) == 1:
            curr_version.append('0')

        if int(curr_version[0]) < 5:
            if int(curr_version[0]) == 4 and int(curr_version[1]) > 5:
                return
            version_str = compiler + ' ' + '.'.join(curr_version)
            if block:
                raise aMCatNLOError(msg % version_str)
            else:
                logger.warning(msg % version_str)


#===============================================================================
# CmdExtended
#===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    # suggested list of commands
    next_possibility = {
        'start': [],
    }

    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/mg5amcnlo\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue, please contact us on https://answers.launchpad.net/mg5amcnlo\n'

    keyboard_stop_msg = """stopping all operations
            in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError

    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation"""

        # Tag allowing/forbidding questions
        self.force = False

        # If possible, build an info line with current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        if info and 'version' in info and 'date' in info:
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                info_line = "#* VERSION %s %s %s *\n" % \
                            (info['version'],
                             (30 - len_version - len_date) * ' ',
                             info['date'])
        else:
            version = open(pjoin(root_path, 'MGMEVersion.txt')).readline().strip()
            info_line = "#* VERSION %s %s *\n" % \
                        (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in time at writeout time!
        self.history_header = \
            '#************************************************************\n' + \
            '#*                   MadGraph5_aMC@NLO                     *\n' + \
            '#*                                                         *\n' + \
            '#*                *                       *                *\n' + \
            '#*                  *        * *        *                  *\n' + \
            '#*                    * * * * 5 * * * *                    *\n' + \
            '#*                  *        * *        *                  *\n' + \
            '#*                *                       *                *\n' + \
            '#*                                                         *\n' + \
            '#*                                                         *\n' + \
            info_line + \
            '#*                                                         *\n' + \
            '#*   The MadGraph5_aMC@NLO Development Team - Find us at   *\n' + \
            '#*   https://server06.fynu.ucl.ac.be/projects/madgraph     *\n' + \
            '#*                        and                              *\n' + \
            '#*                http://amcatnlo.cern.ch                  *\n' + \
            '#*                                                         *\n' + \
            '#************************************************************\n' + \
            '#*                                                         *\n' + \
            '#*              Command File for aMCatNLO                  *\n' + \
            '#*                                                         *\n' + \
            '#*          run as ./bin/aMCatNLO.py filename              *\n' + \
            '#*                                                         *\n' + \
            '#************************************************************\n'

        if info_line:
            info_line = info_line[1:]

        logger.info(
            "************************************************************\n" + \
            "*                                                          *\n" + \
            "*           W E L C O M E  to  M A D G R A P H 5           *\n" + \
            "*                      a M C @ N L O                       *\n" + \
            "*                                                          *\n" + \
            "*                 *                       *                *\n" + \
            "*                   *        * *        *                  *\n" + \
            "*                     * * * * 5 * * * *                    *\n" + \
            "*                   *        * *        *                  *\n" + \
            "*                 *                       *                *\n" + \
            "*                                                          *\n" + \
            info_line + \
            "*                                                          *\n" + \
            "*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
            "*                 http://amcatnlo.cern.ch                  *\n" + \
            "*                                                          *\n" + \
            "*               Type 'help' for in-line help.              *\n" + \
            "*                                                          *\n" + \
            "************************************************************")

        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)

    def get_history_header(self):
        """return the history header"""
        return self.history_header % misc.get_time_info()

    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interrupt"""
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except Exception:
            pass

    def postcmd(self, stop, line):
        """Update the status of the run for finishing interactive commands"""

        # relaxing the tag forbidding questions
        self.force = False

        if not self.use_rawinput:
            return stop

        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit', 'quit', 'EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            misc.sprint('self.update_status fails', log=logger)
            pass

    def nice_user_error(self, error, line):
        """If a ME run is currently running, add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)

    def nice_config_error(self, error, line):
        """If a ME run is currently running, add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)

    def nice_error_handling(self, error, line):
        """If a ME run is currently running, add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)


#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """The series of help routines for the aMCatNLOCmd"""

    def help_launch(self):
        """help for the launch command"""
        _launch_parser.print_help()

    def help_banner_run(self):
        logger.info("syntax: banner_run Path|RUN [--run_options]")
        logger.info("-- Reproduce a run following a given banner")
        logger.info("   One of the following arguments is required:")
        logger.info("   Path should be the path of a valid banner.")
        logger.info("   RUN should be the name of a run of the current directory")
        self.run_options_help([('-f', 'answer all questions by default'),
                               ('--name=X', 'Define the name associated with the new run')])

    def help_compile(self):
        """help for the compile command"""
        _compile_parser.print_help()

    def help_generate_events(self):
        """help for the generate_events command;
        just calls help_launch"""
        _generate_events_parser.print_help()

    def help_calculate_xsect(self):
        """help for the calculate_xsect command"""
        _calculate_xsect_parser.print_help()

    def help_shower(self):
        """help for the shower command"""
        _shower_parser.print_help()

    def help_open(self):
        logger.info("syntax: open FILE")
        logger.info("-- open a file with the appropriate editor.")
        logger.info("   If FILE is index.html, param_card.dat or run_card.dat,")
        logger.info("   the path to the last created/used directory is used")

    def run_options_help(self, data):
        if data:
            logger.info('-- local options:')
            for name, info in data:
                logger.info('      %s : %s' % (name, info))

        logger.info("-- session options:")
        logger.info("      Note that those options will be kept for the current session")
        logger.info("      --cluster : Submit to the cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info("      --multicore : Run in multi-core configuration")
        logger.info("      --nb_core=X : limit the number of cores to use to X.")


#===============================================================================
# CheckValidForCmd
#===============================================================================
class CheckValidForCmd(object):
    """The series of check routines for the aMCatNLOCmd"""

    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory"""

        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            raise self.InvalidCmd('Invalid syntax, please specify the run name')
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd('Directory %s does not exist' % \
                                  pjoin(self.me_dir, 'Events', args[0]))

        self.set_run_name(args[0], level='shower')
        args[0] = pjoin(self.me_dir, 'Events', args[0])

    def check_plot(self, args):
        """Check the arguments for the plot command
        plot run_name modes"""

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir or not td:
            logger.info('Retrying to read the configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No MadAnalysis path correctly set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No path to the td directory correctly set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name currently defined. Please add this information.')
            args.append('all')
            return

        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name currently defined. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown option %s' % arg)

    def check_pgs(self, arg):
        """Check the arguments for the pgs command
        syntax: pgs [NAME]
        Note that other options have already been removed at this point
        """

        # If no pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retrying to read the configuration file to find the pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
           os.path.exists(pjoin(self.options['pythia-pgs_path'], 'src')):
            error_msg = 'No pythia-pgs path correctly set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently defined. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No pythia_events.hep file currently available.
            Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))

            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s. ' % (self.run_name, prev_tag))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock

    def check_delphes(self, arg):
        """Check the arguments for the delphes command
        syntax: delphes [NAME]
        Note that other options have already been removed at this point
        """

        # If no delphes path
        if not self.options['delphes_path']:
            logger.info('Retrying to read the configuration file to find the delphes path')
            self.set_configuration()

        if not self.options['delphes_path']:
            error_msg = 'No delphes path correctly set. '
            error_msg += 'Please use the set command to define the path and retry. '
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently defined. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir, 'Events', 'pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No pythia_events.hep file currently available.
            Please specify a valid run_name''')

        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'delphes')
            filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events'))

            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s ' \
                                      % (self.run_name, prev_tag,
                                         pjoin(self.me_dir, 'Events', self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip', stdout=open(output_file, 'w'),
                                                argument=['-c', input_file],
                                                close_fds=True)
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'delphes')

    def check_calculate_xsect(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_calculate_xsect()
            raise self.InvalidCmd('Invalid Syntax: Too many arguments')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                                  ' are not compatible. Please choose one.')

    def check_generate_events(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_generate_events()
            raise self.InvalidCmd('Invalid Syntax: Too many arguments')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                                  ' are not compatible. Please choose one.')

    def check_banner_run(self, args):
        """check the validity of the line"""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        tag = [a[6:] for a in args if a.startswith('--tag=')]

        if os.path.exists(args[0]):
            type = 'banner'
            format = self.detect_card_type(args[0])
            if format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                            (args[0], tag[0]))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associated with this name and tag.')
        else:
            name = args[0]
            type = 'run'
            banners = misc.glob('*_banner.txt', pjoin(self.me_dir, 'Events', args[0]))
            if not banners:
                raise self.InvalidCmd('No banner associated with this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                # list the tags and propose them to the user
                tags = [os.path.basename(p)[len(args[0]) + 1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir, 'Events', args[0], '%s_%s_banner.txt' % \
                                (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            run_name = run_name[0]
            try:
                self.exec_cmd('remove %s all banner -f' % run_name)
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' % \
                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    self.exec_cmd('remove %s all banner -f' % run_name)
                except Exception:
                    pass
                self.set_run_name(name)

    def check_launch(self, args, options):
        """check the validity of the line. args is MODE
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd('Invalid Syntax: Too many arguments')

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'auto']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                                  ' are not compatible. Please choose one.')
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd('option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"')

    def check_compile(self, args, options):
        """check the validity of the line. args is MODE
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd('Invalid Syntax: Too many arguments')

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "FO" or "MC"' % args[0])
            mode = args[0]

        # check for incompatible options/modes


#===============================================================================
# CompleteForCmd
#===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """The series of completion routines for the aMCatNLOCmd"""

    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for the launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO', 'aMC@NLO', 'aMC@LO'], line)
        elif len(args) == 2 and line[begidx - 1] == '@':
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_banner_run(self, text, line, begidx, endidx, formatting=True):
        "Complete the banner_run command"
        try:
            args = self.split_arg(line[0:begidx], error=False)

            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                                            os.path.join('.', *[a for a in args \
                                                                if a.endswith(os.path.sep)]))

            if len(args) > 1:
                # only options are possible
                tags = misc.glob('%s_*_banner.txt' % args[1], pjoin(self.me_dir, 'Events', args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1]) + 1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags + ['--name=', '-f'], line)

            # First argument
            possibilites = {}

            comp = self.path_completion(text, os.path.join('.', *[a for a in args \
                                                                  if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilites['Path from ./'] = comp

            run_list = misc.glob(pjoin('*', '*_banner.txt'), pjoin(self.me_dir, 'Events'))
            run_list = [n.rsplit('/', 2)[1] for n in run_list]
            possibilites['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilites, formatting)

        except Exception as error:
            print(error)

    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for the compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['FO', 'MC'], line)
        else:
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for the calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return mode
            return self.list_completion(text, ['LO', 'NLO'], line)
        else:
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_generate_events(self, text, line, begidx, endidx):
        """auto-completion for the generate_events command;
        calls the completion for launch"""
        self.complete_launch(text, line, begidx, endidx)

    def complete_shower(self, text, line, begidx, endidx):
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events.lhe.gz'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

    def complete_plot(self, text, line, begidx, endidx):
        """Complete the plot command"""

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events.lhe*'), pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)

    def complete_pgs(self, text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            # return valid run_name
            data = misc.glob(pjoin('*', 'events_*.hep.gz'),
                             pjoin(self.me_dir, 'Events'))
            data = [n.rsplit('/', 2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                            '--tag=', '--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                        '--tag=', '--no_default'], line)

    complete_delphes = complete_pgs


class aMCatNLOAlreadyRunning(InvalidCmd):
    pass


class AskRunNLO(cmd.ControlSwitch):

    to_control = [('order', 'Type of perturbative computation'),
                  ('fixed_order', 'No MC@[N]LO matching / event generation'),
                  ('shower', 'Shower the generated events'),
                  ('madspin', 'Decay onshell particles'),
                  ('reweight', 'Add weights to events for new hypotheses'),
                  ('madanalysis', 'Run MadAnalysis5 on the events generated')]

    quit_on = cmd.ControlSwitch.quit_on + ['onlyshower']

    def __init__(self, question, line_args=[], mode=None, force=False,
                 *args, **opt):

        self.me_dir = opt['mother_interface'].me_dir
        self.check_available_module(opt['mother_interface'].options)
        self.last_mode = opt['mother_interface'].last_mode
        self.proc_characteristics = opt['mother_interface'].proc_characteristics
        self.run_card = banner_mod.RunCard(pjoin(self.me_dir, 'Cards', 'run_card.dat'),
                                           consistency='warning')
        super(AskRunNLO, self).__init__(self.to_control, opt['mother_interface'],
                                        *args, **opt)

    @property
    def answer(self):

        out = super(AskRunNLO, self).answer
        if out['shower'] == 'HERWIG7':
            out['shower'] = 'HERWIGPP'

        if out['shower'] not in self.get_allowed('shower') or out['shower'] == 'OFF':
            out['runshower'] = False
        else:
            out['runshower'] = True
        return out

    def check_available_module(self, options):

        self.available_module = set()
        if options['madanalysis5_path']:
            self.available_module.add('MA5')
        if not aMCatNLO or ('mg5_path' in options and options['mg5_path']):
            self.available_module.add('MadSpin')
        if misc.has_f2py() or options['f2py_compiler']:
            self.available_module.add('reweight')
        if options['pythia8_path']:
            self.available_module.add('PY8')
        if options['hwpp_path'] and options['thepeg_path'] and options['hepmc_path']:
            self.available_module.add('HW7')

        MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib')
        if os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))):
            self.available_module.add('StdHEP')

    #
    # shortcuts
    #
    def ans_lo(self, value):
        """function called if the user types lo=value, or lo (then value is None)"""

        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: lo=%s' % value)

    def ans_nlo(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'ON'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: nlo=%s' % value)

    def ans_amc__at__nlo(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@NLO=%s' % value)

    def ans_amc__at__lo(self, value):
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'ON')
        else:
            logger.warning('Invalid command: aMC@LO=%s' % value)

    def ans_noshower(self, value):
        if value is None:
            self.switch['order'] = 'NLO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshower=%s' % value)

    def ans_onlyshower(self, value):
        if value is None:
            self.switch['mode'] = 'onlyshower'
            self.switch['madspin'] = 'OFF'
            self.switch['reweight'] = 'OFF'
        else:
            logger.warning('Invalid command: onlyshower=%s' % value)

    def ans_noshowerlo(self, value):
        if value is None:
            self.switch['order'] = 'LO'
            self.switch['fixed_order'] = 'OFF'
            self.set_switch('shower', 'OFF')
        else:
            logger.warning('Invalid command: noshowerlo=%s' % value)

    def ans_madanalysis5(self, value):
        """shortcut madanalysis5 -> madanalysis"""

        if value is None:
            return self.onecmd('madanalysis')
        else:
            self.set_switch('madanalysis', value)

    #
    # ORDER
    #
    def get_allowed_order(self):
        return ["LO", "NLO"]

    def set_default_order(self):

        if self.last_mode in ['LO', 'aMC@LO', 'noshowerLO']:
            self.switch['order'] = 'LO'
        else:
            self.switch['order'] = 'NLO'

    def set_switch_off_order(self):
        return

    #
    # Fixed order
    #
    def get_allowed_fixed_order(self):
        """ """
        if self.proc_characteristics['ninitial'] == 1:
            return ['ON']
        else:
            return ['ON', 'OFF']

    def set_default_fixed_order(self):

        if self.last_mode in ['LO', 'NLO']:
            self.switch['fixed_order'] = 'ON'
        elif self.proc_characteristics['ninitial'] == 1:
            self.switch['fixed_order'] = 'ON'
        else:
            self.switch['fixed_order'] = 'OFF'

    def color_for_fixed_order(self, switch_value):

        if switch_value in ['OFF']:
            return self.green % switch_value
        else:
            return self.red % switch_value

    def color_for_shower(self, switch_value):

        if switch_value in ['ON']:
            return self.green % switch_value
        elif switch_value in self.get_allowed('shower'):
            return self.green % switch_value
        else:
            return self.red % switch_value

    def consistency_fixed_order_shower(self, vfix, vshower):
        """ consistency_XX_YY(val_XX, val_YY)
            -> XX is the new key set by the user to a new value val_XX
            -> YY is another key set by the user.
            -> return value should be None or "replace_YY"
        """

        if vfix == 'ON' and vshower != 'OFF':
            return 'OFF'
        return None

    consistency_fixed_order_madspin = consistency_fixed_order_shower
    consistency_fixed_order_reweight = consistency_fixed_order_shower
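
    # Illustrative sketch (not part of the module): the consistency hooks
    # implement a small conflict-resolution protocol. When the user sets
    # fixed_order=ON while shower (or madspin/reweight) is still active, the
    # hook returns the replacement value for the other key, e.g.
    #
    #     switch = {'fixed_order': 'ON', 'shower': 'PYTHIA8'}
    #     new = self.consistency_fixed_order_shower('ON', switch['shower'])
    #     if new is not None:          # here new == 'OFF'
    #         switch['shower'] = new   # shower is forced OFF for fixed order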

    def consistency_fixed_order_madanalysis(self, vfix, vma5):

        if vfix == 'ON' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_shower_fixed_order(self, vshower, vfix):
        """ consistency_XX_YY(val_XX, val_YY)
            -> XX is the new key set by the user to a new value val_XX
            -> YY is another key set by the user.
            -> return value should be None or "replace_YY"
        """

        if vshower != 'OFF' and vfix == 'ON':
            return 'OFF'
        return None

    consistency_madspin_fixed_order = consistency_shower_fixed_order
    consistency_reweight_fixed_order = consistency_shower_fixed_order
    consistency_madanalysis_fixed_order = consistency_shower_fixed_order

    #
    # Shower
    #
    def get_allowed_shower(self):
        """ """

        if hasattr(self, 'allowed_shower'):
            return self.allowed_shower

        if not misc.which('bc'):
            return ['OFF']

        if self.proc_characteristics['ninitial'] == 1:
            self.allowed_shower = ['OFF']
            return ['OFF']
        else:
            if 'StdHEP' in self.available_module:
                allowed = ['HERWIG6', 'OFF', 'PYTHIA6Q', 'PYTHIA6PT']
            else:
                allowed = ['OFF']
            if 'PY8' in self.available_module:
                allowed.append('PYTHIA8')
            if 'HW7' in self.available_module:
                allowed.append('HERWIGPP')

            self.allowed_shower = allowed

            return allowed

    def check_value_shower(self, value):
        """ """

        if value.upper() in self.get_allowed_shower():
            return True
        if value.upper() in ['PYTHIA8', 'HERWIGPP']:
            return True
        if value.upper() == 'ON':
            return self.run_card['parton_shower']
        if value.upper() in ['P8', 'PY8', 'PYTHIA_8']:
            return 'PYTHIA8'
        if value.upper() in ['PY6', 'P6', 'PY6PT', 'PYTHIA_6', 'PYTHIA_6PT', 'PYTHIA6PT', 'PYTHIA6_PT']:
            return 'PYTHIA6PT'
        if value.upper() in ['PY6Q', 'PYTHIA_6Q', 'PYTHIA6Q', 'PYTHIA6_Q']:
            return 'PYTHIA6Q'
        if value.upper() in ['HW7', 'HERWIG7']:
            return 'HERWIG7'
        if value.upper() in ['HW++', 'HWPP', 'HERWIG++']:
            return 'HERWIGPP'
        if value.upper() in ['HW6', 'HERWIG_6']:
            return 'HERWIG6'

    def set_default_shower(self):

        if self.last_mode in ['LO', 'NLO', 'noshower', 'noshowerLO']:
            self.switch['shower'] = 'OFF'
            return

        if self.proc_characteristics['ninitial'] == 1:
            self.switch['shower'] = 'OFF'
            return

        if not misc.which('bc'):
            logger.warning('The bc command is not available: running the shower is '
                           'disabled. Please install it if you want to run the shower '
                           '(e.g. sudo apt-get install bc).')
            self.switch['shower'] = 'OFF'
            return

        if os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            self.switch['shower'] = self.run_card['parton_shower']
            #self.switch['shower'] = 'ON'
            self.switch['fixed_order'] = "OFF"
        else:
            self.switch['shower'] = 'OFF'

    def consistency_shower_madanalysis(self, vshower, vma5):
        """MA5 only possible with (N)LO+PS if the shower is run"""

        if vshower == 'OFF' and vma5 == 'ON':
            return 'OFF'
        return None

    def consistency_madanalysis_shower(self, vma5, vshower):

        if vma5 == 'ON' and vshower == 'OFF':
            return 'ON'
        return None

    def get_cardcmd_for_shower(self, value):
        """adapt the run_card according to this setup; return the list of cmds to run"""

        if value != 'OFF':
            return ['set parton_shower %s' % self.switch['shower']]
        return []

    #
    # madspin
    #
    def get_allowed_madspin(self):
        """ """

        if hasattr(self, 'allowed_madspin'):
            return self.allowed_madspin

        self.allowed_madspin = []

        if 'MadSpin' not in self.available_module:
            return self.allowed_madspin
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MadSpin')
            self.allowed_madspin = ['OFF']
            return self.allowed_madspin
        else:
            self.allowed_madspin = ['OFF', 'ON', 'onshell']
            return self.allowed_madspin

    def check_value_madspin(self, value):
        """handle aliases and valid options not present in get_allowed_madspin;
        remember that this mode should always be OFF for 1>N ('ON' not in allowed values)"""

        if value.upper() in self.get_allowed_madspin():
            if value == value.upper():
                return True
            else:
                return value.upper()
        elif value.lower() in self.get_allowed_madspin():
            if value == value.lower():
                return True
            else:
                return value.lower()

        if 'MadSpin' not in self.available_module or \
           'ON' not in self.get_allowed_madspin():
            return False

        if value.lower() in ['madspin', 'full']:
            return 'full'
        elif value.lower() in ['none']:
            return 'none'

    def set_default_madspin(self):

        if 'MadSpin' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'madspin_card.dat')):
                self.switch['madspin'] = 'ON'
            else:
                self.switch['madspin'] = 'OFF'
        else:
            self.switch['madspin'] = 'Not Avail.'

    def get_cardcmd_for_madspin(self, value):
        """set some commands to run before allowing the user to modify the cards."""

        if value == 'onshell':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode onshell"]
        elif value in ['full', 'madspin']:
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode madspin"]
        elif value == 'none':
            return ["edit madspin_card --replace_line='set spinmode' --before_line='decay' set spinmode none"]
        else:
            return []

    #
    # reweight
    #
    def get_allowed_reweight(self):
        """set the valid (visible) options for reweight"""

        if hasattr(self, 'allowed_reweight'):
            return getattr(self, 'allowed_reweight')

        self.allowed_reweight = []
        if 'reweight' not in self.available_module:
            return self.allowed_reweight
        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('reweight')
            self.allowed_reweight.append('OFF')
            return self.allowed_reweight
        else:
            self.allowed_reweight = ['OFF', 'ON', 'NLO', 'NLO_TREE', 'LO']
            return self.allowed_reweight

    def set_default_reweight(self):
        """initialise the switch for reweight"""

        if 'reweight' in self.available_module:
            if os.path.exists(pjoin(self.me_dir, 'Cards', 'reweight_card.dat')):
                self.switch['reweight'] = 'ON'
            else:
                self.switch['reweight'] = 'OFF'
        else:
            self.switch['reweight'] = 'Not Avail.'

    def get_cardcmd_for_reweight(self, value):
        """adapt the run_card according to this setup; return the list of cmds to run"""

        if value == 'LO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode LO"]
        elif value == 'NLO':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO",
                    "set store_rwgt_info T"]
        elif value == 'NLO_TREE':
            return ["edit reweight_card --replace_line='change mode' --before_line='launch' change mode NLO_tree",
                    "set store_rwgt_info T"]
        return []

    #
    # MadAnalysis5
    #
    def get_allowed_madanalysis(self):

        if hasattr(self, 'allowed_madanalysis'):
            return self.allowed_madanalysis

        self.allowed_madanalysis = []

        if 'MA5' not in self.available_module:
            return self.allowed_madanalysis

        if self.proc_characteristics['ninitial'] == 1:
            self.available_module.remove('MA5')
            self.allowed_madanalysis = ['OFF']
            return self.allowed_madanalysis
        else:
            self.allowed_madanalysis = ['OFF', 'ON']
            return self.allowed_madanalysis

    def set_default_madanalysis(self):
        """initialise the switch for madanalysis"""

        if 'MA5' not in self.available_module:
            self.switch['madanalysis'] = 'Not Avail.'
        elif os.path.exists(pjoin(self.me_dir, 'Cards', 'madanalysis5_hadron_card.dat')):
            self.switch['madanalysis'] = 'ON'
        else:
            self.switch['madanalysis'] = 'OFF'

    def check_value_madanalysis(self, value):
        """check that an entry is valid; return the valid entry in case of shortcut"""

        if value.upper() in self.get_allowed('madanalysis'):
            return True
        value = value.lower()
        if value == 'hadron':
            return 'ON' if 'ON' in self.get_allowed_madanalysis() else False
        else:
            return False


#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
    """The command line processor of MadGraph"""

    # Truth values
    true = ['T', '.true.', True, 'true']
    # Options and formats available
    _run_options = ['--cluster', '--multicore', '--nb_core=', '--nb_core=2', '-c', '-m']
    _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes']
    _calculate_decay_options = ['-f', '--accuracy=0.']
    _set_options = ['stdout_level', 'fortran_compiler', 'cpp_compiler', 'timeout']
    _plot_mode = ['all', 'parton', 'shower', 'pgs', 'delphes']
    _clean_mode = _plot_mode + ['channel', 'banner']
    _display_opts = ['run_name', 'options', 'variable']
    # survey options, dict from name to type, default value, and help text
    # Variables to store object information
    web = False
    cluster_mode = 0
    queue = 'madgraph'
    nb_core = None
    make_opts_var = {}

    next_possibility = {
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
                  'help generate_events'],
        'generate_events': ['generate_events [OPTIONS]', 'shower'],
        'launch': ['launch [OPTIONS]', 'shower'],
        'shower': ['generate_events [OPTIONS]']
    }

    ############################################################################
    def __init__(self, me_dir=None, options={}, *completekey, **stdin):
        """add information to the cmd"""

        self.start_time = 0
        CmdExtended.__init__(self, me_dir, options, *completekey, **stdin)
        #common_run.CommonRunCmd.__init__(self, me_dir, options)

        self.mode = 'aMCatNLO'
        self.nb_core = 0
        self.prompt = "%s>" % os.path.basename(pjoin(self.me_dir))

        self.load_results_db()
        self.results.def_web_mode(self.web)
        # check that the compiler is gfortran 4.6 or later if virtuals have been exported
        proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read()

        if not '[real=QCD]' in proc_card:
            check_compiler(self.options, block=True)

    ############################################################################
    def do_shower(self, line):
        """run the shower on a given parton level file"""
        argss = self.split_arg(line)
        (options, argss) = _launch_parser.parse_args(argss)
        # check argument validity and normalise the arguments
        options = options.__dict__
        options['reweightonly'] = False
        self.check_shower(argss, options)
        evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe')
        self.ask_run_configuration('onlyshower', options)
        self.run_mcatnlo(evt_file, options)

        self.update_status('', level='all', update_results=True)

    ############################################################################
    def do_plot(self, line):
        """Create the plots for a given run"""

        # Since in principle, all plots are already done automatically
        args = self.split_arg(line)
        # Check the arguments' validity
        self.check_plot(args)
        logger.info('plot for run %s' % self.run_name)

        if not self.force:
            self.ask_edit_cards([], args, plot=True)

        if any([arg in ['parton'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                logger.info('Found events.lhe file for run %s' % self.run_name)
                shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'))
                self.create_plot('parton')
                shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename)
                misc.gzip(filename)

        if any([arg in ['all', 'parton'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top')
            if os.path.exists(filename):
                logger.info('Found MADatNLO.top file for run %s' % \
                            self.run_name)
                output = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton.html')
                plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton')

                if not os.path.isdir(plot_dir):
                    os.makedirs(plot_dir)
                top_file = pjoin(plot_dir, 'plots.top')
                files.cp(filename, top_file)
                madir = self.options['madanalysis_path']
                tag = self.run_card['run_tag']
                td = self.options['td_path']
                misc.call(['%s/plot' % self.dirbin, madir, td],
                          stdout=open(pjoin(plot_dir, 'plot.log'), 'a'),
                          stderr=subprocess.STDOUT,
                          cwd=plot_dir)

                misc.call(['%s/plot_page-pl' % self.dirbin,
                           os.path.basename(plot_dir),
                           'parton'],
                          stdout=open(pjoin(plot_dir, 'plot.log'), 'a'),
                          stderr=subprocess.STDOUT,
                          cwd=pjoin(self.me_dir, 'HTML', self.run_name))
                shutil.move(pjoin(self.me_dir, 'HTML', self.run_name, 'plots.html'),
                            output)

                os.remove(pjoin(self.me_dir, 'Events', 'plots.top'))

        if any([arg in ['all', 'shower'] for arg in args]):
            filenames = misc.glob('events_*.lhe.gz', pjoin(self.me_dir, 'Events', self.run_name))
            if len(filenames) != 1:
                filenames = misc.glob('events_*.hep.gz', pjoin(self.me_dir, 'Events', self.run_name))
                if len(filenames) != 1:
                    logger.info('No shower-level file found for run %s' % \
                                self.run_name)
                    return
                filename = filenames[0]
                misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events', 'pythia_events.hep'))

                if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')):
                    if aMCatNLO and not self.options['mg5_path']:
                        raise Exception("plotting NLO HEP file needs MG5 utilities")

                    files.cp(pjoin(self.options['mg5_path'], 'Template', 'LO', 'Cards', 'pythia_card_default.dat'),
                             pjoin(self.me_dir, 'Cards', 'pythia_card.dat'))
                self.run_hep2lhe()
            else:
                filename = filenames[0]
                misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events', 'pythia_events.hep'))

            self.create_plot('shower')
            lhe_file_name = filename.replace('.hep.gz', '.lhe')
            shutil.move(pjoin(self.me_dir, 'Events', 'pythia_events.lhe'),
                        lhe_file_name)
            misc.gzip(lhe_file_name)

        if any([arg in ['all', 'pgs'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name,
                             '%s_pgs_events.lhco' % self.run_tag)
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                self.create_plot('PGS')
                misc.gzip(filename)
            else:
                logger.info('No valid files for pgs plot')

        if any([arg in ['all', 'delphes'] for arg in args]):
            filename = pjoin(self.me_dir, 'Events', self.run_name,
                             '%s_delphes_events.lhco' % self.run_tag)
            if os.path.exists(filename + '.gz'):
                misc.gunzip(filename)
            if os.path.exists(filename):
                #shutil.move(filename, pjoin(self.me_dir, 'Events', 'delphes_events.lhco'))
                self.create_plot('Delphes')
                #shutil.move(pjoin(self.me_dir, 'Events', 'delphes_events.lhco'), filename)
                misc.gzip(filename)
            else:
                logger.info('No valid files for delphes plot')

    ############################################################################
    def do_calculate_xsect(self, line):
        """Main commands: calculates the LO/NLO cross-section, using madevent_mintFO;
        this function wraps the do_launch one"""

        self.start_time = time.time()
        argss = self.split_arg(line)
        # check argument validity and normalise the arguments
        (options, argss) = _calculate_xsect_parser.parse_args(argss)
        options = options.__dict__
        options['reweightonly'] = False
        options['parton'] = True
        self.check_calculate_xsect(argss, options)
        self.do_launch(line, options, argss)

    ############################################################################
    def do_banner_run(self, line):
        """Make a run from the banner file"""

        args = self.split_arg(line)
        # check the validity of the arguments
        self.check_banner_run(args)

        # Remove previous cards
        for name in ['shower_card.dat', 'madspin_card.dat']:
            try:
                os.remove(pjoin(self.me_dir, 'Cards', name))
            except Exception:
                pass

        banner_mod.split_banner(args[0], self.me_dir, proc_card=False)

        # Check if we want to modify the run
        if not self.force:
            ans = self.ask('Do you want to modify the Cards/Run Type?', 'n', ['y', 'n'])
            if ans == 'n':
                self.force = True

        # Compute the run mode:
        if self.force:
            mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin': False, 'shower': True}
            banner = banner_mod.Banner(args[0])
            for line in banner['run_settings']:
                if '=' in line:
                    mode, value = [t.strip() for t in line.split('=')]
                    mode_status[mode] = value
        else:
            mode_status = {}

        # Call Generate events
        self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''),
                       switch=mode_status)

    ############################################################################
    def do_generate_events(self, line):
        """Main commands: generate events;
        this function just wraps the do_launch one"""
        self.do_launch(line)

    ############################################################################
    def do_treatcards(self, line, amcatnlo=True, mode=''):
        """Advanced commands: create the correct run_card.inc from the NLO format"""
        # check that no 'Auto' entries are present in the file
        self.check_param_card(pjoin(self.me_dir, 'Cards', 'param_card.dat'))

        # propagate the FO_card entry FO_LHE_weight_ratio to the run_card.
        # this variable is system-only in the run_card.
        # this cannot be done in EditCard since the parameter is not written in the
        # run_card directly.
        if mode in ['LO', 'NLO']:
            name = 'fo_lhe_weight_ratio'
            FO_card = analyse_card.FOAnalyseCard(pjoin(self.me_dir, 'Cards', 'FO_analyse_card.dat'))
            if name in FO_card:
                self.run_card.set(name, FO_card[name], user=False)
            name = 'fo_lhe_postprocessing'
            if name in FO_card:
                self.run_card.set(name, FO_card[name], user=False)

        return super(aMCatNLOCmd, self).do_treatcards(line, amcatnlo)

    ############################################################################
    def set_configuration(self, amcatnlo=True, **opt):
        """assign all configuration variables from file;
        loop over the different config files if config_file is not defined"""
        return super(aMCatNLOCmd, self).set_configuration(amcatnlo=amcatnlo, **opt)

    ############################################################################
1646 - def do_launch(self, line, options={}, argss=[], switch={}):
1647 """Main commands: launch the full chain 1648 options and args are relevant if the function is called from other 1649 functions, such as generate_events or calculate_xsect 1650 mode gives the list of switch needed for the computation (usefull for banner_run) 1651 """ 1652 1653 if not argss and not options: 1654 self.start_time = time.time() 1655 argss = self.split_arg(line) 1656 # check argument validity and normalise argument 1657 (options, argss) = _launch_parser.parse_args(argss) 1658 options = options.__dict__ 1659 self.check_launch(argss, options) 1660 1661 1662 if 'run_name' in list(options.keys()) and options['run_name']: 1663 self.run_name = options['run_name'] 1664 # if a dir with the given run_name already exists 1665 # remove it and warn the user 1666 if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)): 1667 logger.warning('Removing old run information in \n'+ 1668 pjoin(self.me_dir, 'Events', self.run_name)) 1669 files.rm(pjoin(self.me_dir, 'Events', self.run_name)) 1670 self.results.delete_run(self.run_name) 1671 else: 1672 self.run_name = '' # will be set later 1673 1674 if options['multicore']: 1675 self.cluster_mode = 2 1676 elif options['cluster']: 1677 self.cluster_mode = 1 1678 1679 if not switch: 1680 mode = argss[0] 1681 1682 if mode in ['LO', 'NLO']: 1683 options['parton'] = True 1684 mode = self.ask_run_configuration(mode, options) 1685 else: 1686 mode = self.ask_run_configuration('auto', options, switch) 1687 1688 self.results.add_detail('run_mode', mode) 1689 1690 self.update_status('Starting run', level=None, update_results=True) 1691 1692 if self.options['automatic_html_opening']: 1693 misc.open_file(os.path.join(self.me_dir, 'crossx.html')) 1694 self.options['automatic_html_opening'] = False 1695 1696 if '+' in mode: 1697 mode = mode.split('+')[0] 1698 self.compile(mode, options) 1699 evt_file = self.run(mode, options) 1700 1701 if self.run_card['nevents'] == 0 and not mode in ['LO', 'NLO']: 1702 logger.info('No event file generated: grids have been set-up with a '\ 1703 'relative precision of %s' % self.run_card['req_acc']) 1704 return 1705 1706 if not mode in ['LO', 'NLO']: 1707 assert evt_file == pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'), '%s != %s' %(evt_file, pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')) 1708 1709 if self.run_card['systematics_program'] == 'systematics': 1710 self.exec_cmd('systematics %s %s ' % (self.run_name, ' '.join(self.run_card['systematics_arguments']))) 1711 1712 self.exec_cmd('reweight -from_cards', postcmd=False) 1713 self.exec_cmd('decay_events -from_cards', postcmd=False) 1714 evt_file = pjoin(self.me_dir,'Events', self.run_name, 'events.lhe') 1715 1716 if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \ 1717 and not options['parton']: 1718 self.run_mcatnlo(evt_file, options) 1719 self.exec_cmd('madanalysis5_hadron --no_default', postcmd=False, printcmd=False) 1720 1721 elif mode == 'noshower': 1722 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. 1723 Please, shower the Les Houches events before using them for physics analyses.""") 1724 1725 1726 self.update_status('', level='all', update_results=True) 1727 if self.run_card['ickkw'] == 3 and \ 1728 (mode in ['noshower'] or \ 1729 (('PYTHIA8' not in self.run_card['parton_shower'].upper()) and (mode in ['aMC@NLO']))): 1730 logger.warning("""You are running with FxFx merging enabled. 
1731  To be able to merge samples of various multiplicities without double counting, 1732  you have to remove some events after showering 'by hand'. 1733  Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""") 1734  1735          self.store_result() 1736          #check if the param_card defines a scan. 1737          if self.param_card_iterator: 1738              cpath = pjoin(self.me_dir,'Cards','param_card.dat') 1739              param_card_iterator = self.param_card_iterator 1740              self.param_card_iterator = [] #avoid that the next generate goes through here 1741              param_card_iterator.store_entry(self.run_name, self.results.current['cross'], 1742                                              error=self.results.current['error'], 1743                                              param_card_path=cpath) 1744              orig_name = self.run_name 1745              #go through the scan 1746              with misc.TMP_variable(self, 'allow_notification_center', False): 1747                  for i,card in enumerate(param_card_iterator): 1748                      card.write(cpath) 1749                      self.check_param_card(cpath, dependent=True) 1750                      if not options['force']: 1751                          options['force'] = True 1752                      if options['run_name']: 1753                          options['run_name'] = '%s_%s' % (orig_name, i+1) 1754                      if not argss: 1755                          argss = [mode, "-f"] 1756                      elif argss[0] == "auto": 1757                          argss[0] = mode 1758                      self.do_launch("", options=options, argss=argss, switch=switch) 1759                      #self.exec_cmd("launch -f ",precmd=True, postcmd=True,errorhandling=False) 1760                      param_card_iterator.store_entry(self.run_name, self.results.current['cross'], 1761                                                      error=self.results.current['error'], 1762                                                      param_card_path=cpath) 1763              #restore original param_card 1764              param_card_iterator.write(pjoin(self.me_dir,'Cards','param_card.dat')) 1765              name = misc.get_scan_name(orig_name, self.run_name) 1766              path = pjoin(self.me_dir, 'Events','scan_%s.txt' % name) 1767              logger.info("write all cross-section results in %s" % path, '$MG:BOLD') 1768              param_card_iterator.write_summary(path) 1769  1770          if self.allow_notification_center: 1771              misc.apple_notify('Run %s finished' % os.path.basename(self.me_dir), 1772                                '%s: %s +- %s ' % (self.results.current['run_name'], 1773                                                   self.results.current['cross'], 1774                                                   self.results.current['error']))
1775 1776 1777 ############################################################################
1778 - def do_compile(self, line):
1779 """Advanced commands: just compile the executables """ 1780 argss = self.split_arg(line) 1781 # check argument validity and normalise argument 1782 (options, argss) = _compile_parser.parse_args(argss) 1783 options = options.__dict__ 1784 options['reweightonly'] = False 1785 options['nocompile'] = False 1786 self.check_compile(argss, options) 1787 1788 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1789 self.ask_run_configuration(mode, options) 1790 self.compile(mode, options) 1791 1792 1793 self.update_status('', level='all', update_results=True)
1794 1795
1796 - def update_random_seed(self):
1797 """Update random number seed with the value from the run_card. 1798 If this is 0, update the number according to a fresh one""" 1799 iseed = self.run_card['iseed'] 1800 if iseed == 0: 1801 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1802 iseed = int(randinit.read()[2:]) + 1 1803 randinit.close() 1804 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1805 randinit.write('r=%d' % iseed) 1806 randinit.close()
1807 1808
1809 - def run(self, mode, options):
1810 """runs aMC@NLO. Returns the name of the event file created""" 1811 logger.info('Starting run') 1812 1813 if not 'only_generation' in list(options.keys()): 1814 options['only_generation'] = False 1815 1816 # for second step in applgrid mode, do only the event generation step 1817 if mode in ['LO', 'NLO'] and self.run_card['iappl'] == 2 and not options['only_generation']: 1818 options['only_generation'] = True 1819 self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics')) 1820 self.setup_cluster_or_multicore() 1821 self.update_random_seed() 1822 #find and keep track of all the jobs 1823 folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'], 1824 'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']} 1825 folder_names['noshower'] = folder_names['aMC@NLO'] 1826 folder_names['noshowerLO'] = folder_names['aMC@LO'] 1827 p_dirs = [d for d in \ 1828 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 1829 #Clean previous results 1830 self.clean_previous_results(options,p_dirs,folder_names[mode]) 1831 1832 mcatnlo_status = ['Setting up grids', 'Computing upper envelope', 'Generating events'] 1833 1834 1835 if options['reweightonly']: 1836 event_norm=self.run_card['event_norm'] 1837 nevents=self.run_card['nevents'] 1838 return self.reweight_and_collect_events(options, mode, nevents, event_norm) 1839 1840 if mode in ['LO', 'NLO']: 1841 # this is for fixed order runs 1842 mode_dict = {'NLO': 'all', 'LO': 'born'} 1843 logger.info('Doing fixed order %s' % mode) 1844 req_acc = self.run_card['req_acc_FO'] 1845 1846 # Re-distribute the grids for the 2nd step of the applgrid 1847 # running 1848 if self.run_card['iappl'] == 2: 1849 self.applgrid_distribute(options,mode_dict[mode],p_dirs) 1850 1851 # create a list of dictionaries "jobs_to_run" with all the 1852 # jobs that need to be run 1853 integration_step=-1 1854 jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \ 1855 req_acc,mode_dict[mode],integration_step,mode,fixed_order=True) 1856 self.prepare_directories(jobs_to_run,mode) 1857 1858 # loop over the integration steps. After every step, check 1859 # if we have the required accuracy. If this is the case, 1860 # stop running, else do another step. 1861 while True: 1862 integration_step=integration_step+1 1863 self.run_all_jobs(jobs_to_run,integration_step) 1864 self.collect_log_files(jobs_to_run,integration_step) 1865 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1866 jobs_to_collect,integration_step,mode,mode_dict[mode]) 1867 if not jobs_to_run: 1868 # there are no more jobs to run (jobs_to_run is empty) 1869 break 1870 # We are done. 1871 self.finalise_run_FO(folder_names[mode],jobs_to_collect) 1872 self.update_status('Run complete', level='parton', update_results=True) 1873 return 1874 1875 elif mode in ['aMC@NLO','aMC@LO','noshower','noshowerLO']: 1876 if self.ninitial == 1: 1877 raise aMCatNLOError('Decay processes can only be run at fixed order.') 1878 mode_dict = {'aMC@NLO': 'all', 'aMC@LO': 'born',\ 1879 'noshower': 'all', 'noshowerLO': 'born'} 1880 shower = self.run_card['parton_shower'].upper() 1881 nevents = self.run_card['nevents'] 1882 req_acc = self.run_card['req_acc'] 1883 if nevents == 0 and req_acc < 0 : 1884 raise aMCatNLOError('Cannot determine the required accuracy from the number '\ 1885 'of events, because 0 events requested. 
Please set '\ 1886                                          'the "req_acc" parameter in the run_card to a value '\ 1887                                          'between 0 and 1') 1888              elif req_acc >1 or req_acc == 0 : 1889                  raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\ 1890                                          'be larger than 0 and smaller than 1, '\ 1891                                          'or set to -1 for automatic determination. Current '\ 1892                                          'value is %f' % req_acc) 1893              # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card) 1894              elif req_acc < 0 and nevents > 1000000 : 1895                  req_acc=0.001 1896  1897              shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8'] 1898  1899              if not shower in shower_list: 1900                  raise aMCatNLOError('%s is not a valid parton shower. '\ 1901                                      'Please use one of the following: %s' \ 1902                                      % (shower, ', '.join(shower_list))) 1903  1904              # check that PYTHIA6PT is not used for processes with FSR 1905              if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']: 1906                  raise aMCatNLOError('PYTHIA6PT does not support processes with FSR') 1907  1908              if mode in ['aMC@NLO', 'aMC@LO']: 1909                  logger.info('Doing %s matched to parton shower' % mode[4:]) 1910              elif mode in ['noshower','noshowerLO']: 1911                  logger.info('Generating events without running the shower.') 1912              elif options['only_generation']: 1913                  logger.info('Generating events starting from existing results') 1914  1915              jobs_to_run,jobs_to_collect,integration_step = self.create_jobs_to_run(options,p_dirs, \ 1916                                          req_acc,mode_dict[mode],1,mode,fixed_order=False) 1917              # Make sure to update all the jobs to be ready for the event generation step 1918              if options['only_generation']: 1919                  jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1920                                      jobs_to_collect,1,mode,mode_dict[mode],fixed_order=False) 1921              else: 1922                  self.prepare_directories(jobs_to_run,mode,fixed_order=False) 1923  1924  1925              # Main loop over the three MINT generation steps: 1926              for mint_step, status in enumerate(mcatnlo_status): 1927                  if options['only_generation'] and mint_step < 2: 1928                      continue 1929                  self.update_status(status, level='parton') 1930                  self.run_all_jobs(jobs_to_run,mint_step,fixed_order=False) 1931                  self.collect_log_files(jobs_to_run,mint_step) 1932                  jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, \ 1933                                jobs_to_collect,mint_step,mode,mode_dict[mode],fixed_order=False) 1934                  if mint_step+1==2 and nevents==0: 1935                      self.print_summary(options,2,mode) 1936                      return 1937  1938              # Sanity check on the event files. If errors are found, the jobs are resubmitted 1939              self.check_event_files(jobs_to_collect) 1940  1941              if self.cluster_mode == 1: 1942                  #if cluster run, wait 10 sec so that event files are transferred back 1943                  self.update_status( 1944                      'Waiting while files are transferred back from the cluster nodes', 1945                      level='parton') 1946                  time.sleep(10) 1947  1948              event_norm=self.run_card['event_norm'] 1949              return self.reweight_and_collect_events(options, mode, nevents, event_norm)
1950
1951 - def create_jobs_to_run(self,options,p_dirs,req_acc,run_mode,\ 1952 integration_step,mode,fixed_order=True):
1953 """Creates a list of dictionaries with all the jobs to be run""" 1954 jobs_to_run=[] 1955 if not options['only_generation']: 1956 # Fresh, new run. Check all the P*/channels.txt files 1957 # (created by the 'gensym' executable) to set-up all the 1958 # jobs using the default inputs. 1959 npoints = self.run_card['npoints_FO_grid'] 1960 niters = self.run_card['niters_FO_grid'] 1961 for p_dir in p_dirs: 1962 try: 1963 with open(pjoin(self.me_dir,'SubProcesses',p_dir,'channels.txt')) as chan_file: 1964 channels=chan_file.readline().split() 1965 except IOError: 1966 logger.warning('No integration channels found for contribution %s' % p_dir) 1967 continue 1968 if fixed_order: 1969 lch=len(channels) 1970 maxchannels=20 # combine up to 20 channels in a single job 1971 if self.run_card['iappl'] != 0: maxchannels=1 1972 njobs=(int(lch/maxchannels)+1 if lch%maxchannels!= 0 \ 1973 else int(lch/maxchannels)) 1974 for nj in range(1,njobs+1): 1975 job={} 1976 job['p_dir']=p_dir 1977 job['channel']=str(nj) 1978 job['nchans']=(int(lch/njobs)+1 if nj <= lch%njobs else int(lch/njobs)) 1979 job['configs']=' '.join(channels[:job['nchans']]) 1980 del channels[:job['nchans']] 1981 job['split']=0 1982 if req_acc == -1: 1983 job['accuracy']=0 1984 job['niters']=niters 1985 job['npoints']=npoints 1986 elif req_acc > 0: 1987 job['accuracy']=0.05 1988 job['niters']=6 1989 job['npoints']=-1 1990 else: 1991 raise aMCatNLOError('No consistent "req_acc_FO" set. Use a value '+ 1992 'between 0 and 1 or set it equal to -1.') 1993 job['mint_mode']=0 1994 job['run_mode']=run_mode 1995 job['wgt_frac']=1.0 1996 job['wgt_mult']=1.0 1997 jobs_to_run.append(job) 1998 if channels: 1999 raise aMCatNLOError('channels is not empty %s' % channels) 2000 else: 2001 for channel in channels: 2002 job={} 2003 job['p_dir']=p_dir 2004 job['channel']=channel 2005 job['split']=0 2006 job['accuracy']=0.03 2007 job['niters']=12 2008 job['npoints']=-1 2009 job['mint_mode']=0 2010 job['run_mode']=run_mode 2011 job['wgt_frac']=1.0 2012 jobs_to_run.append(job) 2013 jobs_to_collect=copy.copy(jobs_to_run) # These are all jobs 2014 else: 2015 # if options['only_generation'] is true, just read the current jobs from file 2016 try: 2017 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'rb') as f: 2018 jobs_to_collect=pickle.load(f) 2019 for job in jobs_to_collect: 2020 job['dirname']=pjoin(self.me_dir,'SubProcesses',job['dirname'].rsplit('/SubProcesses/',1)[1]) 2021 jobs_to_run=copy.copy(jobs_to_collect) 2022 except: 2023 raise aMCatNLOError('Cannot reconstruct saved job status in %s' % \ 2024 pjoin(self.me_dir,'SubProcesses','job_status.pkl')) 2025 # Update cross sections and determine which jobs to run next 2026 if fixed_order: 2027 jobs_to_run,jobs_to_collect=self.collect_the_results(options,req_acc,jobs_to_run, 2028 jobs_to_collect,integration_step,mode,run_mode) 2029 # Update the integration_step to make sure that nothing will be overwritten 2030 integration_step=1 2031 for job in jobs_to_run: 2032 while os.path.exists(pjoin(job['dirname'],'res_%s.dat' % integration_step)): 2033 integration_step=integration_step+1 2034 integration_step=integration_step-1 2035 else: 2036 self.append_the_results(jobs_to_collect,integration_step) 2037 return jobs_to_run,jobs_to_collect,integration_step
2038
2039 - def prepare_directories(self,jobs_to_run,mode,fixed_order=True):
2040 """Set-up the G* directories for running""" 2041 name_suffix={'born' :'B' , 'all':'F'} 2042 for job in jobs_to_run: 2043 if job['split'] == 0: 2044 if fixed_order : 2045 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2046 job['run_mode']+'_G'+job['channel']) 2047 else: 2048 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2049 'G'+name_suffix[job['run_mode']]+job['channel']) 2050 else: 2051 if fixed_order : 2052 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2053 job['run_mode']+'_G'+job['channel']+'_'+str(job['split'])) 2054 else: 2055 dirname=pjoin(self.me_dir,'SubProcesses',job['p_dir'], 2056 'G'+name_suffix[job['run_mode']]+job['channel']+'_'+str(job['split'])) 2057 job['dirname']=dirname 2058 if not os.path.isdir(dirname): 2059 os.makedirs(dirname) 2060 self.write_input_file(job,fixed_order) 2061 # link or copy the grids from the base directory to the split directory: 2062 if not fixed_order: 2063 if job['split'] != 0: 2064 for f in ['grid.MC_integer','mint_grids','res_1']: 2065 if not os.path.isfile(pjoin(job['dirname'],f)): 2066 files.ln(pjoin(job['dirname'].rsplit("_",1)[0],f),job['dirname']) 2067 else: 2068 if job['split'] != 0: 2069 for f in ['grid.MC_integer','mint_grids']: 2070 files.cp(pjoin(job['dirname'].rsplit("_",1)[0],f),job['dirname'])
2071 2072
2073 - def write_input_file(self,job,fixed_order):
2074 """write the input file for the madevent_mint* executable in the appropriate directory""" 2075 if fixed_order: 2076 content= \ 2077 """NPOINTS = %(npoints)s 2078 NITERATIONS = %(niters)s 2079 ACCURACY = %(accuracy)s 2080 ADAPT_GRID = 2 2081 MULTICHANNEL = 1 2082 SUM_HELICITY = 1 2083 NCHANS = %(nchans)s 2084 CHANNEL = %(configs)s 2085 SPLIT = %(split)s 2086 WGT_MULT= %(wgt_mult)s 2087 RUN_MODE = %(run_mode)s 2088 RESTART = %(mint_mode)s 2089 """ \ 2090 % job 2091 else: 2092 content = \ 2093 """-1 12 ! points, iterations 2094 %(accuracy)s ! desired fractional accuracy 2095 1 -0.1 ! alpha, beta for Gsoft 2096 -1 -0.1 ! alpha, beta for Gazi 2097 1 ! Suppress amplitude (0 no, 1 yes)? 2098 1 ! Exact helicity sum (0 yes, n = number/event)? 2099 %(channel)s ! Enter Configuration Number: 2100 %(mint_mode)s ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events 2101 1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij 2102 %(run_mode)s ! all, born, real, virt 2103 """ \ 2104 % job 2105 with open(pjoin(job['dirname'], 'input_app.txt'), 'w') as input_file: 2106 input_file.write(content)
2107 2108
2109 - def run_all_jobs(self,jobs_to_run,integration_step,fixed_order=True):
2110 """Loops over the jobs_to_run and executes them using the function 'run_exe'""" 2111 if fixed_order: 2112 if integration_step == 0: 2113 self.update_status('Setting up grids', level=None) 2114 else: 2115 self.update_status('Refining results, step %i' % integration_step, level=None) 2116 self.ijob = 0 2117 name_suffix={'born' :'B', 'all':'F'} 2118 if fixed_order: 2119 run_type="Fixed order integration step %s" % integration_step 2120 else: 2121 run_type="MINT step %s" % integration_step 2122 self.njobs=len(jobs_to_run) 2123 for job in jobs_to_run: 2124 executable='ajob1' 2125 if fixed_order: 2126 arguments=[job['channel'],job['run_mode'], \ 2127 str(job['split']),str(integration_step)] 2128 else: 2129 arguments=[job['channel'],name_suffix[job['run_mode']], \ 2130 str(job['split']),str(integration_step)] 2131 self.run_exe(executable,arguments,run_type, 2132 cwd=pjoin(self.me_dir,'SubProcesses',job['p_dir'])) 2133 2134 if self.cluster_mode == 2: 2135 time.sleep(1) # security to allow all jobs to be launched 2136 self.wait_for_complete(run_type)
2137 2138
2139 - def collect_the_results(self,options,req_acc,jobs_to_run,jobs_to_collect,\ 2140 integration_step,mode,run_mode,fixed_order=True):
2141 """Collect the results, make HTML pages, print the summary and 2142 determine if there are more jobs to run. Returns the list 2143 of the jobs that still need to be run, as well as the 2144 complete list of jobs that need to be collected to get the 2145 final answer. 2146 """ 2147 # Get the results of the current integration/MINT step 2148 self.append_the_results(jobs_to_run,integration_step) 2149 self.cross_sect_dict = self.write_res_txt_file(jobs_to_collect,integration_step) 2150 # Update HTML pages 2151 if fixed_order: 2152 cross, error = self.make_make_all_html_results(folder_names=['%s*' % run_mode], 2153 jobs=jobs_to_collect) 2154 else: 2155 name_suffix={'born' :'B' , 'all':'F'} 2156 cross, error = self.make_make_all_html_results(folder_names=['G%s*' % name_suffix[run_mode]]) 2157 self.results.add_detail('cross', cross) 2158 self.results.add_detail('error', error) 2159 # Combine grids from split fixed order jobs 2160 if fixed_order: 2161 jobs_to_run=self.combine_split_order_run(jobs_to_run) 2162 # Set-up jobs for the next iteration/MINT step 2163 jobs_to_run_new=self.update_jobs_to_run(req_acc,integration_step,jobs_to_run,fixed_order) 2164 # IF THERE ARE NO MORE JOBS, WE ARE DONE!!! 2165 if fixed_order: 2166 # Write the jobs_to_collect directory to file so that we 2167 # can restart them later (with only-generation option) 2168 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f: 2169 pickle.dump(jobs_to_collect,f) 2170 # Print summary 2171 if (not jobs_to_run_new) and fixed_order: 2172 # print final summary of results (for fixed order) 2173 scale_pdf_info=self.collect_scale_pdf_info(options,jobs_to_collect) 2174 self.print_summary(options,integration_step,mode,scale_pdf_info,done=True) 2175 return jobs_to_run_new,jobs_to_collect 2176 elif jobs_to_run_new: 2177 # print intermediate summary of results 2178 scale_pdf_info=[] 2179 self.print_summary(options,integration_step,mode,scale_pdf_info,done=False) 2180 else: 2181 # When we are done for (N)LO+PS runs, do not print 2182 # anything yet. This will be done after the reweighting 2183 # and collection of the events 2184 scale_pdf_info=[] 2185 # Prepare for the next integration/MINT step 2186 if (not fixed_order) and integration_step+1 == 2 : 2187 # Write the jobs_to_collect directory to file so that we 2188 # can restart them later (with only-generation option) 2189 with open(pjoin(self.me_dir,"SubProcesses","job_status.pkl"),'wb') as f: 2190 pickle.dump(jobs_to_collect,f) 2191 # next step is event generation (mint_step 2) 2192 jobs_to_run_new,jobs_to_collect_new= \ 2193 self.check_the_need_to_split(jobs_to_run_new,jobs_to_collect) 2194 self.prepare_directories(jobs_to_run_new,mode,fixed_order) 2195 self.write_nevents_unweighted_file(jobs_to_collect_new,jobs_to_collect) 2196 self.write_nevts_files(jobs_to_run_new) 2197 else: 2198 if fixed_order and self.run_card['iappl'] == 0 \ 2199 and self.run_card['req_acc_FO'] > 0: 2200 jobs_to_run_new,jobs_to_collect= \ 2201 self.split_jobs_fixed_order(jobs_to_run_new,jobs_to_collect) 2202 self.prepare_directories(jobs_to_run_new,mode,fixed_order) 2203 jobs_to_collect_new=jobs_to_collect 2204 return jobs_to_run_new,jobs_to_collect_new
2205 2206
2207 - def write_nevents_unweighted_file(self,jobs,jobs0events):
2208 """writes the nevents_unweighted file in the SubProcesses directory. 2209 We also need to write the jobs that will generate 0 events, 2210 because that makes sure that the cross section from those channels 2211 is taken into account in the event weights (by collect_events.f). 2212 """ 2213 content=[] 2214 for job in jobs: 2215 path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1]) 2216 lhefile=pjoin(path,'events.lhe') 2217 content.append(' %s %d %9e %9e' % \ 2218 (lhefile.ljust(40),job['nevents'],job['resultABS']*job['wgt_frac'],job['wgt_frac'])) 2219 for job in jobs0events: 2220 if job['nevents']==0: 2221 path=pjoin(job['dirname'].split('/')[-2],job['dirname'].split('/')[-1]) 2222 lhefile=pjoin(path,'events.lhe') 2223 content.append(' %s %d %9e %9e' % \ 2224 (lhefile.ljust(40),job['nevents'],job['resultABS'],1.)) 2225 with open(pjoin(self.me_dir,'SubProcesses',"nevents_unweighted"),'w') as f: 2226 f.write('\n'.join(content)+'\n')
2227
2228 - def write_nevts_files(self,jobs):
2229 """write the nevts files in the SubProcesses/P*/G*/ directories""" 2230 for job in jobs: 2231 with open(pjoin(job['dirname'],'nevts'),'w') as f: 2232 if self.run_card['event_norm'].lower()=='bias': 2233 f.write('%i %f\n' % (job['nevents'],self.cross_sect_dict['xseca'])) 2234 else: 2235 f.write('%i\n' % job['nevents'])
2236
2237 - def combine_split_order_run(self,jobs_to_run):
2238 """Combines jobs and grids from split jobs that have been run""" 2239 # combine the jobs that need to be combined in job 2240 # groups. Simply combine the ones that have the same p_dir and 2241 # same channel. 2242 jobgroups_to_combine=[] 2243 jobs_to_run_new=[] 2244 for job in jobs_to_run: 2245 if job['split'] == 0: 2246 job['combined']=1 2247 jobs_to_run_new.append(job) # this jobs wasn't split 2248 elif job['split'] == 1: 2249 jobgroups_to_combine.append([j for j in jobs_to_run if j['p_dir'] == job['p_dir'] and \ 2250 j['channel'] == job['channel']]) 2251 else: 2252 continue 2253 for job_group in jobgroups_to_combine: 2254 # Combine the grids (mint-grids & MC-integer grids) first 2255 self.combine_split_order_grids(job_group) 2256 jobs_to_run_new.append(self.combine_split_order_jobs(job_group)) 2257 return jobs_to_run_new
2258
2259 - def combine_split_order_jobs(self,job_group):
2260 """combine the jobs in job_group and return a single summed job""" 2261 # first copy one of the jobs in 'jobs' 2262 sum_job=copy.copy(job_group[0]) 2263 # update the information to have a 'non-split' job: 2264 sum_job['dirname']=pjoin(sum_job['dirname'].rsplit('_',1)[0]) 2265 sum_job['split']=0 2266 sum_job['wgt_mult']=1.0 2267 sum_job['combined']=len(job_group) 2268 # information to be summed: 2269 keys=['niters_done','npoints_done','niters','npoints',\ 2270 'result','resultABS','time_spend'] 2271 keys2=['error','errorABS'] 2272 # information to be summed in quadrature: 2273 for key in keys2: 2274 sum_job[key]=math.pow(sum_job[key],2) 2275 # Loop over the jobs and sum the information 2276 for i,job in enumerate(job_group): 2277 if i==0 : continue # skip the first 2278 for key in keys: 2279 sum_job[key]+=job[key] 2280 for key in keys2: 2281 sum_job[key]+=math.pow(job[key],2) 2282 for key in keys2: 2283 sum_job[key]=math.sqrt(sum_job[key]) 2284 sum_job['err_percABS'] = sum_job['errorABS']/sum_job['resultABS']*100. 2285 sum_job['err_perc'] = sum_job['error']/sum_job['result']*100. 2286 sum_job['niters']=int(sum_job['niters_done']/len(job_group)) 2287 sum_job['niters_done']=int(sum_job['niters_done']/len(job_group)) 2288 return sum_job
2289 2290
2291 - def combine_split_order_grids(self,job_group):
2292 """Combines the mint_grids and MC-integer grids from the split order 2293 jobs (fixed order only). 2294 """ 2295 files_mint_grids=[] 2296 files_MC_integer=[] 2297 location=None 2298 for job in job_group: 2299 files_mint_grids.append(pjoin(job['dirname'],'mint_grids')) 2300 files_MC_integer.append(pjoin(job['dirname'],'grid.MC_integer')) 2301 if not location: 2302 location=pjoin(job['dirname'].rsplit('_',1)[0]) 2303 else: 2304 if location != pjoin(job['dirname'].rsplit('_',1)[0]) : 2305 raise aMCatNLOError('Not all jobs have the same location. '\ 2306 +'Cannot combine them.') 2307 # Needed to average the grids (both xgrids, ave_virt and 2308 # MC_integer grids), but sum the cross section info. The 2309 # latter is only the only line that contains integers. 2310 for j,fs in enumerate([files_mint_grids,files_MC_integer]): 2311 linesoffiles=[] 2312 for f in fs: 2313 with open(f,'r+') as fi: 2314 linesoffiles.append(fi.readlines()) 2315 to_write=[] 2316 for rowgrp in zip(*linesoffiles): 2317 try: 2318 # check that last element on the line is an 2319 # integer (will raise ValueError if not the 2320 # case). If integer, this is the line that 2321 # contains information that needs to be 2322 # summed. All other lines can be averaged. 2323 is_integer = [[int(row.strip().split()[-1])] for row in rowgrp] 2324 floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp] 2325 floatgrps = list(zip(*floatsbyfile)) 2326 special=[] 2327 for i,floatgrp in enumerate(floatgrps): 2328 if i==0: # sum X-sec 2329 special.append(sum(floatgrp)) 2330 elif i==1: # sum unc in quadrature 2331 special.append(math.sqrt(sum([err**2 for err in floatgrp]))) 2332 elif i==2: # average number of PS per iteration 2333 special.append(int(sum(floatgrp)/len(floatgrp))) 2334 elif i==3: # sum the number of iterations 2335 special.append(int(sum(floatgrp))) 2336 elif i==4: # average the nhits_in_grids 2337 special.append(int(sum(floatgrp)/len(floatgrp))) 2338 else: 2339 raise aMCatNLOError('"mint_grids" files not in correct format. '+\ 2340 'Cannot combine them.') 2341 to_write.append(" ".join(str(s) for s in special) + "\n") 2342 except ValueError: 2343 # just average all 2344 floatsbyfile = [[float(a) for a in row.strip().split()] for row in rowgrp] 2345 floatgrps = list(zip(*floatsbyfile)) 2346 averages = [sum(floatgrp)/len(floatgrp) for floatgrp in floatgrps] 2347 to_write.append(" ".join(str(a) for a in averages) + "\n") 2348 # write the data over the master location 2349 if j==0: 2350 with open(pjoin(location,'mint_grids'),'w') as f: 2351 f.writelines(to_write) 2352 elif j==1: 2353 with open(pjoin(location,'grid.MC_integer'),'w') as f: 2354 f.writelines(to_write)
2355 2356
2357 - def split_jobs_fixed_order(self,jobs_to_run,jobs_to_collect):
2358 """Looks in the jobs_to_run to see if there is the need to split the 2359 jobs, depending on the expected time they take. Updates 2360 jobs_to_run and jobs_to_collect to replace the split-job by 2361 its splits. 2362 """ 2363 # determine the number jobs we should have (this is per p_dir) 2364 if self.options['run_mode'] ==2: 2365 nb_submit = int(self.options['nb_core']) 2366 elif self.options['run_mode'] ==1: 2367 nb_submit = int(self.options['cluster_size']) 2368 else: 2369 nb_submit =1 2370 # total expected aggregated running time 2371 time_expected=0 2372 for job in jobs_to_run: 2373 time_expected+=job['time_spend']*(job['niters']*job['npoints'])/ \ 2374 (job['niters_done']*job['npoints_done']) 2375 # this means that we must expect the following per job (in 2376 # ideal conditions) 2377 time_per_job=time_expected/(nb_submit*(1+len(jobs_to_run)/2)) 2378 jobs_to_run_new=[] 2379 jobs_to_collect_new=copy.copy(jobs_to_collect) 2380 for job in jobs_to_run: 2381 # remove current job from jobs_to_collect. Make sure 2382 # to remove all the split ones in case the original 2383 # job had been a split one (before it was re-combined) 2384 for j in [j for j in jobs_to_collect_new if j['p_dir'] == job['p_dir'] and \ 2385 j['channel'] == job['channel']]: 2386 jobs_to_collect_new.remove(j) 2387 time_expected=job['time_spend']*(job['niters']*job['npoints'])/ \ 2388 (job['niters_done']*job['npoints_done']) 2389 # if the time expected for this job is (much) larger than 2390 # the time spend in the previous iteration, and larger 2391 # than the expected time per job, split it 2392 if time_expected > max(2*job['time_spend']/job['combined'],time_per_job): 2393 # determine the number of splits needed 2394 nsplit=min(max(int(time_expected/max(2*job['time_spend']/job['combined'],time_per_job)),2),nb_submit) 2395 for i in range(1,nsplit+1): 2396 job_new=copy.copy(job) 2397 job_new['split']=i 2398 job_new['wgt_mult']=1./float(nsplit) 2399 job_new['dirname']=job['dirname']+'_%i' % job_new['split'] 2400 job_new['accuracy']=min(job['accuracy']*math.sqrt(float(nsplit)),0.1) 2401 if nsplit >= job['niters']: 2402 job_new['npoints']=int(job['npoints']*job['niters']/nsplit) 2403 job_new['niters']=1 2404 else: 2405 job_new['npoints']=int(job['npoints']/nsplit) 2406 jobs_to_collect_new.append(job_new) 2407 jobs_to_run_new.append(job_new) 2408 else: 2409 jobs_to_collect_new.append(job) 2410 jobs_to_run_new.append(job) 2411 return jobs_to_run_new,jobs_to_collect_new
2412 2413
2414 - def check_the_need_to_split(self,jobs_to_run,jobs_to_collect):
2415 """Looks in the jobs_to_run to see if there is the need to split the 2416 event generation step. Updates jobs_to_run and 2417 jobs_to_collect to replace the split-job by its 2418 splits. Also removes jobs that do not need any events. 2419 """ 2420 nevt_job=self.run_card['nevt_job'] 2421 if nevt_job > 0: 2422 jobs_to_collect_new=copy.copy(jobs_to_collect) 2423 for job in jobs_to_run: 2424 nevents=job['nevents'] 2425 if nevents == 0: 2426 jobs_to_collect_new.remove(job) 2427 elif nevents > nevt_job: 2428 jobs_to_collect_new.remove(job) 2429 if nevents % nevt_job != 0 : 2430 nsplit=int(nevents/nevt_job)+1 2431 else: 2432 nsplit=int(nevents/nevt_job) 2433 for i in range(1,nsplit+1): 2434 job_new=copy.copy(job) 2435 left_over=nevents % nsplit 2436 if i <= left_over: 2437 job_new['nevents']=int(nevents/nsplit)+1 2438 job_new['wgt_frac']=float(job_new['nevents'])/float(nevents) 2439 else: 2440 job_new['nevents']=int(nevents/nsplit) 2441 job_new['wgt_frac']=float(job_new['nevents'])/float(nevents) 2442 job_new['split']=i 2443 job_new['dirname']=job['dirname']+'_%i' % job_new['split'] 2444 jobs_to_collect_new.append(job_new) 2445 jobs_to_run_new=copy.copy(jobs_to_collect_new) 2446 else: 2447 jobs_to_run_new=copy.copy(jobs_to_collect) 2448 for job in jobs_to_collect: 2449 if job['nevents'] == 0: 2450 jobs_to_run_new.remove(job) 2451 jobs_to_collect_new=copy.copy(jobs_to_run_new) 2452 2453 return jobs_to_run_new,jobs_to_collect_new
2454 2455
2456 - def update_jobs_to_run(self,req_acc,step,jobs,fixed_order=True):
2457 """ 2458 For (N)LO+PS: determines the number of events and/or the required 2459 accuracy per job. 2460 For fixed order: determines which jobs need higher precision and 2461 returns those with the newly requested precision. 2462 """ 2463 err=self.cross_sect_dict['errt'] 2464 tot=self.cross_sect_dict['xsect'] 2465 errABS=self.cross_sect_dict['erra'] 2466 totABS=self.cross_sect_dict['xseca'] 2467 jobs_new=[] 2468 if fixed_order: 2469 if req_acc == -1: 2470 if step+1 == 1: 2471 npoints = self.run_card['npoints_FO'] 2472 niters = self.run_card['niters_FO'] 2473 for job in jobs: 2474 job['mint_mode']=-1 2475 job['niters']=niters 2476 job['npoints']=npoints 2477 jobs_new.append(job) 2478 elif step+1 == 2: 2479 pass 2480 elif step+1 > 2: 2481 raise aMCatNLOError('Cannot determine number of iterations and PS points '+ 2482 'for integration step %i' % step ) 2483 elif ( req_acc > 0 and err/abs(tot) > req_acc*1.2 ) or step <= 0: 2484 req_accABS=req_acc*abs(tot)/totABS # overal relative required accuracy on ABS Xsec. 2485 for job in jobs: 2486 job['mint_mode']=-1 2487 # Determine relative required accuracy on the ABS for this job 2488 job['accuracy']=req_accABS*math.sqrt(totABS/job['resultABS']) 2489 # If already accurate enough, skip the job (except when doing the first 2490 # step for the iappl=2 run: we need to fill all the applgrid grids!) 2491 if (job['accuracy'] > job['errorABS']/job['resultABS'] and step != 0) \ 2492 and not (step==-1 and self.run_card['iappl'] == 2): 2493 continue 2494 # Update the number of PS points based on errorABS, ncall and accuracy 2495 itmax_fl=job['niters_done']*math.pow(job['errorABS']/ 2496 (job['accuracy']*job['resultABS']),2) 2497 if itmax_fl <= 4.0 : 2498 job['niters']=max(int(round(itmax_fl)),2) 2499 job['npoints']=job['npoints_done']*2 2500 elif itmax_fl > 4.0 and itmax_fl <= 16.0 : 2501 job['niters']=4 2502 job['npoints']=int(round(job['npoints_done']*itmax_fl/4.0))*2 2503 else: 2504 if itmax_fl > 100.0 : itmax_fl=50.0 2505 job['niters']=int(round(math.sqrt(itmax_fl))) 2506 job['npoints']=int(round(job['npoints_done']*itmax_fl/ 2507 round(math.sqrt(itmax_fl))))*2 2508 # Add the job to the list of jobs that need to be run 2509 jobs_new.append(job) 2510 return jobs_new 2511 elif step+1 <= 2: 2512 nevents=self.run_card['nevents'] 2513 # Total required accuracy for the upper bounding envelope 2514 if req_acc<0: 2515 req_acc2_inv=nevents 2516 else: 2517 req_acc2_inv=1/(req_acc*req_acc) 2518 if step+1 == 1 or step+1 == 2 : 2519 # determine the req. accuracy for each of the jobs for Mint-step = 1 2520 for job in jobs: 2521 accuracy=min(math.sqrt(totABS/(req_acc2_inv*job['resultABS'])),0.2) 2522 job['accuracy']=accuracy 2523 if step+1 == 2: 2524 # Randomly (based on the relative ABS Xsec of the job) determine the 2525 # number of events each job needs to generate for MINT-step = 2. 2526 r=self.get_randinit_seed() 2527 if not hasattr(random, 'mg_seedset'): 2528 random.seed(r) 2529 random.mg_seedset = r 2530 totevts=nevents 2531 for job in jobs: 2532 job['nevents'] = 0 2533 while totevts : 2534 target = random.random() * totABS 2535 crosssum = 0. 2536 i = 0 2537 while i<len(jobs) and crosssum < target: 2538 job = jobs[i] 2539 crosssum += job['resultABS'] 2540 i += 1 2541 totevts -= 1 2542 i -= 1 2543 jobs[i]['nevents'] += 1 2544 for job in jobs: 2545 job['mint_mode']=step+1 # next step 2546 return jobs 2547 else: 2548 return []
2549 2550
2551 - def get_randinit_seed(self):
2552 """ Get the random number seed from the randinit file """ 2553 with open(pjoin(self.me_dir,"SubProcesses","randinit")) as randinit: 2554 # format of the file is "r=%d". 2555 iseed = int(randinit.read()[2:]) 2556 return iseed
2557 2558
2559 - def append_the_results(self,jobs,integration_step):
2560 """Appends the results for each of the jobs in the job list""" 2561 error_found=False 2562 for job in jobs: 2563 try: 2564 if integration_step >= 0 : 2565 with open(pjoin(job['dirname'],'res_%s.dat' % integration_step)) as res_file: 2566 results=res_file.readline().split() 2567 else: 2568 # should only be here when doing fixed order with the 'only_generation' 2569 # option equal to True. Take the results from the final run done. 2570 with open(pjoin(job['dirname'],'res.dat')) as res_file: 2571 results=res_file.readline().split() 2572 except IOError: 2573 if not error_found: 2574 error_found=True 2575 error_log=[] 2576 error_log.append(pjoin(job['dirname'],'log.txt')) 2577 continue 2578 job['resultABS']=float(results[0]) 2579 job['errorABS']=float(results[1]) 2580 job['result']=float(results[2]) 2581 job['error']=float(results[3]) 2582 job['niters_done']=int(results[4]) 2583 job['npoints_done']=int(results[5]) 2584 job['time_spend']=float(results[6]) 2585 job['err_percABS'] = job['errorABS']/job['resultABS']*100. 2586 job['err_perc'] = job['error']/job['result']*100. 2587 if error_found: 2588 raise aMCatNLOError('An error occurred during the collection of results.\n' + 2589 'Please check the .log files inside the directories which failed:\n' + 2590 '\n'.join(error_log)+'\n')
2591 2592 2593
2594 - def write_res_txt_file(self,jobs,integration_step):
2595 """writes the res.txt files in the SubProcess dir""" 2596 jobs.sort(key = lambda job: -job['errorABS']) 2597 content=[] 2598 content.append('\n\nCross section per integration channel:') 2599 for job in jobs: 2600 content.append('%(p_dir)20s %(channel)15s %(result)10.8e %(error)6.4e %(err_perc)6.4f%% ' % job) 2601 content.append('\n\nABS cross section per integration channel:') 2602 for job in jobs: 2603 content.append('%(p_dir)20s %(channel)15s %(resultABS)10.8e %(errorABS)6.4e %(err_percABS)6.4f%% ' % job) 2604 totABS=0 2605 errABS=0 2606 tot=0 2607 err=0 2608 for job in jobs: 2609 totABS+= job['resultABS']*job['wgt_frac'] 2610 errABS+= math.pow(job['errorABS'],2)*job['wgt_frac'] 2611 tot+= job['result']*job['wgt_frac'] 2612 err+= math.pow(job['error'],2)*job['wgt_frac'] 2613 if jobs: 2614 content.append('\nTotal ABS and \nTotal: \n %10.8e +- %6.4e (%6.4e%%)\n %10.8e +- %6.4e (%6.4e%%) \n' %\ 2615 (totABS, math.sqrt(errABS), math.sqrt(errABS)/totABS *100.,\ 2616 tot, math.sqrt(err), math.sqrt(err)/tot *100.)) 2617 with open(pjoin(self.me_dir,'SubProcesses','res_%s.txt' % integration_step),'w') as res_file: 2618 res_file.write('\n'.join(content)) 2619 randinit=self.get_randinit_seed() 2620 return {'xsect':tot,'xseca':totABS,'errt':math.sqrt(err),\ 2621 'erra':math.sqrt(errABS),'randinit':randinit}
2622 2623
2624 - def collect_scale_pdf_info(self,options,jobs):
2625 """read the scale_pdf_dependence.dat files and collects there results""" 2626 scale_pdf_info=[] 2627 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 2628 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1: 2629 evt_files=[] 2630 evt_wghts=[] 2631 for job in jobs: 2632 evt_files.append(pjoin(job['dirname'],'scale_pdf_dependence.dat')) 2633 evt_wghts.append(job['wgt_frac']) 2634 scale_pdf_info = self.pdf_scale_from_reweighting(evt_files,evt_wghts) 2635 return scale_pdf_info
2636 2637
2638 - def combine_plots_FO(self,folder_name,jobs):
2639 """combines the plots and puts then in the Events/run* directory""" 2640 devnull = open(os.devnull, 'w') 2641 2642 if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer': 2643 topfiles = [] 2644 for job in jobs: 2645 if job['dirname'].endswith('.top'): 2646 topfiles.append(job['dirname']) 2647 else: 2648 topfiles.append(pjoin(job['dirname'],'MADatNLO.top')) 2649 misc.call(['./combine_plots_FO.sh'] + topfiles, \ 2650 stdout=devnull, 2651 cwd=pjoin(self.me_dir, 'SubProcesses')) 2652 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'), 2653 pjoin(self.me_dir, 'Events', self.run_name)) 2654 logger.info('The results of this run and the TopDrawer file with the plots' + \ 2655 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2656 elif self.analyse_card['fo_analysis_format'].lower() == 'hwu': 2657 out=pjoin(self.me_dir,'Events',self.run_name,'MADatNLO') 2658 self.combine_plots_HwU(jobs,out) 2659 try: 2660 misc.call(['gnuplot','MADatNLO.gnuplot'],\ 2661 stdout=devnull,stderr=devnull,\ 2662 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2663 except Exception: 2664 pass 2665 logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \ 2666 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2667 elif self.analyse_card['fo_analysis_format'].lower() == 'root': 2668 rootfiles = [] 2669 for job in jobs: 2670 if job['dirname'].endswith('.root'): 2671 rootfiles.append(job['dirname']) 2672 else: 2673 rootfiles.append(pjoin(job['dirname'],'MADatNLO.root')) 2674 misc.call(['./combine_root.sh'] + folder_name + rootfiles, \ 2675 stdout=devnull, 2676 cwd=pjoin(self.me_dir, 'SubProcesses')) 2677 files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'), 2678 pjoin(self.me_dir, 'Events', self.run_name)) 2679 logger.info('The results of this run and the ROOT file with the plots' + \ 2680 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2681 elif self.analyse_card['fo_analysis_format'].lower() == 'lhe': 2682 self.combine_FO_lhe(jobs) 2683 logger.info('The results of this run and the LHE File (to be used for plotting only)' + \ 2684 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name)) 2685 else: 2686 logger.info('The results of this run' + \ 2687 ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
2688
2689 - def combine_FO_lhe(self,jobs):
2690 """combine the various lhe file generated in each directory. 2691 They are two steps: 2692 1) banner 2693 2) reweight each sample by the factor written at the end of each file 2694 3) concatenate each of the new files (gzip those). 2695 """ 2696 2697 logger.info('Combining lhe events for plotting analysis') 2698 start = time.time() 2699 self.run_card['fo_lhe_postprocessing'] = [i.lower() for i in self.run_card['fo_lhe_postprocessing']] 2700 output = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 2701 if os.path.exists(output): 2702 os.remove(output) 2703 2704 2705 2706 2707 # 1. write the banner 2708 text = open(pjoin(jobs[0]['dirname'],'header.txt'),'r').read() 2709 i1, i2 = text.find('<initrwgt>'),text.find('</initrwgt>') 2710 self.banner['initrwgt'] = text[10+i1:i2] 2711 # 2712 # <init> 2713 # 2212 2212 6.500000e+03 6.500000e+03 0 0 247000 247000 -4 1 2714 # 8.430000e+02 2.132160e+00 8.430000e+02 1 2715 # <generator name='MadGraph5_aMC@NLO' version='2.5.2'>please cite 1405.0301 </generator> 2716 # </init> 2717 2718 cross = sum(j['result'] for j in jobs) 2719 error = math.sqrt(sum(j['error'] for j in jobs)) 2720 self.banner['init'] = "0 0 0e0 0e0 0 0 0 0 -4 1\n %s %s %s 1" % (cross, error, cross) 2721 self.banner.write(output[:-3], close_tag=False) 2722 misc.gzip(output[:-3]) 2723 2724 2725 2726 fsock = lhe_parser.EventFile(output,'a') 2727 if 'nogrouping' in self.run_card['fo_lhe_postprocessing']: 2728 fsock.eventgroup = False 2729 else: 2730 fsock.eventgroup = True 2731 2732 if 'norandom' in self.run_card['fo_lhe_postprocessing']: 2733 for job in jobs: 2734 dirname = job['dirname'] 2735 #read last line 2736 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2737 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2738 # get normalisation ratio 2739 ratio = cross/sumwgt 2740 lhe = lhe_parser.EventFile(pjoin(dirname,'events.lhe')) 2741 lhe.eventgroup = True # read the events by eventgroup 2742 for eventsgroup in lhe: 2743 neweventsgroup = [] 2744 for i,event in enumerate(eventsgroup): 2745 event.rescale_weights(ratio) 2746 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2747 and event == neweventsgroup[-1]: 2748 neweventsgroup[-1].wgt += event.wgt 2749 for key in event.reweight_data: 2750 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2751 else: 2752 neweventsgroup.append(event) 2753 fsock.write_events(neweventsgroup) 2754 lhe.close() 2755 os.remove(pjoin(dirname,'events.lhe')) 2756 else: 2757 lhe = [] 2758 lenlhe = [] 2759 misc.sprint('need to combine %s event file' % len(jobs)) 2760 globallhe = lhe_parser.MultiEventFile() 2761 globallhe.eventgroup = True 2762 for job in jobs: 2763 dirname = job['dirname'] 2764 lastline = misc.BackRead(pjoin(dirname,'events.lhe')).readline() 2765 nb_event, sumwgt, cross = [float(i) for i in lastline.split()] 2766 lastlhe = globallhe.add(pjoin(dirname,'events.lhe'),cross, 0, cross, 2767 nb_event=int(nb_event), scale=cross/sumwgt) 2768 for eventsgroup in globallhe: 2769 neweventsgroup = [] 2770 for i,event in enumerate(eventsgroup): 2771 event.rescale_weights(event.sample_scale) 2772 if i>0 and 'noidentification' not in self.run_card['fo_lhe_postprocessing'] \ 2773 and event == neweventsgroup[-1]: 2774 neweventsgroup[-1].wgt += event.wgt 2775 for key in event.reweight_data: 2776 neweventsgroup[-1].reweight_data[key] += event.reweight_data[key] 2777 else: 2778 neweventsgroup.append(event) 2779 fsock.write_events(neweventsgroup) 2780 globallhe.close() 2781 
fsock.write('</LesHouchesEvents>\n') 2782          fsock.close() 2783  2784          for job in jobs: 2785              dirname = job['dirname'] 2786              os.remove(pjoin(dirname,'events.lhe')) 2787  2788  2789  2790          misc.sprint('combining lhe file done in ', time.time()-start)
2791 2792 2793 2794 2795 2796
2797 - def combine_plots_HwU(self,jobs,out,normalisation=None):
2798 """Sums all the plots in the HwU format.""" 2799 logger.debug('Combining HwU plots.') 2800 2801 command = [sys.executable] 2802 command.append(pjoin(self.me_dir, 'bin', 'internal','histograms.py')) 2803 for job in jobs: 2804 if job['dirname'].endswith('.HwU'): 2805 command.append(job['dirname']) 2806 else: 2807 command.append(pjoin(job['dirname'],'MADatNLO.HwU')) 2808 command.append("--out="+out) 2809 command.append("--gnuplot") 2810 command.append("--band=[]") 2811 command.append("--lhapdf-config="+self.options['lhapdf']) 2812 if normalisation: 2813 command.append("--multiply="+(','.join([str(n) for n in normalisation]))) 2814 command.append("--sum") 2815 command.append("--keep_all_weights") 2816 command.append("--no_open") 2817 2818 p = misc.Popen(command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, cwd=self.me_dir) 2819 2820 while p.poll() is None: 2821 line = p.stdout.readline().decode(errors='ignore') 2822 #misc.sprint(type(line)) 2823 if any(t in line for t in ['INFO:','WARNING:','CRITICAL:','ERROR:','KEEP:']): 2824 print(line[:-1]) 2825 elif __debug__ and line: 2826 logger.debug(line[:-1])
2827 2828
2829 - def applgrid_combine(self,cross,error,jobs):
2830 """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories""" 2831 logger.debug('Combining APPLgrids \n') 2832 applcomb=pjoin(self.options['applgrid'].rstrip('applgrid-config'), 2833 'applgrid-combine') 2834 all_jobs=[] 2835 for job in jobs: 2836 all_jobs.append(job['dirname']) 2837 ngrids=len(all_jobs) 2838 nobs =len([name for name in os.listdir(all_jobs[0]) if name.endswith("_out.root")]) 2839 for obs in range(0,nobs): 2840 gdir = [pjoin(job,"grid_obs_"+str(obs)+"_out.root") for job in all_jobs] 2841 # combine APPLgrids from different channels for observable 'obs' 2842 if self.run_card["iappl"] == 1: 2843 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",self.run_name, 2844 "aMCfast_obs_"+str(obs)+"_starting_grid.root"), '--optimise']+ gdir) 2845 elif self.run_card["iappl"] == 2: 2846 unc2_inv=pow(cross/error,2) 2847 unc2_inv_ngrids=pow(cross/error,2)*ngrids 2848 misc.call([applcomb,'-o', pjoin(self.me_dir,"Events", 2849 self.run_name,"aMCfast_obs_"+str(obs)+".root"),'-s', 2850 str(unc2_inv),'--weight',str(unc2_inv)]+ gdir) 2851 for job in all_jobs: 2852 os.remove(pjoin(job,"grid_obs_"+str(obs)+"_in.root")) 2853 else: 2854 raise aMCatNLOError('iappl parameter can only be 0, 1 or 2') 2855 # after combining, delete the original grids 2856 for ggdir in gdir: 2857 os.remove(ggdir)
2858 2859
2860 - def applgrid_distribute(self,options,mode,p_dirs):
2861 """Distributes the APPLgrids ready to be filled by a second run of the code""" 2862 # if no appl_start_grid argument given, guess it from the time stamps 2863 # of the starting grid files 2864 if not('appl_start_grid' in list(options.keys()) and options['appl_start_grid']): 2865 gfiles = misc.glob(pjoin('*', 'aMCfast_obs_0_starting_grid.root'), 2866 pjoin(self.me_dir,'Events')) 2867 2868 time_stamps={} 2869 for root_file in gfiles: 2870 time_stamps[root_file]=os.path.getmtime(root_file) 2871 options['appl_start_grid']= \ 2872 max(six.iterkeys(time_stamps), key=(lambda key: 2873 time_stamps[key])).split('/')[-2] 2874 logger.info('No --appl_start_grid option given. '+\ 2875 'Guessing that start grid from run "%s" should be used.' \ 2876 % options['appl_start_grid']) 2877 2878 if 'appl_start_grid' in list(options.keys()) and options['appl_start_grid']: 2879 self.appl_start_grid = options['appl_start_grid'] 2880 start_grid_dir=pjoin(self.me_dir, 'Events', self.appl_start_grid) 2881 # check that this dir exists and at least one grid file is there 2882 if not os.path.exists(pjoin(start_grid_dir, 2883 'aMCfast_obs_0_starting_grid.root')): 2884 raise self.InvalidCmd('APPLgrid file not found: %s' % \ 2885 pjoin(start_grid_dir,'aMCfast_obs_0_starting_grid.root')) 2886 else: 2887 all_grids=[pjoin(start_grid_dir,name) for name in os.listdir( \ 2888 start_grid_dir) if name.endswith("_starting_grid.root")] 2889 nobs =len(all_grids) 2890 gstring=" ".join(all_grids) 2891 if not hasattr(self, 'appl_start_grid') or not self.appl_start_grid: 2892 raise self.InvalidCmd('No APPLgrid name currently defined.'+ 2893 'Please provide this information.') 2894 #copy the grid to all relevant directories 2895 for pdir in p_dirs: 2896 g_dirs = [file for file in os.listdir(pjoin(self.me_dir, 2897 "SubProcesses",pdir)) if file.startswith(mode+'_G') and 2898 os.path.isdir(pjoin(self.me_dir,"SubProcesses",pdir, file))] 2899 for g_dir in g_dirs: 2900 for grid in all_grids: 2901 obs=grid.split('_')[-3] 2902 files.cp(grid,pjoin(self.me_dir,"SubProcesses",pdir,g_dir, 2903 'grid_obs_'+obs+'_in.root'))
2904 2905 2906 2907
2908 - def collect_log_files(self, jobs, integration_step):
2909 """collect the log files and put them in a single, html-friendly file 2910 inside the Events/run_.../ directory""" 2911 log_file = pjoin(self.me_dir, 'Events', self.run_name, 2912 'alllogs_%d.html' % integration_step) 2913 outfile = open(log_file, 'w') 2914 2915 content = '' 2916 content += '<HTML><BODY>\n<font face="courier" size=2>' 2917 for job in jobs: 2918 # put an anchor 2919 log=pjoin(job['dirname'],'log_MINT%s.txt' % integration_step) 2920 content += '<a name=%s></a>\n' % (os.path.dirname(log).replace( 2921 pjoin(self.me_dir,'SubProcesses'),'')) 2922 # and put some nice header 2923 content += '<font color="red">\n' 2924 content += '<br>LOG file for integration channel %s, %s <br>' % \ 2925 (os.path.dirname(log).replace(pjoin(self.me_dir, 2926 'SubProcesses'), ''), 2927 integration_step) 2928 content += '</font>\n' 2929 #then just flush the content of the small log inside the big log 2930 #the PRE tag prints everything verbatim 2931 with open(log) as l: 2932 content += '<PRE>\n' + l.read() + '\n</PRE>' 2933 content +='<br>\n' 2934 outfile.write(content) 2935 content='' 2936 2937 outfile.write('</font>\n</BODY></HTML>\n') 2938 outfile.close()
2939 2940
2941 - def finalise_run_FO(self,folder_name,jobs):
2942 """Combine the plots and put the res*.txt files in the Events/run.../ folder.""" 2943 # Copy the res_*.txt files to the Events/run* folder 2944 res_files = misc.glob('res_*.txt', pjoin(self.me_dir, 'SubProcesses')) 2945 for res_file in res_files: 2946 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 2947 # Collect the plots and put them in the Events/run* folder 2948 self.combine_plots_FO(folder_name,jobs) 2949 # If doing the applgrid-stuff, also combine those grids 2950 # and put those in the Events/run* folder 2951 if self.run_card['iappl'] != 0: 2952 cross=self.cross_sect_dict['xsect'] 2953 error=self.cross_sect_dict['errt'] 2954 self.applgrid_combine(cross,error,jobs)
2955 2956
2957 - def setup_cluster_or_multicore(self):
2958 """setup the number of cores for multicore, and the cluster-type for cluster runs""" 2959 if self.cluster_mode == 1: 2960 cluster_name = self.options['cluster_type'] 2961 try: 2962 self.cluster = cluster.from_name[cluster_name](**self.options) 2963 except KeyError: 2964 # Check if a plugin define this type of cluster 2965 # check for PLUGIN format 2966 cluster_class = misc.from_plugin_import(self.plugin_path, 2967 'new_cluster', cluster_name, 2968 info = 'cluster handling will be done with PLUGIN: %(plug)s' ) 2969 if cluster_class: 2970 self.cluster = cluster_class(**self.options) 2971 2972 if self.cluster_mode == 2: 2973 try: 2974 import multiprocessing 2975 if not self.nb_core: 2976 try: 2977 self.nb_core = int(self.options['nb_core']) 2978 except TypeError: 2979 self.nb_core = multiprocessing.cpu_count() 2980 logger.info('Using %d cores' % self.nb_core) 2981 except ImportError: 2982 self.nb_core = 1 2983 logger.warning('Impossible to detect the number of cores => Using One.\n'+ 2984 'Use set nb_core X in order to set this number and be able to'+ 2985 'run in multicore.') 2986 2987 self.cluster = cluster.MultiCore(**self.options)
2988 2989
2990 - def clean_previous_results(self,options,p_dirs,folder_name):
2991 """Clean previous results. 2992 o. If doing only the reweighting step, do not delete anything and return directlty. 2993 o. Always remove all the G*_* files (from split event generation). 2994 o. Remove the G* (or born_G* or all_G*) only when NOT doing only_generation or reweight_only.""" 2995 if options['reweightonly']: 2996 return 2997 if not options['only_generation']: 2998 self.update_status('Cleaning previous results', level=None) 2999 for dir in p_dirs: 3000 #find old folders to be removed 3001 for obj in folder_name: 3002 # list all the G* (or all_G* or born_G*) directories 3003 to_rm = [file for file in \ 3004 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \ 3005 if file.startswith(obj[:-1]) and \ 3006 (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \ 3007 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))] 3008 # list all the G*_* directories (from split event generation) 3009 to_always_rm = [file for file in \ 3010 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \ 3011 if file.startswith(obj[:-1]) and 3012 '_' in file and not '_G' in file and \ 3013 (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \ 3014 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))] 3015 3016 if not options['only_generation']: 3017 to_always_rm.extend(to_rm) 3018 if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')): 3019 to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')) 3020 files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm]) 3021 return
3022 3023
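The cleanup above is essentially a prefix filter over the contents of each SubProcesses/P* directory; a minimal sketch of that filter under the same naming conventions (helper names hypothetical):

import os

def list_channel_dirs(p_dir, prefix):
    """List entries in a P* directory starting with the given prefix,
    e.g. prefix 'G' matches both 'G1' and split jobs like 'G1_2'."""
    return [name for name in os.listdir(p_dir)
            if name.startswith(prefix)
            and os.path.isdir(os.path.join(p_dir, name))]

def list_split_dirs(p_dir, prefix):
    """Only the G*_* split-generation directories (an underscore after
    the channel name, but not '_G', marks a split job)."""
    return [name for name in list_channel_dirs(p_dir, prefix)
            if '_' in name and '_G' not in name]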
3024 - def print_summary(self, options, step, mode, scale_pdf_info=[], done=True):
3025 """print a summary of the results contained in self.cross_sect_dict. 3026 step corresponds to the mintMC step, if =2 (i.e. after event generation) 3027 some additional infos are printed""" 3028 # find process name 3029 proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n') 3030 process = '' 3031 for line in proc_card_lines: 3032 if line.startswith('generate') or line.startswith('add process'): 3033 process = process+(line.replace('generate ', '')).replace('add process ','')+' ; ' 3034 lpp = {0:'l', 1:'p', -1:'pbar', 2:'elastic photon from p', 3:'elastic photon from e'} 3035 if self.ninitial == 1: 3036 proc_info = '\n Process %s' % process[:-3] 3037 else: 3038 proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \ 3039 (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']], 3040 self.run_card['ebeam1'], self.run_card['ebeam2']) 3041 3042 if self.ninitial == 1: 3043 self.cross_sect_dict['unit']='GeV' 3044 self.cross_sect_dict['xsec_string']='(Partial) decay width' 3045 self.cross_sect_dict['axsec_string']='(Partial) abs(decay width)' 3046 else: 3047 self.cross_sect_dict['unit']='pb' 3048 self.cross_sect_dict['xsec_string']='Total cross section' 3049 self.cross_sect_dict['axsec_string']='Total abs(cross section)' 3050 if self.run_card['event_norm'].lower()=='bias': 3051 self.cross_sect_dict['xsec_string']+=', incl. bias (DO NOT USE)' 3052 3053 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 3054 status = ['Determining the number of unweighted events per channel', 3055 'Updating the number of unweighted events per channel', 3056 'Summary:'] 3057 computed='(computed from LHE events)' 3058 elif mode in ['NLO', 'LO']: 3059 status = ['Results after grid setup:','Current results:', 3060 'Final results and run summary:'] 3061 computed='(computed from histogram information)' 3062 3063 if step != 2 and mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 3064 message = status[step] + '\n\n Intermediate results:' + \ 3065 ('\n Random seed: %(randinit)d' + \ 3066 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' + \ 3067 '\n %(axsec_string)s: %(xseca)8.3e +- %(erra)6.1e %(unit)s \n') \ 3068 % self.cross_sect_dict 3069 elif mode in ['NLO','LO'] and not done: 3070 if step == 0: 3071 message = '\n ' + status[0] + \ 3072 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \ 3073 self.cross_sect_dict 3074 else: 3075 message = '\n ' + status[1] + \ 3076 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \ 3077 self.cross_sect_dict 3078 3079 else: 3080 message = '\n --------------------------------------------------------------' 3081 message = message + \ 3082 '\n ' + status[2] + proc_info 3083 if mode not in ['LO', 'NLO']: 3084 message = message + \ 3085 '\n Number of events generated: %s' % self.run_card['nevents'] 3086 message = message + \ 3087 '\n %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s' % \ 3088 self.cross_sect_dict 3089 message = message + \ 3090 '\n --------------------------------------------------------------' 3091 if scale_pdf_info and (self.run_card['nevents']>=10000 or mode in ['NLO', 'LO']): 3092 if scale_pdf_info[0]: 3093 # scale uncertainties 3094 message = message + '\n Scale variation %s:' % computed 3095 for s in scale_pdf_info[0]: 3096 if s['unc']: 3097 if self.run_card['ickkw'] != -1: 3098 message = message + \ 3099 ('\n Dynamical_scale_choice %(label)i (envelope of %(size)s values): '\ 3100 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % s 3101 else: 3102 
message = message + \ 3103 ('\n Soft and hard scale dependence (added in quadrature): '\ 3104 '\n %(cen)8.3e pb +%(max_q)0.1f%% -%(min_q)0.1f%%') % s 3105 3106 else: 3107 message = message + \ 3108 ('\n Dynamical_scale_choice %(label)i: '\ 3109 '\n %(cen)8.3e pb') % s 3110 3111 if scale_pdf_info[1]: 3112 message = message + '\n PDF variation %s:' % computed 3113 for p in scale_pdf_info[1]: 3114 if p['unc']=='none': 3115 message = message + \ 3116 ('\n %(name)s (central value only): '\ 3117 '\n %(cen)8.3e pb') % p 3118 3119 elif p['unc']=='unknown': 3120 message = message + \ 3121 ('\n %(name)s (%(size)s members; combination method unknown): '\ 3122 '\n %(cen)8.3e pb') % p 3123 else: 3124 message = message + \ 3125 ('\n %(name)s (%(size)s members; using %(unc)s method): '\ 3126 '\n %(cen)8.3e pb +%(max)0.1f%% -%(min)0.1f%%') % p 3127 # pdf uncertainties 3128 message = message + \ 3129 '\n --------------------------------------------------------------' 3130 3131 3132 if (mode in ['NLO', 'LO'] and not done) or \ 3133 (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2): 3134 logger.info(message+'\n') 3135 return 3136 3137 # Some advanced general statistics are shown in the debug message at the 3138 # end of the run 3139 # Make sure it never stops a run 3140 # Gather some basic statistics for the run and extracted from the log files. 3141 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 3142 log_GV_files = misc.glob(pjoin('P*','G*','log_MINT*.txt'), 3143 pjoin(self.me_dir, 'SubProcesses')) 3144 all_log_files = log_GV_files 3145 elif mode == 'NLO': 3146 log_GV_files = misc.glob(pjoin('P*','all_G*','log_MINT*.txt'), 3147 pjoin(self.me_dir, 'SubProcesses')) 3148 all_log_files = log_GV_files 3149 3150 elif mode == 'LO': 3151 log_GV_files = '' 3152 all_log_files = misc.glob(pjoin('P*','born_G*','log_MINT*.txt'), 3153 pjoin(self.me_dir, 'SubProcesses')) 3154 else: 3155 raise aMCatNLOError('Running mode %s not supported.'%mode) 3156 3157 try: 3158 message, debug_msg = \ 3159 self.compile_advanced_stats(log_GV_files, all_log_files, message) 3160 except Exception as e: 3161 debug_msg = 'Advanced statistics collection failed with error "%s"\n'%str(e) 3162 err_string = StringIO.StringIO() 3163 traceback.print_exc(limit=4, file=err_string) 3164 debug_msg += 'Please report this backtrace to a MadGraph developer:\n%s'\ 3165 %err_string.getvalue() 3166 3167 logger.debug(debug_msg+'\n') 3168 logger.info(message+'\n') 3169 3170 # Now copy relevant information in the Events/Run_<xxx> directory 3171 evt_path = pjoin(self.me_dir, 'Events', self.run_name) 3172 open(pjoin(evt_path, 'summary.txt'),'w').write(message+'\n') 3173 open(pjoin(evt_path, '.full_summary.txt'), 3174 'w').write(message+'\n\n'+debug_msg+'\n') 3175 3176 self.archive_files(evt_path,mode)
3177
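The summary text above is assembled by %-formatting a single dictionary (self.cross_sect_dict); the same pattern in isolation, with made-up numbers:

cross_sect_dict = {'xsec_string': 'Total cross section',
                   'xsect': 1.234e+02, 'errt': 5.6e-01, 'unit': 'pb'}
print('      %(xsec_string)s: %(xsect)8.3e +- %(errt)6.1e %(unit)s'
      % cross_sect_dict)
# ->       Total cross section: 1.234e+02 +- 5.6e-01 pb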
3178 - def archive_files(self, evt_path, mode):
3179 """ Copies in the Events/Run_<xxx> directory relevant files characterizing 3180 the run.""" 3181 3182 files_to_arxiv = [pjoin('Cards','param_card.dat'), 3183 pjoin('Cards','MadLoopParams.dat'), 3184 pjoin('Cards','FKS_params.dat'), 3185 pjoin('Cards','run_card.dat'), 3186 pjoin('Subprocesses','setscales.f'), 3187 pjoin('Subprocesses','cuts.f')] 3188 3189 if mode in ['NLO', 'LO']: 3190 files_to_arxiv.append(pjoin('Cards','FO_analyse_card.dat')) 3191 3192 if not os.path.exists(pjoin(evt_path,'RunMaterial')): 3193 os.mkdir(pjoin(evt_path,'RunMaterial')) 3194 3195 for path in files_to_arxiv: 3196 if os.path.isfile(pjoin(self.me_dir,path)): 3197 files.cp(pjoin(self.me_dir,path),pjoin(evt_path,'RunMaterial')) 3198 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],cwd=evt_path) 3199 shutil.rmtree(pjoin(evt_path,'RunMaterial'))
3200
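The archiving step shells out to tar; an equivalent pure-Python sketch using the tarfile module this interface already imports (not the call actually used above, paths illustrative):

import os, shutil, tarfile

def archive_run_material(evt_path):
    """Pack evt_path/RunMaterial into RunMaterial.tar.gz and remove the
    directory, mirroring the 'tar -czpf' call above."""
    src = os.path.join(evt_path, 'RunMaterial')
    with tarfile.open(os.path.join(evt_path, 'RunMaterial.tar.gz'), 'w:gz') as tar:
        tar.add(src, arcname='RunMaterial')
    shutil.rmtree(src)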
3201 - def compile_advanced_stats(self,log_GV_files,all_log_files,message):
3202 """ This functions goes through the log files given in arguments and 3203 compiles statistics about MadLoop stability, virtual integration 3204 optimization and detection of potential error messages into a nice 3205 debug message to printed at the end of the run """ 3206 3207 def safe_float(str_float): 3208 try: 3209 return float(str_float) 3210 except ValueError: 3211 logger.debug('Could not convert the following float during'+ 3212 ' advanced statistics printout: %s'%str(str_float)) 3213 return -1.0
3214 3215 3216 # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]} 3217 # > Errors is a list of tuples with this format (log_file,nErrors) 3218 stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}} 3219 mint_search = re.compile(r"MINT(?P<ID>\d*).txt") 3220 3221 # ================================== 3222 # == MadLoop stability statistics == 3223 # ================================== 3224 3225 # Collect the fraction of unstable PS points found in the runs for 3226 # the virtuals. NB: 'Satistics' (sic) below matches the typo in MadLoop's own log output. 3227 UPS_stat_finder = re.compile( 3228 r"Satistics from MadLoop:.*"+\ 3229 r"Total points tried\:\s+(?P<ntot>\d+).*"+\ 3230 r"Stability unknown\:\s+(?P<nsun>\d+).*"+\ 3231 r"Stable PS point\:\s+(?P<nsps>\d+).*"+\ 3232 r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*"+\ 3233 r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*"+\ 3234 r"Double precision used\:\s+(?P<nddp>\d+).*"+\ 3235 r"Quadruple precision used\:\s+(?P<nqdp>\d+).*"+\ 3236 r"Initialization phase\-space points\:\s+(?P<nini>\d+).*"+\ 3237 r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*"+\ 3238 r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*",re.DOTALL) 3239 3240 unit_code_meaning = { 0 : 'Not identified (CTModeRun != -1)', 3241 1 : 'CutTools (double precision)', 3242 2 : 'PJFry++', 3243 3 : 'IREGI', 3244 4 : 'Golem95', 3245 5 : 'Samurai', 3246 6 : 'Ninja (double precision)', 3247 7 : 'COLLIER', 3248 8 : 'Ninja (quadruple precision)', 3249 9 : 'CutTools (quadruple precision)'} 3250 RetUnit_finder = re.compile( 3251 r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)") 3252 3253 3254 for gv_log in log_GV_files: 3255 channel_name = '/'.join(gv_log.split('/')[-5:-1]) 3256 log=open(gv_log,'r').read() 3257 UPS_stats = re.search(UPS_stat_finder,log) 3258 for retunit_stats in re.finditer(RetUnit_finder, log): 3259 if channel_name not in list(stats['UPS'].keys()): 3260 stats['UPS'][channel_name] = [0]*10+[[0]*10] 3261 stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \ 3262 += int(retunit_stats.group('n_occurences')) 3263 if not UPS_stats is None: 3264 try: 3265 stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot')) 3266 stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun')) 3267 stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps')) 3268 stats['UPS'][channel_name][3] += int(UPS_stats.group('nups')) 3269 stats['UPS'][channel_name][4] += int(UPS_stats.group('neps')) 3270 stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp')) 3271 stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp')) 3272 stats['UPS'][channel_name][7] += int(UPS_stats.group('nini')) 3273 stats['UPS'][channel_name][8] += int(UPS_stats.group('n100')) 3274 stats['UPS'][channel_name][9] += int(UPS_stats.group('n10')) 3275 except KeyError: 3276 stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')), 3277 int(UPS_stats.group('nsun')),int(UPS_stats.group('nsps')), 3278 int(UPS_stats.group('nups')),int(UPS_stats.group('neps')), 3279 int(UPS_stats.group('nddp')),int(UPS_stats.group('nqdp')), 3280 int(UPS_stats.group('nini')),int(UPS_stats.group('n100')), 3281 int(UPS_stats.group('n10')),[0]*10] 3282 debug_msg = "" 3283 if len(list(stats['UPS'].keys()))>0: 3284 nTotPS = sum([chan[0] for chan in stats['UPS'].values()],0) 3285 nTotsun = sum([chan[1] for chan in stats['UPS'].values()],0) 3286 nTotsps = sum([chan[2] for chan in stats['UPS'].values()],0) 3287 nTotups = sum([chan[3] for chan in stats['UPS'].values()],0) 3288 nToteps = sum([chan[4] for chan in stats['UPS'].values()],0) 3289
nTotddp = sum([chan[5] for chan in stats['UPS'].values()],0) 3290 nTotqdp = sum([chan[6] for chan in stats['UPS'].values()],0) 3291 nTotini = sum([chan[7] for chan in stats['UPS'].values()],0) 3292 nTot100 = sum([chan[8] for chan in stats['UPS'].values()],0) 3293 nTot10 = sum([chan[9] for chan in stats['UPS'].values()],0) 3294 nTot1 = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \ 3295 for i in range(10)] 3296 UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \ 3297 safe_float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()] 3298 maxUPS = max(UPSfracs, key = lambda w: w[1]) 3299 3300 tmpStr = "" 3301 tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d'%nTotPS 3302 tmpStr += '\n Stability unknown: %d'%nTotsun 3303 tmpStr += '\n Stable PS point: %d'%nTotsps 3304 tmpStr += '\n Unstable PS point (and rescued): %d'%nTotups 3305 tmpStr += '\n Unstable PS point (and not rescued): %d'%nToteps 3306 tmpStr += '\n Only double precision used: %d'%nTotddp 3307 tmpStr += '\n Quadruple precision used: %d'%nTotqdp 3308 tmpStr += '\n Initialization phase-space points: %d'%nTotini 3309 tmpStr += '\n Reduction methods used:' 3310 red_methods = [(unit_code_meaning[i],nTot1[i]) for i in \ 3311 unit_code_meaning.keys() if nTot1[i]>0] 3312 for method, n in sorted(red_methods, key= lambda l: l[1], reverse=True): 3313 tmpStr += '\n > %s%s%s'%(method,' '*(33-len(method)),n) 3314 if nTot100 != 0: 3315 debug_msg += '\n Unknown return code (100): %d'%nTot100 3316 if nTot10 != 0: 3317 debug_msg += '\n Unknown return code (10): %d'%nTot10 3318 nUnknownUnit = sum(nTot1[u] for u in range(10) if u \ 3319 not in list(unit_code_meaning.keys())) 3320 if nUnknownUnit != 0: 3321 debug_msg += '\n Unknown return code (1): %d'\ 3322 %nUnknownUnit 3323 3324 if maxUPS[1]>0.001: 3325 message += tmpStr 3326 message += '\n Total number of unstable PS point detected:'+\ 3327 ' %d (%4.2f%%)'%(nToteps,safe_float(100*nToteps)/nTotPS) 3328 message += '\n Maximum fraction of UPS points in '+\ 3329 'channel %s (%4.2f%%)'%maxUPS 3330 message += '\n Please report this to the authors while '+\ 3331 'providing the file' 3332 message += '\n %s'%str(pjoin(os.path.dirname(self.me_dir), 3333 maxUPS[0],'UPS.log')) 3334 else: 3335 debug_msg += tmpStr 3336 3337 3338 # ==================================================== 3339 # == aMC@NLO virtual integration optimization stats == 3340 # ==================================================== 3341 3342 virt_tricks_finder = re.compile( 3343 r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)"+\ 3344 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n"+\ 3345 r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)"+\ 3346 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)") 3347 3348 virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*"+\ 3349 "-?(?P<v_frac>[\d\+-Eed\.]*)\s*-?(?P<v_average>[\d\+-Eed\.]*)") 3350 3351 channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)") 3352 3353 channel_contr_list = {} 3354 for gv_log in log_GV_files: 3355 logfile=open(gv_log,'r') 3356 log = logfile.read() 3357 logfile.close() 3358 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 3359 vf_stats = None 3360 for vf_stats in re.finditer(virt_frac_finder, log): 3361 pass 3362 if not vf_stats is None: 3363 v_frac = safe_float(vf_stats.group('v_frac')) 3364 v_average = safe_float(vf_stats.group('v_average')) 3365 try: 3366 if v_frac < 
stats['virt_stats']['v_frac_min'][0]: 3367 stats['virt_stats']['v_frac_min']=(v_frac,channel_name) 3368 if v_frac > stats['virt_stats']['v_frac_max'][0]: 3369 stats['virt_stats']['v_frac_max']=(v_frac,channel_name) 3370 stats['virt_stats']['v_frac_avg'][0] += v_frac 3371 stats['virt_stats']['v_frac_avg'][1] += 1 3372 except KeyError: 3373 stats['virt_stats']['v_frac_min']=[v_frac,channel_name] 3374 stats['virt_stats']['v_frac_max']=[v_frac,channel_name] 3375 stats['virt_stats']['v_frac_avg']=[v_frac,1] 3376 3377 3378 ccontr_stats = None 3379 for ccontr_stats in re.finditer(channel_contr_finder, log): 3380 pass 3381 if not ccontr_stats is None: 3382 contrib = safe_float(ccontr_stats.group('v_contr')) 3383 try: 3384 if contrib>channel_contr_list[channel_name]: 3385 channel_contr_list[channel_name]=contrib 3386 except KeyError: 3387 channel_contr_list[channel_name]=contrib 3388 3389 3390 # Now build the list of relevant virt log files to look for the maxima 3391 # of virt fractions and such. 3392 average_contrib = 0.0 3393 for value in channel_contr_list.values(): 3394 average_contrib += value 3395 if len(list(channel_contr_list.values())) !=0: 3396 average_contrib = average_contrib / len(list(channel_contr_list.values())) 3397 3398 relevant_log_GV_files = [] 3399 excluded_channels = set([]) 3400 all_channels = set([]) 3401 for log_file in log_GV_files: 3402 channel_name = '/'.join(log_file.split('/')[-3:-1]) 3403 all_channels.add(channel_name) 3404 try: 3405 if channel_contr_list[channel_name] > (0.1*average_contrib): 3406 relevant_log_GV_files.append(log_file) 3407 else: 3408 excluded_channels.add(channel_name) 3409 except KeyError: 3410 relevant_log_GV_files.append(log_file) 3411 3412 # Now we want to use the latest occurence of accumulated result in the log file 3413 for gv_log in relevant_log_GV_files: 3414 logfile=open(gv_log,'r') 3415 log = logfile.read() 3416 logfile.close() 3417 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 3418 3419 vt_stats = None 3420 for vt_stats in re.finditer(virt_tricks_finder, log): 3421 pass 3422 if not vt_stats is None: 3423 vt_stats_group = vt_stats.groupdict() 3424 v_ratio = safe_float(vt_stats.group('v_ratio')) 3425 v_ratio_err = safe_float(vt_stats.group('v_ratio_err')) 3426 v_contr = safe_float(vt_stats.group('v_abs_contr')) 3427 v_contr_err = safe_float(vt_stats.group('v_abs_contr_err')) 3428 try: 3429 if v_ratio < stats['virt_stats']['v_ratio_min'][0]: 3430 stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name) 3431 if v_ratio > stats['virt_stats']['v_ratio_max'][0]: 3432 stats['virt_stats']['v_ratio_max']=(v_ratio,channel_name) 3433 if v_ratio < stats['virt_stats']['v_ratio_err_min'][0]: 3434 stats['virt_stats']['v_ratio_err_min']=(v_ratio_err,channel_name) 3435 if v_ratio > stats['virt_stats']['v_ratio_err_max'][0]: 3436 stats['virt_stats']['v_ratio_err_max']=(v_ratio_err,channel_name) 3437 if v_contr < stats['virt_stats']['v_contr_min'][0]: 3438 stats['virt_stats']['v_contr_min']=(v_contr,channel_name) 3439 if v_contr > stats['virt_stats']['v_contr_max'][0]: 3440 stats['virt_stats']['v_contr_max']=(v_contr,channel_name) 3441 if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]: 3442 stats['virt_stats']['v_contr_err_min']=(v_contr_err,channel_name) 3443 if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]: 3444 stats['virt_stats']['v_contr_err_max']=(v_contr_err,channel_name) 3445 except KeyError: 3446 stats['virt_stats']['v_ratio_min']=[v_ratio,channel_name] 3447 stats['virt_stats']['v_ratio_max']=[v_ratio,channel_name] 3448 
stats['virt_stats']['v_ratio_err_min']=[v_ratio_err,channel_name] 3449 stats['virt_stats']['v_ratio_err_max']=[v_ratio_err,channel_name] 3450 stats['virt_stats']['v_contr_min']=[v_contr,channel_name] 3451 stats['virt_stats']['v_contr_max']=[v_contr,channel_name] 3452 stats['virt_stats']['v_contr_err_min']=[v_contr_err,channel_name] 3453 stats['virt_stats']['v_contr_err_max']=[v_contr_err,channel_name] 3454 3455 vf_stats = None 3456 for vf_stats in re.finditer(virt_frac_finder, log): 3457 pass 3458 if not vf_stats is None: 3459 v_frac = safe_float(vf_stats.group('v_frac')) 3460 v_average = safe_float(vf_stats.group('v_average')) 3461 try: 3462 if v_average < stats['virt_stats']['v_average_min'][0]: 3463 stats['virt_stats']['v_average_min']=(v_average,channel_name) 3464 if v_average > stats['virt_stats']['v_average_max'][0]: 3465 stats['virt_stats']['v_average_max']=(v_average,channel_name) 3466 stats['virt_stats']['v_average_avg'][0] += v_average 3467 stats['virt_stats']['v_average_avg'][1] += 1 3468 except KeyError: 3469 stats['virt_stats']['v_average_min']=[v_average,channel_name] 3470 stats['virt_stats']['v_average_max']=[v_average,channel_name] 3471 stats['virt_stats']['v_average_avg']=[v_average,1] 3472 3473 try: 3474 debug_msg += '\n\n Statistics on virtual integration optimization : ' 3475 3476 debug_msg += '\n Maximum virt fraction computed %.3f (%s)'\ 3477 %tuple(stats['virt_stats']['v_frac_max']) 3478 debug_msg += '\n Minimum virt fraction computed %.3f (%s)'\ 3479 %tuple(stats['virt_stats']['v_frac_min']) 3480 debug_msg += '\n Average virt fraction computed %.3f'\ 3481 %safe_float(stats['virt_stats']['v_frac_avg'][0]/safe_float(stats['virt_stats']['v_frac_avg'][1])) 3482 debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)'%\ 3483 (len(excluded_channels),len(all_channels)) 3484 debug_msg += '\n Maximum virt ratio used %.2f (%s)'\ 3485 %tuple(stats['virt_stats']['v_average_max']) 3486 debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)'\ 3487 %tuple(stats['virt_stats']['v_ratio_max']) 3488 tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)'\ 3489 %tuple(stats['virt_stats']['v_ratio_err_max']) 3490 debug_msg += tmpStr 3491 # After all it was decided that it is better not to alarm the user unecessarily 3492 # with such printout of the statistics. 3493 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0 or \ 3494 # stats['virt_stats']['v_ratio_err_max'][0]>100.0: 3495 # message += "\n Suspiciously large MC error in :" 3496 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0: 3497 # message += tmpStr 3498 3499 tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)'\ 3500 %tuple(stats['virt_stats']['v_contr_err_max']) 3501 debug_msg += tmpStr 3502 # if stats['virt_stats']['v_contr_err_max'][0]>100.0: 3503 # message += tmpStr 3504 3505 3506 except KeyError: 3507 debug_msg += '\n Could not find statistics on the integration optimization. 
' 3508 3509 # ======================================= 3510 # == aMC@NLO timing profile statistics == 3511 # ======================================= 3512 3513 timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*"+\ 3514 "(?P<time>[\d\+-Eed\.]*)\s*") 3515 3516 for logf in log_GV_files: 3517 logfile=open(logf,'r') 3518 log = logfile.read() 3519 logfile.close() 3520 channel_name = '/'.join(logf.split('/')[-3:-1]) 3521 mint = re.search(mint_search,logf) 3522 if not mint is None: 3523 channel_name = channel_name+' [step %s]'%mint.group('ID') 3524 3525 for time_stats in re.finditer(timing_stat_finder, log): 3526 try: 3527 stats['timings'][time_stats.group('name')][channel_name]+=\ 3528 safe_float(time_stats.group('time')) 3529 except KeyError: 3530 if time_stats.group('name') not in list(stats['timings'].keys()): 3531 stats['timings'][time_stats.group('name')] = {} 3532 stats['timings'][time_stats.group('name')][channel_name]=\ 3533 safe_float(time_stats.group('time')) 3534 3535 # useful inline function 3536 Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs))) 3537 try: 3538 totTimeList = [(time, chan) for chan, time in \ 3539 stats['timings']['Total'].items()] 3540 except KeyError: 3541 totTimeList = [] 3542 3543 totTimeList.sort() 3544 if len(totTimeList)>0: 3545 debug_msg += '\n\n Inclusive timing profile :' 3546 debug_msg += '\n Overall slowest channel %s (%s)'%\ 3547 (Tstr(totTimeList[-1][0]),totTimeList[-1][1]) 3548 debug_msg += '\n Average channel running time %s'%\ 3549 Tstr(sum([el[0] for el in totTimeList])/len(totTimeList)) 3550 debug_msg += '\n Aggregated total running time %s'%\ 3551 Tstr(sum([el[0] for el in totTimeList])) 3552 else: 3553 debug_msg += '\n\n Inclusive timing profile not available.' 3554 3555 sorted_keys = sorted(list(stats['timings'].keys()), key= lambda stat: \ 3556 sum(stats['timings'][stat].values()), reverse=True) 3557 for name in sorted_keys: 3558 if name=='Total': 3559 continue 3560 if sum(stats['timings'][name].values())<=0.0: 3561 debug_msg += '\n Zero time record for %s.'%name 3562 continue 3563 try: 3564 TimeList = [((100.0*time/stats['timings']['Total'][chan]), 3565 chan) for chan, time in stats['timings'][name].items()] 3566 except (KeyError, ZeroDivisionError): 3567 debug_msg += '\n\n Timing profile for %s unavailable.'%name 3568 continue 3569 TimeList.sort() 3570 debug_msg += '\n Timing profile for <%s> :'%name 3571 try: 3572 debug_msg += '\n Overall fraction of time %.3f %%'%\ 3573 safe_float((100.0*(sum(stats['timings'][name].values())/ 3574 sum(stats['timings']['Total'].values())))) 3575 except (KeyError, ZeroDivisionError): 3576 debug_msg += '\n Overall fraction of time unavailable.' 3577 debug_msg += '\n Largest fraction of time %.3f %% (%s)'%\ 3578 (TimeList[-1][0],TimeList[-1][1]) 3579 debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\ 3580 (TimeList[0][0],TimeList[0][1]) 3581 3582 # ============================== 3583 # == log file error detection == 3584 # ============================== 3585 3586 # Find the number of potential errors found in all log files 3587 # This re is a simple case-insensitive match on 'error', with a veto 3588 # added to exclude the sentence 3589 # "See Section 6 of paper for error calculation." 3590 # which appears in the header of lhapdf in the logs.
3591 err_finder = re.compile(\ 3592 r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE) 3593 for log in all_log_files: 3594 logfile=open(log,'r') 3595 nErrors = len(re.findall(err_finder, logfile.read())) 3596 logfile.close() 3597 if nErrors != 0: 3598 stats['Errors'].append((str(log),nErrors)) 3599 3600 nErrors = sum([err[1] for err in stats['Errors']],0) 3601 if nErrors != 0: 3602 debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\ 3603 %(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\ 3604 'found in the following log file%s:'%('s' if \ 3605 len(stats['Errors'])>1 else '') 3606 for error in stats['Errors'][:3]: 3607 log_name = '/'.join(error[0].split('/')[-5:]) 3608 debug_msg += '\n > %d error%s in %s'%\ 3609 (error[1],'s' if error[1]>1 else '',log_name) 3610 if len(stats['Errors'])>3: 3611 nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0) 3612 nRemainingLogs = len(stats['Errors'])-3 3613 debug_msg += '\n And another %d error%s in %d other log file%s'%\ 3614 (nRemainingErrors, 's' if nRemainingErrors>1 else '', 3615 nRemainingLogs, 's ' if nRemainingLogs>1 else '') 3616 3617 return message, debug_msg 3618 3619
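Several of the loops above rely on the same idiom: exhaust re.finditer and keep only the last match, since these logs append updated values and the final occurrence is the current one. A compact, self-contained illustration with a toy log and a simplified pattern:

import re

log = ('update virtual fraction to : 0.50 0.40\n'
       'update virtual fraction to : 0.25 0.30\n')
finder = re.compile(r"update virtual fraction to\s*:\s*"
                    r"(?P<v_frac>[\d.eEdD+-]+)\s+(?P<v_average>[\d.eEdD+-]+)")

last = None
for last in finder.finditer(log):
    pass  # exhaust the iterator; 'last' keeps the final match
if last is not None:
    print(last.group('v_frac'), last.group('v_average'))  # -> 0.25 0.30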
3620 - def reweight_and_collect_events(self, options, mode, nevents, event_norm):
3621 """this function calls the reweighting routines and creates the event file in the 3622 Event dir. Return the name of the event file created 3623 """ 3624 scale_pdf_info=[] 3625 if any(self.run_card['reweight_scale']) or any(self.run_card['reweight_PDF']) or \ 3626 len(self.run_card['dynamical_scale_choice']) > 1 or len(self.run_card['lhaid']) > 1\ 3627 or self.run_card['store_rwgt_info']: 3628 scale_pdf_info = self.run_reweight(options['reweightonly']) 3629 self.update_status('Collecting events', level='parton', update_results=True) 3630 misc.compile(['collect_events'], 3631 cwd=pjoin(self.me_dir, 'SubProcesses'), nocompile=options['nocompile']) 3632 p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'), 3633 stdin=subprocess.PIPE, 3634 stdout=subprocess.PIPE, 3635 stderr=subprocess.PIPE) 3636 if event_norm.lower() == 'sum': 3637 out, err = p.communicate(input = '1\n'.encode()) 3638 elif event_norm.lower() == 'unity': 3639 out, err = p.communicate(input = '3\n'.encode()) 3640 elif event_norm.lower() == 'bias': 3641 out, err = p.communicate(input = '0\n'.encode()) 3642 else: 3643 out, err = p.communicate(input = '2\n'.encode()) 3644 3645 out = out.decode(errors='ignore') 3646 data = str(out) 3647 #get filename from collect events 3648 filename = data.split()[-1].strip().replace('\\n','').replace('"','').replace("'",'') 3649 3650 if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)): 3651 raise aMCatNLOError('An error occurred during event generation. ' + \ 3652 'The event file has not been created: \n %s' % data) 3653 evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 3654 misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file) 3655 if not options['reweightonly']: 3656 self.print_summary(options, 2, mode, scale_pdf_info) 3657 res_files = misc.glob('res*.txt', pjoin(self.me_dir, 'SubProcesses')) 3658 for res_file in res_files: 3659 files.mv(res_file,pjoin(self.me_dir, 'Events', self.run_name)) 3660 3661 logger.info('The %s file has been generated.\n' % (evt_file)) 3662 self.results.add_detail('nb_event', nevents) 3663 self.update_status('Events generated', level='parton', update_results=True) 3664 return evt_file[:-3]
3665 3666
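collect_events is an interactive program driven through its stdin, hence the encoded one-line menu answers above. The same Popen/communicate pattern in isolation, with a portable echo process standing in for ./collect_events (illustration only):

import subprocess, sys

# stand-in for ./collect_events: echo stdin back
echo = [sys.executable, '-c', 'import sys; sys.stdout.write(sys.stdin.read())']
p = subprocess.Popen(echo, stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate(input='1\n'.encode())  # '1' is the event_norm=sum choice
print(out.decode(errors='ignore').strip())  # -> 1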
3667 - def run_mcatnlo(self, evt_file, options):
3668 """runs mcatnlo on the generated event file, to produce showered-events 3669 """ 3670 logger.info('Preparing MCatNLO run') 3671 try: 3672 misc.gunzip(evt_file) 3673 except Exception: 3674 pass 3675 3676 self.banner = banner_mod.Banner(evt_file) 3677 shower = self.banner.get_detail('run_card', 'parton_shower').upper() 3678 3679 #check that the number of split event files divides the number of 3680 # events, otherwise set it to 1 3681 if int(self.banner.get_detail('run_card', 'nevents') / \ 3682 self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \ 3683 != self.banner.get_detail('run_card', 'nevents'): 3684 logger.warning(\ 3685 'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \ 3686 'Setting it to 1.') 3687 self.shower_card['nsplit_jobs'] = 1 3688 3689 # don't split jobs if the user asks to shower only a part of the events 3690 if self.shower_card['nevents'] > 0 and \ 3691 self.shower_card['nevents'] < self.banner.get_detail('run_card', 'nevents') and \ 3692 self.shower_card['nsplit_jobs'] != 1: 3693 logger.warning(\ 3694 'Only a part of the events will be showered.\n' + \ 3695 'Setting nsplit_jobs in the shower_card to 1.') 3696 self.shower_card['nsplit_jobs'] = 1 3697 3698 self.banner_to_mcatnlo(evt_file) 3699 3700 # if fastjet has to be linked (in extralibs) then 3701 # add lib /include dirs for fastjet if fastjet-config is present on the 3702 # system, otherwise add fjcore to the files to combine 3703 if 'fastjet' in self.shower_card['extralibs']: 3704 #first, check that stdc++ is also linked 3705 if not 'stdc++' in self.shower_card['extralibs']: 3706 logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS') 3707 self.shower_card['extralibs'] += ' stdc++' 3708 # then check if options[fastjet] corresponds to a valid fj installation 3709 try: 3710 #this is for a complete fj installation 3711 p = subprocess.Popen([self.options['fastjet'], '--prefix'], \ 3712 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 3713 output, error = p.communicate() 3714 #remove the line break from output (last character) 3715 output = output.decode(errors='ignore')[:-1] 3716 # add lib/include paths 3717 if not pjoin(output, 'lib') in self.shower_card['extrapaths']: 3718 logger.warning('Linking FastJet: updating EXTRAPATHS') 3719 self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib') 3720 if not pjoin(output, 'include') in self.shower_card['includepaths']: 3721 logger.warning('Linking FastJet: updating INCLUDEPATHS') 3722 self.shower_card['includepaths'] += ' ' + pjoin(output, 'include') 3723 # to be changed in the fortran wrapper 3724 include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ' 3725 namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ' 3726 except Exception: 3727 logger.warning('Linking FastJet: using fjcore') 3728 # this is for FJcore, so no FJ library has to be linked 3729 self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '') 3730 if not 'fjcore.o' in self.shower_card['analyse']: 3731 self.shower_card['analyse'] += ' fjcore.o' 3732 # to be changed in the fortran wrapper 3733 include_line = '#include "fjcore.hh"//INCLUDE_FJ' 3734 namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ' 3735 # change the fortran wrapper with the correct namespaces/include 3736 fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n') 3737 for line in fjwrapper_lines: 3738 if '//INCLUDE_FJ' in line: 3739 fjwrapper_lines[fjwrapper_lines.index(line)] = include_line 
3740 if '//NAMESPACE_FJ' in line: 3741 fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line 3742 with open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w') as fsock: 3743 fsock.write('\n'.join(fjwrapper_lines) + '\n') 3744 3745 extrapaths = self.shower_card['extrapaths'].split() 3746 3747 # check that the path needed by HW++ and PY8 are set if one uses these shower 3748 if shower in ['HERWIGPP', 'PYTHIA8']: 3749 path_dict = {'HERWIGPP': ['hepmc_path', 3750 'thepeg_path', 3751 'hwpp_path'], 3752 'PYTHIA8': ['pythia8_path']} 3753 3754 if not all([self.options[ppath] and os.path.exists(self.options[ppath]) for ppath in path_dict[shower]]): 3755 raise aMCatNLOError('Some paths are missing or invalid in the configuration file.\n' + \ 3756 ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower]))) 3757 3758 if shower == 'HERWIGPP': 3759 extrapaths.append(pjoin(self.options['hepmc_path'], 'lib')) 3760 self.shower_card['extrapaths'] += ' %s' % pjoin(self.options['hepmc_path'], 'lib') 3761 3762 # add the HEPMC path of the pythia8 installation 3763 if shower == 'PYTHIA8': 3764 hepmc = subprocess.Popen([pjoin(self.options['pythia8_path'], 'bin', 'pythia8-config'), '--hepmc2'], 3765 stdout = subprocess.PIPE).stdout.read().decode(errors='ignore').strip() 3766 #this gives all the flags, i.e. 3767 #-I/Path/to/HepMC/include -L/Path/to/HepMC/lib -lHepMC 3768 # we just need the path to the HepMC libraries 3769 extrapaths.append(hepmc.split()[1].replace('-L', '')) 3770 3771 if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 3772 extrapaths.append(pjoin(self.options['pythia8_path'], 'lib')) 3773 3774 # set the PATH for the dynamic libraries 3775 if sys.platform == 'darwin': 3776 ld_library_path = 'DYLD_LIBRARY_PATH' 3777 else: 3778 ld_library_path = 'LD_LIBRARY_PATH' 3779 if ld_library_path in list(os.environ.keys()): 3780 paths = os.environ[ld_library_path] 3781 else: 3782 paths = '' 3783 paths += ':' + ':'.join(extrapaths) 3784 os.putenv(ld_library_path, paths) 3785 3786 shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat') 3787 self.shower_card.write_card(shower, shower_card_path) 3788 3789 # overwrite if shower_card_set.dat exists in MCatNLO 3790 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')): 3791 files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'), 3792 pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')) 3793 3794 mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log') 3795 self.update_status('Compiling MCatNLO for %s...' 
% shower, level='shower') 3796 3797 3798 # libdl may be needded for pythia 82xx 3799 #if shower == 'PYTHIA8' and not \ 3800 # os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')) and \ 3801 # 'dl' not in self.shower_card['extralibs'].split(): 3802 # # 'dl' has to be linked with the extralibs 3803 # self.shower_card['extralibs'] += ' dl' 3804 # logger.warning("'dl' was added to extralibs from the shower_card.dat.\n" + \ 3805 # "It is needed for the correct running of PY8.2xx.\n" + \ 3806 # "If this library cannot be found on your system, a crash will occur.") 3807 3808 misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'), 3809 stderr=open(mcatnlo_log, 'w'), 3810 cwd=pjoin(self.me_dir, 'MCatNLO'), 3811 close_fds=True) 3812 3813 exe = 'MCATNLO_%s_EXE' % shower 3814 if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \ 3815 not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')): 3816 print(open(mcatnlo_log).read()) 3817 raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log) 3818 logger.info(' ... done') 3819 3820 # create an empty dir where to run 3821 count = 1 3822 while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 3823 (shower, count))): 3824 count += 1 3825 rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 3826 (shower, count)) 3827 os.mkdir(rundir) 3828 files.cp(shower_card_path, rundir) 3829 3830 #look for the event files (don't resplit if one asks for the 3831 # same number of event files as in the previous run) 3832 event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name)) 3833 if max(len(event_files), 1) != self.shower_card['nsplit_jobs']: 3834 logger.info('Cleaning old files and splitting the event file...') 3835 #clean the old files 3836 files.rm([f for f in event_files if 'events.lhe' not in f]) 3837 if self.shower_card['nsplit_jobs'] > 1: 3838 misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities'), nocompile=options['nocompile']) 3839 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')], 3840 stdin=subprocess.PIPE, 3841 stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'), 3842 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 3843 p.communicate(input = ('events.lhe\n%d\n' % self.shower_card['nsplit_jobs']).encode()) 3844 logger.info('Splitting done.') 3845 event_files = misc.glob('events_*.lhe', pjoin(self.me_dir, 'Events', self.run_name)) 3846 3847 event_files.sort() 3848 3849 self.update_status('Showering events...', level='shower') 3850 logger.info('(Running in %s)' % rundir) 3851 if shower != 'PYTHIA8': 3852 files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir) 3853 files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir) 3854 else: 3855 # special treatment for pythia8 3856 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir) 3857 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir) 3858 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): # this is PY8.1xxx 3859 files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir) 3860 files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir) 3861 else: # this is PY8.2xxx 3862 files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir) 3863 #link the hwpp exe in the rundir 3864 if shower == 'HERWIGPP': 3865 try: 3866 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): 3867 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir) 3868 if 
os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): 3869 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig'), rundir) 3870 except Exception: 3871 raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.') 3872 3873 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')): 3874 files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir) 3875 3876 files.ln(evt_file, rundir, 'events.lhe') 3877 for i, f in enumerate(event_files): 3878 files.ln(f, rundir,'events_%d.lhe' % (i + 1)) 3879 3880 if not self.shower_card['analyse']: 3881 # an hep/hepmc file as output 3882 out_id = 'HEP' 3883 else: 3884 # one or more .top file(s) as output 3885 if "HwU" in self.shower_card['analyse']: 3886 out_id = 'HWU' 3887 else: 3888 out_id = 'TOP' 3889 3890 # write the executable 3891 with open(pjoin(rundir, 'shower.sh'), 'w') as fsock: 3892 # set the PATH for the dynamic libraries 3893 if sys.platform == 'darwin': 3894 ld_library_path = 'DYLD_LIBRARY_PATH' 3895 else: 3896 ld_library_path = 'LD_LIBRARY_PATH' 3897 fsock.write(open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read() \ 3898 % {'ld_library_path': ld_library_path, 3899 'extralibs': ':'.join(extrapaths)}) 3900 subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')]) 3901 3902 if event_files: 3903 arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)] \ 3904 for i in range(len(event_files))] 3905 else: 3906 arg_list = [[shower, out_id, self.run_name]] 3907 3908 self.run_all({rundir: 'shower.sh'}, arg_list, 'shower') 3909 self.njobs = 1 3910 self.wait_for_complete('shower') 3911 3912 # now collect the results 3913 message = '' 3914 warning = '' 3915 to_gzip = [evt_file] 3916 if out_id == 'HEP': 3917 #copy the showered stdhep/hepmc file back in events 3918 if shower in ['PYTHIA8', 'HERWIGPP']: 3919 hep_format = 'HEPMC' 3920 ext = 'hepmc' 3921 else: 3922 hep_format = 'StdHEP' 3923 ext = 'hep' 3924 3925 hep_file = '%s_%s_0.%s.gz' % \ 3926 (pjoin(os.path.dirname(evt_file), 'events'), shower, ext) 3927 count = 0 3928 3929 # find the first available name for the output: 3930 # check existing results with or without event splitting 3931 while os.path.exists(hep_file) or \ 3932 os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)) : 3933 count +=1 3934 hep_file = '%s_%s_%d.%s.gz' % \ 3935 (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext) 3936 3937 try: 3938 if self.shower_card['nsplit_jobs'] == 1: 3939 files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file) 3940 message = ('The file %s has been generated. 
\nIt contains showered' + \ 3941 ' and hadronized events in the %s format obtained' + \ 3942 ' showering the parton-level event file %s.gz with %s') % \ 3943 (hep_file, hep_format, evt_file, shower) 3944 else: 3945 hep_list = [] 3946 for i in range(self.shower_card['nsplit_jobs']): 3947 hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext))) 3948 files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1]) 3949 message = ('The following files have been generated:\n %s\nThey contain showered' + \ 3950 ' and hadronized events in the %s format obtained' + \ 3951 ' showering the (split) parton-level event file %s.gz with %s') % \ 3952 ('\n '.join(hep_list), hep_format, evt_file, shower) 3953 3954 except (OSError, IOError): 3955 raise aMCatNLOError('No file has been generated, an error occurred.'+\ 3956 ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log')) 3957 3958 # run the plot creation in a secure way 3959 if hep_format == 'StdHEP': 3960 try: 3961 self.do_plot('%s -f' % self.run_name) 3962 except Exception as error: 3963 logger.info("Failed to produce the plots. Continuing...") 3964 pass 3965 3966 elif out_id == 'TOP' or out_id == 'HWU': 3967 #copy the topdrawer or HwU file(s) back in events 3968 if out_id=='TOP': 3969 ext='top' 3970 elif out_id=='HWU': 3971 ext='HwU' 3972 topfiles = [] 3973 top_tars = [tarfile.TarFile(f) for f in misc.glob('histfile*.tar', rundir)] 3974 for top_tar in top_tars: 3975 topfiles.extend(top_tar.getnames()) 3976 3977 # safety check 3978 if len(top_tars) != self.shower_card['nsplit_jobs']: 3979 raise aMCatNLOError('%d job(s) expected, %d file(s) found' % \ 3980 (self.shower_card['nsplit_jobs'], len(top_tars))) 3981 3982 # find the first available name for the output: 3983 # check existing results with or without event splitting 3984 filename = 'plot_%s_%d_' % (shower, 1) 3985 count = 1 3986 while os.path.exists(pjoin(self.me_dir, 'Events', 3987 self.run_name, '%s0.%s' % (filename,ext))) or \ 3988 os.path.exists(pjoin(self.me_dir, 'Events', 3989 self.run_name, '%s0__1.%s' % (filename,ext))): 3990 count += 1 3991 filename = 'plot_%s_%d_' % (shower, count) 3992 3993 if out_id=='TOP': 3994 hist_format='TopDrawer format' 3995 elif out_id=='HWU': 3996 hist_format='HwU and GnuPlot formats' 3997 3998 if not topfiles: 3999 # if no topfiles are found just warn the user 4000 warning = 'No .top file has been generated.
For the results of your ' +\ 4001 'run, please check inside %s' % rundir 4002 elif self.shower_card['nsplit_jobs'] == 1: 4003 # only one job for the shower 4004 top_tars[0].extractall(path = rundir) 4005 plotfiles = [] 4006 for i, file in enumerate(topfiles): 4007 if out_id=='TOP': 4008 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 4009 '%s%d.top' % (filename, i)) 4010 files.mv(pjoin(rundir, file), plotfile) 4011 elif out_id=='HWU': 4012 out=pjoin(self.me_dir,'Events', 4013 self.run_name,'%s%d'% (filename,i)) 4014 histos=[{'dirname':pjoin(rundir,file)}] 4015 self.combine_plots_HwU(histos,out) 4016 try: 4017 misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\ 4018 stdout=os.open(os.devnull, os.O_RDWR),\ 4019 stderr=os.open(os.devnull, os.O_RDWR),\ 4020 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 4021 except Exception: 4022 pass 4023 plotfile=pjoin(self.me_dir,'Events',self.run_name, 4024 '%s%d.HwU'% (filename,i)) 4025 plotfiles.append(plotfile) 4026 4027 ffiles = 'files' 4028 have = 'have' 4029 if len(plotfiles) == 1: 4030 ffiles = 'file' 4031 have = 'has' 4032 4033 message = ('The %s %s %s been generated, with histograms in the' + \ 4034 ' %s, obtained by showering the parton-level' + \ 4035 ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have, \ 4036 hist_format, evt_file, shower) 4037 else: 4038 # many jobs for the shower have been run 4039 topfiles_set = set(topfiles) 4040 plotfiles = [] 4041 for j, top_tar in enumerate(top_tars): 4042 top_tar.extractall(path = rundir) 4043 for i, file in enumerate(topfiles_set): 4044 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 4045 '%s%d__%d.%s' % (filename, i, j + 1,ext)) 4046 files.mv(pjoin(rundir, file), plotfile) 4047 plotfiles.append(plotfile) 4048 4049 # check if the user asked to combine the .top into a single file 4050 if self.shower_card['combine_td']: 4051 misc.compile(['sum_plots'], cwd = pjoin(self.me_dir, 'Utilities')) 4052 4053 if self.banner.get('run_card', 'event_norm').lower() == 'sum': 4054 norm = 1. 
4055 else: 4056 norm = 1./float(self.shower_card['nsplit_jobs']) 4057 4058 plotfiles2 = [] 4059 for i, file in enumerate(topfiles_set): 4060 filelist = ['%s%d__%d.%s' % (filename, i, j + 1,ext) \ 4061 for j in range(self.shower_card['nsplit_jobs'])] 4062 if out_id=='TOP': 4063 infile="%d\n%s\n%s\n" % \ 4064 (self.shower_card['nsplit_jobs'], 4065 '\n'.join(filelist), 4066 '\n'.join([str(norm)] * self.shower_card['nsplit_jobs'])) 4067 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')], 4068 stdin=subprocess.PIPE, 4069 stdout=os.open(os.devnull, os.O_RDWR), 4070 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 4071 p.communicate(input = infile.encode()) 4072 files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'), 4073 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i))) 4074 elif out_id=='HWU': 4075 out=pjoin(self.me_dir,'Events', 4076 self.run_name,'%s%d'% (filename,i)) 4077 histos=[] 4078 norms=[] 4079 for plotfile in plotfiles: 4080 histos.append({'dirname':plotfile}) 4081 norms.append(norm) 4082 self.combine_plots_HwU(histos,out,normalisation=norms) 4083 try: 4084 misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\ 4085 stdout=os.open(os.devnull, os.O_RDWR),\ 4086 stderr=os.open(os.devnull, os.O_RDWR),\ 4087 cwd=pjoin(self.me_dir, 'Events',self.run_name)) 4088 except Exception: 4089 pass 4090 4091 plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i,ext))) 4092 tar = tarfile.open( 4093 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz') 4094 for f in filelist: 4095 tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f) 4096 files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist]) 4097 4098 tar.close() 4099 4100 ffiles = 'files' 4101 have = 'have' 4102 if len(plotfiles2) == 1: 4103 ffiles = 'file' 4104 have = 'has' 4105 4106 message = ('The %s %s %s been generated, with histograms in the' + \ 4107 ' %s, obtained by showering the parton-level' + \ 4108 ' file %s.gz with %s.\n' + \ 4109 'The files from the different shower ' + \ 4110 'jobs (before combining them) can be found inside %s.') % \ 4111 (ffiles, ', '.join(plotfiles2), have, hist_format,\ 4112 evt_file, shower, 4113 ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2])) 4114 4115 else: 4116 message = ('The following files have been generated:\n %s\n' + \ 4117 'They contain histograms in the' + \ 4118 ' %s, obtained by showering the parton-level' + \ 4119 ' file %s.gz with %s.') % ('\n '.join(plotfiles), \ 4120 hist_format, evt_file, shower) 4121 4122 # Now arxiv the shower card used if RunMaterial is present 4123 run_dir_path = pjoin(rundir, self.run_name) 4124 if os.path.exists(pjoin(run_dir_path,'RunMaterial.tar.gz')): 4125 misc.call(['tar','-xzpf','RunMaterial.tar.gz'],cwd=run_dir_path) 4126 files.cp(pjoin(self.me_dir,'Cards','shower_card.dat'), 4127 pjoin(run_dir_path,'RunMaterial','shower_card_for_%s_%d.dat'\ 4128 %(shower, count))) 4129 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'], 4130 cwd=run_dir_path) 4131 shutil.rmtree(pjoin(run_dir_path,'RunMaterial')) 4132 4133 if self.run_card['ickkw'] >0 : 4134 if self.run_card['ickkw'] != 3 or shower != 'PYTHIA8': 4135 logger.warning("Merged cross-section not retrieved by MadGraph. 
Please check the parton-shower log to get the correct cross-section after merging") 4136 else: 4137 pythia_log = misc.BackRead(pjoin(rundir, "mcatnlo_run.log") ) 4138 4139 pythiare = re.compile("\s*Les Houches User Process\(es\)\s+9999\s*\|\s*(?P<generated>\d+)\s+(?P<tried>\d+)\s+(?P<accepted>\d+)\s*\|\s*(?P<xsec>[\d\.DeE\-+]+)\s+(?P<xerr>[\d\.DeE\-+]+)\s*\|") 4140 # | Les Houches User Process(es) 9999 | 10000 10000 7115 | 1.120e-04 0.000e+00 | 4141 4142 for line in pythia_log: 4143 info = pythiare.search(line) 4144 if not info: 4145 continue 4146 try: 4147 # Pythia cross section in mb, we want pb 4148 sigma_m = float(info.group('xsec').replace('D','E')) *1e9 4149 sigma_err = float(info.group('xerr').replace('D','E')) *1e9 4150 Nacc = int(info.group('accepted')) 4151 #Ntry = int(info.group('accepted')) 4152 except: 4153 logger.warning("Merged cross-section not retrieved by MadGraph. Please check the parton-shower log to get the correct cross-section after merging") 4154 break 4155 4156 self.results.add_detail('cross_pythia', sigma_m) 4157 self.results.add_detail('nb_event_pythia', Nacc) 4158 self.results.add_detail('error_pythia', sigma_err) 4159 self.results.add_detail('shower_dir', os.path.basename(rundir)) 4160 logger.info("\nFxFx Cross-Section:\n"+\ 4161 "======================\n"+\ 4162 " %f pb.\n" 4163 " Number of events after merging: %s\n", sigma_m, Nacc, '$MG:BOLD') 4164 break 4165 else: 4166 logger.warning("Merged cross-section not retrieved by MadGraph. Please check the parton-shower log to get the correct cross-section after merging") 4167 4168 4169 4170 4171 4172 4173 # end of the run, gzip files and print out the message/warning 4174 for f in to_gzip: 4175 misc.gzip(f) 4176 if message: 4177 logger.info(message) 4178 if warning: 4179 logger.warning(warning) 4180 4181 self.update_status('Run complete', level='shower', update_results=True)
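The FxFx branch above extracts the merged cross section from one line of the Pythia8 statistics table; applying the same regular expression to the sample line quoted in the code (and converting Pythia's mb to pb) gives:

import re

sample = (' |   Les Houches User Process(es)           9999 |'
          '       10000   10000   7115 |   1.120e-04  0.000e+00 |')
pythiare = re.compile(r"\s*Les Houches User Process\(es\)\s+9999\s*\|"
                      r"\s*(?P<generated>\d+)\s+(?P<tried>\d+)\s+(?P<accepted>\d+)\s*\|"
                      r"\s*(?P<xsec>[\d\.DeE\-+]+)\s+(?P<xerr>[\d\.DeE\-+]+)\s*\|")
info = pythiare.search(sample)
sigma_pb = float(info.group('xsec').replace('D', 'E')) * 1e9  # mb -> pb
print('%.1f pb, %s accepted events' % (sigma_pb, info.group('accepted')))
# -> 112000.0 pb, 7115 accepted events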
4182 4183 ############################################################################
4184 - def set_run_name(self, name, tag=None, level='parton', reload_card=False,**opts):
4185 """define the run name, the run_tag, the banner and the results.""" 4186 4187 # when are we force to change the tag new_run:previous run requiring changes 4188 upgrade_tag = {'parton': ['parton','delphes','shower','madanalysis5_hadron'], 4189 'shower': ['shower','delphes','madanalysis5_hadron'], 4190 'delphes':['delphes'], 4191 'madanalysis5_hadron':['madanalysis5_hadron'], 4192 'plot':[]} 4193 4194 if name == self.run_name: 4195 if reload_card: 4196 run_card = pjoin(self.me_dir, 'Cards','run_card.dat') 4197 self.run_card = banner_mod.RunCardNLO(run_card) 4198 4199 #check if we need to change the tag 4200 if tag: 4201 self.run_card['run_tag'] = tag 4202 self.run_tag = tag 4203 self.results.add_run(self.run_name, self.run_card) 4204 else: 4205 for tag in upgrade_tag[level]: 4206 if getattr(self.results[self.run_name][-1], tag): 4207 tag = self.get_available_tag() 4208 self.run_card['run_tag'] = tag 4209 self.run_tag = tag 4210 self.results.add_run(self.run_name, self.run_card) 4211 break 4212 return # Nothing to do anymore 4213 4214 # save/clean previous run 4215 if self.run_name: 4216 self.store_result() 4217 # store new name 4218 self.run_name = name 4219 4220 # Read run_card 4221 run_card = pjoin(self.me_dir, 'Cards','run_card.dat') 4222 self.run_card = banner_mod.RunCardNLO(run_card) 4223 4224 new_tag = False 4225 # First call for this run -> set the banner 4226 self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag) 4227 if 'mgruncard' in self.banner: 4228 self.run_card = self.banner.charge_card('run_card') 4229 if tag: 4230 self.run_card['run_tag'] = tag 4231 new_tag = True 4232 elif not self.run_name in self.results and level =='parton': 4233 pass # No results yet, so current tag is fine 4234 elif not self.run_name in self.results: 4235 #This is only for case when you want to trick the interface 4236 logger.warning('Trying to run data on unknown run.') 4237 self.results.add_run(name, self.run_card) 4238 self.results.update('add run %s' % name, 'all', makehtml=True) 4239 else: 4240 for tag in upgrade_tag[level]: 4241 4242 if getattr(self.results[self.run_name][-1], tag): 4243 # LEVEL is already define in the last tag -> need to switch tag 4244 tag = self.get_available_tag() 4245 self.run_card['run_tag'] = tag 4246 new_tag = True 4247 break 4248 if not new_tag: 4249 # We can add the results to the current run 4250 tag = self.results[self.run_name][-1]['tag'] 4251 self.run_card['run_tag'] = tag # ensure that run_tag is correct 4252 4253 4254 if name in self.results and not new_tag: 4255 self.results.def_current(self.run_name) 4256 else: 4257 self.results.add_run(self.run_name, self.run_card) 4258 4259 self.run_tag = self.run_card['run_tag'] 4260 4261 # Return the tag of the previous run having the required data for this 4262 # tag/run to working wel. 4263 if level == 'parton': 4264 return 4265 elif level == 'pythia': 4266 return self.results[self.run_name][0]['tag'] 4267 else: 4268 for i in range(-1,-len(self.results[self.run_name])-1,-1): 4269 tagRun = self.results[self.run_name][i] 4270 if tagRun.pythia: 4271 return tagRun['tag']
4272 4273
4274 - def store_result(self):
4275 """ tar the pythia results. This is done when we are quite sure that 4276 the pythia output will not be use anymore """ 4277 4278 if not self.run_name: 4279 return 4280 4281 self.results.save() 4282 4283 if not self.to_store: 4284 return 4285 4286 if 'event' in self.to_store: 4287 if os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')): 4288 if not os.path.exists(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')): 4289 self.update_status('gzipping output file: events.lhe', level='parton', error=True) 4290 misc.gzip(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4291 else: 4292 os.remove(pjoin(self.me_dir,'Events', self.run_name, 'events.lhe')) 4293 if os.path.exists(pjoin(self.me_dir,'Events','reweight.lhe')): 4294 os.remove(pjoin(self.me_dir,'Events', 'reweight.lhe')) 4295 4296 4297 tag = self.run_card['run_tag'] 4298 4299 self.to_store = []
4300 4301 4302 ############################################################################
4303 - def get_Gdir(self, Pdir=None):
4304 """get the list of Gdirectory if not yet saved.""" 4305 4306 if hasattr(self, "Gdirs"): 4307 if self.me_dir in self.Gdirs: 4308 if Pdir is None: 4309 return sum(self.Gdirs.values()) 4310 else: 4311 return self.Gdirs[Pdir] 4312 4313 Pdirs = self.get_Pdir() 4314 Gdirs = {self.me_dir:[]} 4315 for P in Pdirs: 4316 Gdirs[P] = [pjoin(P,G) for G in os.listdir(P) if G.startswith('G') and 4317 os.path.isdir(pjoin(P,G))] 4318 4319 self.Gdirs = Gdirs 4320 return self.getGdir(Pdir)
4321 4322
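get_Gdir caches its directory scan on the instance; the same lazy-caching pattern in isolation (class and attribute names hypothetical):

import os

class GdirCache(object):
    """Scan each P* directory once and remember the G* subdirectories."""
    def get_Gdirs(self, p_dirs):
        if not hasattr(self, '_gdirs'):
            self._gdirs = {}
            for p in p_dirs:
                self._gdirs[p] = [os.path.join(p, g) for g in os.listdir(p)
                                  if g.startswith('G')
                                  and os.path.isdir(os.path.join(p, g))]
        return sum(self._gdirs.values(), [])  # note the [] start value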
4323 - def get_init_dict(self, evt_file):
4324 """reads the info in the init block and returns them in a dictionary""" 4325 ev_file = open(evt_file) 4326 init = "" 4327 found = False 4328 while True: 4329 line = ev_file.readline() 4330 if "<init>" in line: 4331 found = True 4332 elif found and not line.startswith('#'): 4333 init += line 4334 if "</init>" in line or "<event>" in line: 4335 break 4336 ev_file.close() 4337 4338 # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2), 4339 # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP 4340 # these are not included (so far) in the init_dict 4341 # XSECUP(1),XERRUP(1),XMAXUP(1),LPRUP(1) 4342 4343 init_dict = {} 4344 init_dict['idbmup1'] = int(init.split()[0]) 4345 init_dict['idbmup2'] = int(init.split()[1]) 4346 init_dict['ebmup1'] = float(init.split()[2]) 4347 init_dict['ebmup2'] = float(init.split()[3]) 4348 init_dict['pdfgup1'] = int(init.split()[4]) 4349 init_dict['pdfgup2'] = int(init.split()[5]) 4350 init_dict['pdfsup1'] = int(init.split()[6]) 4351 init_dict['pdfsup2'] = int(init.split()[7]) 4352 init_dict['idwtup'] = int(init.split()[8]) 4353 init_dict['nprup'] = int(init.split()[9]) 4354 4355 return init_dict
4356 4357
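The <init> block of an LHE file is just whitespace-separated numbers, so the parsing above reduces to split() and indexing; a self-contained check on a toy block (values illustrative):

init = "2212 2212 6500.0 6500.0 0 0 247000 247000 -4 1"
tokens = init.split()
init_dict = {'idbmup1': int(tokens[0]), 'idbmup2': int(tokens[1]),
             'ebmup1': float(tokens[2]), 'ebmup2': float(tokens[3]),
             'pdfgup1': int(tokens[4]), 'pdfgup2': int(tokens[5]),
             'pdfsup1': int(tokens[6]), 'pdfsup2': int(tokens[7]),
             'idwtup': int(tokens[8]), 'nprup': int(tokens[9])}
print(init_dict['idwtup'], init_dict['nprup'])  # -> -4 1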
4358 - def banner_to_mcatnlo(self, evt_file):
4359 """creates the mcatnlo input script using the values set in the header of the event_file. 4360 It also checks if the lhapdf library is used""" 4361 4362 shower = self.banner.get('run_card', 'parton_shower').upper() 4363 pdlabel = self.banner.get('run_card', 'pdlabel') 4364 itry = 0 4365 nevents = self.shower_card['nevents'] 4366 init_dict = self.get_init_dict(evt_file) 4367 4368 if nevents < 0 or \ 4369 nevents > self.banner.get_detail('run_card', 'nevents'): 4370 nevents = self.banner.get_detail('run_card', 'nevents') 4371 4372 nevents = nevents / self.shower_card['nsplit_jobs'] 4373 4374 mcmass_dict = {} 4375 for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]: 4376 pdg = int(line.split()[0]) 4377 mass = float(line.split()[1]) 4378 mcmass_dict[pdg] = mass 4379 4380 content = 'EVPREFIX=%s\n' % pjoin(os.path.split(evt_file)[1]) 4381 content += 'NEVENTS=%d\n' % nevents 4382 content += 'NEVENTS_TOT=%d\n' % (self.banner.get_detail('run_card', 'nevents') /\ 4383 self.shower_card['nsplit_jobs']) 4384 content += 'MCMODE=%s\n' % shower 4385 content += 'PDLABEL=%s\n' % pdlabel 4386 4387 try: 4388 aewm1 = self.banner.get_detail('param_card', 'sminputs', 1).value 4389 raise KeyError 4390 except KeyError: 4391 mod = self.get_model() 4392 if not hasattr(mod, 'parameter_dict'): 4393 from models import model_reader 4394 mod = model_reader.ModelReader(mod) 4395 mod.set_parameters_and_couplings(self.banner.param_card) 4396 aewm1 = 0 4397 for key in ['aEWM1', 'AEWM1', 'aEWm1', 'aewm1']: 4398 if key in mod['parameter_dict']: 4399 aewm1 = mod['parameter_dict'][key] 4400 break 4401 elif 'mdl_%s' % key in mod['parameter_dict']: 4402 aewm1 = mod['parameter_dict']['mod_%s' % key] 4403 break 4404 else: 4405 for key in ['aEW', 'AEW', 'aEw', 'aew']: 4406 if key in mod['parameter_dict']: 4407 aewm1 = 1./mod['parameter_dict'][key] 4408 break 4409 elif 'mdl_%s' % key in mod['parameter_dict']: 4410 aewm1 = 1./mod['parameter_dict']['mod_%s' % key] 4411 break 4412 4413 content += 'ALPHAEW=%s\n' % aewm1 4414 #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid') 4415 #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']]) 4416 content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value 4417 content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value 4418 content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value 4419 content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value 4420 content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value 4421 content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value 4422 try: 4423 content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value 4424 content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value 4425 except KeyError: 4426 content += 'HGGMASS=120.\n' 4427 content += 'HGGWIDTH=0.00575308848\n' 4428 content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1') 4429 content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2') 4430 content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1') 4431 content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2') 4432 content += 'DMASS=%s\n' % mcmass_dict[1] 4433 content += 'UMASS=%s\n' % mcmass_dict[2] 4434 content += 'SMASS=%s\n' % mcmass_dict[3] 4435 content += 'CMASS=%s\n' % mcmass_dict[4] 4436 content += 'BMASS=%s\n' % mcmass_dict[5] 4437 try: 4438 
            content += 'EMASS=%s\n' % mcmass_dict[11]
            content += 'MUMASS=%s\n' % mcmass_dict[13]
            content += 'TAUMASS=%s\n' % mcmass_dict[15]
        except KeyError:
            # this is for backward compatibility
            mcmass_lines = [l for l in
                open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper())
                     ).read().split('\n') if l]
            new_mcmass_dict = {}
            for l in mcmass_lines:
                key, val = l.split('=')
                new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip()
            content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)']
            content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)']
            content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)']

        content += 'GMASS=%s\n' % mcmass_dict[21]
        content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower()
        # check if we need to link lhapdf
        if int(self.shower_card['pdfcode']) > 1 or \
           (pdlabel == 'lhapdf' and int(self.shower_card['pdfcode']) == 1) or \
           shower == 'HERWIGPP':
            # Use LHAPDF (should be correctly installed, because either events
            # were already generated with them, or the user explicitly gives an
            # LHAPDF number in the shower_card).
            self.link_lhapdf(pjoin(self.me_dir, 'lib'))
            lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                stdout=subprocess.PIPE).stdout.read().decode(errors='ignore').strip()
            content += 'LHAPDFPATH=%s\n' % lhapdfpath
            pdfsetsdir = self.get_lhapdf_pdfsetsdir()
            if self.shower_card['pdfcode'] == 0:
                lhaid_list = []
            elif self.shower_card['pdfcode'] == 1:
                lhaid_list = [max([init_dict['pdfsup1'], init_dict['pdfsup2']])]
                content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'], init_dict['pdfsup2']])
            else:
                lhaid_list = [abs(int(self.shower_card['pdfcode']))]
                content += 'PDFCODE=%s\n' % self.shower_card['pdfcode']
            self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
        elif int(self.shower_card['pdfcode']) in (1, -1):
            # Try to use LHAPDF because the user wants to use the same PDF in
            # the shower as was used for the event generation.  However, LHAPDF
            # was not used for the event generation, so it is non-trivial to
            # see if LHAPDF is available with the corresponding PDF set.  If it
            # is not found, give a warning and use the built-in PDF set instead.
            try:
                lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                    stdout=subprocess.PIPE).stdout.read().decode(errors='ignore').strip()
                self.link_lhapdf(pjoin(self.me_dir, 'lib'))
                content += 'LHAPDFPATH=%s\n' % lhapdfpath
                pdfsetsdir = self.get_lhapdf_pdfsetsdir()
                lhaid_list = [max([init_dict['pdfsup1'], init_dict['pdfsup2']])]
                content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'], init_dict['pdfsup2']])
                self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
            except Exception:
                logger.warning('Trying to shower events using the same PDF in the shower as used in the generation' +
                    ' of the events using LHAPDF. However, no valid LHAPDF installation found with the' +
                    ' needed PDF set. Will use the default internal PDF for the shower instead. To use the' +
                    ' same set as was used in the event generation install LHAPDF and set the path using' +
                    ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell')
                content += 'LHAPDFPATH=\n'
                content += 'PDFCODE=0\n'
        else:
            content += 'LHAPDFPATH=\n'
            content += 'PDFCODE=0\n'

        content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw')
        content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj')
        # add the pythia8/hwpp path(s)
        if self.options['pythia8_path']:
            content += 'PY8PATH=%s\n' % self.options['pythia8_path']
        if self.options['hwpp_path']:
            content += 'HWPPPATH=%s\n' % self.options['hwpp_path']
        if self.options['thepeg_path'] and self.options['thepeg_path'] != self.options['hwpp_path']:
            content += 'THEPEGPATH=%s\n' % self.options['thepeg_path']
        if self.options['hepmc_path'] and self.options['hepmc_path'] != self.options['hwpp_path']:
            content += 'HEPMCPATH=%s\n' % self.options['hepmc_path']

        output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w')
        output.write(content)
        output.close()
        return shower
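
    # Illustration only, not part of the class API: banner_to_mcatnlo() above
    # writes MCatNLO/banner.dat as plain KEY=value lines.  A minimal, hedged
    # sketch of reading such a file back (the helper name is hypothetical):
    #
    #   def read_mcatnlo_banner(path):
    #       """Parse a KEY=value file such as MCatNLO/banner.dat into a dict."""
    #       values = {}
    #       for line in open(path):
    #           if '=' in line:
    #               key, _, val = line.partition('=')
    #               values[key.strip()] = val.strip()
    #       return values
    #
    #   # e.g. read_mcatnlo_banner('MCatNLO/banner.dat')['MCMODE'] -> 'HERWIG6'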

    def run_reweight(self, only):
4527 """runs the reweight_xsec_events executables on each sub-event file generated 4528 to compute on the fly scale and/or PDF uncertainities""" 4529 logger.info(' Doing reweight') 4530 4531 nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted') 4532 # if only doing reweight, copy back the nevents_unweighted file 4533 if only: 4534 if os.path.exists(nev_unw + '.orig'): 4535 files.cp(nev_unw + '.orig', nev_unw) 4536 else: 4537 raise aMCatNLOError('Cannot find event file information') 4538 4539 #read the nevents_unweighted file to get the list of event files 4540 file = open(nev_unw) 4541 lines = file.read().split('\n') 4542 file.close() 4543 # make copy of the original nevent_unweighted file 4544 files.cp(nev_unw, nev_unw + '.orig') 4545 # loop over lines (all but the last one whith is empty) and check that the 4546 # number of events is not 0 4547 evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0'] 4548 evt_wghts = [float(line.split()[3]) for line in lines[:-1] if line.split()[1] != '0'] 4549 if self.run_card['event_norm'].lower()=='bias' and self.run_card['nevents'] != 0: 4550 evt_wghts[:]=[1./float(self.run_card['nevents']) for wgt in evt_wghts] 4551 #prepare the job_dict 4552 job_dict = {} 4553 exe = 'reweight_xsec_events.local' 4554 for i, evt_file in enumerate(evt_files): 4555 path, evt = os.path.split(evt_file) 4556 files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \ 4557 pjoin(self.me_dir, 'SubProcesses', path)) 4558 job_dict[path] = [exe] 4559 4560 self.run_all(job_dict, [[evt, '1']], 'Running reweight') 4561 4562 #check that the new event files are complete 4563 for evt_file in evt_files: 4564 last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \ 4565 pjoin(self.me_dir, 'SubProcesses', evt_file)], \ 4566 stdout = subprocess.PIPE).stdout.read().decode(errors='ignore').strip() 4567 if last_line != "</LesHouchesEvents>": 4568 raise aMCatNLOError('An error occurred during reweight. Check the' + \ 4569 '\'reweight_xsec_events.output\' files inside the ' + \ 4570 '\'SubProcesses/P*/G*/ directories for details') 4571 4572 #update file name in nevents_unweighted 4573 newfile = open(nev_unw, 'w') 4574 for line in lines: 4575 if line: 4576 newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n') 4577 newfile.close() 4578 return self.pdf_scale_from_reweighting(evt_files,evt_wghts)

    def pdf_scale_from_reweighting(self, evt_files, evt_wghts):
4581 """This function takes the files with the scale and pdf values 4582 written by the reweight_xsec_events.f code 4583 (P*/G*/pdf_scale_dependence.dat) and computes the overall 4584 scale and PDF uncertainty (the latter is computed using the 4585 Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000)) 4586 and returns it in percents. The expected format of the file 4587 is: n_scales xsec_scale_central xsec_scale1 ... n_pdf 4588 xsec_pdf0 xsec_pdf1 ....""" 4589 4590 scales=[] 4591 pdfs=[] 4592 for i,evt_file in enumerate(evt_files): 4593 path, evt=os.path.split(evt_file) 4594 with open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat'),'r') as f: 4595 data_line=f.readline() 4596 if "scale variations:" in data_line: 4597 for j,scale in enumerate(self.run_card['dynamical_scale_choice']): 4598 data_line = f.readline().split() 4599 scales_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()] 4600 try: 4601 scales[j] = [a + b for a, b in zip(scales[j], scales_this)] 4602 except IndexError: 4603 scales+=[scales_this] 4604 data_line=f.readline() 4605 if "pdf variations:" in data_line: 4606 for j,pdf in enumerate(self.run_card['lhaid']): 4607 data_line = f.readline().split() 4608 pdfs_this = [float(val)*evt_wghts[i] for val in f.readline().replace("D", "E").split()] 4609 try: 4610 pdfs[j] = [a + b for a, b in zip(pdfs[j], pdfs_this)] 4611 except IndexError: 4612 pdfs+=[pdfs_this] 4613 4614 # get the scale uncertainty in percent 4615 scale_info=[] 4616 for j,scale in enumerate(scales): 4617 s_cen=scale[0] 4618 if s_cen != 0.0 and self.run_card['reweight_scale'][j]: 4619 # max and min of the full envelope 4620 s_max=(max(scale)/s_cen-1)*100 4621 s_min=(1-min(scale)/s_cen)*100 4622 # ren and fac scale dependence added in quadrature 4623 ren_var=[] 4624 fac_var=[] 4625 for i in range(len(self.run_card['rw_rscale'])): 4626 ren_var.append(scale[i]-s_cen) # central fac scale 4627 for i in range(len(self.run_card['rw_fscale'])): 4628 fac_var.append(scale[i*len(self.run_card['rw_rscale'])]-s_cen) # central ren scale 4629 s_max_q=((s_cen+math.sqrt(math.pow(max(ren_var),2)+math.pow(max(fac_var),2)))/s_cen-1)*100 4630 s_min_q=(1-(s_cen-math.sqrt(math.pow(min(ren_var),2)+math.pow(min(fac_var),2)))/s_cen)*100 4631 s_size=len(scale) 4632 else: 4633 s_max=0.0 4634 s_min=0.0 4635 s_max_q=0.0 4636 s_min_q=0.0 4637 s_size=len(scale) 4638 scale_info.append({'cen':s_cen, 'min':s_min, 'max':s_max, \ 4639 'min_q':s_min_q, 'max_q':s_max_q, 'size':s_size, \ 4640 'label':self.run_card['dynamical_scale_choice'][j], \ 4641 'unc':self.run_card['reweight_scale'][j]}) 4642 4643 # check if we can use LHAPDF to compute the PDF uncertainty 4644 if any(self.run_card['reweight_pdf']): 4645 lhapdf = misc.import_python_lhapdf(self.options['lhapdf']) 4646 if lhapdf: 4647 use_lhapdf = True 4648 else: 4649 logger.warning("Failed to access python version of LHAPDF: "\ 4650 "cannot compute PDF uncertainty from the "\ 4651 "weights in the events. The weights in the LHE " \ 4652 "event files will still cover all PDF set members, "\ 4653 "but there will be no PDF uncertainty printed in the run summary. 
\n "\ 4654 "If the python interface to LHAPDF is available on your system, try "\ 4655 "adding its location to the PYTHONPATH environment variable and the"\ 4656 "LHAPDF library location to LD_LIBRARY_PATH (linux) or DYLD_LIBRARY_PATH (mac os x).") 4657 use_lhapdf=False 4658 4659 # turn off lhapdf printing any messages 4660 if any(self.run_card['reweight_pdf']) and use_lhapdf: lhapdf.setVerbosity(0) 4661 4662 pdf_info=[] 4663 for j,pdfset in enumerate(pdfs): 4664 p_cen=pdfset[0] 4665 if p_cen != 0.0 and self.run_card['reweight_pdf'][j]: 4666 if use_lhapdf: 4667 pdfsetname=self.run_card['lhapdfsetname'][j] 4668 try: 4669 p=lhapdf.getPDFSet(pdfsetname) 4670 ep=p.uncertainty(pdfset,-1) 4671 p_cen=ep.central 4672 p_min=abs(ep.errminus/p_cen)*100 4673 p_max=abs(ep.errplus/p_cen)*100 4674 p_type=p.errorType 4675 p_size=p.size 4676 p_conf=p.errorConfLevel 4677 except: 4678 logger.warning("Could not access LHAPDF to compute uncertainties for %s" % pdfsetname) 4679 p_min=0.0 4680 p_max=0.0 4681 p_type='unknown' 4682 p_conf='unknown' 4683 p_size=len(pdfset) 4684 else: 4685 p_min=0.0 4686 p_max=0.0 4687 p_type='unknown' 4688 p_conf='unknown' 4689 p_size=len(pdfset) 4690 pdfsetname=self.run_card['lhaid'][j] 4691 else: 4692 p_min=0.0 4693 p_max=0.0 4694 p_type='none' 4695 p_conf='unknown' 4696 p_size=len(pdfset) 4697 pdfsetname=self.run_card['lhaid'][j] 4698 pdf_info.append({'cen':p_cen, 'min':p_min, 'max':p_max, \ 4699 'unc':p_type, 'name':pdfsetname, 'size':p_size, \ 4700 'label':self.run_card['lhaid'][j], 'conf':p_conf}) 4701 4702 scale_pdf_info=[scale_info,pdf_info] 4703 return scale_pdf_info

    def wait_for_complete(self, run_type):
4707 """this function waits for jobs on cluster to complete their run.""" 4708 starttime = time.time() 4709 #logger.info(' Waiting for submitted jobs to complete') 4710 update_status = lambda i, r, f: self.update_status((i, r, f, run_type), 4711 starttime=starttime, level='parton', update_results=True) 4712 try: 4713 self.cluster.wait(self.me_dir, update_status) 4714 except: 4715 self.cluster.remove() 4716 raise

    def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs=False):
4719 """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" 4720 self.ijob = 0 4721 if run_type != 'shower': 4722 self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) 4723 for args in arg_list: 4724 for Pdir, jobs in job_dict.items(): 4725 for job in jobs: 4726 self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 4727 if self.cluster_mode == 2: 4728 time.sleep(1) # security to allow all jobs to be launched 4729 else: 4730 self.njobs = len(arg_list) 4731 for args in arg_list: 4732 [(cwd, exe)] = list(job_dict.items()) 4733 self.run_exe(exe, args, run_type, cwd) 4734 4735 self.wait_for_complete(run_type)

    def check_event_files(self, jobs):
4740 """check the integrity of the event files after splitting, and resubmit 4741 those which are not nicely terminated""" 4742 jobs_to_resubmit = [] 4743 for job in jobs: 4744 last_line = '' 4745 try: 4746 last_line = subprocess.Popen( 4747 ['tail', '-n1', pjoin(job['dirname'], 'events.lhe')], \ 4748 stdout = subprocess.PIPE).stdout.read().decode(errors='ignore').strip() 4749 except IOError: 4750 pass 4751 if last_line != "</LesHouchesEvents>": 4752 jobs_to_resubmit.append(job) 4753 self.njobs = 0 4754 if jobs_to_resubmit: 4755 run_type = 'Resubmitting broken jobs' 4756 logger.info('Some event files are broken, corresponding jobs will be resubmitted.') 4757 for job in jobs_to_resubmit: 4758 logger.debug('Resubmitting ' + job['dirname'] + '\n') 4759 self.run_all_jobs(jobs_to_resubmit,2,fixed_order=False)

    def find_jobs_to_split(self, pdir, job, arg):
4763 """looks into the nevents_unweighed_splitted file to check how many 4764 split jobs are needed for this (pdir, job). arg is F, B or V""" 4765 # find the number of the integration channel 4766 splittings = [] 4767 ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read() 4768 pattern = re.compile('for i in (\d+) ; do') 4769 match = re.search(pattern, ajob) 4770 channel = match.groups()[0] 4771 # then open the nevents_unweighted_splitted file and look for the 4772 # number of splittings to be done 4773 nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read() 4774 # This skips the channels with zero events, because they are 4775 # not of the form GFXX_YY, but simply GFXX 4776 pattern = re.compile(r"%s_(\d+)/events.lhe" % \ 4777 pjoin(pdir, 'G%s%s' % (arg,channel))) 4778 matches = re.findall(pattern, nevents_file) 4779 for m in matches: 4780 splittings.append(m) 4781 return splittings

    def run_exe(self, exe, args, run_type, cwd=None):
4785 """this basic function launch locally/on cluster exe with args as argument. 4786 """ 4787 # first test that exe exists: 4788 execpath = None 4789 if cwd and os.path.exists(pjoin(cwd, exe)): 4790 execpath = pjoin(cwd, exe) 4791 elif not cwd and os.path.exists(exe): 4792 execpath = exe 4793 else: 4794 raise aMCatNLOError('Cannot find executable %s in %s' \ 4795 % (exe, os.getcwd())) 4796 # check that the executable has exec permissions 4797 if self.cluster_mode == 1 and not os.access(execpath, os.X_OK): 4798 subprocess.call(['chmod', '+x', exe], cwd=cwd) 4799 # finally run it 4800 if self.cluster_mode == 0: 4801 #this is for the serial run 4802 misc.call(['./'+exe] + args, cwd=cwd) 4803 self.ijob += 1 4804 self.update_status((max([self.njobs - self.ijob - 1, 0]), 4805 min([1, self.njobs - self.ijob]), 4806 self.ijob, run_type), level='parton') 4807 4808 #this is for the cluster/multicore run 4809 elif 'reweight' in exe: 4810 # a reweight run 4811 # Find the correct PDF input file 4812 input_files, output_files = [], [] 4813 pdfinput = self.get_pdf_input_filename() 4814 if os.path.exists(pdfinput): 4815 input_files.append(pdfinput) 4816 input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events')) 4817 input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat')) 4818 input_files.append(args[0]) 4819 output_files.append('%s.rwgt' % os.path.basename(args[0])) 4820 output_files.append('reweight_xsec_events.output') 4821 output_files.append('scale_pdf_dependence.dat') 4822 4823 return self.cluster.submit2(exe, args, cwd=cwd, 4824 input_files=input_files, output_files=output_files, 4825 required_output=output_files) 4826 4827 elif 'ajob' in exe: 4828 # the 'standard' amcatnlo job 4829 # check if args is a list of string 4830 if type(args[0]) == str: 4831 input_files, output_files, required_output, args = self.getIO_ajob(exe,cwd,args) 4832 #submitting 4833 self.cluster.submit2(exe, args, cwd=cwd, 4834 input_files=input_files, output_files=output_files, 4835 required_output=required_output) 4836 4837 # # keep track of folders and arguments for splitted evt gen 4838 # subfolder=output_files[-1].split('/')[0] 4839 # if len(args) == 4 and '_' in subfolder: 4840 # self.split_folders[pjoin(cwd,subfolder)] = [exe] + args 4841 4842 elif 'shower' in exe: 4843 # a shower job 4844 # args are [shower, output(HEP or TOP), run_name] 4845 # cwd is the shower rundir, where the executable are found 4846 input_files, output_files = [], [] 4847 shower = args[0] 4848 # the input files 4849 if shower == 'PYTHIA8': 4850 input_files.append(pjoin(cwd, 'Pythia8.exe')) 4851 input_files.append(pjoin(cwd, 'Pythia8.cmd')) 4852 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 4853 input_files.append(pjoin(cwd, 'config.sh')) 4854 input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc')) 4855 else: 4856 input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc')) 4857 else: 4858 input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower)) 4859 input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower)) 4860 if shower == 'HERWIGPP': 4861 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++')): 4862 input_files.append(pjoin(cwd, 'Herwig++')) 4863 if os.path.exists(pjoin(self.options['hwpp_path'], 'bin', 'Herwig')): 4864 input_files.append(pjoin(cwd, 'Herwig')) 4865 input_files.append(pjoin(cwd, 'HepMCFortran.so')) 4866 if len(args) == 3: 4867 if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')): 4868 
input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')) 4869 elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')): 4870 input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')) 4871 else: 4872 raise aMCatNLOError('Event file not present in %s' % \ 4873 pjoin(self.me_dir, 'Events', self.run_name)) 4874 else: 4875 input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3])) 4876 # the output files 4877 if len(args) == 3: 4878 output_files.append('mcatnlo_run.log') 4879 else: 4880 output_files.append('mcatnlo_run_%s.log' % args[3]) 4881 if args[1] == 'HEP': 4882 if len(args) == 3: 4883 fname = 'events' 4884 else: 4885 fname = 'events_%s' % args[3] 4886 if shower in ['PYTHIA8', 'HERWIGPP']: 4887 output_files.append(fname + '.hepmc.gz') 4888 else: 4889 output_files.append(fname + '.hep.gz') 4890 elif args[1] == 'TOP' or args[1] == 'HWU': 4891 if len(args) == 3: 4892 fname = 'histfile' 4893 else: 4894 fname = 'histfile_%s' % args[3] 4895 output_files.append(fname + '.tar') 4896 else: 4897 raise aMCatNLOError('Not a valid output argument for shower job : %d' % args[1]) 4898 #submitting 4899 self.cluster.submit2(exe, args, cwd=cwd, 4900 input_files=input_files, output_files=output_files) 4901 4902 else: 4903 return self.cluster.submit(exe, args, cwd=cwd)

    def getIO_ajob(self, exe, cwd, args):
        # use the local disk if possible => need to understand what the
        # input/output files are

        output_files = []
        required_output = []
        input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'),
                       pjoin(cwd, 'symfact.dat'),
                       pjoin(cwd, 'iproc.dat'),
                       pjoin(cwd, 'initial_states_map.dat'),
                       pjoin(cwd, 'configs_and_props_info.dat'),
                       pjoin(cwd, 'leshouche_info.dat'),
                       pjoin(cwd, 'FKS_params.dat')]

        # For the GoSam interface, we must copy the SLHA card as well
        if os.path.exists(pjoin(self.me_dir, 'OLP_virtuals', 'gosam.rc')):
            input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat'))

        if os.path.exists(pjoin(cwd, 'nevents.tar')):
            input_files.append(pjoin(cwd, 'nevents.tar'))

        if os.path.exists(pjoin(self.me_dir, 'SubProcesses', 'OLE_order.olc')):
            input_files.append(pjoin(cwd, 'OLE_order.olc'))

        # File for the loop (might not be present if MadLoop is not used)
        if os.path.exists(pjoin(cwd, 'MadLoop5_resources.tar.gz')) and \
           cluster.need_transfer(self.options):
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
        elif os.path.exists(pjoin(cwd, 'MadLoop5_resources')) and \
             cluster.need_transfer(self.options):
            tf = tarfile.open(pjoin(cwd, 'MadLoop5_resources.tar.gz'), 'w:gz',
                              dereference=True)
            tf.add(pjoin(cwd, 'MadLoop5_resources'), arcname='MadLoop5_resources')
            tf.close()
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))

        if args[1] == 'born' or args[1] == 'all':
            # MADEVENT MINT FO MODE
            input_files.append(pjoin(cwd, 'madevent_mintFO'))
            if args[2] == '0':
                current = '%s_G%s' % (args[1], args[0])
            else:
                current = '%s_G%s_%s' % (args[1], args[0], args[2])
            if os.path.exists(pjoin(cwd, current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)

            required_output.append('%s/results.dat' % current)
            required_output.append('%s/res_%s.dat' % (current, args[3]))
            required_output.append('%s/log_MINT%s.txt' % (current, args[3]))
            required_output.append('%s/mint_grids' % current)
            required_output.append('%s/grid.MC_integer' % current)
            if args[3] != '0':
                required_output.append('%s/scale_pdf_dependence.dat' % current)

        elif args[1] == 'F' or args[1] == 'B':
            # MINTMC MODE
            input_files.append(pjoin(cwd, 'madevent_mintMC'))

            if args[2] == '0':
                current = 'G%s%s' % (args[1], args[0])
            else:
                current = 'G%s%s_%s' % (args[1], args[0], args[2])
            if os.path.exists(pjoin(cwd, current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)
            if args[2] > '0':
                # this is for the split event generation
                output_files.append('G%s%s_%s' % (args[1], args[0], args[2]))
                required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1], args[0], args[2], args[3]))
            else:
                required_output.append('%s/log_MINT%s.txt' % (current, args[3]))
                if args[3] in ['0', '1']:
                    required_output.append('%s/results.dat' % current)
                if args[3] == '1':
                    output_files.append('%s/results.dat' % current)

        else:
            raise aMCatNLOError('not valid arguments: %s' % (', '.join(args)))

        # Find the correct PDF input file
        pdfinput = self.get_pdf_input_filename()
        if os.path.exists(pdfinput):
            input_files.append(pdfinput)
        return input_files, output_files, required_output, args
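
    # Illustration only: the job-directory names assembled in getIO_ajob()
    # above for the two MINT modes, with args = [channel, mode, split, step]:
    #
    #   args = ['1', 'all', '0', '0']     # fixed-order, unsplit channel
    #   '%s_G%s' % (args[1], args[0])     # -> 'all_G1'
    #   args = ['2', 'F', '3', '2']       # event generation, third split job
    #   'G%s%s_%s' % (args[1], args[0], args[2])   # -> 'GF2_3'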

    def compile(self, mode, options):
4994 """compiles aMC@NLO to compute either NLO or NLO matched to shower, as 4995 specified in mode""" 4996 4997 os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) 4998 4999 self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, 5000 '%s_%s_banner.txt' % (self.run_name, self.run_tag))) 5001 5002 self.get_characteristics(pjoin(self.me_dir, 5003 'SubProcesses', 'proc_characteristics')) 5004 5005 #define a bunch of log files 5006 amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log') 5007 madloop_log = pjoin(self.me_dir, 'compile_madloop.log') 5008 reweight_log = pjoin(self.me_dir, 'compile_reweight.log') 5009 test_log = pjoin(self.me_dir, 'test.log') 5010 5011 # environmental variables to be included in make_opts 5012 self.make_opts_var = {} 5013 if self.proc_characteristics['has_loops'] and \ 5014 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5015 self.make_opts_var['madloop'] = 'true' 5016 5017 self.update_status('Compiling the code', level=None, update_results=True) 5018 5019 libdir = pjoin(self.me_dir, 'lib') 5020 sourcedir = pjoin(self.me_dir, 'Source') 5021 5022 #clean files 5023 files.rm([amcatnlo_log, madloop_log, reweight_log, test_log]) 5024 #define which executable/tests to compile 5025 if '+' in mode: 5026 mode = mode.split('+')[0] 5027 if mode in ['NLO', 'LO']: 5028 exe = 'madevent_mintFO' 5029 tests = ['test_ME'] 5030 self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts')) 5031 elif mode in ['aMC@NLO', 'aMC@LO','noshower','noshowerLO']: 5032 exe = 'madevent_mintMC' 5033 tests = ['test_ME', 'test_MC'] 5034 # write an analyse_opts with a dummy analysis so that compilation goes through 5035 with open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'),'w') as fsock: 5036 fsock.write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n') 5037 5038 #directory where to compile exe 5039 p_dirs = [d for d in \ 5040 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 5041 # create param_card.inc and run_card.inc 5042 self.do_treatcards('', amcatnlo=True, mode=mode) 5043 # if --nocompile option is specified, check here that all exes exists. 5044 # If they exists, return 5045 if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \ 5046 for p_dir in p_dirs]) and options['nocompile']: 5047 return 5048 5049 # rm links to lhapdflib/ PDFsets if exist 5050 if os.path.exists(pjoin(libdir, 'PDFsets')): 5051 files.rm(pjoin(libdir, 'PDFsets')) 5052 5053 # read the run_card to find if lhapdf is used or not 5054 if self.run_card['pdlabel'] == 'lhapdf' and \ 5055 (self.banner.get_detail('run_card', 'lpp1') != 0 or \ 5056 self.banner.get_detail('run_card', 'lpp2') != 0): 5057 5058 self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs]) 5059 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 5060 lhaid_list = self.run_card['lhaid'] 5061 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 5062 5063 else: 5064 if self.run_card['lpp1'] == 1 == self.run_card['lpp2']: 5065 logger.info('Using built-in libraries for PDFs') 5066 5067 self.make_opts_var['lhapdf'] = "" 5068 5069 # read the run_card to find if applgrid is used or not 5070 if self.run_card['iappl'] != 0: 5071 self.make_opts_var['applgrid'] = 'True' 5072 # check versions of applgrid and amcfast 5073 for code in ['applgrid','amcfast']: 5074 try: 5075 p = subprocess.Popen([self.options[code], '--version'], \ 5076 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 5077 except OSError: 5078 raise aMCatNLOError(('No valid %s installation found. 
\n' + \ 5079 'Please set the path to %s-config by using \n' + \ 5080 'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code,code,code,code)) 5081 else: 5082 output, _ = p.communicate() 5083 output.decode(errors='ignore') 5084 if code == 'applgrid' and output < '1.4.63': 5085 raise aMCatNLOError('Version of APPLgrid is too old. Use 1.4.69 or later.'\ 5086 +' You are using %s',output) 5087 if code == 'amcfast' and output < '1.1.1': 5088 raise aMCatNLOError('Version of aMCfast is too old. Use 1.1.1 or later.'\ 5089 +' You are using %s',output) 5090 5091 # set-up the Source/make_opts with the correct applgrid-config file 5092 appllibs=" APPLLIBS=$(shell %s --ldflags) $(shell %s --ldcflags) \n" \ 5093 % (self.options['amcfast'],self.options['applgrid']) 5094 text=open(pjoin(self.me_dir,'Source','make_opts'),'r').readlines() 5095 text_out=[] 5096 for line in text: 5097 if line.strip().startswith('APPLLIBS=$'): 5098 line=appllibs 5099 text_out.append(line) 5100 with open(pjoin(self.me_dir,'Source','make_opts'),'w') as fsock: 5101 fsock.writelines(text_out) 5102 else: 5103 self.make_opts_var['applgrid'] = "" 5104 5105 if 'fastjet' in list(self.options.keys()) and self.options['fastjet']: 5106 self.make_opts_var['fastjet_config'] = self.options['fastjet'] 5107 5108 # add the make_opts_var to make_opts 5109 self.update_make_opts() 5110 5111 # make Source 5112 self.update_status('Compiling source...', level=None) 5113 misc.compile(['clean4pdf'], cwd = sourcedir) 5114 misc.compile(cwd = sourcedir) 5115 if os.path.exists(pjoin(libdir, 'libdhelas.a')) \ 5116 and os.path.exists(pjoin(libdir, 'libgeneric.a')) \ 5117 and os.path.exists(pjoin(libdir, 'libmodel.a')) \ 5118 and os.path.exists(pjoin(libdir, 'libpdf.a')): 5119 logger.info(' ...done, continuing with P* directories') 5120 else: 5121 raise aMCatNLOError('Compilation failed') 5122 5123 # make StdHep (only necessary with MG option output_dependencies='internal') 5124 MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib') 5125 if not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \ 5126 not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))): 5127 if os.path.exists(pjoin(sourcedir,'StdHEP')): 5128 logger.info('Compiling StdHEP (can take a couple of minutes) ...') 5129 try: 5130 misc.compile(['StdHEP'], cwd = sourcedir) 5131 except Exception as error: 5132 logger.debug(str(error)) 5133 logger.warning("StdHep failed to compiled. 
This forbids to run NLO+PS with PY6 and Herwig6") 5134 logger.info("details on the compilation error are available if the code is run with --debug flag") 5135 else: 5136 logger.info(' ...done.') 5137 else: 5138 logger.warning('Could not compile StdHEP because its'+\ 5139 ' source directory could not be found in the SOURCE folder.\n'+\ 5140 " Check the MG5_aMC option 'output_dependencies'.\n"+\ 5141 " This will prevent the use of HERWIG6/Pythia6 shower.") 5142 5143 5144 # make CutTools (only necessary with MG option output_dependencies='internal') 5145 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 5146 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 5147 if os.path.exists(pjoin(sourcedir,'CutTools')): 5148 logger.info('Compiling CutTools (can take a couple of minutes) ...') 5149 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5150 logger.info(' ...done.') 5151 else: 5152 raise aMCatNLOError('Could not compile CutTools because its'+\ 5153 ' source directory could not be found in the SOURCE folder.\n'+\ 5154 " Check the MG5_aMC option 'output_dependencies.'") 5155 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 5156 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 5157 raise aMCatNLOError('CutTools compilation failed.') 5158 5159 # Verify compatibility between current compiler and the one which was 5160 # used when last compiling CutTools (if specified). 5161 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5162 libdir, 'libcts.a')))),'compiler_version.log') 5163 if os.path.exists(compiler_log_path): 5164 compiler_version_used = open(compiler_log_path,'r').read() 5165 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5166 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5167 if os.path.exists(pjoin(sourcedir,'CutTools')): 5168 logger.info('CutTools was compiled with a different fortran'+\ 5169 ' compiler. Re-compiling it now...') 5170 misc.compile(['cleanCT'], cwd = sourcedir) 5171 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1) 5172 logger.info(' ...done.') 5173 else: 5174 raise aMCatNLOError("CutTools installation in %s"\ 5175 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 5176 " seems to have been compiled with a different compiler than"+\ 5177 " the one specified in MG5_aMC. Please recompile CutTools.") 5178 5179 # make IREGI (only necessary with MG option output_dependencies='internal') 5180 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \ 5181 and os.path.exists(pjoin(sourcedir,'IREGI')): 5182 logger.info('Compiling IREGI (can take a couple of minutes) ...') 5183 misc.compile(['IREGI'], cwd = sourcedir) 5184 logger.info(' ...done.') 5185 5186 if os.path.exists(pjoin(libdir, 'libiregi.a')): 5187 # Verify compatibility between current compiler and the one which was 5188 # used when last compiling IREGI (if specified). 5189 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 5190 libdir, 'libiregi.a')))),'compiler_version.log') 5191 if os.path.exists(compiler_log_path): 5192 compiler_version_used = open(compiler_log_path,'r').read() 5193 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 5194 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 5195 if os.path.exists(pjoin(sourcedir,'IREGI')): 5196 logger.info('IREGI was compiled with a different fortran'+\ 5197 ' compiler. 
Re-compiling it now...') 5198 misc.compile(['cleanIR'], cwd = sourcedir) 5199 misc.compile(['IREGI'], cwd = sourcedir) 5200 logger.info(' ...done.') 5201 else: 5202 raise aMCatNLOError("IREGI installation in %s"\ 5203 %os.path.realpath(pjoin(libdir, 'libiregi.a'))+\ 5204 " seems to have been compiled with a different compiler than"+\ 5205 " the one specified in MG5_aMC. Please recompile IREGI.") 5206 5207 # check if MadLoop virtuals have been generated 5208 if self.proc_characteristics['has_loops'] and \ 5209 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 5210 if mode in ['NLO', 'aMC@NLO', 'noshower']: 5211 tests.append('check_poles') 5212 5213 # make and run tests (if asked for), gensym and make madevent in each dir 5214 self.update_status('Compiling directories...', level=None) 5215 5216 for test in tests: 5217 self.write_test_input(test) 5218 5219 try: 5220 import multiprocessing 5221 if not self.nb_core: 5222 try: 5223 self.nb_core = int(self.options['nb_core']) 5224 except TypeError: 5225 self.nb_core = multiprocessing.cpu_count() 5226 except ImportError: 5227 self.nb_core = 1 5228 5229 compile_options = copy.copy(self.options) 5230 compile_options['nb_core'] = self.nb_core 5231 compile_cluster = cluster.MultiCore(**compile_options) 5232 logger.info('Compiling on %d cores' % self.nb_core) 5233 5234 update_status = lambda i, r, f: self.donothing(i,r,f) 5235 for p_dir in p_dirs: 5236 compile_cluster.submit(prog = compile_dir, 5237 argument = [self.me_dir, p_dir, mode, options, 5238 tests, exe, self.options['run_mode']]) 5239 try: 5240 compile_cluster.wait(self.me_dir, update_status) 5241 except Exception as error: 5242 logger.warning("Fail to compile the Subprocesses") 5243 if __debug__: 5244 raise 5245 compile_cluster.remove() 5246 self.do_quit('') 5247 5248 logger.info('Checking test output:') 5249 for p_dir in p_dirs: 5250 logger.info(p_dir) 5251 for test in tests: 5252 logger.info(' Result for %s:' % test) 5253 5254 this_dir = pjoin(self.me_dir, 'SubProcesses', p_dir) 5255 #check that none of the tests failed 5256 self.check_tests(test, this_dir)

    def donothing(*args):
        pass

    def check_tests(self, test, dir):
5264 """just call the correct parser for the test log. 5265 Skip check_poles for LOonly folders""" 5266 if test in ['test_ME', 'test_MC']: 5267 return self.parse_test_mx_log(pjoin(dir, '%s.log' % test)) 5268 elif test == 'check_poles' and not os.path.exists(pjoin(dir,'parton_lum_0.f')): 5269 return self.parse_check_poles_log(pjoin(dir, '%s.log' % test))

    def parse_test_mx_log(self, log):
5273 """read and parse the test_ME/MC.log file""" 5274 content = open(log).read() 5275 if 'FAILED' in content: 5276 logger.info('Output of the failing test:\n'+content[:-1],'$MG:BOLD') 5277 raise aMCatNLOError('Some tests failed, run cannot continue.\n' + \ 5278 'Please check that widths of final state particles (e.g. top) have been' + \ 5279 ' set to 0 in the param_card.dat.') 5280 else: 5281 lines = [l for l in content.split('\n') if 'PASSED' in l] 5282 logger.info(' Passed.') 5283 logger.debug('\n'+'\n'.join(lines))

    def parse_check_poles_log(self, log):
5287 """reads and parse the check_poles.log file""" 5288 content = open(log).read() 5289 npass = 0 5290 nfail = 0 5291 for line in content.split('\n'): 5292 if 'PASSED' in line: 5293 npass +=1 5294 tolerance = float(line.split()[1]) 5295 if 'FAILED' in line: 5296 nfail +=1 5297 tolerance = float(line.split()[1]) 5298 5299 if nfail + npass == 0: 5300 logger.warning('0 points have been tried') 5301 return 5302 5303 if float(nfail)/float(nfail+npass) > 0.1: 5304 raise aMCatNLOError('Poles do not cancel, run cannot continue') 5305 else: 5306 logger.info(' Poles successfully cancel for %d points over %d (tolerance=%2.1e)' \ 5307 %(npass, nfail+npass, tolerance))

    def write_test_input(self, test):
5311 """write the input files to run test_ME/MC or check_poles""" 5312 if test in ['test_ME', 'test_MC']: 5313 content = "-2 -2\n" #generate randomly energy/angle 5314 content+= "100 100\n" #run 100 points for soft and collinear tests 5315 content+= "0\n" #all FKS configs 5316 content+= '\n'.join(["-1"] * 50) #random diagram (=first diagram) 5317 elif test == 'check_poles': 5318 content = '20 \n -1\n' 5319 5320 file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w') 5321 if test == 'test_MC': 5322 shower = self.run_card['parton_shower'] 5323 header = "1 \n %s\n 1 -0.1\n-1 -0.1\n" % shower 5324 file.write(header + content) 5325 elif test == 'test_ME': 5326 header = "2 \n" 5327 file.write(header + content) 5328 else: 5329 file.write(content) 5330 file.close()

    action_switcher = AskRunNLO
    ############################################################################
    def ask_run_configuration(self, mode, options, switch={}):
5336 """Ask the question when launching generate_events/multi_run""" 5337 5338 if 'parton' not in options: 5339 options['parton'] = False 5340 if 'reweightonly' not in options: 5341 options['reweightonly'] = False 5342 5343 if mode == 'auto': 5344 mode = None 5345 if not mode and (options['parton'] or options['reweightonly']): 5346 mode = 'noshower' 5347 5348 passing_cmd = [] 5349 for key,value in switch.keys(): 5350 passing_cmd.append('%s=%s' % (key,value)) 5351 5352 if 'do_reweight' in options and options['do_reweight']: 5353 passing_cmd.append('reweight=ON') 5354 if 'do_madspin' in options and options['do_madspin']: 5355 passing_cmd.append('madspin=ON') 5356 5357 force = self.force 5358 if mode == 'onlyshower': 5359 passing_cmd.append('onlyshower') 5360 force = True 5361 elif mode: 5362 passing_cmd.append(mode) 5363 5364 switch, cmd_switch = self.ask('', '0', [], ask_class = self.action_switcher, 5365 mode=mode, force=(force or mode), 5366 first_cmd=passing_cmd, 5367 return_instance=True) 5368 5369 if 'mode' in switch: 5370 mode = switch['mode'] 5371 5372 #assign the mode depending of the switch 5373 if not mode or mode == 'auto': 5374 if switch['order'] == 'LO': 5375 if switch['runshower']: 5376 mode = 'aMC@LO' 5377 elif switch['fixed_order'] == 'ON': 5378 mode = 'LO' 5379 else: 5380 mode = 'noshowerLO' 5381 elif switch['order'] == 'NLO': 5382 if switch['runshower']: 5383 mode = 'aMC@NLO' 5384 elif switch['fixed_order'] == 'ON': 5385 mode = 'NLO' 5386 else: 5387 mode = 'noshower' 5388 logger.info('will run in mode: %s' % mode) 5389 5390 if mode == 'noshower': 5391 if switch['shower'] == 'OFF': 5392 logger.warning("""You have chosen not to run a parton shower. 5393 NLO events without showering are NOT physical. 5394 Please, shower the LesHouches events before using them for physics analyses. 5395 You have to choose NOW which parton-shower you WILL use and specify it in the run_card.""") 5396 else: 5397 logger.info("""Your Parton-shower choice is not available for running. 5398 The events will be generated for the associated Parton-Shower. 5399 Remember that NLO events without showering are NOT physical.""", '$MG:BOLD') 5400 5401 5402 # specify the cards which are needed for this run. 
5403 cards = ['param_card.dat', 'run_card.dat'] 5404 ignore = [] 5405 if mode in ['LO', 'NLO']: 5406 options['parton'] = True 5407 ignore = ['shower_card.dat', 'madspin_card.dat'] 5408 cards.append('FO_analyse_card.dat') 5409 else: 5410 if switch['madspin'] != 'OFF': 5411 cards.append('madspin_card.dat') 5412 if switch['reweight'] != 'OFF': 5413 cards.append('reweight_card.dat') 5414 if switch['madanalysis'] in ['HADRON', 'ON']: 5415 cards.append('madanalysis5_hadron_card.dat') 5416 if 'aMC@' in mode: 5417 cards.append('shower_card.dat') 5418 if mode == 'onlyshower': 5419 cards = ['shower_card.dat'] 5420 if options['reweightonly']: 5421 cards = ['run_card.dat'] 5422 5423 self.keep_cards(cards, ignore) 5424 5425 if mode =='onlyshower': 5426 cards = ['shower_card.dat'] 5427 5428 5429 # automatically switch to keep_wgt option 5430 first_cmd = cmd_switch.get_cardcmd() 5431 5432 if not options['force'] and not self.force: 5433 self.ask_edit_cards(cards, plot=False, first_cmd=first_cmd) 5434 5435 self.banner = banner_mod.Banner() 5436 5437 # store the cards in the banner 5438 for card in cards: 5439 self.banner.add(pjoin(self.me_dir, 'Cards', card)) 5440 # and the run settings 5441 run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()]) 5442 self.banner.add_text('run_settings', run_settings) 5443 5444 if not mode =='onlyshower': 5445 self.run_card = self.banner.charge_card('run_card') 5446 self.run_tag = self.run_card['run_tag'] 5447 #this is if the user did not provide a name for the current run 5448 if not hasattr(self, 'run_name') or not self.run_name: 5449 self.run_name = self.find_available_run_name(self.me_dir) 5450 #add a tag in the run_name for distinguish run_type 5451 if self.run_name.startswith('run_'): 5452 if mode in ['LO','aMC@LO','noshowerLO']: 5453 self.run_name += '_LO' 5454 self.set_run_name(self.run_name, self.run_tag, 'parton') 5455 if self.run_card['ickkw'] == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']: 5456 raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""") 5457 elif self.run_card['ickkw'] == 3 and mode in ['aMC@NLO', 'noshower'] and self.run_card['parton_shower'].upper() != 'PYTHIA8': 5458 logger.warning("""You are running with FxFx merging enabled. To be able to merge 5459 samples of various multiplicities without double counting, you 5460 have to remove some events after showering 'by hand'. Please 5461 read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""") 5462 if self.run_card['parton_shower'].upper() == 'PYTHIA6Q': 5463 raise self.InvalidCmd("""FxFx merging does not work with Q-squared ordered showers.""") 5464 elif self.run_card['parton_shower'].upper() != 'HERWIG6' and self.run_card['parton_shower'].upper() != 'PYTHIA8' and self.run_card['parton_shower'].upper() != 'HERWIGPP': 5465 question="FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \ 5466 "Type \'n\' to stop or \'y\' to continue" 5467 answers = ['n','y'] 5468 answer = self.ask(question, 'n', answers) 5469 if answer == 'n': 5470 error = '''Stop opertation''' 5471 self.ask_run_configuration(mode, options) 5472 # raise aMCatNLOError(error) 5473 elif self.run_card['ickkw'] == -1 and mode in ['aMC@NLO', 'noshower']: 5474 # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs. 
5475 raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""") 5476 if 'aMC@' in mode or mode == 'onlyshower': 5477 self.shower_card = self.banner.charge_card('shower_card') 5478 5479 elif mode in ['LO', 'NLO']: 5480 analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat') 5481 self.analyse_card = self.banner.charge_card('FO_analyse_card') 5482 5483 return mode
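
    # Illustration only: the switch -> mode mapping applied in
    # ask_run_configuration() above, written as a standalone (hypothetical)
    # helper for clarity:
    #
    #   def mode_from_switch(switch):
    #       if switch['order'] == 'LO':
    #           if switch['runshower']:
    #               return 'aMC@LO'
    #           return 'LO' if switch['fixed_order'] == 'ON' else 'noshowerLO'
    #       elif switch['order'] == 'NLO':
    #           if switch['runshower']:
    #               return 'aMC@NLO'
    #           return 'NLO' if switch['fixed_order'] == 'ON' else 'noshower'
    #
    #   # mode_from_switch({'order': 'NLO', 'runshower': False,
    #   #                   'fixed_order': 'ON'})   # -> 'NLO'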


#===============================================================================
# aMCatNLOCmd
#===============================================================================
class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
5490 """The command line processor of MadGraph"""

_compile_usage = "compile [MODE] [options]\n" + \
                 "-- compiles aMC@NLO \n" + \
                 "   MODE can be either FO, for fixed-order computations, \n" + \
                 "   or MC for matching with parton-shower monte-carlos. \n" + \
                 "   (if omitted, it is set to MC)\n"
_compile_parser = misc.OptionParser(usage=_compile_usage)
_compile_parser.add_option("-f", "--force", default=False, action='store_true',
                           help="Use the cards present in the directory for the launch, without editing them")

_launch_usage = "launch [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                "   If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                "   computation of the total cross section and the filling of parton-level histograms \n" + \
                "   specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                "   If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                "   event file is generated which will be showered with the MonteCarlo specified \n" + \
                "   in the run_card.dat\n"

_launch_parser = misc.OptionParser(usage=_launch_usage)
_launch_parser.add_option("-f", "--force", default=False, action='store_true',
                          help="Use the cards present in the directory for the launch, without editing them")
_launch_parser.add_option("-c", "--cluster", default=False, action='store_true',
                          help="Submit the jobs on the cluster")
_launch_parser.add_option("-m", "--multicore", default=False, action='store_true',
                          help="Submit the jobs on multicore mode")
_launch_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                          help="Skip compilation. Ignored if no executable is found")
_launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                          help="Skip integration and event generation, just run reweight on the" +
                               " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_launch_parser.add_option("-p", "--parton", default=False, action='store_true',
                          help="Stop the run after the parton level file generation (you need " +
                               "to shower the file in order to get physical results)")
_launch_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                          help="Skip grid set up, just generate events starting from " +
                               "the last available results")
_launch_parser.add_option("-n", "--name", default=False, dest='run_name',
                          help="Provide a name to the run")
_launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                          help="For use with APPLgrid only: start from existing grids")
_launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true',
                          help="Run the reweight module (reweighting by different model parameters)")
_launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true',
                          help="Run the madspin package")


_generate_events_usage = "generate_events [MODE] [options]\n" + \
                         "-- execute aMC@NLO \n" + \
                         "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                         "   If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                         "   computation of the total cross section and the filling of parton-level histograms \n" + \
                         "   specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                         "   If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                         "   event file is generated which will be showered with the MonteCarlo specified \n" + \
                         "   in the run_card.dat\n"

_generate_events_parser = misc.OptionParser(usage=_generate_events_usage)
_generate_events_parser.add_option("-f", "--force", default=False, action='store_true',
                                   help="Use the cards present in the directory for generate_events, without editing them")
_generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true',
                                   help="Submit the jobs on the cluster")
_generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true',
                                   help="Submit the jobs on multicore mode")
_generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                                   help="Skip compilation. Ignored if no executable is found")
_generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                                   help="Skip integration and event generation, just run reweight on the" +
                                        " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_generate_events_parser.add_option("-p", "--parton", default=False, action='store_true',
                                   help="Stop the run after the parton level file generation (you need " +
                                        "to shower the file in order to get physical results)")
_generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                                   help="Skip grid set up, just generate events starting from " +
                                        "the last available results")
_generate_events_parser.add_option("-n", "--name", default=False, dest='run_name',
                                   help="Provide a name to the run")


_calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \
                         "-- calculate cross section up to ORDER.\n" + \
                         "   ORDER can be either LO or NLO (if omitted, it is set to NLO). \n"

_calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage)
_calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true',
                                   help="Use the cards present in the directory for the launch, without editing them")
_calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true',
                                   help="Submit the jobs on the cluster")
_calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true',
                                   help="Submit the jobs on multicore mode")
_calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                                   help="Skip compilation. Ignored if no executable is found")
_calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name',
                                   help="Provide a name to the run")
_calculate_xsect_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                                   help="For use with APPLgrid only: start from existing grids")
_calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                                   help="Skip grid set up, just generate events starting from " +
                                        "the last available results")

_shower_usage = 'shower run_name [options]\n' + \
                '-- do shower/hadronization on the parton-level file generated for run run_name\n' + \
                '   all the information (e.g. number of events, MonteCarlo, ...)\n' + \
                '   is directly read from the header of the event file\n'
_shower_parser = misc.OptionParser(usage=_shower_usage)
_shower_parser.add_option("-f", "--force", default=False, action='store_true',
                          help="Use the shower_card present in the directory for the launch, without editing")

if '__main__' == __name__:
    # Launch the interface without any check if one code is already running.
    # This can ONLY run a single command !!
    import sys
    if sys.version_info[1] < 7:
        sys.exit('MadGraph/MadEvent 5 works only with python 2.7 or python 3.7 and later.\n' +
                 'Please upgrade your version of python or specify a compatible version')

    import os
    import optparse
    # Get the directory of the script real path (bin)
    # and add it to the current PYTHONPATH
    root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    sys.path.insert(0, root_path)

    class MyOptParser(optparse.OptionParser):
        class InvalidOption(Exception): pass
        def error(self, msg=''):
            raise MyOptParser.InvalidOption(msg)
    # Write out a nice usage message if called with -h or --help
    usage = "usage: %prog [options] [FILE] "
    parser = MyOptParser(usage=usage)
    parser.add_option("-l", "--logging", default='INFO',
                      help="logging level (DEBUG|INFO|WARNING|ERROR|CRITICAL) [%default]")
    parser.add_option("", "--web", action="store_true", default=False, dest='web',
                      help='force the code to be in secure mode')
    parser.add_option("", "--debug", action="store_true", default=False, dest='debug',
                      help='force to launch debug mode')
    parser_error = ''
    done = False

    for i in range(len(sys.argv) - 1):
        try:
            (options, args) = parser.parse_args(sys.argv[1:len(sys.argv) - i])
            done = True
        except MyOptParser.InvalidOption as error:
            pass
        else:
            args += sys.argv[len(sys.argv) - i:]
    if not done:
        # raise the correct error:
        try:
            (options, args) = parser.parse_args()
        except MyOptParser.InvalidOption as error:
            print(error)
            sys.exit(2)

    if len(args) == 0:
        args = ''

    import subprocess
    import logging
    import logging.config
    # Set the logging level according to the logging level given by the options
    #logging.basicConfig(level=vars(logging)[options.logging])
    import internal.coloring_logging
    try:
        if __debug__ and options.logging == 'INFO':
            options.logging = 'DEBUG'
        if options.logging.isdigit():
            level = int(options.logging)
        else:
            level = getattr(logging, options.logging)
        print(os.path.join(root_path, 'internal', 'me5_logging.conf'))
        logging.config.fileConfig(os.path.join(root_path, 'internal', 'me5_logging.conf'))
        logging.root.setLevel(level)
        logging.getLogger('madgraph').setLevel(level)
    except:
        raise

    # Call the cmd interface main loop
    try:
        if args:
            # a single command is provided
            if '--web' in args:
                i = args.index('--web')
                args.pop(i)
                cmd_line = aMCatNLOCmd(me_dir=os.path.dirname(root_path), force_run=True)
            else:
                cmd_line = aMCatNLOCmdShell(me_dir=os.path.dirname(root_path), force_run=True)

            if not hasattr(cmd_line, 'do_%s' % args[0]):
                if parser_error:
                    print(parser_error)
                    print('and %s can not be interpreted as a valid command.' % args[0])
                else:
                    print('ERROR: %s is not a valid command. Please retry' % args[0])
            else:
                cmd_line.use_rawinput = False
                cmd_line.run_cmd(' '.join(args))
                cmd_line.run_cmd('quit')

    except KeyboardInterrupt:
        print('quit on KeyboardInterrupt')