Package madgraph :: Package interface :: Module amcatnlo_run_interface
[hide private]
[frames] | [no frames]

Source Code for Module madgraph.interface.amcatnlo_run_interface

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """A user friendly command line interface to access MadGraph5_aMC@NLO features. 
  16     Uses the cmd package for command interpretation and tab completion. 
  17  """ 
  18  from __future__ import division 
  19   
  20  import atexit 
  21  import glob 
  22  import logging 
  23  import math 
  24  import optparse 
  25  import os 
  26  import pydoc 
  27  import random 
  28  import re 
  29  import shutil 
  30  import subprocess 
  31  import sys 
  32  import traceback 
  33  import time 
  34  import signal 
  35  import tarfile 
  36  import copy 
  37  import datetime 
  38  import tarfile 
  39   
  40  try: 
  41      import readline 
  42      GNU_SPLITTING = ('GNU' in readline.__doc__) 
  43  except: 
  44      GNU_SPLITTING = True 
  45   
  46  root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0] 
  47  root_path = os.path.split(root_path)[0] 
  48  sys.path.insert(0, os.path.join(root_path,'bin')) 
  49   
  50  # usefull shortcut 
  51  pjoin = os.path.join 
  52  # Special logger for the Cmd Interface 
  53  logger = logging.getLogger('madgraph.stdout') # -> stdout 
  54  logger_stderr = logging.getLogger('madgraph.stderr') # ->stderr 
  55    
  56  try: 
  57      import madgraph 
  58  except ImportError:  
  59      aMCatNLO = True  
  60      import internal.extended_cmd as cmd 
  61      import internal.common_run_interface as common_run 
  62      import internal.banner as banner_mod 
  63      import internal.misc as misc     
  64      from internal import InvalidCmd, MadGraph5Error 
  65      import internal.files as files 
  66      import internal.cluster as cluster 
  67      import internal.save_load_object as save_load_object 
  68      import internal.gen_crossxhtml as gen_crossxhtml 
  69      import internal.sum_html as sum_html 
  70      import internal.shower_card as shower_card 
  71      import internal.FO_analyse_card as analyse_card  
  72      import internal.histograms as histograms 
  73  else: 
  74      # import from madgraph directory 
  75      aMCatNLO = False 
  76      import madgraph.interface.extended_cmd as cmd 
  77      import madgraph.interface.common_run_interface as common_run 
  78      import madgraph.iolibs.files as files 
  79      import madgraph.iolibs.save_load_object as save_load_object 
  80      import madgraph.madevent.gen_crossxhtml as gen_crossxhtml 
  81      import madgraph.madevent.sum_html as sum_html 
  82      import madgraph.various.banner as banner_mod 
  83      import madgraph.various.cluster as cluster 
  84      import madgraph.various.misc as misc 
  85      import madgraph.various.shower_card as shower_card 
  86      import madgraph.various.FO_analyse_card as analyse_card 
  87      import madgraph.various.histograms as histograms 
  88      from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error 
  89   
class aMCatNLOError(Exception):
    """Generic error raised by the aMC@NLO run interface.

    NOTE(review): when running from within madgraph this shadows the
    aMCatNLOError imported above; both are plain Exception subclasses.
    """
    pass
def compile_dir(*arguments):
    """Compile the directory p_dir.
    arguments is the tuple (me_dir, p_dir, mode, options, tests, exe, run_mode)
    this function needs not to be a class method in order to do
    the compilation on multicore.

    Returns 0 on success, or the MadGraph5Error message on failure.
    """

    # accept either a single packed tuple (multiprocessing map style)
    # or the seven arguments directly
    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments)==7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        raise aMCatNLOError, 'not correct number of argument'
    logger.info(' Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        #compile everything
        # compile and run tests
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            misc.compile([test], cwd = this_dir, job_specs = False)
            # NOTE(review): 'input' shadows the builtin, and the file handles
            # opened below are never explicitly closed (py2-era style)
            input = pjoin(me_dir, '%s_input.txt' % test)
            #this can be improved/better written to handle the output
            misc.call(['./%s' % (test)], cwd=this_dir,
                      stdin = open(input), stdout=open(pjoin(this_dir, '%s.log' % test), 'w'))

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs = False)
            open(pjoin(this_dir, 'gensym_input.txt'), 'w').write('%s\n' % run_mode)
            misc.call(['./gensym'],cwd= this_dir,
                      stdin=open(pjoin(this_dir, 'gensym_input.txt')),
                      stdout=open(pjoin(this_dir, 'gensym.log'), 'w'))
            #compile madevent_mintMC/mintFO
            misc.compile([exe], cwd=this_dir, job_specs = False)
        if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
            misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs = False)

        logger.info(' %s done.' % p_dir)
        return 0
    except MadGraph5Error, msg:
        # return (not raise) so a multicore pool can collect the failure
        return msg
138 139
def check_compiler(options, block=False):
    """check that the current fortran compiler is gfortran 4.6 or later.
    If block, stops the execution (raises aMCatNLOError), otherwise just
    print a warning.

    options: dict of run options; only 'fortran_compiler' is read here.
    """

    msg = 'In order to be able to run at NLO MadGraph5_aMC@NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected\n'+\
          'Note that You can still run all MadEvent run without any problem!'
    #first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        # Compare numeric version components: the previous string comparison
        # (''.join(version.split('.')) >= '46') wrongly rejected e.g.
        # gfortran 4.10 ('410' < '46') and any 10.x release ('10...' < '46').
        try:
            version_tuple = tuple(int(v) for v in curr_version.split('.')[:2])
        except ValueError:
            # unparsable version string: treat as too old and warn/stop
            version_tuple = (0, 0)
        if version_tuple < (4, 6):
            if block:
                raise aMCatNLOError(msg % (compiler + ' ' + curr_version))
            else:
                logger.warning(msg % (compiler + ' ' + curr_version))
167 168 169 170 #=============================================================================== 171 # CmdExtended 172 #===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    #suggested list of command
    next_possibility = {
        'start': [],
    }

    # name of the debug file written on crash
    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/madgraph5\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/madgraph5\n'


    keyboard_stop_msg = """stopping all operation
            in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError
    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation: build the history-file header,
        print the welcome banner, then delegate to CommonRunCmd."""

        # Tag allowing/forbiding question
        self.force = False

        # If possible, build an info line with current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        if info and info.has_key('version') and info.has_key('date'):
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                info_line = "#* VERSION %s %s %s *\n" % \
                            (info['version'],
                            (30 - len_version - len_date) * ' ',
                            info['date'])
        else:
            # no package info available: fall back on MGMEVersion.txt
            version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip()
            info_line = "#* VERSION %s %s *\n" % \
                            (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in time at writeout time!
        # NOTE(review): the banner alignment below was collapsed by text
        # extraction — restore the original spacing from version control.
        self.history_header = \
        '#************************************************************\n' + \
        '#* MadGraph5_aMC@NLO *\n' + \
        '#* *\n' + \
        "#* * * *\n" + \
        "#* * * * * *\n" + \
        "#* * * * * 5 * * * * *\n" + \
        "#* * * * * *\n" + \
        "#* * * *\n" + \
        "#* *\n" + \
        "#* *\n" + \
        info_line + \
        "#* *\n" + \
        "#* The MadGraph5_aMC@NLO Development Team - Find us at *\n" + \
        "#* https://server06.fynu.ucl.ac.be/projects/madgraph *\n" + \
        "#* and *\n" + \
        "#* http://amcatnlo.cern.ch *\n" + \
        '#* *\n' + \
        '#************************************************************\n' + \
        '#* *\n' + \
        '#* Command File for aMCatNLO *\n' + \
        '#* *\n' + \
        '#* run as ./bin/aMCatNLO.py filename *\n' + \
        '#* *\n' + \
        '#************************************************************\n'

        if info_line:
            # drop the leading '#' for the on-screen banner version
            info_line = info_line[1:]

        logger.info(\
        "************************************************************\n" + \
        "* *\n" + \
        "* W E L C O M E to M A D G R A P H 5 *\n" + \
        "* a M C @ N L O *\n" + \
        "* *\n" + \
        "* * * *\n" + \
        "* * * * * *\n" + \
        "* * * * * 5 * * * * *\n" + \
        "* * * * * *\n" + \
        "* * * *\n" + \
        "* *\n" + \
        info_line + \
        "* *\n" + \
        "* The MadGraph5_aMC@NLO Development Team - Find us at *\n" + \
        "* http://amcatnlo.cern.ch *\n" + \
        "* *\n" + \
        "* Type 'help' for in-line help. *\n" + \
        "* *\n" + \
        "************************************************************")
        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)
271 272
    def get_history_header(self):
        """return the history header"""
        # history_header is %-formatted with the current time info
        # (see misc.get_time_info for the substitution values)
        return self.history_header % misc.get_time_info()
276
    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interupt:
        remove queued cluster jobs and mark the run as stopped in the
        HTML status page."""
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except:
            # deliberate best-effort cleanup: never raise while shutting down
            pass
288
    def postcmd(self, stop, line):
        """ Update the status of the run for finishing interactive command """

        # relaxing the tag forbidding question
        self.force = False

        # non-interactive (scripted) input: nothing to report
        if not self.use_rawinput:
            return stop


        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit','quit','EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            # status update is cosmetic (HTML page); never kill the session
            misc.sprint('self.update_status fails', log=logger)
            pass
311
    def nice_user_error(self, error, line):
        """If a ME run is currently running add a link in the html output,
        then delegate to the standard cmd error handler."""

        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)
317
    def nice_config_error(self, error, line):
        """If a ME run is currently running add a link in the html output,
        then delegate to the standard cmd configuration-error handler."""

        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)
323
    def nice_error_handling(self, error, line):
        """If a ME run is currently running add a link in the html output,
        then delegate to the standard cmd error handler."""

        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)
329 330 331 332 #=============================================================================== 333 # HelpToCmd 334 #===============================================================================
class HelpToCmd(object):
    """ The Series of help routine for the aMCatNLOCmd"""
    def help_launch(self):
        """help for launch command"""
        _launch_parser.print_help()
341
    def help_banner_run(self):
        """help for the banner_run command"""
        logger.info("syntax: banner_run Path|RUN [--run_options]")
        logger.info("-- Reproduce a run following a given banner")
        logger.info(" One of the following argument is require:")
        logger.info(" Path should be the path of a valid banner.")
        logger.info(" RUN should be the name of a run of the current directory")
        self.run_options_help([('-f','answer all question by default'),
                               ('--name=X', 'Define the name associated with the new run')])
350 351
    def help_compile(self):
        """help for compile command"""
        _compile_parser.print_help()
355
    def help_generate_events(self):
        """help for generate_events command;
        same options as launch"""
        _generate_events_parser.print_help()
360 361
    def help_calculate_xsect(self):
        """help for calculate_xsect command"""
        _calculate_xsect_parser.print_help()
365
    def help_shower(self):
        """help for shower command"""
        _shower_parser.print_help()
369 370
    def help_open(self):
        """help for the open command"""
        logger.info("syntax: open FILE ")
        logger.info("-- open a file with the appropriate editor.")
        logger.info(' If FILE belongs to index.html, param_card.dat, run_card.dat')
        logger.info(' the path to the last created/used directory is used')
376
    def run_options_help(self, data):
        """Print the run options shared by several commands.
        data is a list of (option, description) pairs printed first as
        command-local options."""
        if data:
            logger.info('-- local options:')
            for name, info in data:
                logger.info(' %s : %s' % (name, info))

        logger.info("-- session options:")
        logger.info(" Note that those options will be kept for the current session")
        logger.info(" --cluster : Submit to the cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info(" --multicore : Run in multi-core configuration")
        logger.info(" --nb_core=X : limit the number of core to use to X.")
388 389 390 391 392 #=============================================================================== 393 # CheckValidForCmd 394 #===============================================================================
class CheckValidForCmd(object):
    """ The Series of check routine for the aMCatNLOCmd"""
    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory"""

        # '-f' forbids interactive questions for the rest of the command
        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            raise self.InvalidCmd, 'Invalid syntax, please specify the run name'
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd, 'Directory %s does not exists' % \
                            pjoin(os.getcwd(), 'Events', args[0])

        self.set_run_name(args[0], level= 'shower')
        # normalise args[0] in place to the absolute run-directory path
        args[0] = pjoin(self.me_dir, 'Events', args[0])
413
    def check_plot(self, args):
        """Check the argument for the plot command
        plot run_name modes"""


        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        # both MadAnalysis and td are needed for plotting: retry the
        # configuration file once before giving up
        if not madir or not td:
            logger.info('Retry to read configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No Madanalysis path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No path to td directory correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        # no argument: default to plotting 'all' for the current run
        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name currently define. Please add this information.')
            args.append('all')
            return


        # first argument may be a run name instead of a plot mode
        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name currently define. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown options %s' % arg)
461
    def check_pgs(self, arg):
        """Check the argument for pythia command
        syntax: pgs [NAME]
        Note that other option are already remove at this point

        Returns the handle of the background gunzip of the event file
        (or None) so the caller can wait on it.
        """

        # If not pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retry to read configuration file to find pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
            os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')):
            error_msg = 'No pythia-pgs path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        # extract an optional --tag=XXX argument
        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]


        # no run name given: fall back on the last run recorded in results
        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name,
                                        'events_*.hep.gz'))
            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s. '% (self.run_name, prev_tag))
            else:
                # gunzip the event file in the background; return the handle
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'),
                                                argument=['-c', input_file])
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock
519 520
521 - def check_delphes(self, arg):
522 """Check the argument for pythia command 523 syntax: delphes [NAME] 524 Note that other option are already remove at this point 525 """ 526 527 # If not pythia-pgs path 528 if not self.options['delphes_path']: 529 logger.info('Retry to read configuration file to find delphes path') 530 self.set_configuration() 531 532 if not self.options['delphes_path']: 533 error_msg = 'No delphes path correctly set.' 534 error_msg += 'Please use the set command to define the path and retry.' 535 error_msg += 'You can also define it in the configuration file.' 536 raise self.InvalidCmd(error_msg) 537 538 tag = [a for a in arg if a.startswith('--tag=')] 539 if tag: 540 arg.remove(tag[0]) 541 tag = tag[0][6:] 542 543 544 if len(arg) == 0 and not self.run_name: 545 if self.results.lastrun: 546 arg.insert(0, self.results.lastrun) 547 else: 548 raise self.InvalidCmd('No run name currently define. Please add this information.') 549 550 if len(arg) == 1 and self.run_name == arg[0]: 551 arg.pop(0) 552 553 if not len(arg) and \ 554 not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')): 555 self.help_pgs() 556 raise self.InvalidCmd('''No file file pythia_events.hep currently available 557 Please specify a valid run_name''') 558 559 if len(arg) == 1: 560 prev_tag = self.set_run_name(arg[0], tag, 'delphes') 561 filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 562 'events_*.hep.gz')) 563 if not filenames: 564 raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s '\ 565 % (self.run_name, prev_tag, 566 pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag))) 567 else: 568 input_file = filenames[0] 569 output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep') 570 lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'), 571 argument=['-c', input_file]) 572 else: 573 if tag: 574 self.run_card['run_tag'] = tag 575 self.set_run_name(self.run_name, tag, 'delphes')
576
577 - def check_calculate_xsect(self, args, options):
578 """check the validity of the line. args is ORDER, 579 ORDER being LO or NLO. If no mode is passed, NLO is used""" 580 # modify args in order to be DIR 581 # mode being either standalone or madevent 582 583 if options['force']: 584 self.force = True 585 586 if not args: 587 args.append('NLO') 588 return 589 590 if len(args) > 1: 591 self.help_calculate_xsect() 592 raise self.InvalidCmd, 'Invalid Syntax: Too many argument' 593 594 elif len(args) == 1: 595 if not args[0] in ['NLO', 'LO']: 596 raise self.InvalidCmd, '%s is not a valid mode, please use "LO" or "NLO"' % args[1] 597 mode = args[0] 598 599 # check for incompatible options/modes 600 if options['multicore'] and options['cluster']: 601 raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \ 602 ' are not compatible. Please choose one.'
603 604
605 - def check_generate_events(self, args, options):
606 """check the validity of the line. args is ORDER, 607 ORDER being LO or NLO. If no mode is passed, NLO is used""" 608 # modify args in order to be DIR 609 # mode being either standalone or madevent 610 611 if not args: 612 args.append('NLO') 613 return 614 615 if len(args) > 1: 616 self.help_generate_events() 617 raise self.InvalidCmd, 'Invalid Syntax: Too many argument' 618 619 elif len(args) == 1: 620 if not args[0] in ['NLO', 'LO']: 621 raise self.InvalidCmd, '%s is not a valid mode, please use "LO" or "NLO"' % args[1] 622 mode = args[0] 623 624 # check for incompatible options/modes 625 if options['multicore'] and options['cluster']: 626 raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \ 627 ' are not compatible. Please choose one.'
628
    def check_banner_run(self, args):
        """check the validity of line; args[0] is either the path of a
        banner file or a run name of the current directory."""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        # optional --tag=XXX arguments
        tag = [a[6:] for a in args if a.startswith('--tag=')]


        if os.path.exists(args[0]):
            # direct path to a banner file
            type ='banner'
            format = self.detect_card_type(args[0])
            if format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \
                                (args[0], tag))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associates to this name and tag.')
        else:
            # run name: locate the banner(s) of that run
            name = args[0]
            type = 'run'
            banners = glob.glob(pjoin(self.me_dir,'Events', args[0], '*_banner.txt'))
            if not banners:
                raise self.InvalidCmd('No banner associates to this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                #list the tag and propose those to the user
                tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \
                                (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            try:
                # NOTE(review): run_name is a list here, so its repr ends up
                # in the command string — confirm run_name[0] was not intended
                self.exec_cmd('remove %s all banner -f' % run_name)
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' % \
                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    self.exec_cmd('remove %s all banner -f' % run_name)
                except Exception:
                    pass
                self.set_run_name(name)
685 686 687
    def check_launch(self, args, options):
        """check the validity of the line. args is MODE
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True


        if not args:
            # default: let the interface decide the mode
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd, 'Invalid Syntax: Too many argument'

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO','auto']:
                raise self.InvalidCmd, '%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0]
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd, 'options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.'
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd, 'option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"'
717 718
    def check_compile(self, args, options):
        """check the validity of the line. args is MODE
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            # default mode
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd, 'Invalid Syntax: Too many argument'

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd, '%s is not a valid mode, please use "FO" or "MC"' % args[0]
            mode = args[0]
740 741 # check for incompatible options/modes 742 743 744 #=============================================================================== 745 # CompleteForCmd 746 #===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """ The Series of tab-completion routines for the aMCatNLOCmd"""
    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text,['LO','NLO','aMC@NLO','aMC@LO'],line)
        elif len(args) == 2 and line[begidx-1] == '@':
            # the user typed 'aMC@': complete the order part only
            return self.list_completion(text,['LO','NLO'],line)
        else:
            # otherwise propose the launch-parser command line options
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)
764
    def complete_banner_run(self, text, line, begidx, endidx):
        "Complete the banner run command"
        try:


            args = self.split_arg(line[0:begidx], error=False)

            # path completion while inside a directory argument
            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                                            os.path.join('.',*[a for a in args \
                                                    if a.endswith(os.path.sep)]))


            if len(args) > 1:
                # only options are possible
                tags = glob.glob(pjoin(self.me_dir, 'Events' , args[1],'%s_*_banner.txt' % args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1])+1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags +['--name=','-f'], line)

            # First argument
            possibilites = {}

            comp = self.path_completion(text, os.path.join('.',*[a for a in args \
                                                    if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilites['Path from ./'] = comp

            run_list = glob.glob(pjoin(self.me_dir, 'Events', '*','*_banner.txt'))
            run_list = [n.rsplit('/',2)[1] for n in run_list]
            possibilites['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilites)


        except Exception, error:
            # completion must never raise: report and swallow
            print error
808 809
    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text,['FO','MC'],line)
        else:
            # propose the compile-parser command line options
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)
822
    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text,['LO','NLO'],line)
        else:
            # propose the calculate_xsect-parser command line options
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)
835
836 - def complete_generate_events(self, text, line, begidx, endidx):
837 """auto-completion for generate_events command 838 call the compeltion for launch""" 839 self.complete_launch(text, line, begidx, endidx)
840 841
    def complete_shower(self, text, line, begidx, endidx):
        """auto-completion for the shower command (valid run names)"""
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return valid run_name
            data = glob.glob(pjoin(self.me_dir, 'Events', '*','events.lhe.gz'))
            data = [n.rsplit('/',2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            # NOTE(review): falls through (returns None) when run_name is
            # already set or more arguments are present — confirm intended
            if not self.run_name:
                return tmp1
851
    def complete_plot(self, text, line, begidx, endidx):
        """ Complete the plot command """

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            #return valid run_name
            data = glob.glob(pjoin(self.me_dir, 'Events', '*','events.lhe*'))
            data = [n.rsplit('/',2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        # subsequent arguments are plot modes
        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)
867
    def complete_pgs(self,text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            #return valid run_name
            data = glob.glob(pjoin(self.me_dir, 'Events', '*', 'events_*.hep.gz'))
            data = [n.rsplit('/',2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                # run already selected: also propose the run options
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                                '--tag=' ,'--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                                '--tag=','--no_default'], line)

    # delphes completion behaves exactly like pgs completion
    complete_delphes = complete_pgs
887
class aMCatNLOAlreadyRunning(InvalidCmd):
    """InvalidCmd subclass used by the aMC@NLO interface.

    NOTE(review): presumably signals that another run is already in
    progress in this directory — confirm at the raise sites.
    """
    pass
890 891 #=============================================================================== 892 # aMCatNLOCmd 893 #===============================================================================
class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
    """The command line processor of MadGraph"""

    # Truth values
    true = ['T','.true.',True,'true']
    # Options and formats available
    _run_options = ['--cluster','--multicore','--nb_core=','--nb_core=2', '-c', '-m']
    _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes']
    _calculate_decay_options = ['-f', '--accuracy=0.']
    _set_options = ['stdout_level','fortran_compiler','cpp_compiler','timeout']
    _plot_mode = ['all', 'parton','shower','pgs','delphes']
    _clean_mode = _plot_mode + ['channel', 'banner']
    _display_opts = ['run_name', 'options', 'variable']
    # survey options, dict from name to type, default value, and help text
    # Variables to store object information
    web = False
    cluster_mode = 0
    queue = 'madgraph'
    nb_core = None

    # suggested follow-up commands per command, used by the prompt
    next_possibility = {
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
                  'help generate_events'],
        'generate_events': ['generate_events [OPTIONS]', 'shower'],
        'launch': ['launch [OPTIONS]', 'shower'],
        'shower' : ['generate_events [OPTIONS]']
    }


    ############################################################################
924 - def __init__(self, me_dir = None, options = {}, *completekey, **stdin):
925 """ add information to the cmd """ 926 927 self.start_time = 0 928 CmdExtended.__init__(self, me_dir, options, *completekey, **stdin) 929 #common_run.CommonRunCmd.__init__(self, me_dir, options) 930 931 self.mode = 'aMCatNLO' 932 self.nb_core = 0 933 self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) 934 935 # load the current status of the directory 936 if os.path.exists(pjoin(self.me_dir,'HTML','results.pkl')): 937 self.results = save_load_object.load_from_file(pjoin(self.me_dir,'HTML','results.pkl')) 938 self.results.resetall(self.me_dir) 939 self.last_mode = self.results[self.results.lastrun][-1]['run_mode'] 940 else: 941 model = self.find_model_name() 942 process = self.process # define in find_model_name 943 self.results = gen_crossxhtml.AllResultsNLO(model, process, self.me_dir) 944 self.last_mode = '' 945 self.results.def_web_mode(self.web) 946 # check that compiler is gfortran 4.6 or later if virtuals have been exported 947 proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read() 948 949 if not '[real=QCD]' in proc_card: 950 check_compiler(self.options, block=True)
951 952 953 ############################################################################
954 - def do_shower(self, line):
955 """ run the shower on a given parton level file """ 956 argss = self.split_arg(line) 957 (options, argss) = _launch_parser.parse_args(argss) 958 # check argument validity and normalise argument 959 options = options.__dict__ 960 options['reweightonly'] = False 961 self.check_shower(argss, options) 962 evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe') 963 self.ask_run_configuration('onlyshower', options) 964 self.run_mcatnlo(evt_file) 965 966 self.update_status('', level='all', update_results=True)
967 968 ################################################################################
969 - def do_plot(self, line):
970 """Create the plot for a given run""" 971 972 # Since in principle, all plot are already done automaticaly 973 args = self.split_arg(line) 974 # Check argument's validity 975 self.check_plot(args) 976 logger.info('plot for run %s' % self.run_name) 977 978 if not self.force: 979 self.ask_edit_cards([], args, plot=True) 980 981 if any([arg in ['parton'] for arg in args]): 982 filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe') 983 if os.path.exists(filename+'.gz'): 984 misc.gunzip(filename) 985 if os.path.exists(filename): 986 logger.info('Found events.lhe file for run %s' % self.run_name) 987 shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe')) 988 self.create_plot('parton') 989 shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename) 990 misc.gzip(filename) 991 992 if any([arg in ['all','parton'] for arg in args]): 993 filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top') 994 if os.path.exists(filename): 995 logger.info('Found MADatNLO.top file for run %s' % \ 996 self.run_name) 997 output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') 998 plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton') 999 1000 if not os.path.isdir(plot_dir): 1001 os.makedirs(plot_dir) 1002 top_file = pjoin(plot_dir, 'plots.top') 1003 files.cp(filename, top_file) 1004 madir = self.options['madanalysis_path'] 1005 tag = self.run_card['run_tag'] 1006 td = self.options['td_path'] 1007 misc.call(['%s/plot' % self.dirbin, madir, td], 1008 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1009 stderr = subprocess.STDOUT, 1010 cwd=plot_dir) 1011 1012 misc.call(['%s/plot_page-pl' % self.dirbin, 1013 os.path.basename(plot_dir), 1014 'parton'], 1015 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1016 stderr = subprocess.STDOUT, 1017 cwd=pjoin(self.me_dir, 'HTML', self.run_name)) 1018 shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'), 1019 output) 1020 1021 
os.remove(pjoin(self.me_dir, 'Events', 'plots.top')) 1022 1023 if any([arg in ['all','shower'] for arg in args]): 1024 filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 1025 'events_*.lhe.gz')) 1026 if len(filenames) != 1: 1027 filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 1028 'events_*.hep.gz')) 1029 if len(filenames) != 1: 1030 logger.info('No shower level file found for run %s' % \ 1031 self.run_name) 1032 return 1033 filename = filenames[0] 1034 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1035 1036 if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')): 1037 if aMCatNLO and not self.options['mg5_path']: 1038 raise "plotting NLO HEP file needs MG5 utilities" 1039 1040 files.cp(pjoin(self.options['mg5_path'], 'Template','LO', 'Cards', 'pythia_card_default.dat'), 1041 pjoin(self.me_dir, 'Cards', 'pythia_card.dat')) 1042 self.run_hep2lhe() 1043 else: 1044 filename = filenames[0] 1045 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1046 1047 self.create_plot('shower') 1048 lhe_file_name = filename.replace('.hep.gz', '.lhe') 1049 shutil.move(pjoin(self.me_dir, 'Events','pythia_events.lhe'), 1050 lhe_file_name) 1051 misc.gzip(lhe_file_name) 1052 1053 if any([arg in ['all','pgs'] for arg in args]): 1054 filename = pjoin(self.me_dir, 'Events', self.run_name, 1055 '%s_pgs_events.lhco' % self.run_tag) 1056 if os.path.exists(filename+'.gz'): 1057 misc.gunzip(filename) 1058 if os.path.exists(filename): 1059 self.create_plot('PGS') 1060 misc.gzip(filename) 1061 else: 1062 logger.info('No valid files for pgs plot') 1063 1064 if any([arg in ['all','delphes'] for arg in args]): 1065 filename = pjoin(self.me_dir, 'Events', self.run_name, 1066 '%s_delphes_events.lhco' % self.run_tag) 1067 if os.path.exists(filename+'.gz'): 1068 misc.gunzip(filename) 1069 if os.path.exists(filename): 1070 #shutil.move(filename, pjoin(self.me_dir, 
'Events','delphes_events.lhco')) 1071 self.create_plot('Delphes') 1072 #shutil.move(pjoin(self.me_dir, 'Events','delphes_events.lhco'), filename) 1073 misc.gzip(filename) 1074 else: 1075 logger.info('No valid files for delphes plot')
1076 1077 1078 ############################################################################
1079 - def do_calculate_xsect(self, line):
1080 """Main commands: calculates LO/NLO cross-section, using madevent_mintFO 1081 this function wraps the do_launch one""" 1082 1083 self.start_time = time.time() 1084 argss = self.split_arg(line) 1085 # check argument validity and normalise argument 1086 (options, argss) = _calculate_xsect_parser.parse_args(argss) 1087 options = options.__dict__ 1088 options['reweightonly'] = False 1089 options['parton'] = True 1090 self.check_calculate_xsect(argss, options) 1091 self.do_launch(line, options, argss)
1092 1093 ############################################################################
1094 - def do_banner_run(self, line):
1095 """Make a run from the banner file""" 1096 1097 args = self.split_arg(line) 1098 #check the validity of the arguments 1099 self.check_banner_run(args) 1100 1101 # Remove previous cards 1102 for name in ['shower_card.dat', 'madspin_card.dat']: 1103 try: 1104 os.remove(pjoin(self.me_dir, 'Cards', name)) 1105 except Exception: 1106 pass 1107 1108 banner_mod.split_banner(args[0], self.me_dir, proc_card=False) 1109 1110 # Check if we want to modify the run 1111 if not self.force: 1112 ans = self.ask('Do you want to modify the Cards/Run Type?', 'n', ['y','n']) 1113 if ans == 'n': 1114 self.force = True 1115 1116 # Compute run mode: 1117 if self.force: 1118 mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin':False, 'shower':True} 1119 banner = banner_mod.Banner(args[0]) 1120 for line in banner['run_settings']: 1121 if '=' in line: 1122 mode, value = [t.strip() for t in line.split('=')] 1123 mode_status[mode] = value 1124 else: 1125 mode_status = {} 1126 1127 # Call Generate events 1128 self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''), 1129 switch=mode_status)
1130 1131 ############################################################################
1132 - def do_generate_events(self, line):
1133 """Main commands: generate events 1134 this function just wraps the do_launch one""" 1135 self.do_launch(line)
1136 1137 1138 ############################################################################
1139 - def do_treatcards(self, line, amcatnlo=True):
1140 """Advanced commands: this is for creating the correct run_card.inc from the nlo format""" 1141 #check if no 'Auto' are present in the file 1142 self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) 1143 return super(aMCatNLOCmd,self).do_treatcards(line, amcatnlo)
1144 1145 ############################################################################
1146 - def set_configuration(self, amcatnlo=True, **opt):
1147 """assign all configuration variable from file 1148 loop over the different config file if config_file not define """ 1149 return super(aMCatNLOCmd,self).set_configuration(amcatnlo=amcatnlo, **opt)
1150 1151 ############################################################################
1152 - def do_launch(self, line, options={}, argss=[], switch={}):
1153 """Main commands: launch the full chain 1154 options and args are relevant if the function is called from other 1155 functions, such as generate_events or calculate_xsect 1156 mode gives the list of switch needed for the computation (usefull for banner_run) 1157 """ 1158 1159 if not argss and not options: 1160 self.start_time = time.time() 1161 argss = self.split_arg(line) 1162 # check argument validity and normalise argument 1163 (options, argss) = _launch_parser.parse_args(argss) 1164 options = options.__dict__ 1165 self.check_launch(argss, options) 1166 1167 if 'run_name' in options.keys() and options['run_name']: 1168 self.run_name = options['run_name'] 1169 # if a dir with the given run_name already exists 1170 # remove it and warn the user 1171 if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)): 1172 logger.warning('Removing old run information in \n'+ 1173 pjoin(self.me_dir, 'Events', self.run_name)) 1174 files.rm(pjoin(self.me_dir, 'Events', self.run_name)) 1175 self.results.delete_run(self.run_name) 1176 else: 1177 self.run_name = '' # will be set later 1178 1179 if options['multicore']: 1180 self.cluster_mode = 2 1181 elif options['cluster']: 1182 self.cluster_mode = 1 1183 1184 if not switch: 1185 mode = argss[0] 1186 if mode in ['LO', 'NLO']: 1187 options['parton'] = True 1188 mode = self.ask_run_configuration(mode, options) 1189 else: 1190 mode = self.ask_run_configuration('auto', options, switch) 1191 1192 self.results.add_detail('run_mode', mode) 1193 1194 self.update_status('Starting run', level=None, update_results=True) 1195 1196 if self.options['automatic_html_opening']: 1197 misc.open_file(os.path.join(self.me_dir, 'crossx.html')) 1198 self.options['automatic_html_opening'] = False 1199 1200 if '+' in mode: 1201 mode = mode.split('+')[0] 1202 self.compile(mode, options) 1203 evt_file = self.run(mode, options) 1204 1205 if int(self.run_card['nevents']) == 0 and not mode in ['LO', 'NLO']: 1206 logger.info('No event file generated: 
grids have been set-up with a '\ 1207 'relative precision of %s' % self.run_card['req_acc']) 1208 return 1209 1210 if not mode in ['LO', 'NLO']: 1211 assert evt_file == pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'), '%s != %s' %(evt_file, pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')) 1212 self.exec_cmd('decay_events -from_cards', postcmd=False) 1213 evt_file = pjoin(self.me_dir,'Events', self.run_name, 'events.lhe') 1214 1215 if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \ 1216 and not options['parton']: 1217 self.run_mcatnlo(evt_file) 1218 elif mode == 'noshower': 1219 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. 1220 Please, shower the Les Houches events before using them for physics analyses.""") 1221 1222 1223 self.update_status('', level='all', update_results=True) 1224 if int(self.run_card['ickkw']) == 3 and mode in ['noshower', 'aMC@NLO']: 1225 logger.warning("""You are running with FxFx merging enabled. 1226 To be able to merge samples of various multiplicities without double counting, 1227 you have to remove some events after showering 'by hand'. 1228 Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")
1229 1230 1231 1232 ############################################################################
1233 - def do_compile(self, line):
1234 """Advanced commands: just compile the executables """ 1235 argss = self.split_arg(line) 1236 # check argument validity and normalise argument 1237 (options, argss) = _compile_parser.parse_args(argss) 1238 options = options.__dict__ 1239 options['reweightonly'] = False 1240 options['nocompile'] = False 1241 self.check_compile(argss, options) 1242 1243 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1244 self.ask_run_configuration(mode, options) 1245 self.compile(mode, options) 1246 1247 1248 self.update_status('', level='all', update_results=True)
1249
1250 - def print_results_in_shell(self, data):
1251 """Have a nice results prints in the shell, 1252 data should be of type: gen_crossxhtml.OneTagResults""" 1253 if not data: 1254 return 1255 logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) 1256 if self.ninitial == 1: 1257 logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) 1258 else: 1259 logger.info(" Cross-section : %.4g +- %.4g pb" % (data['cross'], data['error'])) 1260 logger.info(" Nb of events : %s" % data['nb_event'] ) 1261 #if data['cross_pythia'] and data['nb_event_pythia']: 1262 # if self.ninitial == 1: 1263 # logger.info(" Matched Width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) 1264 # else: 1265 # logger.info(" Matched Cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) 1266 # logger.info(" Nb of events after Matching : %s" % data['nb_event_pythia']) 1267 # if self.run_card['use_syst'] in self.true: 1268 # logger.info(" Be carefull that matched information are here NOT for the central value. Refer to SysCalc output for it") 1269 logger.info(" " )
1270
1271 - def print_results_in_file(self, data, path, mode='w'):
1272 """Have a nice results prints in the shell, 1273 data should be of type: gen_crossxhtml.OneTagResults""" 1274 if not data: 1275 return 1276 1277 fsock = open(path, mode) 1278 1279 fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ 1280 (data['run_name'],data['tag'], os.path.basename(self.me_dir))) 1281 1282 if self.ninitial == 1: 1283 fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) 1284 else: 1285 fsock.write(" Cross-section : %.4g +- %.4g pb\n" % (data['cross'], data['error'])) 1286 fsock.write(" Nb of events : %s\n" % data['nb_event'] ) 1287 #if data['cross_pythia'] and data['nb_event_pythia']: 1288 # if self.ninitial == 1: 1289 # fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) 1290 # else: 1291 # fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) 1292 # fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) 1293 fsock.write(" \n" )
1294 1295 1296 1297 1298
1299 - def update_random_seed(self):
1300 """Update random number seed with the value from the run_card. 1301 If this is 0, update the number according to a fresh one""" 1302 iseed = int(self.run_card['iseed']) 1303 if iseed == 0: 1304 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1305 iseed = int(randinit.read()[2:]) + 1 1306 randinit.close() 1307 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1308 randinit.write('r=%d' % iseed) 1309 randinit.close()
1310 1311
    def run(self, mode, options):
        """runs aMC@NLO. Returns the name of the event file created.

        mode: one of 'LO', 'NLO' (fixed order) or 'aMC@NLO', 'aMC@LO',
              'noshower', 'noshowerLO' (event generation)
        options: normalised option dictionary from the calling do_* command

        Fixed-order runs return None after writing results/plots; event
        generation returns the path produced by reweight_and_collect_events.
        """
        logger.info('Starting run')

        if not 'only_generation' in options.keys():
            options['only_generation'] = False

        # for iappl==2 (fill pre-existing APPLgrids) the old grids must be
        # kept, so force 'only_generation' behaviour
        if mode in ['LO', 'NLO'] and self.run_card['iappl'] == 2 and not options['only_generation']:
            options['only_generation'] = True
        self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics'))

        # set up the job backend: 1 = batch cluster, 2 = local multicore
        if self.cluster_mode == 1:
            cluster_name = self.options['cluster_type']
            self.cluster = cluster.from_name[cluster_name](**self.options)
        if self.cluster_mode == 2:
            try:
                import multiprocessing
                if not self.nb_core:
                    try:
                        self.nb_core = int(self.options['nb_core'])
                    except TypeError:
                        # nb_core option unset (None): use every available core
                        self.nb_core = multiprocessing.cpu_count()
                logger.info('Using %d cores' % self.nb_core)
            except ImportError:
                self.nb_core = 1
                logger.warning('Impossible to detect the number of cores => Using One.\n'+
                        'Use set nb_core X in order to set this number and be able to'+
                        'run in multicore.')

            self.cluster = cluster.MultiCore(**self.options)
        self.update_random_seed()
        #find and keep track of all the jobs
        folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'],
                    'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']}
        folder_names['noshower'] = folder_names['aMC@NLO']
        folder_names['noshowerLO'] = folder_names['aMC@LO']
        job_dict = {}
        p_dirs = [d for d in \
                open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d]
        #find jobs and clean previous results
        if not options['only_generation'] and not options['reweightonly']:
            self.update_status('Cleaning previous results', level=None)
        # NOTE: 'dir' and 'file' below shadow builtins; kept as-is.
        for dir in p_dirs:
            job_dict[dir] = [file for file in \
                                 os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                                 if file.startswith('ajob')]
            #find old folders to be removed
            for obj in folder_names[mode]:
                # obj[:-1] strips the trailing '*' of the glob-like pattern
                to_rm = [file for file in \
                             os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                             if file.startswith(obj[:-1]) and \
                            (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \
                             os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]
                #always clean dirs for the splitted event generation
                # do not include the born_G/ grid_G which should be kept when
                # doing a f.o. run keeping old grids
                to_always_rm = [file for file in \
                             os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                             if file.startswith(obj[:-1]) and
                             '_' in file and not '_G' in file and \
                            (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \
                             os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]

                if not options['only_generation'] and not options['reweightonly']:
                    to_always_rm.extend(to_rm)
                    if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')):
                        to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz'))
                files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm])

        mcatnlo_status = ['Setting up grid', 'Computing upper envelope', 'Generating events']

        if self.run_card['iappl'] == 2:
            self.applgrid_distribute(options,mode,p_dirs)

        if options['reweightonly']:
            event_norm=self.run_card['event_norm']
            nevents=int(self.run_card['nevents'])
            return self.reweight_and_collect_events(options, mode, nevents, event_norm)

        devnull = os.open(os.devnull, os.O_RDWR)
        if mode in ['LO', 'NLO']:
            # this is for fixed order runs
            mode_dict = {'NLO': 'all', 'LO': 'born'}
            logger.info('Doing fixed order %s' % mode)
            req_acc = self.run_card['req_acc_FO']
            # grid setup: either accuracy-driven or with a fixed number of
            # points/iterations from the run_card
            if not options['only_generation'] and req_acc != -1:
                self.write_madin_file(pjoin(self.me_dir, 'SubProcesses'), mode_dict[mode], 0, '-1', '6','0.10')
                self.update_status('Setting up grids', level=None)
                self.run_all(job_dict, [['0', mode_dict[mode], '0']], 'Setting up grids')
            elif not options['only_generation']:
                npoints = self.run_card['npoints_FO_grid']
                niters = self.run_card['niters_FO_grid']
                self.write_madin_file(pjoin(self.me_dir, 'SubProcesses'), mode_dict[mode], 0, npoints, niters)
                self.update_status('Setting up grids', level=None)
                self.run_all(job_dict, [['0', mode_dict[mode], '0']], 'Setting up grids')

            npoints = self.run_card['npoints_FO']
            niters = self.run_card['niters_FO']
            self.write_madin_file(pjoin(self.me_dir, 'SubProcesses'), mode_dict[mode], -1, npoints, niters)
            # collect the results and logs
            self.collect_log_files(folder_names[mode], 0)
            p = misc.Popen(['./combine_results_FO.sh', str(req_acc), '%s_G*' % mode_dict[mode]], \
                           stdout=subprocess.PIPE, \
                           cwd=pjoin(self.me_dir, 'SubProcesses'))
            output = p.communicate()

            self.cross_sect_dict = self.read_results(output, mode)
            self.print_summary(options, 0, mode)
            cross, error = sum_html.make_all_html_results(self, ['%s*' % mode_dict[mode]])
            self.results.add_detail('cross', cross)
            self.results.add_detail('error', error)

            self.update_status('Computing cross-section', level=None)
            self.run_all(job_dict, [['0', mode_dict[mode], '0', mode_dict[mode]]], 'Computing cross-section')

            # collect the results and logs
            self.collect_log_files(folder_names[mode], 1)
            p = misc.Popen(['./combine_results_FO.sh', '-1'] + folder_names[mode], \
                            stdout=subprocess.PIPE,
                            cwd=pjoin(self.me_dir, 'SubProcesses'))
            output = p.communicate()
            self.cross_sect_dict = self.read_results(output, mode)

            # collect the scale and PDF uncertainties
            scale_pdf_info={}
            if self.run_card['reweight_scale'] or self.run_card['reweight_PDF']:
                data_files=[]
                for dir in p_dirs:
                    for obj in folder_names[mode]:
                        for file in os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)):
                            if file.startswith(obj[:-1]) and \
                              (os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file,'scale_pdf_dependence.dat'))):
                                data_files.append(pjoin(dir,file,'scale_pdf_dependence.dat'))
                scale_pdf_info = self.pdf_scale_from_reweighting(data_files)
            # print the results:
            self.print_summary(options, 1, mode, scale_pdf_info)

            files.cp(pjoin(self.me_dir, 'SubProcesses', 'res.txt'),
                     pjoin(self.me_dir, 'Events', self.run_name))

            # combine the fixed-order analysis output in the format chosen
            # in the FO_analyse card
            if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer':
                misc.call(['./combine_plots_FO.sh'] + folder_names[mode], \
                                stdout=devnull,
                                cwd=pjoin(self.me_dir, 'SubProcesses'))
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                logger.info('The results of this run and the TopDrawer file with the plots' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
            elif self.analyse_card['fo_analysis_format'].lower() == 'hwu':
                self.combine_plots_HwU(folder_names[mode])
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.HwU'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.gnuplot'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                # gnuplot is optional: silently skip plotting if unavailable
                try:
                    misc.call(['gnuplot','MADatNLO.gnuplot'],\
                              stdout=os.open(os.devnull, os.O_RDWR),\
                              stderr=os.open(os.devnull, os.O_RDWR),\
                              cwd=pjoin(self.me_dir, 'Events', self.run_name))
                except Exception:
                    pass


                logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
            elif self.analyse_card['fo_analysis_format'].lower() == 'root':
                misc.call(['./combine_root.sh'] + folder_names[mode], \
                                stdout=devnull,
                                cwd=pjoin(self.me_dir, 'SubProcesses'))
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                logger.info('The results of this run and the ROOT file with the plots' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
            else:
                logger.info('The results of this run' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))

            cross, error = sum_html.make_all_html_results(self, folder_names[mode])
            self.results.add_detail('cross', cross)
            self.results.add_detail('error', error)
            if self.run_card['iappl'] != 0:
                self.applgrid_combine(cross,error)
            self.update_status('Run complete', level='parton', update_results=True)

            # fixed-order runs produce no event file
            return

        elif mode in ['aMC@NLO','aMC@LO','noshower','noshowerLO']:
            shower = self.run_card['parton_shower'].upper()
            nevents = int(self.run_card['nevents'])
            req_acc = self.run_card['req_acc']
            if nevents == 0 and float(req_acc) < 0 :
                raise aMCatNLOError('Cannot determine the required accuracy from the number '\
                                        'of events, because 0 events requested. Please set '\
                                        'the "req_acc" parameter in the run_card to a value between 0 and 1')
            elif float(req_acc) >1 or float(req_acc) == 0 :
                raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\
                                        'be between larger than 0 and smaller than 1, '\
                                        'or set to -1 for automatic determination. Current value is %s' % req_acc)
            # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card)
            elif float(req_acc) < 0 and nevents > 1000000 :
                req_acc='0.001'

            shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8']

            if not shower in shower_list:
                raise aMCatNLOError('%s is not a valid parton shower. Please use one of the following: %s' \
                    % (shower, ', '.join(shower_list)))

            # check that PYTHIA6PT is not used for processes with FSR
            if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']:
                raise aMCatNLOError('PYTHIA6PT does not support processes with FSR')

            if mode in ['aMC@NLO', 'aMC@LO']:
                logger.info('Doing %s matched to parton shower' % mode[4:])
            elif mode in ['noshower','noshowerLO']:
                logger.info('Generating events without running the shower.')
            elif options['only_generation']:
                logger.info('Generating events starting from existing results')


            # the three MINT steps: 0 grid setup, 1 upper envelope, 2 events
            for i, status in enumerate(mcatnlo_status):
                #check if need to split jobs
                # at least one channel must have enough events
                try:
                    nevents_unweighted = open(pjoin(self.me_dir,
                                                'SubProcesses',
                                                'nevents_unweighted')).read().split('\n')
                except IOError:
                    nevents_unweighted = []

                # event generation (step 2) may be split into chunks of
                # nevt_job events each
                split = i == 2 and \
                        int(self.run_card['nevt_job']) > 0

                if i == 2 or not options['only_generation']:
                    # if the number of events requested is zero,
                    # skip mint step 2
                    if i==2 and nevents==0:
                        self.print_summary(options, 2,mode)
                        return

                    if split:
                        # split the event generation
                        misc.call([pjoin(self.me_dir, 'bin', 'internal', 'split_jobs.py')] + \
                                   [str(self.run_card['nevt_job'])],
                                   stdout = devnull,
                                   cwd = pjoin(self.me_dir, 'SubProcesses'))
                        assert os.path.exists(pjoin(self.me_dir, 'SubProcesses',
                                                    'nevents_unweighted_splitted'))

                    self.update_status(status, level='parton')
                    if mode in ['aMC@NLO', 'noshower']:
                        self.write_madinMMC_file(pjoin(self.me_dir, 'SubProcesses'), 'all', i)
                        self.run_all(job_dict, [['2', 'F', '%d' % i]], status, split_jobs = split)

                    elif mode in ['aMC@LO', 'noshowerLO']:
                        self.write_madinMMC_file(
                                    pjoin(self.me_dir, 'SubProcesses'), 'born', i)
                        self.run_all(job_dict,
                                     [['2', 'B', '%d' % i]],
                                     '%s at LO' % status, split_jobs = split)

                if (i < 2 and not options['only_generation']) or i == 1 :
                    # collect the results and logs
                    self.collect_log_files(folder_names[mode], i)
                    p = misc.Popen(['./combine_results.sh'] + \
                                   ['%d' % i,'%d' % nevents, '%s' % req_acc ] + \
                                   folder_names[mode],
                                   stdout=subprocess.PIPE,
                                   cwd = pjoin(self.me_dir, 'SubProcesses'))
                    output = p.communicate()
                    files.cp(pjoin(self.me_dir, 'SubProcesses', 'res_%d.txt' % i), \
                             pjoin(self.me_dir, 'Events', self.run_name))

                    self.cross_sect_dict = self.read_results(output, mode)
                    self.print_summary(options, i, mode)

                    cross, error = sum_html.make_all_html_results(self, folder_names[mode])
                    self.results.add_detail('cross', cross)
                    self.results.add_detail('error', error)

                #check that split jobs are all correctly terminated
                if split:
                    self.check_event_files()

            if self.cluster_mode == 1:
                #if cluster run, wait 15 sec so that event files are transferred back
                # NOTE(review): comment says 15s but the sleep is 10s — confirm
                self.update_status(
                        'Waiting while files are transferred back from the cluster nodes',
                        level='parton')
                time.sleep(10)
            if split:
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted'), \
                         pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted'))


            event_norm=self.run_card['event_norm']
            self.collect_log_files(folder_names[mode], 2)
            return self.reweight_and_collect_events(options, mode, nevents, event_norm)
1610
1611 - def combine_plots_HwU(self,folder_names):
1612 """Sums all the plots in the HwU format.""" 1613 1614 logger.debug('Combining HwU plots.') 1615 1616 with open(pjoin(self.me_dir,'SubProcesses','dirs.txt')) as dirf: 1617 all_histo_paths = dirf.readlines() 1618 all_histo_paths = [pjoin(self.me_dir,'SubProcesses', 1619 path.rstrip(),"MADatNLO.HwU") for path in all_histo_paths] 1620 1621 histogram_list = histograms.HwUList(all_histo_paths[0]) 1622 1623 for histo_path in all_histo_paths[1:]: 1624 for i, histo in enumerate(histograms.HwUList(histo_path)): 1625 # First make sure the plots have the same weight labels and such 1626 histo.test_plot_compability(histogram_list[i]) 1627 # Now let the histogram module do the magic and add them. 1628 histogram_list[i] += histo 1629 1630 # And now output the finalized list 1631 histogram_list.output(pjoin(self.me_dir,'SubProcesses',"MADatNLO"), 1632 format = 'gnuplot')
1633
    def applgrid_combine(self,cross,error):
        """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories.

        cross, error: combined cross-section and its uncertainty, used to
        weight the grids in the iappl==2 case.
        """
        logger.debug('Combining APPLgrids \n')
        # the applgrid-combine tool lives next to applgrid-config
        applcomb=pjoin(self.options['applgrid'].rstrip('applgrid-config'),
                       'applgrid-combine')
        with open(pjoin(self.me_dir,'SubProcesses','dirs.txt')) as dirf:
            all_jobs=dirf.readlines()
        ngrids=len(all_jobs)
        # one grid file per observable; count them in the first job directory
        nobs  =len([name for name in os.listdir(pjoin(self.me_dir,'SubProcesses',
                    all_jobs[0].rstrip())) if name.endswith("_out.root")])
        for obs in range(0,nobs):
            gdir = [pjoin(self.me_dir,'SubProcesses',job.rstrip(),"grid_obs_"+
                          str(obs)+"_out.root") for job in all_jobs]
            # combine APPLgrids from different channels for observable 'obs'
            if self.run_card["iappl"] == 1:
                misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",self.run_name,
                        "aMCfast_obs_"+str(obs)+"_starting_grid.root"), '--optimise']+ gdir)
            elif self.run_card["iappl"] == 2:
                # inverse squared relative uncertainty used to scale/weight
                unc2_inv=pow(cross/error,2)
                # NOTE(review): unc2_inv_ngrids is computed but never used —
                # confirm whether it should be passed to applgrid-combine.
                unc2_inv_ngrids=pow(cross/error,2)*ngrids
                misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",
                        self.run_name,"aMCfast_obs_"+str(obs)+".root"),'-s',
                        str(unc2_inv),'--weight',str(unc2_inv)]+ gdir)
                for job in all_jobs:
                    os.remove(pjoin(self.me_dir,'SubProcesses',job.rstrip(),
                              "grid_obs_"+str(obs)+"_in.root"))
            else:
                raise aMCatNLOError('iappl parameter can only be 0, 1 or 2')
            # after combining, delete the original grids
            for ggdir in gdir:
                os.remove(ggdir)
1665 1666
    def applgrid_distribute(self,options,mode,p_dirs):
        """Distributes the APPLgrids ready to be filled by a second run of the code.

        options: launch options (may carry 'appl_start_grid')
        mode: 'LO' or 'NLO', selects born_G*/all_G* destination directories
        p_dirs: list of SubProcesses/P* directory names
        """
        # if no appl_start_grid argument given, guess it from the time stamps
        # of the starting grid files
        if not('appl_start_grid' in options.keys() and options['appl_start_grid']):
            gfiles=glob.glob(pjoin(self.me_dir, 'Events','*',
                                   'aMCfast_obs_0_starting_grid.root'))
            time_stamps={}
            for root_file in gfiles:
                time_stamps[root_file]=os.path.getmtime(root_file)
            # pick the run directory holding the most recent starting grid
            # (iterkeys: this module targets Python 2)
            options['appl_start_grid']= \
                 max(time_stamps.iterkeys(), key=(lambda key:
                                                  time_stamps[key])).split('/')[-2]
            logger.info('No --appl_start_grid option given. '+\
                        'Guessing that start grid from run "%s" should be used.' \
                        % options['appl_start_grid'])

        if 'appl_start_grid' in options.keys() and options['appl_start_grid']:
            self.appl_start_grid = options['appl_start_grid']
            start_grid_dir=pjoin(self.me_dir, 'Events', self.appl_start_grid)
            # check that this dir exists and at least one grid file is there
            if not os.path.exists(pjoin(start_grid_dir,
                                        'aMCfast_obs_0_starting_grid.root')):
                raise self.InvalidCmd('APPLgrid file not found: %s' % \
                        pjoin(start_grid_dir,'aMCfast_obs_0_starting_grid.root'))
            else:
                all_grids=[pjoin(start_grid_dir,name) for name in os.listdir( \
                        start_grid_dir) if name.endswith("_starting_grid.root")]
                nobs =len(all_grids)
                gstring=" ".join(all_grids)
        if not hasattr(self, 'appl_start_grid') or not self.appl_start_grid:
            raise self.InvalidCmd('No APPLgrid name currently defined.'+
                                  'Please provide this information.')
        # the grid directories are named born_G* at LO and all_G* at NLO
        if mode == 'NLO':
            gdir='all_G'
        elif mode == 'LO':
            gdir='born_G'
        #copy the grid to all relevant directories
        for pdir in p_dirs:
            g_dirs = [file for file in os.listdir(pjoin(self.me_dir,
                      "SubProcesses",pdir)) if file.startswith(gdir) and
                      os.path.isdir(pjoin(self.me_dir,"SubProcesses",pdir, file))]
            for g_dir in g_dirs:
                for grid in all_grids:
                    # grid names look like ..._obs_<N>_starting_grid.root;
                    # the third-to-last '_'-separated token is the observable
                    obs=grid.split('_')[-3]
                    files.cp(grid,pjoin(self.me_dir,"SubProcesses",pdir,g_dir,
                                        'grid_obs_'+obs+'_in.root'))
1714 1715
1716 - def collect_log_files(self, folders, istep):
1717 """collect the log files and put them in a single, html-friendly file 1718 inside the run_... directory""" 1719 step_list = ['Grid setting', 'Cross-section computation', 1720 'Event generation'] 1721 log_file = pjoin(self.me_dir, 'Events', self.run_name, 1722 'alllogs_%d.html' % istep) 1723 # this keeps track of which step has been computed for which channel 1724 channel_dict = {} 1725 log_files = [] 1726 for folder in folders: 1727 log_files += glob.glob(pjoin(self.me_dir, 'SubProcesses', 'P*', 1728 folder, 'log.txt')) 1729 1730 content = '' 1731 1732 outfile = open(log_file, 'w') 1733 1734 content += '<HTML><BODY>\n<font face="courier" size=2>' 1735 for log in log_files: 1736 channel_dict[os.path.dirname(log)] = [istep] 1737 # put an anchor 1738 content += '<a name=%s></a>\n' % (os.path.dirname(log).replace( 1739 pjoin(self.me_dir,'SubProcesses'),'')) 1740 # and put some nice header 1741 content += '<font color="red">\n' 1742 content += '<br>LOG file for integration channel %s, %s <br>' % \ 1743 (os.path.dirname(log).replace(pjoin(self.me_dir, 1744 'SubProcesses'), ''), 1745 step_list[istep]) 1746 content += '</font>\n' 1747 #then just flush the content of the small log inside the big log 1748 #the PRE tag prints everything verbatim 1749 content += '<PRE>\n' + open(log).read() + '\n</PRE>' 1750 content +='<br>\n' 1751 outfile.write(content) 1752 content='' 1753 1754 outfile.write('</font>\n</BODY></HTML>\n') 1755 outfile.close()
1756 1757
1758 - def read_results(self, output, mode):
1759 """extract results (cross-section, absolute cross-section and errors) 1760 from output, which should be formatted as 1761 Found 4 correctly terminated jobs 1762 random seed found in 'randinit' is 33 1763 Integrated abs(cross-section) 1764 7.94473937e+03 +- 2.9953e+01 (3.7702e-01%) 1765 Integrated cross-section 1766 6.63392298e+03 +- 3.7669e+01 (5.6782e-01%) 1767 for aMC@NLO/aMC@LO, and as 1768 1769 for NLO/LO 1770 The cross_sect_dict is returned""" 1771 res = {} 1772 if mode in ['aMC@LO', 'aMC@NLO', 'noshower', 'noshowerLO']: 1773 pat = re.compile(\ 1774 '''Found (\d+) correctly terminated jobs 1775 random seed found in 'randinit' is (\d+) 1776 Integrated abs\(cross-section\) 1777 \s*(\d+\.\d+e[+-]\d+) \+\- (\d+\.\d+e[+-]\d+) \((\d+\.\d+e[+-]\d+)\%\) 1778 Integrated cross-section 1779 \s*(\-?\d+\.\d+e[+-]\d+) \+\- (\d+\.\d+e[+-]\d+) \((\-?\d+\.\d+e[+-]\d+)\%\)''') 1780 else: 1781 pat = re.compile(\ 1782 '''Found (\d+) correctly terminated jobs 1783 \s*(\-?\d+\.\d+e[+-]\d+) \+\- (\d+\.\d+e[+-]\d+) \((\-?\d+\.\d+e[+-]\d+)\%\)''') 1784 pass 1785 1786 match = re.search(pat, output[0]) 1787 if not match or output[1]: 1788 logger.info('Return code of the event collection: '+str(output[1])) 1789 logger.info('Output of the event collection:\n'+output[0]) 1790 raise aMCatNLOError('An error occurred during the collection of results.\n' + 1791 'Please check the .log files inside the directories which failed.') 1792 # if int(match.groups()[0]) != self.njobs: 1793 # raise aMCatNLOError('Not all jobs terminated successfully') 1794 if mode in ['aMC@LO', 'aMC@NLO', 'noshower', 'noshowerLO']: 1795 return {'randinit' : int(match.groups()[1]), 1796 'xseca' : float(match.groups()[2]), 1797 'erra' : float(match.groups()[3]), 1798 'xsect' : float(match.groups()[5]), 1799 'errt' : float(match.groups()[6])} 1800 else: 1801 return {'xsect' : float(match.groups()[1]), 1802 'errt' : float(match.groups()[2])}
1803
1804 - def print_summary(self, options, step, mode, scale_pdf_info={}):
1805 """print a summary of the results contained in self.cross_sect_dict. 1806 step corresponds to the mintMC step, if =2 (i.e. after event generation) 1807 some additional infos are printed""" 1808 # find process name 1809 proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n') 1810 process = '' 1811 for line in proc_card_lines: 1812 if line.startswith('generate') or line.startswith('add process'): 1813 process = process+(line.replace('generate ', '')).replace('add process ','')+' ; ' 1814 lpp = {0:'l', 1:'p', -1:'pbar'} 1815 proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \ 1816 (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']], 1817 self.run_card['ebeam1'], self.run_card['ebeam2']) 1818 1819 # Gather some basic statistics for the run and extracted from the log files. 1820 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 1821 log_GV_files = glob.glob(pjoin(self.me_dir, \ 1822 'SubProcesses', 'P*','G*','log_MINT*.txt')) 1823 all_log_files = glob.glob(pjoin(self.me_dir, \ 1824 'SubProcesses', 'P*','G*','log*.txt')) 1825 elif mode == 'NLO': 1826 log_GV_files = glob.glob(pjoin(self.me_dir, \ 1827 'SubProcesses', 'P*','all_G*','log*.txt')) 1828 all_log_files = sum([glob.glob(pjoin(self.me_dir,'SubProcesses', 'P*', 1829 '%sG*'%foldName,'log*.txt')) for foldName in ['all_']],[]) 1830 elif mode == 'LO': 1831 log_GV_files = '' 1832 all_log_files = sum([glob.glob(pjoin(self.me_dir,'SubProcesses', 'P*', 1833 '%sG*'%foldName,'log*.txt')) for foldName in ['born_']],[]) 1834 else: 1835 raise aMCatNLOError, 'Running mode %s not supported.'%mode 1836 1837 1838 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 1839 status = ['Determining the number of unweighted events per channel', 1840 'Updating the number of unweighted events per channel', 1841 'Summary:'] 1842 if step != 2: 1843 message = status[step] + '\n\n Intermediate results:' + \ 1844 ('\n Random seed: %(randinit)d' + \ 1845 
'\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' + \ 1846 '\n Total abs(cross-section): %(xseca)8.3e +- %(erra)6.1e pb \n') \ 1847 % self.cross_sect_dict 1848 else: 1849 1850 message = '\n ' + status[step] + proc_info + \ 1851 '\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' % \ 1852 self.cross_sect_dict 1853 1854 if int(self.run_card['nevents'])>=10000 and self.run_card['reweight_scale']: 1855 message = message + \ 1856 ('\n Ren. and fac. scale uncertainty: +%0.1f%% -%0.1f%%') % \ 1857 (scale_pdf_info['scale_upp'], scale_pdf_info['scale_low']) 1858 if self.run_card['nevents']>=10000 and self.run_card['reweight_PDF']: 1859 message = message + \ 1860 ('\n PDF uncertainty: +%0.1f%% -%0.1f%%') % \ 1861 (scale_pdf_info['pdf_upp'], scale_pdf_info['pdf_low']) 1862 1863 neg_frac = (self.cross_sect_dict['xseca'] - self.cross_sect_dict['xsect'])/\ 1864 (2. * self.cross_sect_dict['xseca']) 1865 message = message + \ 1866 ('\n Number of events generated: %s' + \ 1867 '\n Parton shower to be used: %s' + \ 1868 '\n Fraction of negative weights: %4.2f' + \ 1869 '\n Total running time : %s') % \ 1870 (self.run_card['nevents'], 1871 self.run_card['parton_shower'].upper(), 1872 neg_frac, 1873 misc.format_timer(time.time()-self.start_time)) 1874 1875 elif mode in ['NLO', 'LO']: 1876 status = ['Results after grid setup (cross-section is non-physical):', 1877 'Final results and run summary:'] 1878 if step == 0: 1879 message = '\n ' + status[step] + \ 1880 '\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' % \ 1881 self.cross_sect_dict 1882 elif step == 1: 1883 message = '\n ' + status[step] + proc_info + \ 1884 '\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' % \ 1885 self.cross_sect_dict 1886 if self.run_card['reweight_scale']: 1887 if int(self.run_card['ickkw'])!=-1: 1888 message = message + \ 1889 ('\n Ren. and fac. 
scale uncertainty: +%0.1f%% -%0.1f%%') % \ 1890 (scale_pdf_info['scale_upp'], scale_pdf_info['scale_low']) 1891 else: 1892 message = message + \ 1893 ('\n Soft and hard scale dependence (added in quadrature): +%0.1f%% -%0.1f%%') % \ 1894 (scale_pdf_info['scale_upp_quad'], scale_pdf_info['scale_low_quad']) 1895 if self.run_card['reweight_PDF']: 1896 message = message + \ 1897 ('\n PDF uncertainty: +%0.1f%% -%0.1f%%') % \ 1898 (scale_pdf_info['pdf_upp'], scale_pdf_info['pdf_low']) 1899 1900 if (mode in ['NLO', 'LO'] and step!=1) or \ 1901 (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2): 1902 logger.info(message+'\n') 1903 return 1904 1905 # Some advanced general statistics are shown in the debug message at the 1906 # end of the run 1907 # Make sure it never stops a run 1908 try: 1909 message, debug_msg = \ 1910 self.compile_advanced_stats(log_GV_files, all_log_files, message) 1911 except Exception as e: 1912 debug_msg = 'Advanced statistics collection failed with error "%s"'%str(e) 1913 1914 logger.debug(debug_msg+'\n') 1915 logger.info(message+'\n') 1916 1917 # Now copy relevant information in the Events/Run_<xxx> directory 1918 evt_path = pjoin(self.me_dir, 'Events', self.run_name) 1919 open(pjoin(evt_path, 'summary.txt'),'w').write(message+'\n') 1920 open(pjoin(evt_path, '.full_summary.txt'), 1921 'w').write(message+'\n\n'+debug_msg+'\n') 1922 1923 self.archive_files(evt_path,mode)
1924
1925 - def archive_files(self, evt_path, mode):
1926 """ Copies in the Events/Run_<xxx> directory relevant files characterizing 1927 the run.""" 1928 1929 files_to_arxiv = [pjoin('Cards','param_card.dat'), 1930 pjoin('Cards','MadLoopParams.dat'), 1931 pjoin('Cards','FKS_params.dat'), 1932 pjoin('Cards','run_card.dat'), 1933 pjoin('Subprocesses','setscales.f'), 1934 pjoin('Subprocesses','cuts.f')] 1935 1936 if mode in ['NLO', 'LO']: 1937 files_to_arxiv.append(pjoin('Cards','FO_analyse_card.dat')) 1938 1939 if not os.path.exists(pjoin(evt_path,'RunMaterial')): 1940 os.mkdir(pjoin(evt_path,'RunMaterial')) 1941 1942 for path in files_to_arxiv: 1943 if os.path.isfile(pjoin(self.me_dir,path)): 1944 files.cp(pjoin(self.me_dir,path),pjoin(evt_path,'RunMaterial')) 1945 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],cwd=evt_path) 1946 shutil.rmtree(pjoin(evt_path,'RunMaterial'))
1947
1948 - def compile_advanced_stats(self,log_GV_files,all_log_files,message):
1949 """ This functions goes through the log files given in arguments and 1950 compiles statistics about MadLoop stability, virtual integration 1951 optimization and detection of potential error messages into a nice 1952 debug message to printed at the end of the run """ 1953 1954 # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]} 1955 # > Errors is a list of tuples with this format (log_file,nErrors) 1956 stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}} 1957 mint_search = re.compile(r"MINT(?P<ID>\d*).txt") 1958 1959 # ================================== 1960 # == MadLoop stability statistics == 1961 # ================================== 1962 1963 # Recuperate the fraction of unstable PS points found in the runs for 1964 # the virtuals 1965 UPS_stat_finder = re.compile( 1966 r"Satistics from MadLoop:.*"+\ 1967 r"Total points tried\:\s+(?P<ntot>\d+).*"+\ 1968 r"Stability unknown\:\s+(?P<nsun>\d+).*"+\ 1969 r"Stable PS point\:\s+(?P<nsps>\d+).*"+\ 1970 r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*"+\ 1971 r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*"+\ 1972 r"Double precision used\:\s+(?P<nddp>\d+).*"+\ 1973 r"Quadruple precision used\:\s+(?P<nqdp>\d+).*"+\ 1974 r"Initialization phase\-space points\:\s+(?P<nini>\d+).*"+\ 1975 r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*"+\ 1976 r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*",re.DOTALL) 1977 1978 unit_code_meaning = { 0 : 'Not identified (CTModeRun != -1)', 1979 1 : 'CutTools (double precision)', 1980 2 : 'PJFry++', 1981 3 : 'IREGI', 1982 4 : 'Golem95', 1983 9 : 'CutTools (quadruple precision)'} 1984 RetUnit_finder =re.compile( 1985 r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)") 1986 #Unit 1987 1988 for gv_log in log_GV_files: 1989 channel_name = '/'.join(gv_log.split('/')[-5:-1]) 1990 log=open(gv_log,'r').read() 1991 UPS_stats = re.search(UPS_stat_finder,log) 1992 for retunit_stats in re.finditer(RetUnit_finder, log): 1993 if 
channel_name not in stats['UPS'].keys(): 1994 stats['UPS'][channel_name] = [0]*10+[[0]*10] 1995 stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \ 1996 += int(retunit_stats.group('n_occurences')) 1997 if not UPS_stats is None: 1998 try: 1999 stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot')) 2000 stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun')) 2001 stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps')) 2002 stats['UPS'][channel_name][3] += int(UPS_stats.group('nups')) 2003 stats['UPS'][channel_name][4] += int(UPS_stats.group('neps')) 2004 stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp')) 2005 stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp')) 2006 stats['UPS'][channel_name][7] += int(UPS_stats.group('nini')) 2007 stats['UPS'][channel_name][8] += int(UPS_stats.group('n100')) 2008 stats['UPS'][channel_name][9] += int(UPS_stats.group('n10')) 2009 except KeyError: 2010 stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')), 2011 int(UPS_stats.group('nsun')),int(UPS_stats.group('nsps')), 2012 int(UPS_stats.group('nups')),int(UPS_stats.group('neps')), 2013 int(UPS_stats.group('nddp')),int(UPS_stats.group('nqdp')), 2014 int(UPS_stats.group('nini')),int(UPS_stats.group('n100')), 2015 int(UPS_stats.group('n10')),[0]*10] 2016 debug_msg = "" 2017 if len(stats['UPS'].keys())>0: 2018 nTotPS = sum([chan[0] for chan in stats['UPS'].values()],0) 2019 nTotsun = sum([chan[1] for chan in stats['UPS'].values()],0) 2020 nTotsps = sum([chan[2] for chan in stats['UPS'].values()],0) 2021 nTotups = sum([chan[3] for chan in stats['UPS'].values()],0) 2022 nToteps = sum([chan[4] for chan in stats['UPS'].values()],0) 2023 nTotddp = sum([chan[5] for chan in stats['UPS'].values()],0) 2024 nTotqdp = sum([chan[6] for chan in stats['UPS'].values()],0) 2025 nTotini = sum([chan[7] for chan in stats['UPS'].values()],0) 2026 nTot100 = sum([chan[8] for chan in stats['UPS'].values()],0) 2027 nTot10 = sum([chan[9] for chan in 
stats['UPS'].values()],0) 2028 nTot1 = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \ 2029 for i in range(10)] 2030 UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \ 2031 float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()] 2032 maxUPS = max(UPSfracs, key = lambda w: w[1]) 2033 2034 tmpStr = "" 2035 tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d'%nTotPS 2036 tmpStr += '\n Stability unknown: %d'%nTotsun 2037 tmpStr += '\n Stable PS point: %d'%nTotsps 2038 tmpStr += '\n Unstable PS point (and rescued): %d'%nTotups 2039 tmpStr += '\n Unstable PS point (and not rescued): %d'%nToteps 2040 tmpStr += '\n Only double precision used: %d'%nTotddp 2041 tmpStr += '\n Quadruple precision used: %d'%nTotqdp 2042 tmpStr += '\n Initialization phase-space points: %d'%nTotini 2043 tmpStr += '\n Reduction methods used:' 2044 red_methods = [(unit_code_meaning[i],nTot1[i]) for i in \ 2045 unit_code_meaning.keys() if nTot1[i]>0] 2046 for method, n in sorted(red_methods, key= lambda l: l[1], reverse=True): 2047 tmpStr += '\n > %s%s%s'%(method,' '*(33-len(method)),n) 2048 if nTot100 != 0: 2049 debug_msg += '\n Unknown return code (100): %d'%nTot100 2050 if nTot10 != 0: 2051 debug_msg += '\n Unknown return code (10): %d'%nTot10 2052 nUnknownUnit = sum(nTot1[u] for u in range(10) if u \ 2053 not in unit_code_meaning.keys()) 2054 if nUnknownUnit != 0: 2055 debug_msg += '\n Unknown return code (1): %d'\ 2056 %nUnknownUnit 2057 2058 if maxUPS[1]>0.001: 2059 message += tmpStr 2060 message += '\n Total number of unstable PS point detected:'+\ 2061 ' %d (%4.2f%%)'%(nToteps,float(100*nToteps)/nTotPS) 2062 message += '\n Maximum fraction of UPS points in '+\ 2063 'channel %s (%4.2f%%)'%maxUPS 2064 message += '\n Please report this to the authors while '+\ 2065 'providing the file' 2066 message += '\n %s'%str(pjoin(os.path.dirname(self.me_dir), 2067 maxUPS[0],'UPS.log')) 2068 else: 2069 debug_msg += tmpStr 2070 2071 2072 # 
==================================================== 2073 # == aMC@NLO virtual integration optimization stats == 2074 # ==================================================== 2075 2076 virt_tricks_finder = re.compile( 2077 r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)"+\ 2078 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n"+\ 2079 r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)"+\ 2080 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)") 2081 2082 virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*"+\ 2083 "-?(?P<v_frac>[\d\+-Eed\.]*)\s*-?(?P<v_average>[\d\+-Eed\.]*)") 2084 2085 channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)") 2086 2087 channel_contr_list = {} 2088 for gv_log in log_GV_files: 2089 logfile=open(gv_log,'r') 2090 log = logfile.read() 2091 logfile.close() 2092 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 2093 vf_stats = None 2094 for vf_stats in re.finditer(virt_frac_finder, log): 2095 pass 2096 if not vf_stats is None: 2097 v_frac = float(vf_stats.group('v_frac')) 2098 v_average = float(vf_stats.group('v_average')) 2099 try: 2100 if v_frac < stats['virt_stats']['v_frac_min'][0]: 2101 stats['virt_stats']['v_frac_min']=(v_frac,channel_name) 2102 if v_frac > stats['virt_stats']['v_frac_max'][0]: 2103 stats['virt_stats']['v_frac_max']=(v_frac,channel_name) 2104 stats['virt_stats']['v_frac_avg'][0] += v_frac 2105 stats['virt_stats']['v_frac_avg'][1] += 1 2106 except KeyError: 2107 stats['virt_stats']['v_frac_min']=[v_frac,channel_name] 2108 stats['virt_stats']['v_frac_max']=[v_frac,channel_name] 2109 stats['virt_stats']['v_frac_avg']=[v_frac,1] 2110 2111 2112 ccontr_stats = None 2113 for ccontr_stats in re.finditer(channel_contr_finder, log): 2114 pass 2115 if not ccontr_stats is None: 2116 contrib = float(ccontr_stats.group('v_contr')) 2117 try: 2118 if 
contrib>channel_contr_list[channel_name]: 2119 channel_contr_list[channel_name]=contrib 2120 except KeyError: 2121 channel_contr_list[channel_name]=contrib 2122 2123 2124 # Now build the list of relevant virt log files to look for the maxima 2125 # of virt fractions and such. 2126 average_contrib = 0.0 2127 for value in channel_contr_list.values(): 2128 average_contrib += value 2129 if len(channel_contr_list.values()) !=0: 2130 average_contrib = average_contrib / len(channel_contr_list.values()) 2131 2132 relevant_log_GV_files = [] 2133 excluded_channels = set([]) 2134 all_channels = set([]) 2135 for log_file in log_GV_files: 2136 channel_name = '/'.join(log_file.split('/')[-3:-1]) 2137 all_channels.add(channel_name) 2138 try: 2139 if channel_contr_list[channel_name] > (0.1*average_contrib): 2140 relevant_log_GV_files.append(log_file) 2141 else: 2142 excluded_channels.add(channel_name) 2143 except KeyError: 2144 relevant_log_GV_files.append(log_file) 2145 2146 # Now we want to use the latest occurence of accumulated result in the log file 2147 for gv_log in relevant_log_GV_files: 2148 logfile=open(gv_log,'r') 2149 log = logfile.read() 2150 logfile.close() 2151 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 2152 2153 vt_stats = None 2154 for vt_stats in re.finditer(virt_tricks_finder, log): 2155 pass 2156 if not vt_stats is None: 2157 vt_stats_group = vt_stats.groupdict() 2158 v_ratio = float(vt_stats.group('v_ratio')) 2159 v_ratio_err = float(vt_stats.group('v_ratio_err')) 2160 v_contr = float(vt_stats.group('v_abs_contr')) 2161 v_contr_err = float(vt_stats.group('v_abs_contr_err')) 2162 try: 2163 if v_ratio < stats['virt_stats']['v_ratio_min'][0]: 2164 stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name) 2165 if v_ratio > stats['virt_stats']['v_ratio_max'][0]: 2166 stats['virt_stats']['v_ratio_max']=(v_ratio,channel_name) 2167 if v_ratio < stats['virt_stats']['v_ratio_err_min'][0]: 2168 stats['virt_stats']['v_ratio_err_min']=(v_ratio_err,channel_name) 2169 
if v_ratio > stats['virt_stats']['v_ratio_err_max'][0]: 2170 stats['virt_stats']['v_ratio_err_max']=(v_ratio_err,channel_name) 2171 if v_contr < stats['virt_stats']['v_contr_min'][0]: 2172 stats['virt_stats']['v_contr_min']=(v_contr,channel_name) 2173 if v_contr > stats['virt_stats']['v_contr_max'][0]: 2174 stats['virt_stats']['v_contr_max']=(v_contr,channel_name) 2175 if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]: 2176 stats['virt_stats']['v_contr_err_min']=(v_contr_err,channel_name) 2177 if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]: 2178 stats['virt_stats']['v_contr_err_max']=(v_contr_err,channel_name) 2179 except KeyError: 2180 stats['virt_stats']['v_ratio_min']=[v_ratio,channel_name] 2181 stats['virt_stats']['v_ratio_max']=[v_ratio,channel_name] 2182 stats['virt_stats']['v_ratio_err_min']=[v_ratio_err,channel_name] 2183 stats['virt_stats']['v_ratio_err_max']=[v_ratio_err,channel_name] 2184 stats['virt_stats']['v_contr_min']=[v_contr,channel_name] 2185 stats['virt_stats']['v_contr_max']=[v_contr,channel_name] 2186 stats['virt_stats']['v_contr_err_min']=[v_contr_err,channel_name] 2187 stats['virt_stats']['v_contr_err_max']=[v_contr_err,channel_name] 2188 2189 vf_stats = None 2190 for vf_stats in re.finditer(virt_frac_finder, log): 2191 pass 2192 if not vf_stats is None: 2193 v_frac = float(vf_stats.group('v_frac')) 2194 v_average = float(vf_stats.group('v_average')) 2195 try: 2196 if v_average < stats['virt_stats']['v_average_min'][0]: 2197 stats['virt_stats']['v_average_min']=(v_average,channel_name) 2198 if v_average > stats['virt_stats']['v_average_max'][0]: 2199 stats['virt_stats']['v_average_max']=(v_average,channel_name) 2200 stats['virt_stats']['v_average_avg'][0] += v_average 2201 stats['virt_stats']['v_average_avg'][1] += 1 2202 except KeyError: 2203 stats['virt_stats']['v_average_min']=[v_average,channel_name] 2204 stats['virt_stats']['v_average_max']=[v_average,channel_name] 2205 
stats['virt_stats']['v_average_avg']=[v_average,1] 2206 2207 try: 2208 debug_msg += '\n\n Statistics on virtual integration optimization : ' 2209 2210 debug_msg += '\n Maximum virt fraction computed %.3f (%s)'\ 2211 %tuple(stats['virt_stats']['v_frac_max']) 2212 debug_msg += '\n Minimum virt fraction computed %.3f (%s)'\ 2213 %tuple(stats['virt_stats']['v_frac_min']) 2214 debug_msg += '\n Average virt fraction computed %.3f'\ 2215 %float(stats['virt_stats']['v_frac_avg'][0]/float(stats['virt_stats']['v_frac_avg'][1])) 2216 debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)'%\ 2217 (len(excluded_channels),len(all_channels)) 2218 debug_msg += '\n Maximum virt ratio used %.2f (%s)'\ 2219 %tuple(stats['virt_stats']['v_average_max']) 2220 debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)'\ 2221 %tuple(stats['virt_stats']['v_ratio_max']) 2222 tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)'\ 2223 %tuple(stats['virt_stats']['v_ratio_err_max']) 2224 debug_msg += tmpStr 2225 # After all it was decided that it is better not to alarm the user unecessarily 2226 # with such printout of the statistics. 2227 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0 or \ 2228 # stats['virt_stats']['v_ratio_err_max'][0]>100.0: 2229 # message += "\n Suspiciously large MC error in :" 2230 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0: 2231 # message += tmpStr 2232 2233 tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)'\ 2234 %tuple(stats['virt_stats']['v_contr_err_max']) 2235 debug_msg += tmpStr 2236 # if stats['virt_stats']['v_contr_err_max'][0]>100.0: 2237 # message += tmpStr 2238 2239 2240 except KeyError: 2241 debug_msg += '\n Could not find statistics on the integration optimization. 
' 2242 2243 # ======================================= 2244 # == aMC@NLO timing profile statistics == 2245 # ======================================= 2246 2247 timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*"+\ 2248 "(?P<time>[\d\+-Eed\.]*)\s*") 2249 2250 for logf in log_GV_files: 2251 logfile=open(logf,'r') 2252 log = logfile.read() 2253 logfile.close() 2254 channel_name = '/'.join(logf.split('/')[-3:-1]) 2255 mint = re.search(mint_search,logf) 2256 if not mint is None: 2257 channel_name = channel_name+' [step %s]'%mint.group('ID') 2258 2259 for time_stats in re.finditer(timing_stat_finder, log): 2260 try: 2261 stats['timings'][time_stats.group('name')][channel_name]+=\ 2262 float(time_stats.group('time')) 2263 except KeyError: 2264 if time_stats.group('name') not in stats['timings'].keys(): 2265 stats['timings'][time_stats.group('name')] = {} 2266 stats['timings'][time_stats.group('name')][channel_name]=\ 2267 float(time_stats.group('time')) 2268 2269 # useful inline function 2270 Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs))) 2271 try: 2272 totTimeList = [(time, chan) for chan, time in \ 2273 stats['timings']['Total'].items()] 2274 except KeyError: 2275 totTimeList = [] 2276 2277 totTimeList.sort() 2278 if len(totTimeList)>0: 2279 debug_msg += '\n\n Inclusive timing profile :' 2280 debug_msg += '\n Overall slowest channel %s (%s)'%\ 2281 (Tstr(totTimeList[-1][0]),totTimeList[-1][1]) 2282 debug_msg += '\n Average channel running time %s'%\ 2283 Tstr(sum([el[0] for el in totTimeList])/len(totTimeList)) 2284 debug_msg += '\n Aggregated total running time %s'%\ 2285 Tstr(sum([el[0] for el in totTimeList])) 2286 else: 2287 debug_msg += '\n\n Inclusive timing profile non available.' 
2288 2289 sorted_keys = sorted(stats['timings'].keys(), key= lambda stat: \ 2290 sum(stats['timings'][stat].values()), reverse=True) 2291 for name in sorted_keys: 2292 if name=='Total': 2293 continue 2294 if sum(stats['timings'][name].values())<=0.0: 2295 debug_msg += '\n Zero time record for %s.'%name 2296 continue 2297 try: 2298 TimeList = [((100.0*time/stats['timings']['Total'][chan]), 2299 chan) for chan, time in stats['timings'][name].items()] 2300 except KeyError, ZeroDivisionError: 2301 debug_msg += '\n\n Timing profile for %s unavailable.'%name 2302 continue 2303 TimeList.sort() 2304 debug_msg += '\n Timing profile for <%s> :'%name 2305 try: 2306 debug_msg += '\n Overall fraction of time %.3f %%'%\ 2307 float((100.0*(sum(stats['timings'][name].values())/ 2308 sum(stats['timings']['Total'].values())))) 2309 except KeyError, ZeroDivisionError: 2310 debug_msg += '\n Overall fraction of time unavailable.' 2311 debug_msg += '\n Largest fraction of time %.3f %% (%s)'%\ 2312 (TimeList[-1][0],TimeList[-1][1]) 2313 debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\ 2314 (TimeList[0][0],TimeList[0][1]) 2315 2316 # ============================= 2317 # == log file eror detection == 2318 # ============================= 2319 2320 # Find the number of potential errors found in all log files 2321 # This re is a simple match on a case-insensitve 'error' but there is 2322 # also some veto added for excluding the sentence 2323 # "See Section 6 of paper for error calculation." 2324 # which appear in the header of lhapdf in the logs. 
2325 err_finder = re.compile(\ 2326 r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE) 2327 for log in all_log_files: 2328 logfile=open(log,'r') 2329 nErrors = len(re.findall(err_finder, logfile.read())) 2330 logfile.close() 2331 if nErrors != 0: 2332 stats['Errors'].append((str(log),nErrors)) 2333 2334 nErrors = sum([err[1] for err in stats['Errors']],0) 2335 if nErrors != 0: 2336 debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\ 2337 %(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\ 2338 'found in the following log file%s:'%('s' if \ 2339 len(stats['Errors'])>1 else '') 2340 for error in stats['Errors'][:3]: 2341 log_name = '/'.join(error[0].split('/')[-5:]) 2342 debug_msg += '\n > %d error%s in %s'%\ 2343 (error[1],'s' if error[1]>1 else '',log_name) 2344 if len(stats['Errors'])>3: 2345 nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0) 2346 nRemainingLogs = len(stats['Errors'])-3 2347 debug_msg += '\n And another %d error%s in %d other log file%s'%\ 2348 (nRemainingErrors, 's' if nRemainingErrors>1 else '', 2349 nRemainingLogs, 's ' if nRemainingLogs>1 else '') 2350 2351 return message, debug_msg
2352 2353
2354 - def reweight_and_collect_events(self, options, mode, nevents, event_norm):
2355 """this function calls the reweighting routines and creates the event file in the 2356 Event dir. Return the name of the event file created 2357 """ 2358 scale_pdf_info={} 2359 if (self.run_card['reweight_scale'] or self.run_card['reweight_PDF']): 2360 scale_pdf_info = self.run_reweight(options['reweightonly']) 2361 2362 self.update_status('Collecting events', level='parton', update_results=True) 2363 misc.compile(['collect_events'], 2364 cwd=pjoin(self.me_dir, 'SubProcesses')) 2365 p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'), 2366 stdin=subprocess.PIPE, 2367 stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w')) 2368 if event_norm.lower() == 'sum': 2369 p.communicate(input = '1\n') 2370 elif event_norm.lower() == 'unity': 2371 p.communicate(input = '3\n') 2372 else: 2373 p.communicate(input = '2\n') 2374 2375 #get filename from collect events 2376 filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1] 2377 2378 if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)): 2379 raise aMCatNLOError('An error occurred during event generation. ' + \ 2380 'The event file has not been created. Check collect_events.log') 2381 evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 2382 misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file) 2383 if not options['reweightonly']: 2384 self.print_summary(options, 2, mode, scale_pdf_info) 2385 logger.info('The %s file has been generated.\n' % (evt_file)) 2386 self.results.add_detail('nb_event', nevents) 2387 self.update_status('Events generated', level='parton', update_results=True) 2388 return evt_file[:-3]
2389 2390
2391 - def run_mcatnlo(self, evt_file):
2392 """runs mcatnlo on the generated event file, to produce showered-events 2393 """ 2394 logger.info('Preparing MCatNLO run') 2395 try: 2396 misc.gunzip(evt_file) 2397 except Exception: 2398 pass 2399 2400 self.banner = banner_mod.Banner(evt_file) 2401 shower = self.banner.get_detail('run_card', 'parton_shower').upper() 2402 2403 #check that the number of split event files divides the number of 2404 # events, otherwise set it to 1 2405 if int(int(self.banner.get_detail('run_card', 'nevents')) / \ 2406 self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \ 2407 != int(self.banner.get_detail('run_card', 'nevents')): 2408 logger.warning(\ 2409 'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \ 2410 'Setting it to 1.') 2411 self.shower_card['nsplit_jobs'] = 1 2412 2413 # don't split jobs if the user asks to shower only a part of the events 2414 if self.shower_card['nevents'] > 0 and \ 2415 self.shower_card['nevents'] < int(self.banner.get_detail('run_card', 'nevents')) and \ 2416 self.shower_card['nsplit_jobs'] != 1: 2417 logger.warning(\ 2418 'Only a part of the events will be showered.\n' + \ 2419 'Setting nsplit_jobs in the shower_card to 1.') 2420 self.shower_card['nsplit_jobs'] = 1 2421 2422 self.banner_to_mcatnlo(evt_file) 2423 2424 # if fastjet has to be linked (in extralibs) then 2425 # add lib /include dirs for fastjet if fastjet-config is present on the 2426 # system, otherwise add fjcore to the files to combine 2427 if 'fastjet' in self.shower_card['extralibs']: 2428 #first, check that stdc++ is also linked 2429 if not 'stdc++' in self.shower_card['extralibs']: 2430 logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS') 2431 self.shower_card['extralibs'] += ' stdc++' 2432 # then check if options[fastjet] corresponds to a valid fj installation 2433 try: 2434 #this is for a complete fj installation 2435 p = subprocess.Popen([self.options['fastjet'], '--prefix'], \ 2436 stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) 2437 output, error = p.communicate() 2438 #remove the line break from output (last character) 2439 output = output[:-1] 2440 # add lib/include paths 2441 if not pjoin(output, 'lib') in self.shower_card['extrapaths']: 2442 logger.warning('Linking FastJet: updating EXTRAPATHS') 2443 self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib') 2444 if not pjoin(output, 'include') in self.shower_card['includepaths']: 2445 logger.warning('Linking FastJet: updating INCLUDEPATHS') 2446 self.shower_card['includepaths'] += ' ' + pjoin(output, 'include') 2447 # to be changed in the fortran wrapper 2448 include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ' 2449 namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ' 2450 except Exception: 2451 logger.warning('Linking FastJet: using fjcore') 2452 # this is for FJcore, so no FJ library has to be linked 2453 self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '') 2454 if not 'fjcore.o' in self.shower_card['analyse']: 2455 self.shower_card['analyse'] += ' fjcore.o' 2456 # to be changed in the fortran wrapper 2457 include_line = '#include "fjcore.hh"//INCLUDE_FJ' 2458 namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ' 2459 # change the fortran wrapper with the correct namespaces/include 2460 fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n') 2461 for line in fjwrapper_lines: 2462 if '//INCLUDE_FJ' in line: 2463 fjwrapper_lines[fjwrapper_lines.index(line)] = include_line 2464 if '//NAMESPACE_FJ' in line: 2465 fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line 2466 open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w').write(\ 2467 '\n'.join(fjwrapper_lines) + '\n') 2468 2469 extrapaths = self.shower_card['extrapaths'].split() 2470 2471 # check that the path needed by HW++ and PY8 are set if one uses these shower 2472 if shower in ['HERWIGPP', 'PYTHIA8']: 2473 path_dict = 
{'HERWIGPP': ['hepmc_path', 2474 'thepeg_path', 2475 'hwpp_path'], 2476 'PYTHIA8': ['pythia8_path']} 2477 2478 if not all([self.options[ppath] for ppath in path_dict[shower]]): 2479 raise aMCatNLOError('Some paths are missing in the configuration file.\n' + \ 2480 ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower]))) 2481 2482 if shower == 'HERWIGPP': 2483 extrapaths.append(pjoin(self.options['hepmc_path'], 'lib')) 2484 2485 if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 2486 extrapaths.append(pjoin(self.options['pythia8_path'], 'lib')) 2487 2488 if 'LD_LIBRARY_PATH' in os.environ.keys(): 2489 ldlibrarypath = os.environ['LD_LIBRARY_PATH'] 2490 else: 2491 ldlibrarypath = '' 2492 ldlibrarypath += ':' + ':'.join(extrapaths) 2493 os.putenv('LD_LIBRARY_PATH', ldlibrarypath) 2494 2495 shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat') 2496 self.shower_card.write_card(shower, shower_card_path) 2497 2498 # overwrite if shower_card_set.dat exists in MCatNLO 2499 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')): 2500 files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'), 2501 pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')) 2502 2503 mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log') 2504 self.update_status('Compiling MCatNLO for %s...' % shower, level='shower') 2505 misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'), 2506 stderr=open(mcatnlo_log, 'w'), 2507 cwd=pjoin(self.me_dir, 'MCatNLO')) 2508 2509 exe = 'MCATNLO_%s_EXE' % shower 2510 if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \ 2511 not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')): 2512 print open(mcatnlo_log).read() 2513 raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log) 2514 logger.info(' ... 
done') 2515 2516 # create an empty dir where to run 2517 count = 1 2518 while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 2519 (shower, count))): 2520 count += 1 2521 rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 2522 (shower, count)) 2523 os.mkdir(rundir) 2524 files.cp(shower_card_path, rundir) 2525 2526 #look for the event files (don't resplit if one asks for the 2527 # same number of event files as in the previous run) 2528 event_files = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 2529 'events_*.lhe')) 2530 if max(len(event_files), 1) != self.shower_card['nsplit_jobs']: 2531 logger.info('Cleaning old files and splitting the event file...') 2532 #clean the old files 2533 files.rm([f for f in event_files if 'events.lhe' not in f]) 2534 if self.shower_card['nsplit_jobs'] > 1: 2535 misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities')) 2536 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')], 2537 stdin=subprocess.PIPE, 2538 stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'), 2539 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2540 p.communicate(input = 'events.lhe\n%d\n' % self.shower_card['nsplit_jobs']) 2541 logger.info('Splitting done.') 2542 event_files = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 2543 'events_*.lhe')) 2544 2545 event_files.sort() 2546 2547 self.update_status('Showering events...', level='shower') 2548 logger.info('(Running in %s)' % rundir) 2549 if shower != 'PYTHIA8': 2550 files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir) 2551 files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir) 2552 else: 2553 # special treatment for pythia8 2554 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir) 2555 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir) 2556 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 2557 files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir) 
2558 files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir) 2559 else: 2560 files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir) 2561 #link the hwpp exe in the rundir 2562 if shower == 'HERWIGPP': 2563 try: 2564 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir) 2565 except Exception: 2566 raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.') 2567 2568 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')): 2569 files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir) 2570 2571 files.ln(evt_file, rundir, 'events.lhe') 2572 for i, f in enumerate(event_files): 2573 files.ln(f, rundir,'events_%d.lhe' % (i + 1)) 2574 2575 if not self.shower_card['analyse']: 2576 # an hep/hepmc file as output 2577 out_id = 'HEP' 2578 else: 2579 # one or more .top file(s) as output 2580 if "HwU" in self.shower_card['analyse']: 2581 out_id = 'HWU' 2582 else: 2583 out_id = 'TOP' 2584 2585 # write the executable 2586 open(pjoin(rundir, 'shower.sh'), 'w').write(\ 2587 open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read() \ 2588 % {'extralibs': ':'.join(extrapaths)}) 2589 subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')]) 2590 2591 if event_files: 2592 arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)] \ 2593 for i in range(len(event_files))] 2594 else: 2595 arg_list = [[shower, out_id, self.run_name]] 2596 2597 self.run_all({rundir: 'shower.sh'}, arg_list, 'shower') 2598 self.njobs = 1 2599 self.wait_for_complete('shower') 2600 2601 # now collect the results 2602 message = '' 2603 warning = '' 2604 to_gzip = [evt_file] 2605 if out_id == 'HEP': 2606 #copy the showered stdhep/hepmc file back in events 2607 if shower in ['PYTHIA8', 'HERWIGPP']: 2608 hep_format = 'HEPMC' 2609 ext = 'hepmc' 2610 else: 2611 hep_format = 'StdHEP' 2612 ext = 'hep' 2613 2614 hep_file = '%s_%s_0.%s.gz' % \ 2615 (pjoin(os.path.dirname(evt_file), 
'events'), shower, ext) 2616 count = 0 2617 2618 # find the first available name for the output: 2619 # check existing results with or without event splitting 2620 while os.path.exists(hep_file) or \ 2621 os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)) : 2622 count +=1 2623 hep_file = '%s_%s_%d.%s.gz' % \ 2624 (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext) 2625 2626 try: 2627 if self.shower_card['nsplit_jobs'] == 1: 2628 files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file) 2629 message = ('The file %s has been generated. \nIt contains showered' + \ 2630 ' and hadronized events in the %s format obtained' + \ 2631 ' showering the parton-level event file %s.gz with %s') % \ 2632 (hep_file, hep_format, evt_file, shower) 2633 else: 2634 hep_list = [] 2635 for i in range(self.shower_card['nsplit_jobs']): 2636 hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext))) 2637 files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1]) 2638 message = ('The following files have been generated:\n %s\nThey contain showered' + \ 2639 ' and hadronized events in the %s format obtained' + \ 2640 ' showering the (split) parton-level event file %s.gz with %s') % \ 2641 ('\n '.join(hep_list), hep_format, evt_file, shower) 2642 2643 except OSError, IOError: 2644 raise aMCatNLOError('No file has been generated, an error occurred.'+\ 2645 ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log')) 2646 2647 # run the plot creation in a secure way 2648 if hep_format == 'StdHEP': 2649 try: 2650 self.do_plot('%s -f' % self.run_name) 2651 except Exception, error: 2652 logger.info("Fail to make the plot. 
Continue...") 2653 pass 2654 2655 elif out_id == 'TOP' or out_id == 'HWU': 2656 #copy the topdrawer or HwU file(s) back in events 2657 if out_id=='TOP': 2658 ext='top' 2659 elif out_id=='HWU': 2660 ext='HwU' 2661 topfiles = [] 2662 top_tars = [tarfile.TarFile(f) for f in glob.glob(pjoin(rundir, 'histfile*.tar'))] 2663 for top_tar in top_tars: 2664 topfiles.extend(top_tar.getnames()) 2665 2666 # safety check 2667 if len(top_tars) != self.shower_card['nsplit_jobs']: 2668 raise aMCatNLOError('%d job(s) expected, %d file(s) found' % \ 2669 (self.shower_card['nsplit_jobs'], len(top_tars))) 2670 2671 # find the first available name for the output: 2672 # check existing results with or without event splitting 2673 filename = 'plot_%s_%d_' % (shower, 1) 2674 count = 1 2675 while os.path.exists(pjoin(self.me_dir, 'Events', 2676 self.run_name, '%s0.%s' % (filename,ext))) or \ 2677 os.path.exists(pjoin(self.me_dir, 'Events', 2678 self.run_name, '%s0__1.%s' % (filename,ext))): 2679 count += 1 2680 filename = 'plot_%s_%d_' % (shower, count) 2681 2682 if out_id=='TOP': 2683 hist_format='TopDrawer format' 2684 elif out_id=='HWU': 2685 hist_format='HwU and GnuPlot formats' 2686 2687 if not topfiles: 2688 # if no topfiles are found just warn the user 2689 warning = 'No .top file has been generated. 
For the results of your ' +\ 2690 'run, please check inside %s' % rundir 2691 elif self.shower_card['nsplit_jobs'] == 1: 2692 # only one job for the shower 2693 top_tars[0].extractall(path = rundir) 2694 plotfiles = [] 2695 for i, file in enumerate(topfiles): 2696 if out_id=='TOP': 2697 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 2698 '%s%d.top' % (filename, i)) 2699 files.mv(pjoin(rundir, file), plotfile) 2700 elif out_id=='HWU': 2701 histogram_list=histograms.HwUList(pjoin(rundir,file)) 2702 histogram_list.output(pjoin(self.me_dir,'Events',self.run_name, 2703 '%s%d'% (filename,i)),format = 'gnuplot') 2704 try: 2705 misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\ 2706 stdout=os.open(os.devnull, os.O_RDWR),\ 2707 stderr=os.open(os.devnull, os.O_RDWR),\ 2708 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2709 except Exception: 2710 pass 2711 plotfile=pjoin(self.me_dir,'Events',self.run_name, 2712 '%s%d.HwU'% (filename,i)) 2713 plotfiles.append(plotfile) 2714 2715 ffiles = 'files' 2716 have = 'have' 2717 if len(plotfiles) == 1: 2718 ffiles = 'file' 2719 have = 'has' 2720 2721 message = ('The %s %s %s been generated, with histograms in the' + \ 2722 ' %s, obtained by showering the parton-level' + \ 2723 ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have, \ 2724 hist_format, evt_file, shower) 2725 else: 2726 # many jobs for the shower have been run 2727 topfiles_set = set(topfiles) 2728 plotfiles = [] 2729 for j, top_tar in enumerate(top_tars): 2730 top_tar.extractall(path = rundir) 2731 for i, file in enumerate(topfiles_set): 2732 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 2733 '%s%d__%d.%s' % (filename, i, j + 1,ext)) 2734 files.mv(pjoin(rundir, file), plotfile) 2735 plotfiles.append(plotfile) 2736 2737 # check if the user asked to combine the .top into a single file 2738 if self.shower_card['combine_td']: 2739 misc.compile(['sum_plots'], cwd = pjoin(self.me_dir, 'Utilities')) 2740 2741 if self.banner.get('run_card', 
'event_norm').lower() == 'sum': 2742 norm = 1. 2743 elif self.banner.get('run_card', 'event_norm').lower() == 'average': 2744 norm = 1./float(self.shower_card['nsplit_jobs']) 2745 2746 plotfiles2 = [] 2747 for i, file in enumerate(topfiles_set): 2748 filelist = ['%s%d__%d.%s' % (filename, i, j + 1,ext) \ 2749 for j in range(self.shower_card['nsplit_jobs'])] 2750 if out_id=='TOP': 2751 infile="%d\n%s\n%s\n" % \ 2752 (self.shower_card['nsplit_jobs'], 2753 '\n'.join(filelist), 2754 '\n'.join([str(norm)] * self.shower_card['nsplit_jobs'])) 2755 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')], 2756 stdin=subprocess.PIPE, 2757 stdout=os.open(os.devnull, os.O_RDWR), 2758 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2759 p.communicate(input = infile) 2760 files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'), 2761 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i))) 2762 elif out_id=='HWU': 2763 histogram_list=histograms.HwUList(plotfiles[0]) 2764 for ii, histo in enumerate(histogram_list): 2765 histogram_list[ii] = histo*norm 2766 for histo_path in plotfiles[1:]: 2767 for ii, histo in enumerate(histograms.HwUList(histo_path)): 2768 # First make sure the plots have the same weight labels and such 2769 histo.test_plot_compability(histogram_list[ii]) 2770 # Now let the histogram module do the magic and add them. 
2771 histogram_list[ii] += histo*norm 2772 # And now output the finalized list 2773 histogram_list.output(pjoin(self.me_dir,'Events',self.run_name,'%s%d'% (filename, i)), 2774 format = 'gnuplot') 2775 try: 2776 misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\ 2777 stdout=os.open(os.devnull, os.O_RDWR),\ 2778 stderr=os.open(os.devnull, os.O_RDWR),\ 2779 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2780 except Exception: 2781 pass 2782 2783 plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i,ext))) 2784 tar = tarfile.open( 2785 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz') 2786 for f in filelist: 2787 tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f) 2788 files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist]) 2789 2790 tar.close() 2791 2792 ffiles = 'files' 2793 have = 'have' 2794 if len(plotfiles2) == 1: 2795 ffiles = 'file' 2796 have = 'has' 2797 2798 message = ('The %s %s %s been generated, with histograms in the' + \ 2799 ' %s, obtained by showering the parton-level' + \ 2800 ' file %s.gz with %s.\n' + \ 2801 'The files from the different shower ' + \ 2802 'jobs (before combining them) can be found inside %s.') % \ 2803 (ffiles, ', '.join(plotfiles2), have, hist_format,\ 2804 evt_file, shower, 2805 ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2])) 2806 2807 else: 2808 message = ('The following files have been generated:\n %s\n' + \ 2809 'They contain histograms in the' + \ 2810 ' %s, obtained by showering the parton-level' + \ 2811 ' file %s.gz with %s.') % ('\n '.join(plotfiles), \ 2812 hist_format, evt_file, shower) 2813 2814 # Now arxiv the shower card used if RunMaterial is present 2815 run_dir_path = pjoin(rundir, self.run_name) 2816 if os.path.exists(pjoin(run_dir_path,'RunMaterial.tar.gz')): 2817 misc.call(['tar','-xzpf','RunMaterial.tar.gz'],cwd=run_dir_path) 2818 files.cp(pjoin(self.me_dir,'Cards','shower_card.dat'), 
2819 pjoin(run_dir_path,'RunMaterial','shower_card_for_%s_%d.dat'\ 2820 %(shower, count))) 2821 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'], 2822 cwd=run_dir_path) 2823 shutil.rmtree(pjoin(run_dir_path,'RunMaterial')) 2824 # end of the run, gzip files and print out the message/warning 2825 for f in to_gzip: 2826 misc.gzip(f) 2827 if message: 2828 logger.info(message) 2829 if warning: 2830 logger.warning(warning) 2831 2832 self.update_status('Run complete', level='shower', update_results=True)
2833 2834 2835 ############################################################################
    def set_run_name(self, name, tag=None, level='parton', reload_card=False):
        """define the run name, the run_tag, the banner and the results.

        name: run name to switch to (may be the current one).
        tag: explicit run tag; if None a tag is kept or generated as needed.
        level: stage requesting the change ('parton', 'pythia', 'shower',
            'pgs', 'delphes' or 'plot'); it decides when a fresh tag is
            required (see upgrade_tag below).
        reload_card: if True, re-read Cards/run_card.dat even when the run
            name is unchanged.

        Returns None at parton level or when only switching names; otherwise
        returns the tag of the previous run that carries the data needed by
        this level.
        """

        # when are we forced to change the tag: new_run -> list of previous
        # levels whose presence in the last tag requires a new tag
        upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','shower'],
                       'pythia': ['pythia','pgs','delphes'],
                       'shower': ['shower'],
                       'pgs': ['pgs'],
                       'delphes':['delphes'],
                       'plot':[]}



        # Same run name as the current one: at most refresh the card and/or
        # switch to a new tag, then stop.
        if name == self.run_name:
            if reload_card:
                run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
                self.run_card = banner_mod.RunCardNLO(run_card)

            #check if we need to change the tag
            if tag:
                self.run_card['run_tag'] = tag
                self.run_tag = tag
                self.results.add_run(self.run_name, self.run_card)
            else:
                # if the last tag already contains results for this level,
                # pick a fresh tag so they are not overwritten
                for tag in upgrade_tag[level]:
                    if getattr(self.results[self.run_name][-1], tag):
                        tag = self.get_available_tag()
                        self.run_card['run_tag'] = tag
                        self.run_tag = tag
                        self.results.add_run(self.run_name, self.run_card)
                        break
            return # Nothing to do anymore

        # save/clean previous run
        if self.run_name:
            self.store_result()
        # store new name
        self.run_name = name

        # Read run_card
        run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
        self.run_card = banner_mod.RunCardNLO(run_card)

        new_tag = False
        # First call for this run -> set the banner
        self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag)
        if tag:
            self.run_card['run_tag'] = tag
            new_tag = True
        elif not self.run_name in self.results and level =='parton':
            pass # No results yet, so current tag is fine
        elif not self.run_name in self.results:
            #This is only for case when you want to trick the interface
            logger.warning('Trying to run data on unknown run.')
            self.results.add_run(name, self.run_card)
            self.results.update('add run %s' % name, 'all', makehtml=True)
        else:
            # run already known: decide whether the last tag can be reused
            for tag in upgrade_tag[level]:

                if getattr(self.results[self.run_name][-1], tag):
                    # LEVEL is already defined in the last tag -> need to switch tag
                    tag = self.get_available_tag()
                    self.run_card['run_tag'] = tag
                    new_tag = True
                    break
            if not new_tag:
                # We can add the results to the current run
                tag = self.results[self.run_name][-1]['tag']
                self.run_card['run_tag'] = tag # ensure that run_tag is correct


        if name in self.results and not new_tag:
            self.results.def_current(self.run_name)
        else:
            self.results.add_run(self.run_name, self.run_card)

        self.run_tag = self.run_card['run_tag']

        # Return the tag of the previous run having the required data for this
        # tag/run to work well.
        if level == 'parton':
            return
        elif level == 'pythia':
            return self.results[self.run_name][0]['tag']
        else:
            # walk the tags from the most recent one and return the first that
            # has pythia-level data (returns None if none does)
            for i in range(-1,-len(self.results[self.run_name])-1,-1):
                tagRun = self.results[self.run_name][i]
                if tagRun.pythia:
                    return tagRun['tag']
2925 2926
2927 - def store_result(self):
2928 """ tar the pythia results. This is done when we are quite sure that 2929 the pythia output will not be use anymore """ 2930 2931 if not self.run_name: 2932 return 2933 2934 self.results.save() 2935 2936 if not self.to_store: 2937 return 2938 2939 tag = self.run_card['run_tag'] 2940 2941 self.to_store = []
2942 2943
2944 - def get_init_dict(self, evt_file):
2945 """reads the info in the init block and returns them in a dictionary""" 2946 ev_file = open(evt_file) 2947 init = "" 2948 found = False 2949 while True: 2950 line = ev_file.readline() 2951 if "<init>" in line: 2952 found = True 2953 elif found and not line.startswith('#'): 2954 init += line 2955 if "</init>" in line or "<event>" in line: 2956 break 2957 ev_file.close() 2958 2959 # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2), 2960 # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP 2961 # these are not included (so far) in the init_dict 2962 # XSECUP(1),XERRUP(1),XMAXUP(1),LPRUP(1) 2963 2964 init_dict = {} 2965 init_dict['idbmup1'] = int(init.split()[0]) 2966 init_dict['idbmup2'] = int(init.split()[1]) 2967 init_dict['ebmup1'] = float(init.split()[2]) 2968 init_dict['ebmup2'] = float(init.split()[3]) 2969 init_dict['pdfgup1'] = int(init.split()[4]) 2970 init_dict['pdfgup2'] = int(init.split()[5]) 2971 init_dict['pdfsup1'] = int(init.split()[6]) 2972 init_dict['pdfsup2'] = int(init.split()[7]) 2973 init_dict['idwtup'] = int(init.split()[8]) 2974 init_dict['nprup'] = int(init.split()[9]) 2975 2976 return init_dict
2977 2978
    def banner_to_mcatnlo(self, evt_file):
        """creates the mcatnlo input script using the values set in the header of the event_file.
        It also checks if the lhapdf library is used.

        The resulting shell-style KEY=value assignments are written to
        MCatNLO/banner.dat.  Returns the (upper-cased) parton-shower name
        read from the run card in the banner."""
        shower = self.banner.get('run_card', 'parton_shower').upper()
        pdlabel = self.banner.get('run_card', 'pdlabel')
        itry = 0  # NOTE(review): never used in this method -- leftover?
        nevents = self.shower_card['nevents']
        init_dict = self.get_init_dict(evt_file)

        # cap the number of events to shower at the number generated
        if nevents < 0 or \
           nevents > int(self.banner.get_detail('run_card', 'nevents')):
            nevents = int(self.banner.get_detail('run_card', 'nevents'))

        # true division (file imports __future__ division) gives a float;
        # the later %d format truncates it to an integer
        nevents = nevents / self.shower_card['nsplit_jobs']

        # Monte-Carlo masses, one "pdg mass" pair per banner line
        mcmass_dict = {}
        for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]:
            pdg = int(line.split()[0])
            mass = float(line.split()[1])
            mcmass_dict[pdg] = mass

        # build the KEY=value content of the input script
        content = 'EVPREFIX=%s\n' % pjoin(os.path.split(evt_file)[1])
        content += 'NEVENTS=%d\n' % nevents
        content += 'NEVENTS_TOT=%d\n' % (int(self.banner.get_detail('run_card', 'nevents')) /\
                                             self.shower_card['nsplit_jobs'])
        content += 'MCMODE=%s\n' % shower
        content += 'PDLABEL=%s\n' % pdlabel
        # electroweak/mass parameters taken from the param card in the banner
        content += 'ALPHAEW=%s\n' % self.banner.get_detail('param_card', 'sminputs', 1).value
        #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid')
        #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
        content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value
        content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value
        content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value
        content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value
        content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value
        content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value
        try:
            content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value
            content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value
        except KeyError:
            # no Higgs in the param card: fall back on hard-coded defaults
            content += 'HGGMASS=120.\n'
            content += 'HGGWIDTH=0.00575308848\n'
        content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1')
        content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2')
        content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1')
        content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2')
        content += 'DMASS=%s\n' % mcmass_dict[1]
        content += 'UMASS=%s\n' % mcmass_dict[2]
        content += 'SMASS=%s\n' % mcmass_dict[3]
        content += 'CMASS=%s\n' % mcmass_dict[4]
        content += 'BMASS=%s\n' % mcmass_dict[5]
        try:
            content += 'EMASS=%s\n' % mcmass_dict[11]
            content += 'MUMASS=%s\n' % mcmass_dict[13]
            content += 'TAUMASS=%s\n' % mcmass_dict[15]
        except KeyError:
            # this is for backward compatibility: lepton masses were not in
            # the banner, so read them from the MCmasses include file where
            # they are stored as fortran 'mcmass(pdg)=value' lines ('d'
            # exponents converted to 'e')
            mcmass_lines = [l for l in \
                    open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper())
                            ).read().split('\n') if l]
            new_mcmass_dict = {}
            for l in mcmass_lines:
                key, val = l.split('=')
                new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip()
            content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)']
            content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)']
            content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)']

        content += 'GMASS=%s\n' % mcmass_dict[21]
        content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower()
        # check if need to link lhapdf
        if int(self.shower_card['pdfcode']) > 1 or \
           (pdlabel=='lhapdf' and int(self.shower_card['pdfcode'])==1):
            # Use LHAPDF (should be correctly installed, because
            # either events were already generated with them, or the
            # user explicitly gives an LHAPDF number in the
            # shower_card).
            self.link_lhapdf(pjoin(self.me_dir, 'lib'))
            lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                                          stdout = subprocess.PIPE).stdout.read().strip()
            content += 'LHAPDFPATH=%s\n' % lhapdfpath
            pdfsetsdir = self.get_lhapdf_pdfsetsdir()
            if self.shower_card['pdfcode']==1:
                # pdfcode 1 means "same PDF as in the generation": take the
                # set number from the event file's init block
                lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])]
                content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
            else:
                lhaid_list = [abs(int(self.shower_card['pdfcode']))]
                content += 'PDFCODE=%s\n' % self.shower_card['pdfcode']
            self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
        elif int(self.shower_card['pdfcode'])==1:
            # Try to use LHAPDF because user wants to use the same PDF
            # as was used for the event generation. However, for the
            # event generation, LHAPDF was not used, so non-trivial to
            # see if LHAPDF is available with the corresponding PDF
            # set. If not found, give a warning and use build-in PDF
            # set instead.
            try:
                lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                                              stdout = subprocess.PIPE).stdout.read().strip()
                self.link_lhapdf(pjoin(self.me_dir, 'lib'))
                content += 'LHAPDFPATH=%s\n' % lhapdfpath
                pdfsetsdir = self.get_lhapdf_pdfsetsdir()
                lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])]
                content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
                self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
            except Exception:
                logger.warning('Trying to shower events using the same PDF in the shower as used in the generation'+\
                                   ' of the events using LHAPDF. However, no valid LHAPDF installation found with the'+\
                                   ' needed PDF set. Will use default internal PDF for the shower instead. To use the'+\
                                   ' same set as was used in the event generation install LHAPDF and set the path using'+\
                                   ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell')
                content += 'LHAPDFPATH=\n'
                content += 'PDFCODE=0\n'
        else:
            # internal PDF: empty path and PDFCODE=0 select the built-in set
            content += 'LHAPDFPATH=\n'
            content += 'PDFCODE=0\n'

        content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw')
        content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj')
        # add the pythia8/hwpp path(s)
        if self.options['pythia8_path']:
            content+='PY8PATH=%s\n' % self.options['pythia8_path']
        if self.options['hwpp_path']:
            content+='HWPPPATH=%s\n' % self.options['hwpp_path']
        if self.options['thepeg_path']:
            content+='THEPEGPATH=%s\n' % self.options['thepeg_path']
        if self.options['hepmc_path']:
            content+='HEPMCPATH=%s\n' % self.options['hepmc_path']

        # write the input script consumed by MCatNLO_MadFKS.inputs
        output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w')
        output.write(content)
        output.close()
        return shower
3112 3113
3114 - def run_reweight(self, only):
3115 """runs the reweight_xsec_events eecutables on each sub-event file generated 3116 to compute on the fly scale and/or PDF uncertainities""" 3117 logger.info(' Doing reweight') 3118 3119 nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted') 3120 # if only doing reweight, copy back the nevents_unweighted file 3121 if only: 3122 if os.path.exists(nev_unw + '.orig'): 3123 files.cp(nev_unw + '.orig', nev_unw) 3124 else: 3125 raise aMCatNLOError('Cannot find event file information') 3126 3127 #read the nevents_unweighted file to get the list of event files 3128 file = open(nev_unw) 3129 lines = file.read().split('\n') 3130 file.close() 3131 # make copy of the original nevent_unweighted file 3132 files.cp(nev_unw, nev_unw + '.orig') 3133 # loop over lines (all but the last one whith is empty) and check that the 3134 # number of events is not 0 3135 evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0'] 3136 #prepare the job_dict 3137 job_dict = {} 3138 exe = 'reweight_xsec_events.local' 3139 for i, evt_file in enumerate(evt_files): 3140 path, evt = os.path.split(evt_file) 3141 files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \ 3142 pjoin(self.me_dir, 'SubProcesses', path)) 3143 job_dict[path] = [exe] 3144 3145 self.run_all(job_dict, [[evt, '1']], 'Running reweight') 3146 3147 #check that the new event files are complete 3148 for evt_file in evt_files: 3149 last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \ 3150 pjoin(self.me_dir, 'SubProcesses', evt_file)], \ 3151 stdout = subprocess.PIPE).stdout.read().strip() 3152 if last_line != "</LesHouchesEvents>": 3153 raise aMCatNLOError('An error occurred during reweight. 
Check the' + \ 3154 '\'reweight_xsec_events.output\' files inside the ' + \ 3155 '\'SubProcesses/P*/G*/ directories for details') 3156 3157 #update file name in nevents_unweighted 3158 newfile = open(nev_unw, 'w') 3159 for line in lines: 3160 if line: 3161 newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n') 3162 newfile.close() 3163 3164 return self.pdf_scale_from_reweighting(evt_files)
3165
    def pdf_scale_from_reweighting(self, evt_files):
        """This function takes the files with the scale and pdf values
        written by the reweight_xsec_events.f code
        (P*/G*/pdf_scale_dependence.dat) and computes the overall
        scale and PDF uncertainty (the latter is computed using the
        Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000))
        and returns it in percents. The expected format of the file
        is: n_scales xsec_scale_central xsec_scale1 ... n_pdf
        xsec_pdf0 xsec_pdf1 ...."""
        scale_pdf_info={}
        scales=[]   # per-variation cross sections summed over channels
        pdfs=[]     # per-PDF-member cross sections summed over channels
        numofpdf = 0
        numofscales = 0
        # accumulate the contributions of every channel (one dat file per
        # P*/G* directory); the 'D' -> 'E' replacement converts fortran
        # double-precision exponents
        for evt_file in evt_files:
            path, evt=os.path.split(evt_file)
            data_file=open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat')).read()
            lines = data_file.replace("D", "E").split("\n")
            # the counters are taken from the first file and must match in
            # all the others
            if not numofscales:
                numofscales = int(lines[0])
            if not numofpdf:
                numofpdf = int(lines[2])
            scales_this = [float(val) for val in lines[1].split()]
            pdfs_this = [float(val) for val in lines[3].split()]

            if numofscales != len(scales_this) or numofpdf !=len(pdfs_this):
                # the +1 takes the 0th (central) set into account
                logger.info(data_file)
                logger.info((' Expected # of scales: %d\n'+
                             ' Found # of scales: %d\n'+
                             ' Expected # of pdfs: %d\n'+
                             ' Found # of pdfs: %d\n') %
                            (numofscales, len(scales_this), numofpdf, len(pdfs_this)))
                raise aMCatNLOError('inconsistent scale_pdf_dependence.dat')
            if not scales:
                scales = [0.] * numofscales
            if not pdfs:
                pdfs = [0.] * numofpdf

            scales = [a + b for a, b in zip(scales, scales_this)]
            pdfs = [a + b for a, b in zip(pdfs, pdfs_this)]

        # get the central value (entry 0 of either list; if both scale and
        # PDF variations are present the two central values must agree)
        if numofscales>0 and numofpdf==0:
            cntrl_val=scales[0]
        elif numofpdf>0 and numofscales==0:
            cntrl_val=pdfs[0]
        elif numofpdf>0 and numofscales>0:
            if abs(1-scales[0]/pdfs[0])>0.0001:
                raise aMCatNLOError('Central values for scale and PDF variation not identical')
            else:
                cntrl_val=scales[0]

        # get the scale uncertainty in percent
        if numofscales>0:
            if cntrl_val != 0.0:
                # max and min of the full envelope
                scale_pdf_info['scale_upp'] = (max(scales)/cntrl_val-1)*100
                scale_pdf_info['scale_low'] = (1-min(scales)/cntrl_val)*100
                # ren and fac scale dependence added in quadrature
                # NOTE(review): indices 1,2 and 3,6 assume the usual 3x3
                # (mu_R, mu_F) variation grid, i.e. at least 7 scale values;
                # fewer scales would raise IndexError here -- confirm
                scale_pdf_info['scale_upp_quad'] = ((cntrl_val+math.sqrt(math.pow(max(scales[0]-cntrl_val,scales[1]-cntrl_val,scales[2]-cntrl_val),2)+math.pow(max(scales[0]-cntrl_val,scales[3]-cntrl_val,scales[6]-cntrl_val),2)))/cntrl_val-1)*100
                scale_pdf_info['scale_low_quad'] = (1-(cntrl_val-math.sqrt(math.pow(min(scales[0]-cntrl_val,scales[1]-cntrl_val,scales[2]-cntrl_val),2)+math.pow(min(scales[0]-cntrl_val,scales[3]-cntrl_val,scales[6]-cntrl_val),2)))/cntrl_val)*100
            else:
                scale_pdf_info['scale_upp'] = 0.0
                scale_pdf_info['scale_low'] = 0.0

        # get the pdf uncertainty in percent (according to the Hessian method)
        lhaid=int(self.run_card['lhaid'])
        pdf_upp=0.0
        pdf_low=0.0
        if lhaid <= 90000:
            # use Hessian method (CTEQ & MSTW): error members come in
            # up/down pairs (2*i+1, 2*i+2) around the central member 0
            if numofpdf>1:
                for i in range(int(numofpdf/2)):
                    pdf_upp=pdf_upp+math.pow(max(0.0,pdfs[2*i+1]-cntrl_val,pdfs[2*i+2]-cntrl_val),2)
                    pdf_low=pdf_low+math.pow(max(0.0,cntrl_val-pdfs[2*i+1],cntrl_val-pdfs[2*i+2]),2)
                if cntrl_val != 0.0:
                    scale_pdf_info['pdf_upp'] = math.sqrt(pdf_upp)/cntrl_val*100
                    scale_pdf_info['pdf_low'] = math.sqrt(pdf_low)/cntrl_val*100
                else:
                    scale_pdf_info['pdf_upp'] = 0.0
                    scale_pdf_info['pdf_low'] = 0.0

        else:
            # use Gaussian method (NNPDF): symmetric standard deviation over
            # the numofpdf-1 replicas
            pdf_stdev=0.0
            for i in range(int(numofpdf-1)):
                pdf_stdev = pdf_stdev + pow(pdfs[i+1] - cntrl_val,2)
            pdf_stdev = math.sqrt(pdf_stdev/int(numofpdf-2))
            if cntrl_val != 0.0:
                scale_pdf_info['pdf_upp'] = pdf_stdev/cntrl_val*100
            else:
                scale_pdf_info['pdf_upp'] = 0.0
            scale_pdf_info['pdf_low'] = scale_pdf_info['pdf_upp']
        return scale_pdf_info
3261 3262
3263 - def wait_for_complete(self, run_type):
3264 """this function waits for jobs on cluster to complete their run.""" 3265 3266 starttime = time.time() 3267 #logger.info(' Waiting for submitted jobs to complete') 3268 update_status = lambda i, r, f: self.update_status((i, r, f, run_type), 3269 starttime=starttime, level='parton', update_results=True) 3270 try: 3271 self.cluster.wait(self.me_dir, update_status) 3272 except: 3273 self.cluster.remove() 3274 raise
3275
3276 - def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False):
3277 """runs the jobs in job_dict (organized as folder: [job_list]), with arguments args""" 3278 njob_split = 0 3279 self.ijob = 0 3280 3281 # this is to keep track, if splitting evt generation, of the various 3282 # folders/args in order to resubmit the jobs if some of them fail 3283 self.split_folders = {} 3284 3285 if run_type != 'shower': 3286 self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list) 3287 for args in arg_list: 3288 for Pdir, jobs in job_dict.items(): 3289 for job in jobs: 3290 if not split_jobs: 3291 self.run_exe(job, args, run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 3292 else: 3293 for n in self.find_jobs_to_split(Pdir, job, args[1]): 3294 self.run_exe(job, args + [n], run_type, cwd=pjoin(self.me_dir, 'SubProcesses', Pdir) ) 3295 njob_split += 1 3296 # print some statistics if running serially 3297 if self.cluster_mode == 2: 3298 time.sleep(1) # security to allow all jobs to be launched 3299 if njob_split > 0: 3300 self.njobs = njob_split 3301 else: 3302 self.njobs = len(arg_list) 3303 for args in arg_list: 3304 [(cwd, exe)] = job_dict.items() 3305 self.run_exe(exe, args, run_type, cwd) 3306 3307 self.wait_for_complete(run_type)
3308 3309 3310
3311 - def check_event_files(self):
3312 """check the integrity of the event files after splitting, and resubmit 3313 those which are not nicely terminated""" 3314 to_resubmit = [] 3315 for dir in self.split_folders.keys(): 3316 last_line = '' 3317 try: 3318 last_line = subprocess.Popen( 3319 ['tail', '-n1', pjoin(dir, 'events.lhe')], \ 3320 stdout = subprocess.PIPE).stdout.read().strip() 3321 except IOError: 3322 pass 3323 3324 if last_line != "</LesHouchesEvents>": 3325 to_resubmit.append(dir) 3326 3327 self.njobs = 0 3328 if to_resubmit: 3329 run_type = 'Resubmitting broken jobs' 3330 logger.info('Some event files are broken, corresponding jobs will be resubmitted.') 3331 logger.debug('Resubmitting\n' + '\n'.join(to_resubmit) + '\n') 3332 for dir in to_resubmit: 3333 files.rm([dir]) 3334 job = self.split_folders[dir][0] 3335 args = self.split_folders[dir][1:] 3336 run_type = 'monitor' 3337 cwd = os.path.split(dir)[0] 3338 self.run_exe(job, args, run_type, cwd=cwd ) 3339 self.njobs +=1 3340 3341 self.wait_for_complete(run_type)
3342 3343
3344 - def find_jobs_to_split(self, pdir, job, arg):
3345 """looks into the nevents_unweighed_splitted file to check how many 3346 split jobs are needed for this (pdir, job). arg is F, B or V""" 3347 # find the number of the integration channel 3348 splittings = [] 3349 ajob = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read() 3350 pattern = re.compile('for i in (\d+) ; do') 3351 match = re.search(pattern, ajob) 3352 channel = match.groups()[0] 3353 # then open the nevents_unweighted_splitted file and look for the 3354 # number of splittings to be done 3355 nevents_file = open(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted')).read() 3356 # This skips the channels with zero events, because they are 3357 # not of the form GFXX_YY, but simply GFXX 3358 pattern = re.compile(r"%s_(\d+)/events.lhe" % \ 3359 pjoin(pdir, 'G%s%s' % (arg,channel))) 3360 matches = re.findall(pattern, nevents_file) 3361 for m in matches: 3362 splittings.append(m) 3363 return splittings
3364 3365
3366 - def run_exe(self, exe, args, run_type, cwd=None):
3367 """this basic function launch locally/on cluster exe with args as argument. 3368 """ 3369 3370 # first test that exe exists: 3371 execpath = None 3372 if cwd and os.path.exists(pjoin(cwd, exe)): 3373 execpath = pjoin(cwd, exe) 3374 elif not cwd and os.path.exists(exe): 3375 execpath = exe 3376 else: 3377 raise aMCatNLOError('Cannot find executable %s in %s' \ 3378 % (exe, os.getcwd())) 3379 # check that the executable has exec permissions 3380 if self.cluster_mode == 1 and not os.access(execpath, os.X_OK): 3381 subprocess.call(['chmod', '+x', exe], cwd=cwd) 3382 # finally run it 3383 if self.cluster_mode == 0: 3384 #this is for the serial run 3385 misc.call(['./'+exe] + args, cwd=cwd) 3386 self.ijob += 1 3387 self.update_status((max([self.njobs - self.ijob - 1, 0]), 3388 min([1, self.njobs - self.ijob]), 3389 self.ijob, run_type), level='parton') 3390 3391 #this is for the cluster/multicore run 3392 elif 'reweight' in exe: 3393 # a reweight run 3394 # Find the correct PDF input file 3395 input_files, output_files = [], [] 3396 pdfinput = self.get_pdf_input_filename() 3397 if os.path.exists(pdfinput): 3398 input_files.append(pdfinput) 3399 input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events')) 3400 input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat')) 3401 input_files.append(args[0]) 3402 output_files.append('%s.rwgt' % os.path.basename(args[0])) 3403 output_files.append('reweight_xsec_events.output') 3404 output_files.append('scale_pdf_dependence.dat') 3405 3406 return self.cluster.submit2(exe, args, cwd=cwd, 3407 input_files=input_files, output_files=output_files, 3408 required_output=output_files) 3409 3410 elif 'ajob' in exe: 3411 # the 'standard' amcatnlo job 3412 # check if args is a list of string 3413 if type(args[0]) == str: 3414 input_files, output_files, required_output, args = self.getIO_ajob(exe,cwd, args) 3415 #submitting 3416 self.cluster.submit2(exe, args, cwd=cwd, 3417 input_files=input_files, 
output_files=output_files, 3418 required_output=required_output) 3419 3420 # keep track of folders and arguments for splitted evt gen 3421 subfolder=output_files[-1].split('/')[0] 3422 if len(args) == 4 and '_' in subfolder: 3423 self.split_folders[pjoin(cwd,subfolder)] = [exe] + args 3424 3425 elif 'shower' in exe: 3426 # a shower job 3427 # args are [shower, output(HEP or TOP), run_name] 3428 # cwd is the shower rundir, where the executable are found 3429 input_files, output_files = [], [] 3430 shower = args[0] 3431 # the input files 3432 if shower == 'PYTHIA8': 3433 input_files.append(pjoin(cwd, 'Pythia8.exe')) 3434 input_files.append(pjoin(cwd, 'Pythia8.cmd')) 3435 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 3436 input_files.append(pjoin(cwd, 'config.sh')) 3437 input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc')) 3438 else: 3439 input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc')) 3440 else: 3441 input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower)) 3442 input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower)) 3443 if shower == 'HERWIGPP': 3444 input_files.append(pjoin(cwd, 'Herwig++')) 3445 input_files.append(pjoin(cwd, 'HepMCFortran.so')) 3446 if len(args) == 3: 3447 if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')): 3448 input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')) 3449 elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')): 3450 input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')) 3451 else: 3452 raise aMCatNLOError, 'Event file not present in %s' % \ 3453 pjoin(self.me_dir, 'Events', self.run_name) 3454 else: 3455 input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3])) 3456 # the output files 3457 if len(args) == 3: 3458 output_files.append('mcatnlo_run.log') 3459 else: 3460 output_files.append('mcatnlo_run_%s.log' % args[3]) 3461 if args[1] == 'HEP': 3462 if len(args) == 
3: 3463 fname = 'events' 3464 else: 3465 fname = 'events_%s' % args[3] 3466 if shower in ['PYTHIA8', 'HERWIGPP']: 3467 output_files.append(fname + '.hepmc.gz') 3468 else: 3469 output_files.append(fname + '.hep.gz') 3470 elif args[1] == 'TOP' or args[1] == 'HWU': 3471 if len(args) == 3: 3472 fname = 'histfile' 3473 else: 3474 fname = 'histfile_%s' % args[3] 3475 output_files.append(fname + '.tar') 3476 else: 3477 raise aMCatNLOError, 'Not a valid output argument for shower job : %d' % args[1] 3478 #submitting 3479 self.cluster.submit2(exe, args, cwd=cwd, 3480 input_files=input_files, output_files=output_files) 3481 3482 else: 3483 return self.cluster.submit(exe, args, cwd=cwd)
3484
    def getIO_ajob(self,exe,cwd, args):
        """Build the input/output/required-output file lists for an 'ajob'
        cluster submission, so the job can run on a node's local disk.

        args encodes the run configuration (args[0]: '0' = fixed-order
        madevent_mintFO mode, '2' = mintMC mode); args may be trimmed to
        three entries before being returned.  Returns the tuple
        (input_files, output_files, required_output, args).
        """

        keep_fourth_arg = False
        output_files = []
        required_output = []
        # files every job needs, independent of the run mode
        input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'),
                       pjoin(cwd, 'symfact.dat'),
                       pjoin(cwd, 'iproc.dat'),
                       pjoin(cwd, 'initial_states_map.dat'),
                       pjoin(cwd, 'configs_and_props_info.dat'),
                       pjoin(cwd, 'leshouche_info.dat'),
                       pjoin(cwd, 'FKS_params.dat')]

        if os.path.exists(pjoin(cwd,'nevents.tar')):
            input_files.append(pjoin(cwd,'nevents.tar'))

        if os.path.exists(pjoin(self.me_dir,'SubProcesses','OLE_order.olc')):
            input_files.append(pjoin(cwd, 'OLE_order.olc'))

        # File for the loop (might not be present if MadLoop is not used);
        # pack the resources directory so it can be shipped as one tarball
        if os.path.exists(pjoin(cwd,'MadLoop5_resources')) and \
                              cluster.need_transfer(self.options):
            input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
            tf=tarfile.open(pjoin(cwd,'MadLoop5_resources.tar.gz'),'w:gz',
                                                             dereference=True)
            tf.add(pjoin(cwd,'MadLoop5_resources'),arcname='MadLoop5_resources')
            tf.close()

        # the integration channels are listed in the job script as a shell
        # loop of the form "for i in <numbers> ; do"
        Ire = re.compile("for i in ([\d\s]*) ; do")
        try : 
            fsock = open(exe)
        except IOError:
            # exe may be given relative to cwd
            fsock = open(pjoin(cwd,exe))
        text = fsock.read()
        data = Ire.findall(text)
        subdir = ' '.join(data).split()

        if args[0] == '0':
            # MADEVENT MINT FO MODE
            input_files.append(pjoin(cwd, 'madevent_mintFO'))
            input_files.append(pjoin(self.me_dir, 'SubProcesses','madin.%s' % args[1]))
            #j=$2\_G$i
            for i in subdir:
                current = '%s_G%s' % (args[1],i)
                if os.path.exists(pjoin(cwd,current)):
                    input_files.append(pjoin(cwd, current))
                output_files.append(current)

                required_output.append('%s/results.dat' % current)
                required_output.append('%s/log.txt' % current)
                required_output.append('%s/mint_grids' % current)
                required_output.append('%s/grid.MC_integer' % current)
                if len(args) == 4:
                    # 4-argument form: reuse grids trained on another part
                    # (args[3]); args[2] is forced to '-1' in that case
                    required_output.append('%s/scale_pdf_dependence.dat' % current)
                    args[2] = '-1'
                    # use a grid train on another part
                    base = '%s_G%s' % (args[3],i)
                    # NOTE(review): args[0] is always '0' inside this branch,
                    # so the '1' and fallback cases below look unreachable here
                    if args[0] == '0':
                        to_move = ['grid.MC_integer','mint_grids']
                    elif args[0] == '1':
                        to_move = ['mint_grids', 'grid.MC_integer']
                    else:
                        to_move = []
                    if self.run_card['iappl'] == 2:
                        # applgrid mode: also carry over the root grids
                        for grid in glob.glob(pjoin(cwd,base,'grid_obs_*_in.root')):
                            to_move.append(grid)
                    if not os.path.exists(pjoin(cwd,current)):
                        os.mkdir(pjoin(cwd,current))
                        input_files.append(pjoin(cwd, current))
                    for name in to_move:
                        files.cp(pjoin(cwd,base, name),
                                 pjoin(cwd,current))
                    # NOTE(review): grid.MC_integer is copied again here even
                    # though it is already in to_move -- presumably harmless
                    files.cp(pjoin(cwd,base, 'grid.MC_integer'),
                             pjoin(cwd,current))

        elif args[0] == '2':
            # MINTMC MODE
            input_files.append(pjoin(cwd, 'madevent_mintMC'))
            if args[2] in ['0','2']:
                input_files.append(pjoin(self.me_dir, 'SubProcesses','madinMMC_%s.2' % args[1]))

            for i in subdir:
                current = 'G%s%s' % (args[1], i)
                if os.path.exists(pjoin(cwd,current)):
                    input_files.append(pjoin(cwd, current))
                output_files.append(current)
                if len(args) == 4 and args[3] in ['H','S','V','B','F']:
                    # use a grid train on another part
                    base = '%s_%s' % (args[3],i)
                    files.ln(pjoin(cwd,base,'mint_grids'), name = 'preset_mint_grids',
                             starting_dir=pjoin(cwd,current))
                    files.ln(pjoin(cwd,base,'grid.MC_integer'),
                             starting_dir=pjoin(cwd,current))
                elif len(args) ==4:
                    # this is for the split event generation: the fourth
                    # argument is the split index, so it must be kept
                    keep_fourth_arg = True
                    output_files.append('G%s%s_%s' % (args[1], i, args[3]))
                    required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1], i, args[3],args[2]))

                else:
                    required_output.append('%s/log_MINT%s.txt' % (current,args[2]))
                    if args[2] in ['0','1']:
                        required_output.append('%s/results.dat' % current)
                    if args[2] == '1':
                        output_files.append('%s/results.dat' % current)

        else:
            raise aMCatNLOError, 'not valid arguments: %s' %(', '.join(args))

        #Find the correct PDF input file
        pdfinput = self.get_pdf_input_filename()
        if os.path.exists(pdfinput):
            input_files.append(pdfinput)

        # drop the fourth argument unless it carries the split index
        if len(args) == 4 and not keep_fourth_arg:
            args = args[:3]

        return input_files, output_files, required_output, args
3606 - def write_madinMMC_file(self, path, run_mode, mint_mode):
3607 """writes the madinMMC_?.2 file""" 3608 #check the validity of the arguments 3609 run_modes = ['born', 'virt', 'novi', 'all', 'viSB', 'novB'] 3610 if run_mode not in run_modes: 3611 raise aMCatNLOError('%s is not a valid mode for run. Please use one of the following: %s' \ 3612 % (run_mode, ', '.join(run_modes))) 3613 mint_modes = [0, 1, 2] 3614 if mint_mode not in mint_modes: 3615 raise aMCatNLOError('%s is not a valid mode for mintMC. Please use one of the following: %s' \ 3616 % (mint_mode, ', '.join(mint_modes))) 3617 if run_mode in ['born']: 3618 name_suffix = 'B' 3619 elif run_mode in ['virt', 'viSB']: 3620 name_suffix = 'V' 3621 else: 3622 name_suffix = 'F' 3623 3624 content = \ 3625 """-1 12 ! points, iterations 3626 0.03 ! desired fractional accuracy 3627 1 -0.1 ! alpha, beta for Gsoft 3628 -1 -0.1 ! alpha, beta for Gazi 3629 1 ! Suppress amplitude (0 no, 1 yes)? 3630 1 ! Exact helicity sum (0 yes, n = number/event)? 3631 1 ! Enter Configuration Number: 3632 %1d ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events 3633 1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij 3634 %s ! all, born, real, virt 3635 """ \ 3636 % (mint_mode, run_mode) 3637 file = open(pjoin(path, 'madinMMC_%s.2' % name_suffix), 'w') 3638 file.write(content) 3639 file.close()
3640
3641 - def write_madin_file(self, path, run_mode, vegas_mode, npoints, niters, accuracy='0'):
3642 """writes the madin.run_mode file""" 3643 #check the validity of the arguments 3644 run_modes = ['born', 'virt', 'novi', 'all', 'viSB', 'novB', 'grid'] 3645 if run_mode not in run_modes: 3646 raise aMCatNLOError('%s is not a valid mode for run. Please use one of the following: %s' \ 3647 % (run_mode, ', '.join(run_modes))) 3648 name_suffix = run_mode 3649 3650 content = \ 3651 """%s %s ! points, iterations 3652 %s ! accuracy 3653 2 ! 0 fixed grid 2 adjust 3654 1 ! 1 suppress amp, 0 doesnt 3655 1 ! 0 for exact hel sum 3656 1 ! hel configuration numb 3657 'test' 3658 1 ! 1 to save grids 3659 %s ! 0 to exclude, 1 for new run, 2 to restart, 3 to reset w/ keeping grid 3660 %s ! all, born, real, virt 3661 """ \ 3662 % (npoints,niters,accuracy,vegas_mode,run_mode) 3663 file = open(pjoin(path, 'madin.%s' % name_suffix), 'w') 3664 file.write(content) 3665 file.close()
3666
3667 - def compile(self, mode, options):
3668 """compiles aMC@NLO to compute either NLO or NLO matched to shower, as 3669 specified in mode""" 3670 3671 os.mkdir(pjoin(self.me_dir, 'Events', self.run_name)) 3672 3673 self.banner.write(pjoin(self.me_dir, 'Events', self.run_name, 3674 '%s_%s_banner.txt' % (self.run_name, self.run_tag))) 3675 3676 self.get_characteristics(pjoin(self.me_dir, 3677 'SubProcesses', 'proc_characteristics')) 3678 3679 #define a bunch of log files 3680 amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log') 3681 madloop_log = pjoin(self.me_dir, 'compile_madloop.log') 3682 reweight_log = pjoin(self.me_dir, 'compile_reweight.log') 3683 test_log = pjoin(self.me_dir, 'test.log') 3684 3685 self.update_status('Compiling the code', level=None, update_results=True) 3686 3687 3688 libdir = pjoin(self.me_dir, 'lib') 3689 sourcedir = pjoin(self.me_dir, 'Source') 3690 3691 #clean files 3692 files.rm([amcatnlo_log, madloop_log, reweight_log, test_log]) 3693 #define which executable/tests to compile 3694 if '+' in mode: 3695 mode = mode.split('+')[0] 3696 if mode in ['NLO', 'LO']: 3697 exe = 'madevent_mintFO' 3698 tests = ['test_ME'] 3699 self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts')) 3700 elif mode in ['aMC@NLO', 'aMC@LO','noshower','noshowerLO']: 3701 exe = 'madevent_mintMC' 3702 tests = ['test_ME', 'test_MC'] 3703 # write an analyse_opts with a dummy analysis so that compilation goes through 3704 open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'),'w').write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n') 3705 3706 #directory where to compile exe 3707 p_dirs = [d for d in \ 3708 open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d] 3709 # create param_card.inc and run_card.inc 3710 self.do_treatcards('', amcatnlo=True) 3711 # if --nocompile option is specified, check here that all exes exists. 
3712 # If they exists, return 3713 if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \ 3714 for p_dir in p_dirs]) and options['nocompile']: 3715 return 3716 3717 # rm links to lhapdflib/ PDFsets if exist 3718 if os.path.exists(pjoin(libdir, 'PDFsets')): 3719 files.rm(pjoin(libdir, 'PDFsets')) 3720 3721 # read the run_card to find if lhapdf is used or not 3722 if self.run_card['pdlabel'] == 'lhapdf' and \ 3723 (self.banner.get_detail('run_card', 'lpp1') != 0 or \ 3724 self.banner.get_detail('run_card', 'lpp2') != 0): 3725 3726 self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs]) 3727 pdfsetsdir = self.get_lhapdf_pdfsetsdir() 3728 lhaid_list = [int(self.run_card['lhaid'])] 3729 if self.run_card['reweight_PDF']: 3730 lhaid_list.append(int(self.run_card['PDF_set_min'])) 3731 lhaid_list.append(int(self.run_card['PDF_set_max'])) 3732 self.copy_lhapdf_set(lhaid_list, pdfsetsdir) 3733 3734 else: 3735 if self.run_card['lpp1'] == 1 == self.run_card['lpp2']: 3736 logger.info('Using built-in libraries for PDFs') 3737 if self.run_card['lpp1'] == 0 == self.run_card['lpp2']: 3738 logger.info('Lepton-Lepton collision: Ignoring \'pdlabel\' and \'lhaid\' in the run_card.') 3739 try: 3740 del os.environ['lhapdf'] 3741 except KeyError: 3742 pass 3743 3744 # read the run_card to find if applgrid is used or not 3745 if self.run_card['iappl'] != 0: 3746 os.environ['applgrid'] = 'True' 3747 # check versions of applgrid and amcfast 3748 for code in ['applgrid','amcfast']: 3749 try: 3750 p = subprocess.Popen([self.options[code], '--version'], \ 3751 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 3752 except OSError: 3753 raise aMCatNLOError(('No valid %s installation found. 
\n' + \ 3754 'Please set the path to %s-config by using \n' + \ 3755 'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code,code,code,code)) 3756 else: 3757 output, _ = p.communicate() 3758 if code is 'applgrid' and output < '1.4.63': 3759 raise aMCatNLOError('Version of APPLgrid is too old. Use 1.4.69 or later.'\ 3760 +' You are using %s',output) 3761 if code is 'amcfast' and output < '1.1.1': 3762 raise aMCatNLOError('Version of aMCfast is too old. Use 1.1.1 or later.'\ 3763 +' You are using %s',output) 3764 3765 # set-up the Source/make_opts with the correct applgrid-config file 3766 appllibs=" APPLLIBS=$(shell %s --ldflags) $(shell %s --ldcflags) \n" \ 3767 % (self.options['amcfast'],self.options['applgrid']) 3768 text=open(pjoin(self.me_dir,'Source','make_opts'),'r').readlines() 3769 text_out=[] 3770 for line in text: 3771 if line.strip().startswith('APPLLIBS=$'): 3772 line=appllibs 3773 text_out.append(line) 3774 open(pjoin(self.me_dir,'Source','make_opts'),'w').writelines(text_out) 3775 else: 3776 try: 3777 del os.environ['applgrid'] 3778 except KeyError: 3779 pass 3780 3781 try: 3782 os.environ['fastjet_config'] = self.options['fastjet'] 3783 except (TypeError, KeyError): 3784 if 'fastjet_config' in os.environ: 3785 del os.environ['fastjet_config'] 3786 os.unsetenv('fastjet_config') 3787 3788 # make Source 3789 self.update_status('Compiling source...', level=None) 3790 misc.compile(['clean4pdf'], cwd = sourcedir) 3791 misc.compile(cwd = sourcedir) 3792 if os.path.exists(pjoin(libdir, 'libdhelas.a')) \ 3793 and os.path.exists(pjoin(libdir, 'libgeneric.a')) \ 3794 and os.path.exists(pjoin(libdir, 'libmodel.a')) \ 3795 and os.path.exists(pjoin(libdir, 'libpdf.a')): 3796 logger.info(' ...done, continuing with P* directories') 3797 else: 3798 raise aMCatNLOError('Compilation failed') 3799 3800 # make StdHep (only necessary with MG option output_dependencies='internal') 3801 MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib') 3802 if not 
os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \ 3803 not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))): 3804 if os.path.exists(pjoin(sourcedir,'StdHEP')): 3805 logger.info('Compiling StdHEP (can take a couple of minutes) ...') 3806 misc.compile(['StdHEP'], cwd = sourcedir) 3807 logger.info(' ...done.') 3808 else: 3809 raise aMCatNLOError('Could not compile StdHEP because its'+\ 3810 ' source directory could not be found in the SOURCE folder.\n'+\ 3811 " Check the MG5_aMC option 'output_dependencies.'") 3812 3813 # make CutTools (only necessary with MG option output_dependencies='internal') 3814 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 3815 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 3816 if os.path.exists(pjoin(sourcedir,'CutTools')): 3817 logger.info('Compiling CutTools (can take a couple of minutes) ...') 3818 misc.compile(['CutTools'], cwd = sourcedir) 3819 logger.info(' ...done.') 3820 else: 3821 raise aMCatNLOError('Could not compile CutTools because its'+\ 3822 ' source directory could not be found in the SOURCE folder.\n'+\ 3823 " Check the MG5_aMC option 'output_dependencies.'") 3824 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \ 3825 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))): 3826 raise aMCatNLOError('CutTools compilation failed.') 3827 3828 # Verify compatibility between current compiler and the one which was 3829 # used when last compiling CutTools (if specified). 
3830 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 3831 libdir, 'libcts.a')))),'compiler_version.log') 3832 if os.path.exists(compiler_log_path): 3833 compiler_version_used = open(compiler_log_path,'r').read() 3834 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 3835 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 3836 if os.path.exists(pjoin(sourcedir,'CutTools')): 3837 logger.info('CutTools was compiled with a different fortran'+\ 3838 ' compiler. Re-compiling it now...') 3839 misc.compile(['cleanCT'], cwd = sourcedir) 3840 misc.compile(['CutTools'], cwd = sourcedir) 3841 logger.info(' ...done.') 3842 else: 3843 raise aMCatNLOError("CutTools installation in %s"\ 3844 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\ 3845 " seems to have been compiled with a different compiler than"+\ 3846 " the one specified in MG5_aMC. Please recompile CutTools.") 3847 3848 # make IREGI (only necessary with MG option output_dependencies='internal') 3849 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \ 3850 and os.path.exists(pjoin(sourcedir,'IREGI')): 3851 logger.info('Compiling IREGI (can take a couple of minutes) ...') 3852 misc.compile(['IREGI'], cwd = sourcedir) 3853 logger.info(' ...done.') 3854 3855 if os.path.exists(pjoin(libdir, 'libiregi.a')): 3856 # Verify compatibility between current compiler and the one which was 3857 # used when last compiling IREGI (if specified). 3858 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin( 3859 libdir, 'libiregi.a')))),'compiler_version.log') 3860 if os.path.exists(compiler_log_path): 3861 compiler_version_used = open(compiler_log_path,'r').read() 3862 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\ 3863 pjoin(sourcedir,'make_opts')))) in compiler_version_used: 3864 if os.path.exists(pjoin(sourcedir,'IREGI')): 3865 logger.info('IREGI was compiled with a different fortran'+\ 3866 ' compiler. 
Re-compiling it now...') 3867 misc.compile(['cleanIR'], cwd = sourcedir) 3868 misc.compile(['IREGI'], cwd = sourcedir) 3869 logger.info(' ...done.') 3870 else: 3871 raise aMCatNLOError("IREGI installation in %s"\ 3872 %os.path.realpath(pjoin(libdir, 'libiregi.a'))+\ 3873 " seems to have been compiled with a different compiler than"+\ 3874 " the one specified in MG5_aMC. Please recompile IREGI.") 3875 3876 # check if MadLoop virtuals have been generated 3877 if self.proc_characteristics['has_loops'] and \ 3878 not os.path.exists(pjoin(self.me_dir,'OLP_virtuals')): 3879 os.environ['madloop'] = 'true' 3880 if mode in ['NLO', 'aMC@NLO', 'noshower']: 3881 tests.append('check_poles') 3882 else: 3883 os.unsetenv('madloop') 3884 3885 # make and run tests (if asked for), gensym and make madevent in each dir 3886 self.update_status('Compiling directories...', level=None) 3887 3888 for test in tests: 3889 self.write_test_input(test) 3890 3891 try: 3892 import multiprocessing 3893 if not self.nb_core: 3894 try: 3895 self.nb_core = int(self.options['nb_core']) 3896 except TypeError: 3897 self.nb_core = multiprocessing.cpu_count() 3898 except ImportError: 3899 self.nb_core = 1 3900 3901 compile_options = copy.copy(self.options) 3902 compile_options['nb_core'] = self.nb_core 3903 compile_cluster = cluster.MultiCore(**compile_options) 3904 logger.info('Compiling on %d cores' % self.nb_core) 3905 3906 update_status = lambda i, r, f: self.donothing(i,r,f) 3907 for p_dir in p_dirs: 3908 compile_cluster.submit(prog = compile_dir, 3909 argument = [self.me_dir, p_dir, mode, options, 3910 tests, exe, self.options['run_mode']]) 3911 try: 3912 compile_cluster.wait(self.me_dir, update_status) 3913 except Exception, error: 3914 logger.warning("Fail to compile the Subprocesses") 3915 if __debug__: 3916 raise 3917 compile_cluster.remove() 3918 self.do_quit('') 3919 3920 logger.info('Checking test output:') 3921 for p_dir in p_dirs: 3922 logger.info(p_dir) 3923 for test in tests: 3924 
logger.info(' Result for %s:' % test) 3925 3926 this_dir = pjoin(self.me_dir, 'SubProcesses', p_dir) 3927 #check that none of the tests failed 3928 self.check_tests(test, this_dir)
3929 3930
3931 - def donothing(*args):
3932 pass
3933 3934
3935 - def check_tests(self, test, dir):
3936 """just call the correct parser for the test log. 3937 Skip check_poles for LOonly folders""" 3938 if test in ['test_ME', 'test_MC']: 3939 return self.parse_test_mx_log(pjoin(dir, '%s.log' % test)) 3940 elif test == 'check_poles' and not os.path.exists(pjoin(dir,'parton_lum_0.f')): 3941 return self.parse_check_poles_log(pjoin(dir, '%s.log' % test))
3942 3943
3944 - def parse_test_mx_log(self, log):
3945 """read and parse the test_ME/MC.log file""" 3946 content = open(log).read() 3947 if 'FAILED' in content: 3948 logger.info('Output of the failing test:\n'+content[:-1],'$MG:color:BLACK') 3949 raise aMCatNLOError('Some tests failed, run cannot continue.\n' + \ 3950 'Please check that widths of final state particles (e.g. top) have been' + \ 3951 ' set to 0 in the param_card.dat.') 3952 else: 3953 lines = [l for l in content.split('\n') if 'PASSED' in l] 3954 logger.info(' Passed.') 3955 logger.debug('\n'+'\n'.join(lines))
3956 3957
3958 - def parse_check_poles_log(self, log):
3959 """reads and parse the check_poles.log file""" 3960 content = open(log).read() 3961 npass = 0 3962 nfail = 0 3963 for line in content.split('\n'): 3964 if 'PASSED' in line: 3965 npass +=1 3966 tolerance = float(line.split()[1]) 3967 if 'FAILED' in line: 3968 nfail +=1 3969 tolerance = float(line.split()[1]) 3970 3971 if nfail + npass == 0: 3972 logger.warning('0 points have been tried') 3973 return 3974 3975 if float(nfail)/float(nfail+npass) > 0.1: 3976 raise aMCatNLOError('Poles do not cancel, run cannot continue') 3977 else: 3978 logger.info(' Poles successfully cancel for %d points over %d (tolerance=%2.1e)' \ 3979 %(npass, nfail+npass, tolerance))
3980 3981
3982 - def write_test_input(self, test):
3983 """write the input files to run test_ME/MC or check_poles""" 3984 if test in ['test_ME', 'test_MC']: 3985 content = "-2 -2\n" #generate randomly energy/angle 3986 content+= "100 100\n" #run 100 points for soft and collinear tests 3987 content+= "0\n" #sum over helicities 3988 content+= "0\n" #all FKS configs 3989 content+= '\n'.join(["-1"] * 50) #random diagram 3990 elif test == 'check_poles': 3991 content = '20 \n -1\n' 3992 3993 file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w') 3994 if test == 'test_MC': 3995 shower = self.run_card['parton_shower'] 3996 MC_header = "%s\n " % shower + \ 3997 "1 \n1 -0.1\n-1 -0.1\n" 3998 file.write(MC_header + content) 3999 else: 4000 file.write(content) 4001 file.close()
4002 4003 4004 4005 ############################################################################
4006 - def find_model_name(self):
4007 """ return the model name """ 4008 if hasattr(self, 'model_name'): 4009 return self.model_name 4010 4011 model = 'sm' 4012 proc = [] 4013 for line in open(os.path.join(self.me_dir,'Cards','proc_card_mg5.dat')): 4014 line = line.split('#')[0] 4015 #line = line.split('=')[0] 4016 if line.startswith('import') and 'model' in line: 4017 model = line.split()[2] 4018 proc = [] 4019 elif line.startswith('generate'): 4020 proc.append(line.split(None,1)[1]) 4021 elif line.startswith('add process'): 4022 proc.append(line.split(None,2)[2]) 4023 4024 self.model = model 4025 self.process = proc 4026 return model
4027 4028 4029 4030 ############################################################################
4031 - def ask_run_configuration(self, mode, options, switch={}):
4032 """Ask the question when launching generate_events/multi_run""" 4033 4034 if 'parton' not in options: 4035 options['parton'] = False 4036 if 'reweightonly' not in options: 4037 options['reweightonly'] = False 4038 4039 4040 void = 'NOT INSTALLED' 4041 switch_order = ['order', 'fixed_order', 'shower','madspin'] 4042 switch_default = {'order': 'NLO', 'fixed_order': 'OFF', 'shower': void, 4043 'madspin': void} 4044 if not switch: 4045 switch = switch_default 4046 else: 4047 switch.update(dict((k,value) for k,v in switch_default.items() if k not in switch)) 4048 4049 default_switch = ['ON', 'OFF'] 4050 allowed_switch_value = {'order': ['LO', 'NLO'], 4051 'fixed_order': default_switch, 4052 'shower': default_switch, 4053 'madspin': default_switch} 4054 4055 description = {'order': 'Perturbative order of the calculation:', 4056 'fixed_order': 'Fixed order (no event generation and no MC@[N]LO matching):', 4057 'shower': 'Shower the generated events:', 4058 'madspin': 'Decay particles with the MadSpin module:' } 4059 4060 force_switch = {('shower', 'ON'): {'fixed_order': 'OFF'}, 4061 ('madspin', 'ON'): {'fixed_order':'OFF'}, 4062 ('fixed_order', 'ON'): {'shower': 'OFF', 'madspin': 'OFF'} 4063 } 4064 special_values = ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] 4065 4066 assign_switch = lambda key, value: switch.__setitem__(key, value if switch[key] != void else void ) 4067 4068 4069 if mode == 'auto': 4070 mode = None 4071 if not mode and (options['parton'] or options['reweightonly']): 4072 mode = 'noshower' 4073 4074 # Init the switch value according to the current status 4075 available_mode = ['0', '1', '2'] 4076 available_mode.append('3') 4077 if os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')): 4078 switch['shower'] = 'ON' 4079 else: 4080 switch['shower'] = 'OFF' 4081 4082 if not aMCatNLO or self.options['mg5_path']: 4083 available_mode.append('4') 4084 if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): 4085 
switch['madspin'] = 'ON' 4086 else: 4087 switch['madspin'] = 'OFF' 4088 4089 answers = list(available_mode) + ['auto', 'done'] 4090 alias = {} 4091 for id, key in enumerate(switch_order): 4092 if switch[key] != void: 4093 answers += ['%s=%s' % (key, s) for s in allowed_switch_value[key]] 4094 #allow lower case for on/off 4095 alias.update(dict(('%s=%s' % (key, s.lower()), '%s=%s' % (key, s)) 4096 for s in allowed_switch_value[key])) 4097 answers += special_values 4098 4099 def create_question(switch): 4100 switch_format = " %i %-60s %12s=%s\n" 4101 question = "The following switches determine which operations are executed:\n" 4102 for id, key in enumerate(switch_order): 4103 question += switch_format % (id+1, description[key], key, switch[key]) 4104 question += ' Either type the switch number (1 to %s) to change its default setting,\n' % (id+1) 4105 question += ' or set any switch explicitly (e.g. type \'order=LO\' at the prompt)\n' 4106 question += ' Type \'0\', \'auto\', \'done\' or just press enter when you are done.\n' 4107 return question
4108 4109 4110 def modify_switch(mode, answer, switch): 4111 if '=' in answer: 4112 key, status = answer.split('=') 4113 switch[key] = status 4114 if (key, status) in force_switch: 4115 for key2, status2 in force_switch[(key, status)].items(): 4116 if switch[key2] not in [status2, void]: 4117 logger.info('For coherence \'%s\' is set to \'%s\'' 4118 % (key2, status2), '$MG:color:BLACK') 4119 switch[key2] = status2 4120 elif answer in ['0', 'auto', 'done']: 4121 return 4122 elif answer in special_values: 4123 logger.info('Enter mode value: Go to the related mode', '$MG:color:BLACK') 4124 if answer == 'LO': 4125 switch['order'] = 'LO' 4126 switch['fixed_order'] = 'ON' 4127 assign_switch('shower', 'OFF') 4128 assign_switch('madspin', 'OFF') 4129 elif answer == 'NLO': 4130 switch['order'] = 'NLO' 4131 switch['fixed_order'] = 'ON' 4132 assign_switch('shower', 'OFF') 4133 assign_switch('madspin', 'OFF') 4134 elif answer == 'aMC@NLO': 4135 switch['order'] = 'NLO' 4136 switch['fixed_order'] = 'OFF' 4137 assign_switch('shower', 'ON') 4138 assign_switch('madspin', 'OFF') 4139 elif answer == 'aMC@LO': 4140 switch['order'] = 'LO' 4141 switch['fixed_order'] = 'OFF' 4142 assign_switch('shower', 'ON') 4143 assign_switch('madspin', 'OFF') 4144 elif answer == 'noshower': 4145 switch['order'] = 'NLO' 4146 switch['fixed_order'] = 'OFF' 4147 assign_switch('shower', 'OFF') 4148 assign_switch('madspin', 'OFF') 4149 elif answer == 'noshowerLO': 4150 switch['order'] = 'LO' 4151 switch['fixed_order'] = 'OFF' 4152 assign_switch('shower', 'OFF') 4153 assign_switch('madspin', 'OFF') 4154 if mode: 4155 return 4156 return switch 4157 4158 4159 modify_switch(mode, self.last_mode, switch) 4160 if switch['madspin'] == 'OFF' and os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')): 4161 assign_switch('madspin', 'ON') 4162 4163 if not self.force: 4164 answer = '' 4165 while answer not in ['0', 'done', 'auto', 'onlyshower']: 4166 question = create_question(switch) 4167 if mode: 4168 answer = 
mode 4169 else: 4170 answer = self.ask(question, '0', answers, alias=alias) 4171 if answer.isdigit() and answer != '0': 4172 key = switch_order[int(answer) - 1] 4173 opt1 = allowed_switch_value[key][0] 4174 opt2 = allowed_switch_value[key][1] 4175 answer = '%s=%s' % (key, opt1 if switch[key] == opt2 else opt2) 4176 4177 if not modify_switch(mode, answer, switch): 4178 break 4179 4180 #assign the mode depending of the switch 4181 if not mode or mode == 'auto': 4182 if switch['order'] == 'LO': 4183 if switch['shower'] == 'ON': 4184 mode = 'aMC@LO' 4185 elif switch['fixed_order'] == 'ON': 4186 mode = 'LO' 4187 else: 4188 mode = 'noshowerLO' 4189 elif switch['order'] == 'NLO': 4190 if switch['shower'] == 'ON': 4191 mode = 'aMC@NLO' 4192 elif switch['fixed_order'] == 'ON': 4193 mode = 'NLO' 4194 else: 4195 mode = 'noshower' 4196 logger.info('will run in mode: %s' % mode) 4197 4198 if mode == 'noshower': 4199 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. 4200 Please, shower the Les Houches events before using them for physics analyses.""") 4201 4202 4203 # specify the cards which are needed for this run. 
4204 cards = ['param_card.dat', 'run_card.dat'] 4205 ignore = [] 4206 if mode in ['LO', 'NLO']: 4207 options['parton'] = True 4208 ignore = ['shower_card.dat', 'madspin_card.dat'] 4209 cards.append('FO_analyse_card.dat') 4210 elif switch['madspin'] == 'ON': 4211 cards.append('madspin_card.dat') 4212 if 'aMC@' in mode: 4213 cards.append('shower_card.dat') 4214 if mode == 'onlyshower': 4215 cards = ['shower_card.dat'] 4216 if options['reweightonly']: 4217 cards = ['run_card.dat'] 4218 4219 self.keep_cards(cards, ignore) 4220 4221 if mode =='onlyshower': 4222 cards = ['shower_card.dat'] 4223 4224 if not options['force'] and not self.force: 4225 self.ask_edit_cards(cards, plot=False) 4226 4227 self.banner = banner_mod.Banner() 4228 4229 # store the cards in the banner 4230 for card in cards: 4231 self.banner.add(pjoin(self.me_dir, 'Cards', card)) 4232 # and the run settings 4233 run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()]) 4234 self.banner.add_text('run_settings', run_settings) 4235 4236 if not mode =='onlyshower': 4237 self.run_card = self.banner.charge_card('run_card') 4238 self.run_tag = self.run_card['run_tag'] 4239 #this is if the user did not provide a name for the current run 4240 if not hasattr(self, 'run_name') or not self.run_name: 4241 self.run_name = self.find_available_run_name(self.me_dir) 4242 #add a tag in the run_name for distinguish run_type 4243 if self.run_name.startswith('run_'): 4244 if mode in ['LO','aMC@LO','noshowerLO']: 4245 self.run_name += '_LO' 4246 self.set_run_name(self.run_name, self.run_tag, 'parton') 4247 if int(self.run_card['ickkw']) == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']: 4248 raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""") 4249 elif int(self.run_card['ickkw']) == 3 and mode in ['aMC@NLO', 'noshower']: 4250 logger.warning("""You are running with FxFx merging enabled. 
To be able to merge 4251 samples of various multiplicities without double counting, you 4252 have to remove some events after showering 'by hand'. Please 4253 read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""") 4254 if self.run_card['parton_shower'].upper() == 'PYTHIA6Q': 4255 raise self.InvalidCmd("""FxFx merging does not work with Q-squared ordered showers.""") 4256 elif self.run_card['parton_shower'].upper() != 'HERWIG6' and self.run_card['parton_shower'].upper() != 'PYTHIA8': 4257 question="FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \ 4258 "Type \'n\' to stop or \'y\' to continue" 4259 answers = ['n','y'] 4260 answer = self.ask(question, 'n', answers, alias=alias) 4261 if answer == 'n': 4262 error = '''Stop opertation''' 4263 self.ask_run_configuration(mode, options) 4264 # raise aMCatNLOError(error) 4265 elif int(self.run_card['ickkw']) == -1 and mode in ['aMC@NLO', 'noshower']: 4266 # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs. 4267 raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""") 4268 if 'aMC@' in mode or mode == 'onlyshower': 4269 self.shower_card = self.banner.charge_card('shower_card') 4270 4271 elif mode in ['LO', 'NLO']: 4272 analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat') 4273 self.analyse_card = self.banner.charge_card('FO_analyse_card') 4274 4275 4276 return mode 4277 4278 4279 #=============================================================================== 4280 # aMCatNLOCmd 4281 #===============================================================================
class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
    """The command line processor of MadGraph.

    Combines the aMC@NLO run commands (aMCatNLOCmd) with interactive-shell
    behaviour (cmd.CmdShell); presumably this is the class instantiated when
    the interface is used as a standalone prompt -- confirm against callers.
    """
# ------------------------------------------------------------------
# Usage strings and option parsers for the main interface commands.
# They are defined at module level and shared by the do_* commands.
# ------------------------------------------------------------------

# 'compile' command: build the FO or MC executables without running
_compile_usage = "compile [MODE] [options]\n" + \
                "-- compiles aMC@NLO \n" + \
                "   MODE can be either FO, for fixed-order computations, \n" + \
                "   or MC for matching with parton-shower monte-carlos. \n" + \
                "   (if omitted, it is set to MC)\n"
_compile_parser = misc.OptionParser(usage=_compile_usage)
_compile_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")

# 'launch' command: full cross-section computation and/or event generation
_launch_usage = "launch [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                "     If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                "     computation of the total cross-section and the filling of parton-level histograms \n" + \
                "     specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                "     If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                "     event file is generated which will be showered with the MonteCarlo specified \n" + \
                "     in the run_card.dat\n"

_launch_parser = misc.OptionParser(usage=_launch_usage)
_launch_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")
_launch_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_launch_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_launch_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                            help="Skip integration and event generation, just run reweight on the" + \
                                 " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_launch_parser.add_option("-p", "--parton", default=False, action='store_true',
                            help="Stop the run after the parton level file generation (you need " + \
                                 "to shower the file in order to get physical results)")
_launch_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")
_launch_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")
_launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                            help="For use with APPLgrid only: start from existing grids")

# 'generate_events' command: same options as 'launch' minus the APPLgrid ones
_generate_events_usage = "generate_events [MODE] [options]\n" + \
                "-- execute aMC@NLO \n" + \
                "   MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
                "     If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
                "     computation of the total cross-section and the filling of parton-level histograms \n" + \
                "     specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
                "     If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
                "     event file is generated which will be showered with the MonteCarlo specified \n" + \
                "     in the run_card.dat\n"

_generate_events_parser = misc.OptionParser(usage=_generate_events_usage)
_generate_events_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the generate_events, without editing them")
_generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                            help="Skip integration and event generation, just run reweight on the" + \
                                 " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_generate_events_parser.add_option("-p", "--parton", default=False, action='store_true',
                            help="Stop the run after the parton level file generation (you need " + \
                                 "to shower the file in order to get physical results)")
_generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")
_generate_events_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")

# 'calculate_xsect' command: fixed-order cross-section only, no events
_calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \
                "-- calculate cross-section up to ORDER.\n" + \
                "   ORDER can be either LO or NLO (if omitted, it is set to NLO). \n"

_calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage)
_calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the card present in the directory for the launch, without editing them")
_calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true',
                            help="Submit the jobs on the cluster")
_calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true',
                            help="Submit the jobs on multicore mode")
_calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                            help="Skip compilation. Ignored if no executable is found")
_calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name',
                            help="Provide a name to the run")
_calculate_xsect_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                            help="For use with APPLgrid only: start from existing grids")
_calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                            help="Skip grid set up, just generate events starting from " + \
                                 "the last available results")

# 'shower' command: shower/hadronize an already-generated parton-level file
_shower_usage = 'shower run_name [options]\n' + \
                '-- do shower/hadronization on parton-level file generated for run run_name\n' + \
                '   all the information (e.g. number of events, MonteCarlo, ...\n' + \
                '   are directly read from the header of the event file\n'
_shower_parser = misc.OptionParser(usage=_shower_usage)
_shower_parser.add_option("-f", "--force", default=False, action='store_true',
                            help="Use the shower_card present in the directory for the launch, without editing")