Package madgraph :: Package interface :: Module amcatnlo_run_interface
[hide private]
[frames] | [no frames]

Source Code for Module madgraph.interface.amcatnlo_run_interface

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2011 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """A user friendly command line interface to access MadGraph5_aMC@NLO features. 
  16     Uses the cmd package for command interpretation and tab completion. 
  17  """ 
  18  from __future__ import division 
  19   
  20  import atexit 
  21  import glob 
  22  import logging 
  23  import math 
  24  import optparse 
  25  import os 
  26  import pydoc 
  27  import random 
  28  import re 
  29  import shutil 
  30  import subprocess 
  31  import sys 
  32  import traceback 
  33  import time 
  34  import signal 
  35  import tarfile 
  36  import copy 
  37  import datetime 
  38  import tarfile 
  39   
  40  try: 
  41      import readline 
  42      GNU_SPLITTING = ('GNU' in readline.__doc__) 
  43  except: 
  44      GNU_SPLITTING = True 
  45   
  46  root_path = os.path.split(os.path.dirname(os.path.realpath( __file__ )))[0] 
  47  root_path = os.path.split(root_path)[0] 
  48  sys.path.insert(0, os.path.join(root_path,'bin')) 
  49   
  50  # usefull shortcut 
  51  pjoin = os.path.join 
  52  # Special logger for the Cmd Interface 
  53  logger = logging.getLogger('madgraph.stdout') # -> stdout 
  54  logger_stderr = logging.getLogger('madgraph.stderr') # ->stderr 
  55    
  56  try: 
  57      import madgraph 
  58  except ImportError:  
  59      aMCatNLO = True  
  60      import internal.extended_cmd as cmd 
  61      import internal.common_run_interface as common_run 
  62      import internal.banner as banner_mod 
  63      import internal.misc as misc     
  64      from internal import InvalidCmd, MadGraph5Error 
  65      import internal.files as files 
  66      import internal.cluster as cluster 
  67      import internal.save_load_object as save_load_object 
  68      import internal.gen_crossxhtml as gen_crossxhtml 
  69      import internal.sum_html as sum_html 
  70      import internal.shower_card as shower_card 
  71      import internal.FO_analyse_card as analyse_card  
  72      import internal.histograms as histograms 
  73  else: 
  74      # import from madgraph directory 
  75      aMCatNLO = False 
  76      import madgraph.interface.extended_cmd as cmd 
  77      import madgraph.interface.common_run_interface as common_run 
  78      import madgraph.iolibs.files as files 
  79      import madgraph.iolibs.save_load_object as save_load_object 
  80      import madgraph.madevent.gen_crossxhtml as gen_crossxhtml 
  81      import madgraph.madevent.sum_html as sum_html 
  82      import madgraph.various.banner as banner_mod 
  83      import madgraph.various.cluster as cluster 
  84      import madgraph.various.misc as misc 
  85      import madgraph.various.shower_card as shower_card 
  86      import madgraph.various.FO_analyse_card as analyse_card 
  87      import madgraph.various.histograms as histograms 
  88      from madgraph import InvalidCmd, aMCatNLOError, MadGraph5Error 
  89   
class aMCatNLOError(Exception):
    """Generic error of the aMC@NLO run interface.

    NOTE(review): when running from inside the full madgraph package, the
    name imported above via `from madgraph import ... aMCatNLOError` is
    shadowed by this module-level definition."""
    pass
def compile_dir(*arguments):
    """Compile the directory p_dir.

    arguments is the tuple (me_dir, p_dir, mode, options, tests, exe, run_mode),
    accepted either packed as one tuple argument or as seven positional
    arguments.  This needs to be a module-level function (not a class method)
    so that it can be dispatched to worker processes for multicore compilation.

    Returns 0 on success, or the error message if a MadGraph5Error occurred.
    """
    if len(arguments) == 1:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments[0]
    elif len(arguments) == 7:
        (me_dir, p_dir, mode, options, tests, exe, run_mode) = arguments
    else:
        # was: raise aMCatNLOError, '...' (py2-only syntax)
        raise aMCatNLOError('not correct number of argument')
    logger.info('  Compiling %s...' % p_dir)

    this_dir = pjoin(me_dir, 'SubProcesses', p_dir)

    try:
        # compile and run the requested test executables
        for test in tests:
            # skip check_poles for LOonly dirs
            if test == 'check_poles' and os.path.exists(pjoin(this_dir, 'parton_lum_0.f')):
                continue
            misc.compile([test], cwd=this_dir, job_specs=False)
            # 'input_file' was named 'input', shadowing the builtin
            input_file = pjoin(me_dir, '%s_input.txt' % test)
            # this can be improved/better written to handle the output;
            # context managers ensure the handles are closed
            with open(input_file) as test_stdin:
                with open(pjoin(this_dir, '%s.log' % test), 'w') as test_stdout:
                    misc.call(['./%s' % (test)], cwd=this_dir,
                              stdin=test_stdin, stdout=test_stdout)

        if not options['reweightonly']:
            misc.compile(['gensym'], cwd=this_dir, job_specs=False)
            with open(pjoin(this_dir, 'gensym_input.txt'), 'w') as gensym_input:
                gensym_input.write('%s\n' % run_mode)
            with open(pjoin(this_dir, 'gensym_input.txt')) as gensym_stdin:
                with open(pjoin(this_dir, 'gensym.log'), 'w') as gensym_stdout:
                    misc.call(['./gensym'], cwd=this_dir,
                              stdin=gensym_stdin, stdout=gensym_stdout)
            # compile madevent_mintMC/mintFO
            misc.compile([exe], cwd=this_dir, job_specs=False)
            if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
                misc.compile(['reweight_xsec_events'], cwd=this_dir, job_specs=False)

        logger.info('    %s done.' % p_dir)
        return 0
    except MadGraph5Error as msg:
        # return (not raise) the message so a multicore pool can collect it
        return msg
def check_compiler(options, block=False):
    """Check that the current fortran compiler is gfortran 4.6 or later.

    If block is True, raise aMCatNLOError (stopping the execution);
    otherwise just print a warning.
    """
    msg = 'In order to be able to run at NLO MadGraph5_aMC@NLO, you need to have ' + \
          'gfortran 4.6 or later installed.\n%s has been detected\n'+\
          'Note that You can still run all MadEvent run without any problem!'
    #first check that gfortran is installed
    if options['fortran_compiler']:
        compiler = options['fortran_compiler']
    elif misc.which('gfortran'):
        compiler = 'gfortran'
    else:
        compiler = ''

    if 'gfortran' not in compiler:
        if block:
            raise aMCatNLOError(msg % compiler)
        else:
            logger.warning(msg % compiler)
    else:
        curr_version = misc.get_gfortran_version(compiler)
        # Compare numerically.  The previous check compared the digit string
        # ''.join(version.split('.')) against '46' lexicographically, which
        # wrongly rejects e.g. gfortran 10.2 ('102' < '46' as strings).
        try:
            version_tuple = tuple(int(v) for v in curr_version.split('.')[:2])
        except ValueError:
            # unparsable version string: treat as too old, as before
            version_tuple = (0,)
        if version_tuple < (4, 6):
            if block:
                raise aMCatNLOError(msg % (compiler + ' ' + curr_version))
            else:
                logger.warning(msg % (compiler + ' ' + curr_version))


#===============================================================================
# CmdExtended
#===============================================================================
class CmdExtended(common_run.CommonRunCmd):
    """Particularisation of the cmd command for aMCatNLO"""

    #suggested list of command
    next_possibility = {
        'start': [],
    }

    # file name used for the crash report
    debug_output = 'ME5_debug'
    error_debug = 'Please report this bug on https://bugs.launchpad.net/madgraph5\n'
    error_debug += 'More information is found in \'%(debug)s\'.\n'
    error_debug += 'Please attach this file to your report.'

    config_debug = 'If you need help with this issue please contact us on https://answers.launchpad.net/madgraph5\n'

    keyboard_stop_msg = """stopping all operation
            in order to quit MadGraph5_aMC@NLO please enter exit"""

    # Define the Error
    InvalidCmd = InvalidCmd
    ConfigurationError = aMCatNLOError

    def __init__(self, me_dir, options, *arg, **opt):
        """Init history and line continuation"""

        # Tag allowing/forbiding question
        self.force = False

        # If possible, build an info line with current version number
        # and date, from the VERSION text file
        info = misc.get_pkg_info()
        info_line = ""
        if info and info.has_key('version') and info.has_key('date'):
            len_version = len(info['version'])
            len_date = len(info['date'])
            if len_version + len_date < 30:
                # pad so the banner column alignment is preserved
                info_line = "#*         VERSION %s %s %s         *\n" % \
                            (info['version'],
                            (30 - len_version - len_date) * ' ',
                            info['date'])
        else:
            # no packaged metadata: fall back to the MGMEVersion.txt file
            version = open(pjoin(root_path,'MGMEVersion.txt')).readline().strip()
            info_line = "#*         VERSION %s %s                *\n" % \
                            (version, (24 - len(version)) * ' ')

        # Create a header for the history file.
        # Remember to fill in time at writeout time!
        self.history_header = \
        '#************************************************************\n' + \
        '#*                    MadGraph5_aMC@NLO                     *\n' + \
        '#*                                                          *\n' + \
        "#*                *                       *                 *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                    * * * * 5 * * * *                     *\n" + \
        "#*                  *        * *        *                   *\n" + \
        "#*                *                       *                 *\n" + \
        "#*                                                          *\n" + \
        "#*                                                          *\n" + \
        info_line + \
        "#*                                                          *\n" + \
        "#*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "#*    https://server06.fynu.ucl.ac.be/projects/madgraph     *\n" + \
        "#*                            and                           *\n" + \
        "#*                 http://amcatnlo.cern.ch                  *\n" + \
        '#*                                                          *\n' + \
        '#************************************************************\n' + \
        '#*                                                          *\n' + \
        '#*               Command File for aMCatNLO                  *\n' + \
        '#*                                                          *\n' + \
        '#*     run as ./bin/aMCatNLO.py filename                    *\n' + \
        '#*                                                          *\n' + \
        '#************************************************************\n'

        if info_line:
            # drop the leading '#' for the on-screen welcome banner below
            info_line = info_line[1:]

        logger.info(\
        "************************************************************\n" + \
        "*                                                          *\n" + \
        "*           W E L C O M E  to  M A D G R A P H 5           *\n" + \
        "*                       a M C @ N L O                      *\n" + \
        "*                                                          *\n" + \
        "*                 *                       *                *\n" + \
        "*                   *        * *        *                  *\n" + \
        "*                     * * * * 5 * * * *                    *\n" + \
        "*                   *        * *        *                  *\n" + \
        "*                 *                       *                *\n" + \
        "*                                                          *\n" + \
        info_line + \
        "*                                                          *\n" + \
        "*    The MadGraph5_aMC@NLO Development Team - Find us at   *\n" + \
        "*                 http://amcatnlo.cern.ch                  *\n" + \
        "*                                                          *\n" + \
        "*               Type 'help' for in-line help.              *\n" + \
        "*                                                          *\n" + \
        "************************************************************")
        super(CmdExtended, self).__init__(me_dir, options, *arg, **opt)

    def get_history_header(self):
        """return the history header"""
        # the header contains a time placeholder filled in at writeout time
        return self.history_header % misc.get_time_info()

    def stop_on_keyboard_stop(self):
        """action to perform to close nicely on a keyboard interupt"""
        try:
            if hasattr(self, 'cluster'):
                logger.info('rm jobs on queue')
                self.cluster.remove()
            if hasattr(self, 'results'):
                self.update_status('Stop by the user', level=None, makehtml=True, error=True)
                self.add_error_log_in_html(KeyboardInterrupt)
        except:
            # best-effort cleanup during interrupt: swallow any failure
            pass

    def postcmd(self, stop, line):
        """ Update the status of the run for finishing interactive command """

        # relaxing the tag forbidding question
        self.force = False

        if not self.use_rawinput:
            return stop

        arg = line.split()
        if len(arg) == 0:
            return stop
        elif str(arg[0]) in ['exit','quit','EOF']:
            return stop

        try:
            self.update_status('Command \'%s\' done.<br> Waiting for instruction.' % arg[0],
                               level=None, error=True)
        except Exception:
            # status update is cosmetic only; never let it kill the session
            misc.sprint('self.update_status fails', log=logger)
            pass

    def nice_user_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_user_error(self, error, line)

    def nice_config_error(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_config_error(self, error, line)

    def nice_error_handling(self, error, line):
        """If a ME run is currently running add a link in the html output"""

        self.add_error_log_in_html()
        cmd.Cmd.nice_error_handling(self, error, line)



#===============================================================================
# HelpToCmd
#===============================================================================
class HelpToCmd(object):
    """Collection of the in-line help messages for aMCatNLOCmd."""

    def help_launch(self):
        """Display the option-parser help of the launch command."""
        _launch_parser.print_help()

    def help_compile(self):
        """Display the option-parser help of the compile command."""
        _compile_parser.print_help()

    def help_calculate_xsect(self):
        """Display the option-parser help of the calculate_xsect command."""
        _calculate_xsect_parser.print_help()

    def help_generate_events(self):
        """Display the option-parser help of generate_events (mirrors launch)."""
        _generate_events_parser.print_help()

    def help_shower(self):
        """Display the option-parser help of the shower command."""
        _shower_parser.print_help()

    def help_banner_run(self):
        """Describe the banner_run command and its options."""
        usage_lines = (
            "syntax: banner_run Path|RUN [--run_options]",
            "-- Reproduce a run following a given banner",
            "   One of the following argument is require:",
            "   Path should be the path of a valid banner.",
            "   RUN should be the name of a run of the current directory",
        )
        for usage_line in usage_lines:
            logger.info(usage_line)
        self.run_options_help([('-f','answer all question by default'),
                               ('--name=X', 'Define the name associated with the new run')])

    def help_open(self):
        """Describe the open command."""
        usage_lines = (
            "syntax: open FILE  ",
            "-- open a file with the appropriate editor.",
            '   If FILE belongs to index.html, param_card.dat, run_card.dat',
            '   the path to the last created/used directory is used',
        )
        for usage_line in usage_lines:
            logger.info(usage_line)

    def run_options_help(self, data):
        """Print the command-specific options given in *data*, then the
        session-wide run options shared by every run command."""
        if data:
            logger.info('-- local options:')
            for name, info in data:
                logger.info('      %s : %s' % (name, info))

        logger.info("-- session options:")
        logger.info("      Note that those options will be kept for the current session")
        logger.info("      --cluster : Submit to the cluster. Current cluster: %s" % self.options['cluster_type'])
        logger.info("      --multicore : Run in multi-core configuration")
        logger.info("      --nb_core=X : limit the number of core to use to X.")
388 389 390 391 392 #=============================================================================== 393 # CheckValidForCmd 394 #===============================================================================
class CheckValidForCmd(object):
    """ The Series of check routine for the aMCatNLOCmd"""

    def check_shower(self, args, options):
        """Check the validity of the line. args[0] is the run_directory.

        On success args[0] is replaced by the absolute path of the run
        directory inside Events/."""

        if options['force']:
            self.force = True

        if len(args) == 0:
            self.help_shower()
            # py2-only 'raise X, msg' statements converted to call form
            raise self.InvalidCmd('Invalid syntax, please specify the run name')
        if not os.path.isdir(pjoin(self.me_dir, 'Events', args[0])):
            raise self.InvalidCmd('Directory %s does not exists' % \
                            pjoin(os.getcwd(), 'Events', args[0]))

        self.set_run_name(args[0], level= 'shower')
        args[0] = pjoin(self.me_dir, 'Events', args[0])

    def check_plot(self, args):
        """Check the argument for the plot command
        plot run_name modes"""

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir or not td:
            logger.info('Retry to read configuration file to find madanalysis/td')
            self.set_configuration()

        madir = self.options['madanalysis_path']
        td = self.options['td_path']

        if not madir:
            error_msg = 'No Madanalysis path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)
        if not td:
            error_msg = 'No path to td directory correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        if len(args) == 0:
            if not hasattr(self, 'run_name') or not self.run_name:
                self.help_plot()
                raise self.InvalidCmd('No run name currently define. Please add this information.')
            args.append('all')
            return

        # first argument may be a run name instead of a plot mode
        if args[0] not in self._plot_mode:
            self.set_run_name(args[0], level='plot')
            del args[0]
            if len(args) == 0:
                args.append('all')
        elif not self.run_name:
            self.help_plot()
            raise self.InvalidCmd('No run name currently define. Please add this information.')

        for arg in args:
            if arg not in self._plot_mode and arg != self.run_name:
                self.help_plot()
                raise self.InvalidCmd('unknown options %s' % arg)

    def check_pgs(self, arg):
        """Check the argument for pythia command
        syntax: pgs [NAME]
        Note that other option are already remove at this point

        Returns the lock of the asynchronous gunzip of the event file,
        or None."""

        # If not pythia-pgs path
        if not self.options['pythia-pgs_path']:
            logger.info('Retry to read configuration file to find pythia-pgs path')
            self.set_configuration()

        if not self.options['pythia-pgs_path'] or not \
            os.path.exists(pjoin(self.options['pythia-pgs_path'],'src')):
            error_msg = 'No pythia-pgs path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')):
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        lock = None
        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'pgs')
            filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name,
                                        'events_*.hep.gz'))
            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s. '% (self.run_name, prev_tag))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                # decompress asynchronously; the caller can wait on the lock
                lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'),
                                                argument=['-c', input_file])
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'pgs')

        return lock

    def check_delphes(self, arg):
        """Check the argument for pythia command
        syntax: delphes [NAME]
        Note that other option are already remove at this point
        """

        # If not pythia-pgs path
        if not self.options['delphes_path']:
            logger.info('Retry to read configuration file to find delphes path')
            self.set_configuration()

        if not self.options['delphes_path']:
            error_msg = 'No delphes path correctly set.'
            error_msg += 'Please use the set command to define the path and retry.'
            error_msg += 'You can also define it in the configuration file.'
            raise self.InvalidCmd(error_msg)

        tag = [a for a in arg if a.startswith('--tag=')]
        if tag:
            arg.remove(tag[0])
            tag = tag[0][6:]

        if len(arg) == 0 and not self.run_name:
            if self.results.lastrun:
                arg.insert(0, self.results.lastrun)
            else:
                raise self.InvalidCmd('No run name currently define. Please add this information.')

        if len(arg) == 1 and self.run_name == arg[0]:
            arg.pop(0)

        if not len(arg) and \
           not os.path.exists(pjoin(self.me_dir,'Events','pythia_events.hep')):
            # NOTE(review): calls help_pgs, not help_delphes -- presumably
            # intentional reuse, but verify a help_delphes exists
            self.help_pgs()
            raise self.InvalidCmd('''No file file pythia_events.hep currently available
            Please specify a valid run_name''')

        if len(arg) == 1:
            prev_tag = self.set_run_name(arg[0], tag, 'delphes')
            filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name,
                                        'events_*.hep.gz'))
            if not filenames:
                raise self.InvalidCmd('No events file corresponding to %s run with tag %s.:%s '\
                    % (self.run_name, prev_tag,
                       pjoin(self.me_dir,'Events',self.run_name, '%s_pythia_events.hep.gz' % prev_tag)))
            else:
                input_file = filenames[0]
                output_file = pjoin(self.me_dir, 'Events', 'pythia_events.hep')
                # TODO(review): unlike check_pgs, this lock is never returned
                lock = cluster.asyncrone_launch('gunzip',stdout=open(output_file,'w'),
                                                argument=['-c', input_file])
        else:
            if tag:
                self.run_card['run_tag'] = tag
            self.set_run_name(self.run_name, tag, 'delphes')

    def check_calculate_xsect(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_calculate_xsect()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                # was args[1]: IndexError while formatting the error message
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.')

    def check_generate_events(self, args, options):
        """check the validity of the line. args is ORDER,
        ORDER being LO or NLO. If no mode is passed, NLO is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if not args:
            args.append('NLO')
            return

        if len(args) > 1:
            self.help_generate_events()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['NLO', 'LO']:
                # was args[1]: IndexError while formatting the error message
                raise self.InvalidCmd('%s is not a valid mode, please use "LO" or "NLO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.')

    def check_banner_run(self, args):
        """check the validity of line"""

        if len(args) == 0:
            self.help_banner_run()
            raise self.InvalidCmd('banner_run requires at least one argument.')

        tag = [a[6:] for a in args if a.startswith('--tag=')]

        if os.path.exists(args[0]):
            # 'src_type'/'card_format' were named 'type'/'format' (builtins)
            src_type = 'banner'
            card_format = self.detect_card_type(args[0])
            if card_format != 'banner':
                raise self.InvalidCmd('The file is not a valid banner.')
        elif tag:
            # was '% (args[0], tag)': formatted the *list* into the filename
            args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \
                                    (args[0], tag[0]))
            if not os.path.exists(args[0]):
                raise self.InvalidCmd('No banner associates to this name and tag.')
            # NOTE(review): src_type is not set on this branch (pre-existing);
            # reaching the elif chain below without --name then raises NameError
        else:
            name = args[0]
            src_type = 'run'
            banners = glob.glob(pjoin(self.me_dir,'Events', args[0], '*_banner.txt'))
            if not banners:
                raise self.InvalidCmd('No banner associates to this name.')
            elif len(banners) == 1:
                args[0] = banners[0]
            else:
                #list the tag and propose those to the user
                tags = [os.path.basename(p)[len(args[0])+1:-11] for p in banners]
                tag = self.ask('which tag do you want to use?', tags[0], tags)
                args[0] = pjoin(self.me_dir,'Events', args[0], '%s_%s_banner.txt' % \
                                    (args[0], tag))

        run_name = [arg[7:] for arg in args if arg.startswith('--name=')]
        if run_name:
            try:
                # was '% run_name': formatted the *list* into the command
                self.exec_cmd('remove %s all banner -f' % run_name[0])
            except Exception:
                pass
            self.set_run_name(args[0], tag=None, level='parton', reload_card=True)
        elif src_type == 'banner':
            self.set_run_name(self.find_available_run_name(self.me_dir))
        elif src_type == 'run':
            if not self.results[name].is_empty():
                run_name = self.find_available_run_name(self.me_dir)
                logger.info('Run %s is not empty so will use run_name: %s' % \
                                                            (name, run_name))
                self.set_run_name(run_name)
            else:
                try:
                    # was '% run_name' (the empty list here); 'name' is meant
                    self.exec_cmd('remove %s all banner -f' % name)
                except Exception:
                    pass
                self.set_run_name(name)

    def check_launch(self, args, options):
        """check the validity of the line. args is MODE
        MODE being LO, NLO, aMC@NLO or aMC@LO. If no mode is passed, auto is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('auto')
            return

        if len(args) > 1:
            self.help_launch()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['LO', 'NLO', 'aMC@NLO', 'aMC@LO','auto']:
                raise self.InvalidCmd('%s is not a valid mode, please use "LO", "NLO", "aMC@NLO" or "aMC@LO"' % args[0])
            mode = args[0]

        # check for incompatible options/modes
        if options['multicore'] and options['cluster']:
            raise self.InvalidCmd('options -m (--multicore) and -c (--cluster)' + \
                    ' are not compatible. Please choose one.')
        if mode == 'NLO' and options['reweightonly']:
            raise self.InvalidCmd('option -r (--reweightonly) needs mode "aMC@NLO" or "aMC@LO"')

    def check_compile(self, args, options):
        """check the validity of the line. args is MODE
        MODE being FO or MC. If no mode is passed, MC is used"""
        # modify args in order to be DIR
        # mode being either standalone or madevent

        if options['force']:
            self.force = True

        if not args:
            args.append('MC')
            return

        if len(args) > 1:
            self.help_compile()
            raise self.InvalidCmd('Invalid Syntax: Too many argument')

        elif len(args) == 1:
            if not args[0] in ['MC', 'FO']:
                raise self.InvalidCmd('%s is not a valid mode, please use "FO" or "MC"' % args[0])
            mode = args[0]

        # check for incompatible options/modes


#===============================================================================
# CompleteForCmd
#===============================================================================
class CompleteForCmd(CheckValidForCmd):
    """ The Series of help routine for the MadGraphCmd"""

    def complete_launch(self, text, line, begidx, endidx):
        """auto-completion for launch command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text,['LO','NLO','aMC@NLO','aMC@LO'],line)
        elif len(args) == 2 and line[begidx-1] == '@':
            # completing the order right after 'aMC@'
            return self.list_completion(text,['LO','NLO'],line)
        else:
            # only parser options remain
            opts = []
            for opt in _launch_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_banner_run(self, text, line, begidx, endidx):
        "Complete the banner run command"
        try:
            args = self.split_arg(line[0:begidx], error=False)

            if args[-1].endswith(os.path.sep):
                return self.path_completion(text,
                                    os.path.join('.',*[a for a in args \
                                                       if a.endswith(os.path.sep)]))

            if len(args) > 1:
                # only options are possible
                tags = glob.glob(pjoin(self.me_dir, 'Events' , args[1],'%s_*_banner.txt' % args[1]))
                tags = ['%s' % os.path.basename(t)[len(args[1])+1:-11] for t in tags]

                if args[-1] != '--tag=':
                    tags = ['--tag=%s' % t for t in tags]
                else:
                    return self.list_completion(text, tags)
                return self.list_completion(text, tags +['--name=','-f'], line)

            # First argument
            possibilites = {}

            comp = self.path_completion(text, os.path.join('.',*[a for a in args \
                                                        if a.endswith(os.path.sep)]))
            if os.path.sep in line:
                return comp
            else:
                possibilites['Path from ./'] = comp

            run_list = glob.glob(pjoin(self.me_dir, 'Events', '*','*_banner.txt'))
            run_list = [n.rsplit('/',2)[1] for n in run_list]
            possibilites['RUN Name'] = self.list_completion(text, run_list)

            return self.deal_multiple_categories(possibilites)

        except Exception as error:
            # completion must never crash the interpreter loop;
            # (was py2-only 'except Exception, error: print error')
            print(error)

    def complete_compile(self, text, line, begidx, endidx):
        """auto-completion for compile command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text,['FO','MC'],line)
        else:
            opts = []
            for opt in _compile_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_calculate_xsect(self, text, line, begidx, endidx):
        """auto-completion for calculate_xsect command"""

        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return mode
            return self.list_completion(text,['LO','NLO'],line)
        else:
            opts = []
            for opt in _calculate_xsect_parser.option_list:
                opts += opt._long_opts + opt._short_opts
            return self.list_completion(text, opts, line)

    def complete_generate_events(self, text, line, begidx, endidx):
        """auto-completion for generate_events command:
        just delegate to the completion for launch"""
        # was missing the 'return', so the computed completions were discarded
        return self.complete_launch(text, line, begidx, endidx)

    def complete_shower(self, text, line, begidx, endidx):
        """auto-completion for shower command: propose existing run names"""
        args = self.split_arg(line[0:begidx])
        if len(args) == 1:
            #return valid run_name
            data = glob.glob(pjoin(self.me_dir, 'Events', '*','events.lhe.gz'))
            data = [n.rsplit('/',2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

    def complete_plot(self, text, line, begidx, endidx):
        """ Complete the plot command """

        args = self.split_arg(line[0:begidx], error=False)

        if len(args) == 1:
            #return valid run_name
            data = glob.glob(pjoin(self.me_dir, 'Events', '*','events.lhe*'))
            data = [n.rsplit('/',2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1

        if len(args) > 1:
            return self.list_completion(text, self._plot_mode)

    def complete_pgs(self,text, line, begidx, endidx):
        "Complete the pgs command"
        args = self.split_arg(line[0:begidx], error=False)
        if len(args) == 1:
            #return valid run_name
            data = glob.glob(pjoin(self.me_dir, 'Events', '*', 'events_*.hep.gz'))
            data = [n.rsplit('/',2)[1] for n in data]
            tmp1 = self.list_completion(text, data)
            if not self.run_name:
                return tmp1
            else:
                tmp2 = self.list_completion(text, self._run_options + ['-f',
                                                '--tag=' ,'--no_default'], line)
                return tmp1 + tmp2
        else:
            return self.list_completion(text, self._run_options + ['-f',
                                                '--tag=','--no_default'], line)

    complete_delphes = complete_pgs
887
class aMCatNLOAlreadyRunning(InvalidCmd):
    """InvalidCmd specialisation signalling that an aMC@NLO run appears to
    be already in progress (raised elsewhere in this module)."""
    pass
890 891 #=============================================================================== 892 # aMCatNLOCmd 893 #===============================================================================
class aMCatNLOCmd(CmdExtended, HelpToCmd, CompleteForCmd, common_run.CommonRunCmd):
    """The command line processor of MadGraph"""

    # Truth values
    true = ['T','.true.',True,'true']
    # Options and formats available
    _run_options = ['--cluster','--multicore','--nb_core=','--nb_core=2', '-c', '-m']
    _generate_options = ['-f', '--laststep=parton', '--laststep=pythia', '--laststep=pgs', '--laststep=delphes']
    _calculate_decay_options = ['-f', '--accuracy=0.']
    _set_options = ['stdout_level','fortran_compiler','cpp_compiler','timeout']
    # valid arguments of the plot command (also used by the check_* routines)
    _plot_mode = ['all', 'parton','shower','pgs','delphes']
    _clean_mode = _plot_mode + ['channel', 'banner']
    _display_opts = ['run_name', 'options', 'variable']
    # survey options, dict from name to type, default value, and help text
    # Variables to store object information
    web = False
    # 0: serial, otherwise cluster/multicore submission mode
    cluster_mode = 0
    queue  = 'madgraph'
    nb_core = None

    # suggested follow-up commands displayed after each command
    next_possibility = {
        'start': ['generate_events [OPTIONS]', 'calculate_crossx [OPTIONS]', 'launch [OPTIONS]',
                  'help generate_events'],
        'generate_events': ['generate_events [OPTIONS]', 'shower'],
        'launch': ['launch [OPTIONS]', 'shower'],
        'shower' : ['generate_events [OPTIONS]']
    }


    ############################################################################
924 - def __init__(self, me_dir = None, options = {}, *completekey, **stdin):
925 """ add information to the cmd """ 926 927 self.start_time = 0 928 CmdExtended.__init__(self, me_dir, options, *completekey, **stdin) 929 #common_run.CommonRunCmd.__init__(self, me_dir, options) 930 931 self.mode = 'aMCatNLO' 932 self.nb_core = 0 933 self.prompt = "%s>"%os.path.basename(pjoin(self.me_dir)) 934 935 # load the current status of the directory 936 if os.path.exists(pjoin(self.me_dir,'HTML','results.pkl')): 937 self.results = save_load_object.load_from_file(pjoin(self.me_dir,'HTML','results.pkl')) 938 self.results.resetall(self.me_dir) 939 self.last_mode = self.results[self.results.lastrun][-1]['run_mode'] 940 else: 941 model = self.find_model_name() 942 process = self.process # define in find_model_name 943 self.results = gen_crossxhtml.AllResultsNLO(model, process, self.me_dir) 944 self.last_mode = '' 945 self.results.def_web_mode(self.web) 946 # check that compiler is gfortran 4.6 or later if virtuals have been exported 947 proc_card = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read() 948 949 if not '[real=QCD]' in proc_card: 950 check_compiler(self.options, block=True)
951 952 953 ############################################################################
954 - def do_shower(self, line):
955 """ run the shower on a given parton level file """ 956 argss = self.split_arg(line) 957 (options, argss) = _launch_parser.parse_args(argss) 958 # check argument validity and normalise argument 959 options = options.__dict__ 960 options['reweightonly'] = False 961 self.check_shower(argss, options) 962 evt_file = pjoin(os.getcwd(), argss[0], 'events.lhe') 963 self.ask_run_configuration('onlyshower', options) 964 self.run_mcatnlo(evt_file) 965 966 self.update_status('', level='all', update_results=True)
967 968 ################################################################################
969 - def do_plot(self, line):
970 """Create the plot for a given run""" 971 972 # Since in principle, all plot are already done automaticaly 973 args = self.split_arg(line) 974 # Check argument's validity 975 self.check_plot(args) 976 logger.info('plot for run %s' % self.run_name) 977 978 if not self.force: 979 self.ask_edit_cards([], args, plot=True) 980 981 if any([arg in ['parton'] for arg in args]): 982 filename = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe') 983 if os.path.exists(filename+'.gz'): 984 misc.gunzip(filename) 985 if os.path.exists(filename): 986 logger.info('Found events.lhe file for run %s' % self.run_name) 987 shutil.move(filename, pjoin(self.me_dir, 'Events', 'unweighted_events.lhe')) 988 self.create_plot('parton') 989 shutil.move(pjoin(self.me_dir, 'Events', 'unweighted_events.lhe'), filename) 990 misc.gzip(filename) 991 992 if any([arg in ['all','parton'] for arg in args]): 993 filename = pjoin(self.me_dir, 'Events', self.run_name, 'MADatNLO.top') 994 if os.path.exists(filename): 995 logger.info('Found MADatNLO.top file for run %s' % \ 996 self.run_name) 997 output = pjoin(self.me_dir, 'HTML',self.run_name, 'plots_parton.html') 998 plot_dir = pjoin(self.me_dir, 'HTML', self.run_name, 'plots_parton') 999 1000 if not os.path.isdir(plot_dir): 1001 os.makedirs(plot_dir) 1002 top_file = pjoin(plot_dir, 'plots.top') 1003 files.cp(filename, top_file) 1004 madir = self.options['madanalysis_path'] 1005 tag = self.run_card['run_tag'] 1006 td = self.options['td_path'] 1007 misc.call(['%s/plot' % self.dirbin, madir, td], 1008 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1009 stderr = subprocess.STDOUT, 1010 cwd=plot_dir) 1011 1012 misc.call(['%s/plot_page-pl' % self.dirbin, 1013 os.path.basename(plot_dir), 1014 'parton'], 1015 stdout = open(pjoin(plot_dir, 'plot.log'),'a'), 1016 stderr = subprocess.STDOUT, 1017 cwd=pjoin(self.me_dir, 'HTML', self.run_name)) 1018 shutil.move(pjoin(self.me_dir, 'HTML',self.run_name ,'plots.html'), 1019 output) 1020 1021 
os.remove(pjoin(self.me_dir, 'Events', 'plots.top')) 1022 1023 if any([arg in ['all','shower'] for arg in args]): 1024 filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 1025 'events_*.lhe.gz')) 1026 if len(filenames) != 1: 1027 filenames = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 1028 'events_*.hep.gz')) 1029 if len(filenames) != 1: 1030 logger.info('No shower level file found for run %s' % \ 1031 self.run_name) 1032 return 1033 filename = filenames[0] 1034 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1035 1036 if not os.path.exists(pjoin(self.me_dir, 'Cards', 'pythia_card.dat')): 1037 if aMCatNLO and not self.options['mg5_path']: 1038 raise "plotting NLO HEP file needs MG5 utilities" 1039 1040 files.cp(pjoin(self.options['mg5_path'], 'Template','LO', 'Cards', 'pythia_card_default.dat'), 1041 pjoin(self.me_dir, 'Cards', 'pythia_card.dat')) 1042 self.run_hep2lhe() 1043 else: 1044 filename = filenames[0] 1045 misc.gunzip(filename, keep=True, stdout=pjoin(self.me_dir, 'Events','pythia_events.hep')) 1046 1047 self.create_plot('shower') 1048 lhe_file_name = filename.replace('.hep.gz', '.lhe') 1049 shutil.move(pjoin(self.me_dir, 'Events','pythia_events.lhe'), 1050 lhe_file_name) 1051 misc.gzip(lhe_file_name) 1052 1053 if any([arg in ['all','pgs'] for arg in args]): 1054 filename = pjoin(self.me_dir, 'Events', self.run_name, 1055 '%s_pgs_events.lhco' % self.run_tag) 1056 if os.path.exists(filename+'.gz'): 1057 misc.gunzip(filename) 1058 if os.path.exists(filename): 1059 self.create_plot('PGS') 1060 misc.gzip(filename) 1061 else: 1062 logger.info('No valid files for pgs plot') 1063 1064 if any([arg in ['all','delphes'] for arg in args]): 1065 filename = pjoin(self.me_dir, 'Events', self.run_name, 1066 '%s_delphes_events.lhco' % self.run_tag) 1067 if os.path.exists(filename+'.gz'): 1068 misc.gunzip(filename) 1069 if os.path.exists(filename): 1070 #shutil.move(filename, pjoin(self.me_dir, 
'Events','delphes_events.lhco')) 1071 self.create_plot('Delphes') 1072 #shutil.move(pjoin(self.me_dir, 'Events','delphes_events.lhco'), filename) 1073 misc.gzip(filename) 1074 else: 1075 logger.info('No valid files for delphes plot')
1076 1077 1078 ############################################################################
1079 - def do_calculate_xsect(self, line):
1080 """Main commands: calculates LO/NLO cross-section, using madevent_mintFO 1081 this function wraps the do_launch one""" 1082 1083 self.start_time = time.time() 1084 argss = self.split_arg(line) 1085 # check argument validity and normalise argument 1086 (options, argss) = _calculate_xsect_parser.parse_args(argss) 1087 options = options.__dict__ 1088 options['reweightonly'] = False 1089 options['parton'] = True 1090 self.check_calculate_xsect(argss, options) 1091 self.do_launch(line, options, argss)
1092 1093 ############################################################################
1094 - def do_banner_run(self, line):
1095 """Make a run from the banner file""" 1096 1097 args = self.split_arg(line) 1098 #check the validity of the arguments 1099 self.check_banner_run(args) 1100 1101 # Remove previous cards 1102 for name in ['shower_card.dat', 'madspin_card.dat']: 1103 try: 1104 os.remove(pjoin(self.me_dir, 'Cards', name)) 1105 except Exception: 1106 pass 1107 1108 banner_mod.split_banner(args[0], self.me_dir, proc_card=False) 1109 1110 # Check if we want to modify the run 1111 if not self.force: 1112 ans = self.ask('Do you want to modify the Cards/Run Type?', 'n', ['y','n']) 1113 if ans == 'n': 1114 self.force = True 1115 1116 # Compute run mode: 1117 if self.force: 1118 mode_status = {'order': 'NLO', 'fixed_order': False, 'madspin':False, 'shower':True} 1119 banner = banner_mod.Banner(args[0]) 1120 for line in banner['run_settings']: 1121 if '=' in line: 1122 mode, value = [t.strip() for t in line.split('=')] 1123 mode_status[mode] = value 1124 else: 1125 mode_status = {} 1126 1127 # Call Generate events 1128 self.do_launch('-n %s %s' % (self.run_name, '-f' if self.force else ''), 1129 switch=mode_status)
1130 1131 ############################################################################
    def do_generate_events(self, line):
        """Main commands: generate events
        this function just wraps the do_launch one"""
        # thin alias: event generation is handled entirely by do_launch
        self.do_launch(line)
1136 1137 1138 ############################################################################
1139 - def do_treatcards(self, line, amcatnlo=True):
1140 """Advanced commands: this is for creating the correct run_card.inc from the nlo format""" 1141 #check if no 'Auto' are present in the file 1142 self.check_param_card(pjoin(self.me_dir, 'Cards','param_card.dat')) 1143 return super(aMCatNLOCmd,self).do_treatcards(line, amcatnlo)
1144 1145 ############################################################################
1146 - def set_configuration(self, amcatnlo=True, **opt):
1147 """assign all configuration variable from file 1148 loop over the different config file if config_file not define """ 1149 return super(aMCatNLOCmd,self).set_configuration(amcatnlo=amcatnlo, **opt)
1150 1151 ############################################################################
1152 - def do_launch(self, line, options={}, argss=[], switch={}):
1153 """Main commands: launch the full chain 1154 options and args are relevant if the function is called from other 1155 functions, such as generate_events or calculate_xsect 1156 mode gives the list of switch needed for the computation (usefull for banner_run) 1157 """ 1158 1159 if not argss and not options: 1160 self.start_time = time.time() 1161 argss = self.split_arg(line) 1162 # check argument validity and normalise argument 1163 (options, argss) = _launch_parser.parse_args(argss) 1164 options = options.__dict__ 1165 self.check_launch(argss, options) 1166 1167 if 'run_name' in options.keys() and options['run_name']: 1168 self.run_name = options['run_name'] 1169 # if a dir with the given run_name already exists 1170 # remove it and warn the user 1171 if os.path.isdir(pjoin(self.me_dir, 'Events', self.run_name)): 1172 logger.warning('Removing old run information in \n'+ 1173 pjoin(self.me_dir, 'Events', self.run_name)) 1174 files.rm(pjoin(self.me_dir, 'Events', self.run_name)) 1175 self.results.delete_run(self.run_name) 1176 else: 1177 self.run_name = '' # will be set later 1178 1179 if options['multicore']: 1180 self.cluster_mode = 2 1181 elif options['cluster']: 1182 self.cluster_mode = 1 1183 1184 if not switch: 1185 mode = argss[0] 1186 if mode in ['LO', 'NLO']: 1187 options['parton'] = True 1188 mode = self.ask_run_configuration(mode, options) 1189 else: 1190 mode = self.ask_run_configuration('auto', options, switch) 1191 1192 self.results.add_detail('run_mode', mode) 1193 1194 self.update_status('Starting run', level=None, update_results=True) 1195 1196 if self.options['automatic_html_opening']: 1197 misc.open_file(os.path.join(self.me_dir, 'crossx.html')) 1198 self.options['automatic_html_opening'] = False 1199 1200 if '+' in mode: 1201 mode = mode.split('+')[0] 1202 self.compile(mode, options) 1203 evt_file = self.run(mode, options) 1204 1205 if int(self.run_card['nevents']) == 0 and not mode in ['LO', 'NLO']: 1206 logger.info('No event file generated: 
grids have been set-up with a '\ 1207 'relative precision of %s' % self.run_card['req_acc']) 1208 return 1209 1210 if not mode in ['LO', 'NLO']: 1211 assert evt_file == pjoin(self.me_dir,'Events', self.run_name, 'events.lhe'), '%s != %s' %(evt_file, pjoin(self.me_dir,'Events', self.run_name, 'events.lhe.gz')) 1212 self.exec_cmd('reweight -from_cards', postcmd=False) 1213 self.exec_cmd('decay_events -from_cards', postcmd=False) 1214 evt_file = pjoin(self.me_dir,'Events', self.run_name, 'events.lhe') 1215 1216 if not mode in ['LO', 'NLO', 'noshower', 'noshowerLO'] \ 1217 and not options['parton']: 1218 self.run_mcatnlo(evt_file) 1219 elif mode == 'noshower': 1220 logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical. 1221 Please, shower the Les Houches events before using them for physics analyses.""") 1222 1223 1224 self.update_status('', level='all', update_results=True) 1225 if int(self.run_card['ickkw']) == 3 and mode in ['noshower', 'aMC@NLO']: 1226 logger.warning("""You are running with FxFx merging enabled. 1227 To be able to merge samples of various multiplicities without double counting, 1228 you have to remove some events after showering 'by hand'. 1229 Please read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")
1230 1231 1232 1233 ############################################################################
1234 - def do_compile(self, line):
1235 """Advanced commands: just compile the executables """ 1236 argss = self.split_arg(line) 1237 # check argument validity and normalise argument 1238 (options, argss) = _compile_parser.parse_args(argss) 1239 options = options.__dict__ 1240 options['reweightonly'] = False 1241 options['nocompile'] = False 1242 self.check_compile(argss, options) 1243 1244 mode = {'FO': 'NLO', 'MC': 'aMC@NLO'}[argss[0]] 1245 self.ask_run_configuration(mode, options) 1246 self.compile(mode, options) 1247 1248 1249 self.update_status('', level='all', update_results=True)
1250
1251 - def print_results_in_shell(self, data):
1252 """Have a nice results prints in the shell, 1253 data should be of type: gen_crossxhtml.OneTagResults""" 1254 if not data: 1255 return 1256 logger.info(" === Results Summary for run: %s tag: %s ===\n" % (data['run_name'],data['tag'])) 1257 if self.ninitial == 1: 1258 logger.info(" Width : %.4g +- %.4g GeV" % (data['cross'], data['error'])) 1259 else: 1260 logger.info(" Cross-section : %.4g +- %.4g pb" % (data['cross'], data['error'])) 1261 logger.info(" Nb of events : %s" % data['nb_event'] ) 1262 #if data['cross_pythia'] and data['nb_event_pythia']: 1263 # if self.ninitial == 1: 1264 # logger.info(" Matched Width : %.4g +- %.4g GeV" % (data['cross_pythia'], data['error_pythia'])) 1265 # else: 1266 # logger.info(" Matched Cross-section : %.4g +- %.4g pb" % (data['cross_pythia'], data['error_pythia'])) 1267 # logger.info(" Nb of events after Matching : %s" % data['nb_event_pythia']) 1268 # if self.run_card['use_syst'] in self.true: 1269 # logger.info(" Be carefull that matched information are here NOT for the central value. Refer to SysCalc output for it") 1270 logger.info(" " )
1271
1272 - def print_results_in_file(self, data, path, mode='w'):
1273 """Have a nice results prints in the shell, 1274 data should be of type: gen_crossxhtml.OneTagResults""" 1275 if not data: 1276 return 1277 1278 fsock = open(path, mode) 1279 1280 fsock.write(" === Results Summary for run: %s tag: %s process: %s ===\n" % \ 1281 (data['run_name'],data['tag'], os.path.basename(self.me_dir))) 1282 1283 if self.ninitial == 1: 1284 fsock.write(" Width : %.4g +- %.4g GeV\n" % (data['cross'], data['error'])) 1285 else: 1286 fsock.write(" Cross-section : %.4g +- %.4g pb\n" % (data['cross'], data['error'])) 1287 fsock.write(" Nb of events : %s\n" % data['nb_event'] ) 1288 #if data['cross_pythia'] and data['nb_event_pythia']: 1289 # if self.ninitial == 1: 1290 # fsock.write(" Matched Width : %.4g +- %.4g GeV\n" % (data['cross_pythia'], data['error_pythia'])) 1291 # else: 1292 # fsock.write(" Matched Cross-section : %.4g +- %.4g pb\n" % (data['cross_pythia'], data['error_pythia'])) 1293 # fsock.write(" Nb of events after Matching : %s\n" % data['nb_event_pythia']) 1294 fsock.write(" \n" )
1295 1296 1297 1298 1299
1300 - def update_random_seed(self):
1301 """Update random number seed with the value from the run_card. 1302 If this is 0, update the number according to a fresh one""" 1303 iseed = int(self.run_card['iseed']) 1304 if iseed == 0: 1305 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit')) 1306 iseed = int(randinit.read()[2:]) + 1 1307 randinit.close() 1308 randinit = open(pjoin(self.me_dir, 'SubProcesses', 'randinit'), 'w') 1309 randinit.write('r=%d' % iseed) 1310 randinit.close()
1311 1312
    def run(self, mode, options):
        """runs aMC@NLO. Returns the name of the event file created

        mode: one of 'LO'/'NLO' (fixed order) or
              'aMC@LO'/'aMC@NLO'/'noshower'/'noshowerLO' (event generation)
        options: dict of run options ('only_generation', 'reweightonly', ...)

        Fixed-order runs return None after writing results/plots; event
        generation runs return the path produced by
        reweight_and_collect_events.
        """
        logger.info('Starting run')

        if not 'only_generation' in options.keys():
            options['only_generation'] = False

        # applgrid mode 2 fills pre-computed grids: force generation-only
        if mode in ['LO', 'NLO'] and self.run_card['iappl'] == 2 and not options['only_generation']:
            options['only_generation'] = True
        self.get_characteristics(pjoin(self.me_dir, 'SubProcesses', 'proc_characteristics'))

        # set up the job submitter (1 = batch cluster, 2 = local multicore)
        if self.cluster_mode == 1:
            cluster_name = self.options['cluster_type']
            self.cluster = cluster.from_name[cluster_name](**self.options)
        if self.cluster_mode == 2:
            try:
                import multiprocessing
                if not self.nb_core:
                    try:
                        self.nb_core = int(self.options['nb_core'])
                    except TypeError:
                        # nb_core option unset -> use all available cores
                        self.nb_core = multiprocessing.cpu_count()
                logger.info('Using %d cores' % self.nb_core)
            except ImportError:
                self.nb_core = 1
                logger.warning('Impossible to detect the number of cores => Using One.\n'+
                               'Use set nb_core X in order to set this number and be able to'+
                               'run in multicore.')

            self.cluster = cluster.MultiCore(**self.options)
        self.update_random_seed()
        #find and keep track of all the jobs
        folder_names = {'LO': ['born_G*'], 'NLO': ['all_G*'],
                        'aMC@LO': ['GB*'], 'aMC@NLO': ['GF*']}
        folder_names['noshower'] = folder_names['aMC@NLO']
        folder_names['noshowerLO'] = folder_names['aMC@LO']
        job_dict = {}
        p_dirs = [d for d in \
                  open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d]
        #find jobs and clean previous results
        if not options['only_generation'] and not options['reweightonly']:
            self.update_status('Cleaning previous results', level=None)
        for dir in p_dirs:
            job_dict[dir] = [file for file in \
                             os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                             if file.startswith('ajob')]
            #find old folders to be removed
            for obj in folder_names[mode]:
                to_rm = [file for file in \
                         os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                         if file.startswith(obj[:-1]) and \
                         (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \
                          os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]
                #always clean dirs for the splitted event generation
                # do not include the born_G/ grid_G which should be kept when
                # doing a f.o. run keeping old grids
                to_always_rm = [file for file in \
                                os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)) \
                                if file.startswith(obj[:-1]) and
                                '_' in file and not '_G' in file and \
                                (os.path.isdir(pjoin(self.me_dir, 'SubProcesses', dir, file)) or \
                                 os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file)))]

                if not options['only_generation'] and not options['reweightonly']:
                    to_always_rm.extend(to_rm)
                    if os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz')):
                        to_always_rm.append(pjoin(self.me_dir, 'SubProcesses', dir,'MadLoop5_resources.tar.gz'))
                files.rm([pjoin(self.me_dir, 'SubProcesses', dir, d) for d in to_always_rm])

        mcatnlo_status = ['Setting up grid', 'Computing upper envelope', 'Generating events']

        if self.run_card['iappl'] == 2:
            self.applgrid_distribute(options,mode,p_dirs)

        if options['reweightonly']:
            event_norm=self.run_card['event_norm']
            nevents=int(self.run_card['nevents'])
            return self.reweight_and_collect_events(options, mode, nevents, event_norm)

        devnull = os.open(os.devnull, os.O_RDWR)
        if mode in ['LO', 'NLO']:
            # this is for fixed order runs
            mode_dict = {'NLO': 'all', 'LO': 'born'}
            logger.info('Doing fixed order %s' % mode)
            req_acc = self.run_card['req_acc_FO']
            # step 0: grid setup (accuracy-driven or npoints/niters-driven)
            if not options['only_generation'] and req_acc != -1:
                self.write_madin_file(pjoin(self.me_dir, 'SubProcesses'), mode_dict[mode], 0, '-1', '6','0.10')
                self.update_status('Setting up grids', level=None)
                self.run_all(job_dict, [['0', mode_dict[mode], '0']], 'Setting up grids')
            elif not options['only_generation']:
                npoints = self.run_card['npoints_FO_grid']
                niters = self.run_card['niters_FO_grid']
                self.write_madin_file(pjoin(self.me_dir, 'SubProcesses'), mode_dict[mode], 0, npoints, niters)
                self.update_status('Setting up grids', level=None)
                self.run_all(job_dict, [['0', mode_dict[mode], '0']], 'Setting up grids')

            npoints = self.run_card['npoints_FO']
            niters = self.run_card['niters_FO']
            self.write_madin_file(pjoin(self.me_dir, 'SubProcesses'), mode_dict[mode], -1, npoints, niters)
            # collect the results and logs
            self.collect_log_files(folder_names[mode], 0)
            p = misc.Popen(['./combine_results_FO.sh', str(req_acc), '%s_G*' % mode_dict[mode]], \
                           stdout=subprocess.PIPE, \
                           cwd=pjoin(self.me_dir, 'SubProcesses'))
            output = p.communicate()

            self.cross_sect_dict = self.read_results(output, mode)
            self.print_summary(options, 0, mode)
            cross, error = sum_html.make_all_html_results(self, ['%s*' % mode_dict[mode]])
            self.results.add_detail('cross', cross)
            self.results.add_detail('error', error)

            # step 1: the actual cross-section computation
            self.update_status('Computing cross-section', level=None)
            self.run_all(job_dict, [['0', mode_dict[mode], '0', mode_dict[mode]]], 'Computing cross-section')

            # collect the results and logs
            self.collect_log_files(folder_names[mode], 1)
            p = misc.Popen(['./combine_results_FO.sh', '-1'] + folder_names[mode], \
                           stdout=subprocess.PIPE,
                           cwd=pjoin(self.me_dir, 'SubProcesses'))
            output = p.communicate()
            self.cross_sect_dict = self.read_results(output, mode)

            # collect the scale and PDF uncertainties
            scale_pdf_info={}
            if self.run_card['reweight_scale'] or self.run_card['reweight_PDF']:
                data_files=[]
                for dir in p_dirs:
                    for obj in folder_names[mode]:
                        for file in os.listdir(pjoin(self.me_dir, 'SubProcesses', dir)):
                            if file.startswith(obj[:-1]) and \
                               (os.path.exists(pjoin(self.me_dir, 'SubProcesses', dir, file,'scale_pdf_dependence.dat'))):
                                data_files.append(pjoin(dir,file,'scale_pdf_dependence.dat'))
                scale_pdf_info = self.pdf_scale_from_reweighting(data_files)
            # print the results:
            self.print_summary(options, 1, mode, scale_pdf_info)

            files.cp(pjoin(self.me_dir, 'SubProcesses', 'res.txt'),
                     pjoin(self.me_dir, 'Events', self.run_name))

            # export the fixed-order analysis output in the requested format
            if self.analyse_card['fo_analysis_format'].lower() == 'topdrawer':
                misc.call(['./combine_plots_FO.sh'] + folder_names[mode], \
                          stdout=devnull,
                          cwd=pjoin(self.me_dir, 'SubProcesses'))
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.top'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                logger.info('The results of this run and the TopDrawer file with the plots' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
            elif self.analyse_card['fo_analysis_format'].lower() == 'hwu':
                self.combine_plots_HwU(folder_names[mode])
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.HwU'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.gnuplot'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                # gnuplot is optional: failure to render is not fatal
                try:
                    misc.call(['gnuplot','MADatNLO.gnuplot'],\
                              stdout=os.open(os.devnull, os.O_RDWR),\
                              stderr=os.open(os.devnull, os.O_RDWR),\
                              cwd=pjoin(self.me_dir, 'Events', self.run_name))
                except Exception:
                    pass


                logger.info('The results of this run and the HwU and GnuPlot files with the plots' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
            elif self.analyse_card['fo_analysis_format'].lower() == 'root':
                misc.call(['./combine_root.sh'] + folder_names[mode], \
                          stdout=devnull,
                          cwd=pjoin(self.me_dir, 'SubProcesses'))
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'MADatNLO.root'),
                         pjoin(self.me_dir, 'Events', self.run_name))
                logger.info('The results of this run and the ROOT file with the plots' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))
            else:
                logger.info('The results of this run' + \
                            ' have been saved in %s' % pjoin(self.me_dir, 'Events', self.run_name))

            cross, error = sum_html.make_all_html_results(self, folder_names[mode])
            self.results.add_detail('cross', cross)
            self.results.add_detail('error', error)
            if self.run_card['iappl'] != 0:
                self.applgrid_combine(cross,error)
            self.update_status('Run complete', level='parton', update_results=True)

            return

        elif mode in ['aMC@NLO','aMC@LO','noshower','noshowerLO']:
            shower = self.run_card['parton_shower'].upper()
            nevents = int(self.run_card['nevents'])
            req_acc = self.run_card['req_acc']
            # sanity checks on nevents / req_acc combinations
            if nevents == 0 and float(req_acc) < 0 :
                raise aMCatNLOError('Cannot determine the required accuracy from the number '\
                                    'of events, because 0 events requested. Please set '\
                                    'the "req_acc" parameter in the run_card to a value between 0 and 1')
            elif float(req_acc) >1 or float(req_acc) == 0 :
                raise aMCatNLOError('Required accuracy ("req_acc" in the run_card) should '\
                                    'be between larger than 0 and smaller than 1, '\
                                    'or set to -1 for automatic determination. Current value is %s' % req_acc)
            # For more than 1M events, set req_acc to 0.001 (except when it was explicitly set in the run_card)
            elif float(req_acc) < 0 and nevents > 1000000 :
                req_acc='0.001'

            shower_list = ['HERWIG6', 'HERWIGPP', 'PYTHIA6Q', 'PYTHIA6PT', 'PYTHIA8']

            if not shower in shower_list:
                raise aMCatNLOError('%s is not a valid parton shower. Please use one of the following: %s' \
                                    % (shower, ', '.join(shower_list)))

            # check that PYTHIA6PT is not used for processes with FSR
            if shower == 'PYTHIA6PT' and self.proc_characteristics['has_fsr']:
                raise aMCatNLOError('PYTHIA6PT does not support processes with FSR')

            if mode in ['aMC@NLO', 'aMC@LO']:
                logger.info('Doing %s matched to parton shower' % mode[4:])
            elif mode in ['noshower','noshowerLO']:
                logger.info('Generating events without running the shower.')
            elif options['only_generation']:
                logger.info('Generating events starting from existing results')

            # MINT steps: 0 = grid setup, 1 = upper envelope, 2 = event gen
            for i, status in enumerate(mcatnlo_status):
                #check if need to split jobs
                # at least one channel must have enough events
                try:
                    nevents_unweighted = open(pjoin(self.me_dir,
                                                    'SubProcesses',
                                                    'nevents_unweighted')).read().split('\n')
                except IOError:
                    nevents_unweighted = []

                split = i == 2 and \
                        int(self.run_card['nevt_job']) > 0

                if i == 2 or not options['only_generation']:
                    # if the number of events requested is zero,
                    # skip mint step 2
                    if i==2 and nevents==0:
                        self.print_summary(options, 2,mode)
                        return

                    if split:
                        # split the event generation
                        misc.call([pjoin(self.me_dir, 'bin', 'internal', 'split_jobs.py')] + \
                                  [str(self.run_card['nevt_job'])],
                                  stdout = devnull,
                                  cwd = pjoin(self.me_dir, 'SubProcesses'))
                        assert os.path.exists(pjoin(self.me_dir, 'SubProcesses',
                                                    'nevents_unweighted_splitted'))

                    self.update_status(status, level='parton')
                    if mode in ['aMC@NLO', 'noshower']:
                        self.write_madinMMC_file(pjoin(self.me_dir, 'SubProcesses'), 'all', i)
                        self.run_all(job_dict, [['2', 'F', '%d' % i]], status, split_jobs = split)

                    elif mode in ['aMC@LO', 'noshowerLO']:
                        self.write_madinMMC_file(
                            pjoin(self.me_dir, 'SubProcesses'), 'born', i)
                        self.run_all(job_dict,
                                     [['2', 'B', '%d' % i]],
                                     '%s at LO' % status, split_jobs = split)

                if (i < 2 and not options['only_generation']) or i == 1 :
                    # collect the results and logs
                    self.collect_log_files(folder_names[mode], i)
                    p = misc.Popen(['./combine_results.sh'] + \
                                   ['%d' % i,'%d' % nevents, '%s' % req_acc ] + \
                                   folder_names[mode],
                                   stdout=subprocess.PIPE,
                                   cwd = pjoin(self.me_dir, 'SubProcesses'))
                    output = p.communicate()
                    files.cp(pjoin(self.me_dir, 'SubProcesses', 'res_%d.txt' % i), \
                             pjoin(self.me_dir, 'Events', self.run_name))

                    self.cross_sect_dict = self.read_results(output, mode)
                    self.print_summary(options, i, mode)

                    cross, error = sum_html.make_all_html_results(self, folder_names[mode])
                    self.results.add_detail('cross', cross)
                    self.results.add_detail('error', error)

                #check that split jobs are all correctly terminated
                if split:
                    self.check_event_files()

            if self.cluster_mode == 1:
                #if cluster run, wait 15 sec so that event files are transferred back
                self.update_status(
                    'Waiting while files are transferred back from the cluster nodes',
                    level='parton')
                time.sleep(10)
            if split:
                files.cp(pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted_splitted'), \
                         pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted'))


            event_norm=self.run_card['event_norm']
            self.collect_log_files(folder_names[mode], 2)
            return self.reweight_and_collect_events(options, mode, nevents, event_norm)
1611
1612 - def combine_plots_HwU(self,folder_names):
1613 """Sums all the plots in the HwU format.""" 1614 1615 logger.debug('Combining HwU plots.') 1616 1617 with open(pjoin(self.me_dir,'SubProcesses','dirs.txt')) as dirf: 1618 all_histo_paths = dirf.readlines() 1619 all_histo_paths = [pjoin(self.me_dir,'SubProcesses', 1620 path.rstrip(),"MADatNLO.HwU") for path in all_histo_paths] 1621 1622 histogram_list = histograms.HwUList(all_histo_paths[0]) 1623 1624 for histo_path in all_histo_paths[1:]: 1625 for i, histo in enumerate(histograms.HwUList(histo_path)): 1626 # First make sure the plots have the same weight labels and such 1627 histo.test_plot_compability(histogram_list[i]) 1628 # Now let the histogram module do the magic and add them. 1629 histogram_list[i] += histo 1630 1631 # And now output the finalized list 1632 histogram_list.output(pjoin(self.me_dir,'SubProcesses',"MADatNLO"), 1633 format = 'gnuplot')
1634
    def applgrid_combine(self, cross, error):
        """Combines the APPLgrids in all the SubProcess/P*/all_G*/ directories

        cross, error: total cross-section and its error, used to weight the
        grids when iappl == 2."""
        logger.debug('Combining APPLgrids \n')
        # applgrid-combine lives in the same directory as applgrid-config
        applcomb=pjoin(self.options['applgrid'].rstrip('applgrid-config'),
                       'applgrid-combine')
        with open(pjoin(self.me_dir,'SubProcesses','dirs.txt')) as dirf:
            all_jobs=dirf.readlines()
        ngrids=len(all_jobs)
        # one *_out.root grid per observable in each job directory
        nobs =len([name for name in os.listdir(pjoin(self.me_dir,'SubProcesses',
                   all_jobs[0].rstrip())) if name.endswith("_out.root")])
        for obs in range(0,nobs):
            gdir = [pjoin(self.me_dir,'SubProcesses',job.rstrip(),"grid_obs_"+
                          str(obs)+"_out.root") for job in all_jobs]
            # combine APPLgrids from different channels for observable 'obs'
            if self.run_card["iappl"] == 1:
                misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",self.run_name,
                           "aMCfast_obs_"+str(obs)+"_starting_grid.root"), '--optimise']+ gdir)
            elif self.run_card["iappl"] == 2:
                unc2_inv=pow(cross/error,2)
                # NOTE(review): unc2_inv_ngrids is computed but never used below
                unc2_inv_ngrids=pow(cross/error,2)*ngrids
                misc.call([applcomb,'-o', pjoin(self.me_dir,"Events",
                           self.run_name,"aMCfast_obs_"+str(obs)+".root"),'-s',
                           str(unc2_inv),'--weight',str(unc2_inv)]+ gdir)
                for job in all_jobs:
                    os.remove(pjoin(self.me_dir,'SubProcesses',job.rstrip(),
                              "grid_obs_"+str(obs)+"_in.root"))
            else:
                raise aMCatNLOError('iappl parameter can only be 0, 1 or 2')
            # after combining, delete the original grids
            for ggdir in gdir:
                os.remove(ggdir)
1666 1667
    def applgrid_distribute(self,options,mode,p_dirs):
        """Distributes the APPLgrids ready to be filled by a second run of the code

        options: run options; 'appl_start_grid' names the run whose starting
            grids are copied (guessed from file time stamps when absent)
        mode: 'NLO' or 'LO' (selects all_G*/born_G* target directories)
        p_dirs: list of SubProcesses/P* directory names"""
        # if no appl_start_grid argument given, guess it from the time stamps
        # of the starting grid files
        if not('appl_start_grid' in options.keys() and options['appl_start_grid']):
            gfiles=glob.glob(pjoin(self.me_dir, 'Events','*',
                                   'aMCfast_obs_0_starting_grid.root'))
            time_stamps={}
            for root_file in gfiles:
                time_stamps[root_file]=os.path.getmtime(root_file)
            # most recently modified grid wins; its run name is the parent dir
            options['appl_start_grid']= \
                max(time_stamps.iterkeys(), key=(lambda key:
                                                 time_stamps[key])).split('/')[-2]
            logger.info('No --appl_start_grid option given. '+\
                        'Guessing that start grid from run "%s" should be used.' \
                        % options['appl_start_grid'])

        if 'appl_start_grid' in options.keys() and options['appl_start_grid']:
            self.appl_start_grid = options['appl_start_grid']
            start_grid_dir=pjoin(self.me_dir, 'Events', self.appl_start_grid)
            # check that this dir exists and at least one grid file is there
            if not os.path.exists(pjoin(start_grid_dir,
                                        'aMCfast_obs_0_starting_grid.root')):
                raise self.InvalidCmd('APPLgrid file not found: %s' % \
                                      pjoin(start_grid_dir,'aMCfast_obs_0_starting_grid.root'))
            else:
                all_grids=[pjoin(start_grid_dir,name) for name in os.listdir( \
                           start_grid_dir) if name.endswith("_starting_grid.root")]
                nobs =len(all_grids)
                gstring=" ".join(all_grids)
        if not hasattr(self, 'appl_start_grid') or not self.appl_start_grid:
            raise self.InvalidCmd('No APPLgrid name currently defined.'+
                                  'Please provide this information.')
        if mode == 'NLO':
            gdir='all_G'
        elif mode == 'LO':
            gdir='born_G'
        #copy the grid to all relevant directories
        for pdir in p_dirs:
            g_dirs = [file for file in os.listdir(pjoin(self.me_dir,
                      "SubProcesses",pdir)) if file.startswith(gdir) and
                      os.path.isdir(pjoin(self.me_dir,"SubProcesses",pdir, file))]
            for g_dir in g_dirs:
                for grid in all_grids:
                    # grid names look like ..._obs_<N>_starting_grid.root
                    obs=grid.split('_')[-3]
                    files.cp(grid,pjoin(self.me_dir,"SubProcesses",pdir,g_dir,
                             'grid_obs_'+obs+'_in.root'))
1715 1716
1717 - def collect_log_files(self, folders, istep):
1718 """collect the log files and put them in a single, html-friendly file 1719 inside the run_... directory""" 1720 step_list = ['Grid setting', 'Cross-section computation', 1721 'Event generation'] 1722 log_file = pjoin(self.me_dir, 'Events', self.run_name, 1723 'alllogs_%d.html' % istep) 1724 # this keeps track of which step has been computed for which channel 1725 channel_dict = {} 1726 log_files = [] 1727 for folder in folders: 1728 log_files += glob.glob(pjoin(self.me_dir, 'SubProcesses', 'P*', 1729 folder, 'log.txt')) 1730 1731 content = '' 1732 1733 outfile = open(log_file, 'w') 1734 1735 content += '<HTML><BODY>\n<font face="courier" size=2>' 1736 for log in log_files: 1737 channel_dict[os.path.dirname(log)] = [istep] 1738 # put an anchor 1739 content += '<a name=%s></a>\n' % (os.path.dirname(log).replace( 1740 pjoin(self.me_dir,'SubProcesses'),'')) 1741 # and put some nice header 1742 content += '<font color="red">\n' 1743 content += '<br>LOG file for integration channel %s, %s <br>' % \ 1744 (os.path.dirname(log).replace(pjoin(self.me_dir, 1745 'SubProcesses'), ''), 1746 step_list[istep]) 1747 content += '</font>\n' 1748 #then just flush the content of the small log inside the big log 1749 #the PRE tag prints everything verbatim 1750 content += '<PRE>\n' + open(log).read() + '\n</PRE>' 1751 content +='<br>\n' 1752 outfile.write(content) 1753 content='' 1754 1755 outfile.write('</font>\n</BODY></HTML>\n') 1756 outfile.close()
1757 1758
1759 - def read_results(self, output, mode):
1760 """extract results (cross-section, absolute cross-section and errors) 1761 from output, which should be formatted as 1762 Found 4 correctly terminated jobs 1763 random seed found in 'randinit' is 33 1764 Integrated abs(cross-section) 1765 7.94473937e+03 +- 2.9953e+01 (3.7702e-01%) 1766 Integrated cross-section 1767 6.63392298e+03 +- 3.7669e+01 (5.6782e-01%) 1768 for aMC@NLO/aMC@LO, and as 1769 1770 for NLO/LO 1771 The cross_sect_dict is returned""" 1772 res = {} 1773 if mode in ['aMC@LO', 'aMC@NLO', 'noshower', 'noshowerLO']: 1774 pat = re.compile(\ 1775 '''Found (\d+) correctly terminated jobs 1776 random seed found in 'randinit' is (\d+) 1777 Integrated abs\(cross-section\) 1778 \s*(\d+\.\d+e[+-]\d+) \+\- (\d+\.\d+e[+-]\d+) \((\d+\.\d+e[+-]\d+)\%\) 1779 Integrated cross-section 1780 \s*(\-?\d+\.\d+e[+-]\d+) \+\- (\d+\.\d+e[+-]\d+) \((\-?\d+\.\d+e[+-]\d+)\%\)''') 1781 else: 1782 pat = re.compile(\ 1783 '''Found (\d+) correctly terminated jobs 1784 \s*(\-?\d+\.\d+e[+-]\d+) \+\- (\d+\.\d+e[+-]\d+) \((\-?\d+\.\d+e[+-]\d+)\%\)''') 1785 pass 1786 1787 match = re.search(pat, output[0]) 1788 if not match or output[1]: 1789 logger.info('Return code of the event collection: '+str(output[1])) 1790 logger.info('Output of the event collection:\n'+output[0]) 1791 raise aMCatNLOError('An error occurred during the collection of results.\n' + 1792 'Please check the .log files inside the directories which failed.') 1793 # if int(match.groups()[0]) != self.njobs: 1794 # raise aMCatNLOError('Not all jobs terminated successfully') 1795 if mode in ['aMC@LO', 'aMC@NLO', 'noshower', 'noshowerLO']: 1796 return {'randinit' : int(match.groups()[1]), 1797 'xseca' : float(match.groups()[2]), 1798 'erra' : float(match.groups()[3]), 1799 'xsect' : float(match.groups()[5]), 1800 'errt' : float(match.groups()[6])} 1801 else: 1802 return {'xsect' : float(match.groups()[1]), 1803 'errt' : float(match.groups()[2])}
1804
1805 - def print_summary(self, options, step, mode, scale_pdf_info={}):
1806 """print a summary of the results contained in self.cross_sect_dict. 1807 step corresponds to the mintMC step, if =2 (i.e. after event generation) 1808 some additional infos are printed""" 1809 # find process name 1810 proc_card_lines = open(pjoin(self.me_dir, 'Cards', 'proc_card_mg5.dat')).read().split('\n') 1811 process = '' 1812 for line in proc_card_lines: 1813 if line.startswith('generate') or line.startswith('add process'): 1814 process = process+(line.replace('generate ', '')).replace('add process ','')+' ; ' 1815 lpp = {0:'l', 1:'p', -1:'pbar'} 1816 proc_info = '\n Process %s\n Run at %s-%s collider (%s + %s GeV)' % \ 1817 (process[:-3], lpp[self.run_card['lpp1']], lpp[self.run_card['lpp2']], 1818 self.run_card['ebeam1'], self.run_card['ebeam2']) 1819 1820 # Gather some basic statistics for the run and extracted from the log files. 1821 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 1822 log_GV_files = glob.glob(pjoin(self.me_dir, \ 1823 'SubProcesses', 'P*','G*','log_MINT*.txt')) 1824 all_log_files = glob.glob(pjoin(self.me_dir, \ 1825 'SubProcesses', 'P*','G*','log*.txt')) 1826 elif mode == 'NLO': 1827 log_GV_files = glob.glob(pjoin(self.me_dir, \ 1828 'SubProcesses', 'P*','all_G*','log*.txt')) 1829 all_log_files = sum([glob.glob(pjoin(self.me_dir,'SubProcesses', 'P*', 1830 '%sG*'%foldName,'log*.txt')) for foldName in ['all_']],[]) 1831 elif mode == 'LO': 1832 log_GV_files = '' 1833 all_log_files = sum([glob.glob(pjoin(self.me_dir,'SubProcesses', 'P*', 1834 '%sG*'%foldName,'log*.txt')) for foldName in ['born_']],[]) 1835 else: 1836 raise aMCatNLOError, 'Running mode %s not supported.'%mode 1837 1838 1839 if mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']: 1840 status = ['Determining the number of unweighted events per channel', 1841 'Updating the number of unweighted events per channel', 1842 'Summary:'] 1843 if step != 2: 1844 message = status[step] + '\n\n Intermediate results:' + \ 1845 ('\n Random seed: %(randinit)d' + \ 1846 
'\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' + \ 1847 '\n Total abs(cross-section): %(xseca)8.3e +- %(erra)6.1e pb \n') \ 1848 % self.cross_sect_dict 1849 else: 1850 1851 message = '\n ' + status[step] + proc_info + \ 1852 '\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' % \ 1853 self.cross_sect_dict 1854 1855 if self.run_card['nevents']>=10000 and self.run_card['reweight_scale']: 1856 message = message + \ 1857 ('\n Ren. and fac. scale uncertainty: +%0.1f%% -%0.1f%%') % \ 1858 (scale_pdf_info['scale_upp'], scale_pdf_info['scale_low']) 1859 if self.run_card['nevents']>=10000 and self.run_card['reweight_PDF']: 1860 message = message + \ 1861 ('\n PDF uncertainty: +%0.1f%% -%0.1f%%') % \ 1862 (scale_pdf_info['pdf_upp'], scale_pdf_info['pdf_low']) 1863 1864 neg_frac = (self.cross_sect_dict['xseca'] - self.cross_sect_dict['xsect'])/\ 1865 (2. * self.cross_sect_dict['xseca']) 1866 message = message + \ 1867 ('\n Number of events generated: %s' + \ 1868 '\n Parton shower to be used: %s' + \ 1869 '\n Fraction of negative weights: %4.2f' + \ 1870 '\n Total running time : %s') % \ 1871 (self.run_card['nevents'], 1872 self.run_card['parton_shower'].upper(), 1873 neg_frac, 1874 misc.format_timer(time.time()-self.start_time)) 1875 1876 elif mode in ['NLO', 'LO']: 1877 status = ['Results after grid setup (cross-section is non-physical):', 1878 'Final results and run summary:'] 1879 if step == 0: 1880 message = '\n ' + status[step] + \ 1881 '\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' % \ 1882 self.cross_sect_dict 1883 elif step == 1: 1884 message = '\n ' + status[step] + proc_info + \ 1885 '\n Total cross-section: %(xsect)8.3e +- %(errt)6.1e pb' % \ 1886 self.cross_sect_dict 1887 if self.run_card['reweight_scale']: 1888 if int(self.run_card['ickkw'])!=-1: 1889 message = message + \ 1890 ('\n Ren. and fac. 
scale uncertainty: +%0.1f%% -%0.1f%%') % \ 1891 (scale_pdf_info['scale_upp'], scale_pdf_info['scale_low']) 1892 else: 1893 message = message + \ 1894 ('\n Soft and hard scale dependence (added in quadrature): +%0.1f%% -%0.1f%%') % \ 1895 (scale_pdf_info['scale_upp_quad'], scale_pdf_info['scale_low_quad']) 1896 if self.run_card['reweight_PDF']: 1897 message = message + \ 1898 ('\n PDF uncertainty: +%0.1f%% -%0.1f%%') % \ 1899 (scale_pdf_info['pdf_upp'], scale_pdf_info['pdf_low']) 1900 1901 if (mode in ['NLO', 'LO'] and step!=1) or \ 1902 (mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO'] and step!=2): 1903 logger.info(message+'\n') 1904 return 1905 1906 # Some advanced general statistics are shown in the debug message at the 1907 # end of the run 1908 # Make sure it never stops a run 1909 try: 1910 message, debug_msg = \ 1911 self.compile_advanced_stats(log_GV_files, all_log_files, message) 1912 except Exception as e: 1913 debug_msg = 'Advanced statistics collection failed with error "%s"'%str(e) 1914 1915 logger.debug(debug_msg+'\n') 1916 logger.info(message+'\n') 1917 1918 # Now copy relevant information in the Events/Run_<xxx> directory 1919 evt_path = pjoin(self.me_dir, 'Events', self.run_name) 1920 open(pjoin(evt_path, 'summary.txt'),'w').write(message+'\n') 1921 open(pjoin(evt_path, '.full_summary.txt'), 1922 'w').write(message+'\n\n'+debug_msg+'\n') 1923 1924 self.archive_files(evt_path,mode)
1925
1926 - def archive_files(self, evt_path, mode):
1927 """ Copies in the Events/Run_<xxx> directory relevant files characterizing 1928 the run.""" 1929 1930 files_to_arxiv = [pjoin('Cards','param_card.dat'), 1931 pjoin('Cards','MadLoopParams.dat'), 1932 pjoin('Cards','FKS_params.dat'), 1933 pjoin('Cards','run_card.dat'), 1934 pjoin('Subprocesses','setscales.f'), 1935 pjoin('Subprocesses','cuts.f')] 1936 1937 if mode in ['NLO', 'LO']: 1938 files_to_arxiv.append(pjoin('Cards','FO_analyse_card.dat')) 1939 1940 if not os.path.exists(pjoin(evt_path,'RunMaterial')): 1941 os.mkdir(pjoin(evt_path,'RunMaterial')) 1942 1943 for path in files_to_arxiv: 1944 if os.path.isfile(pjoin(self.me_dir,path)): 1945 files.cp(pjoin(self.me_dir,path),pjoin(evt_path,'RunMaterial')) 1946 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'],cwd=evt_path) 1947 shutil.rmtree(pjoin(evt_path,'RunMaterial'))
1948
1949 - def compile_advanced_stats(self,log_GV_files,all_log_files,message):
1950 """ This functions goes through the log files given in arguments and 1951 compiles statistics about MadLoop stability, virtual integration 1952 optimization and detection of potential error messages into a nice 1953 debug message to printed at the end of the run """ 1954 1955 # > UPS is a dictionary of tuples with this format {channel:[nPS,nUPS]} 1956 # > Errors is a list of tuples with this format (log_file,nErrors) 1957 stats = {'UPS':{}, 'Errors':[], 'virt_stats':{}, 'timings':{}} 1958 mint_search = re.compile(r"MINT(?P<ID>\d*).txt") 1959 1960 # ================================== 1961 # == MadLoop stability statistics == 1962 # ================================== 1963 1964 # Recuperate the fraction of unstable PS points found in the runs for 1965 # the virtuals 1966 UPS_stat_finder = re.compile( 1967 r"Satistics from MadLoop:.*"+\ 1968 r"Total points tried\:\s+(?P<ntot>\d+).*"+\ 1969 r"Stability unknown\:\s+(?P<nsun>\d+).*"+\ 1970 r"Stable PS point\:\s+(?P<nsps>\d+).*"+\ 1971 r"Unstable PS point \(and rescued\)\:\s+(?P<nups>\d+).*"+\ 1972 r"Exceptional PS point \(unstable and not rescued\)\:\s+(?P<neps>\d+).*"+\ 1973 r"Double precision used\:\s+(?P<nddp>\d+).*"+\ 1974 r"Quadruple precision used\:\s+(?P<nqdp>\d+).*"+\ 1975 r"Initialization phase\-space points\:\s+(?P<nini>\d+).*"+\ 1976 r"Unknown return code \(100\)\:\s+(?P<n100>\d+).*"+\ 1977 r"Unknown return code \(10\)\:\s+(?P<n10>\d+).*",re.DOTALL) 1978 1979 unit_code_meaning = { 0 : 'Not identified (CTModeRun != -1)', 1980 1 : 'CutTools (double precision)', 1981 2 : 'PJFry++', 1982 3 : 'IREGI', 1983 4 : 'Golem95', 1984 9 : 'CutTools (quadruple precision)'} 1985 RetUnit_finder =re.compile( 1986 r"#Unit\s*(?P<unit>\d+)\s*=\s*(?P<n_occurences>\d+)") 1987 #Unit 1988 1989 for gv_log in log_GV_files: 1990 channel_name = '/'.join(gv_log.split('/')[-5:-1]) 1991 log=open(gv_log,'r').read() 1992 UPS_stats = re.search(UPS_stat_finder,log) 1993 for retunit_stats in re.finditer(RetUnit_finder, log): 1994 if 
channel_name not in stats['UPS'].keys(): 1995 stats['UPS'][channel_name] = [0]*10+[[0]*10] 1996 stats['UPS'][channel_name][10][int(retunit_stats.group('unit'))] \ 1997 += int(retunit_stats.group('n_occurences')) 1998 if not UPS_stats is None: 1999 try: 2000 stats['UPS'][channel_name][0] += int(UPS_stats.group('ntot')) 2001 stats['UPS'][channel_name][1] += int(UPS_stats.group('nsun')) 2002 stats['UPS'][channel_name][2] += int(UPS_stats.group('nsps')) 2003 stats['UPS'][channel_name][3] += int(UPS_stats.group('nups')) 2004 stats['UPS'][channel_name][4] += int(UPS_stats.group('neps')) 2005 stats['UPS'][channel_name][5] += int(UPS_stats.group('nddp')) 2006 stats['UPS'][channel_name][6] += int(UPS_stats.group('nqdp')) 2007 stats['UPS'][channel_name][7] += int(UPS_stats.group('nini')) 2008 stats['UPS'][channel_name][8] += int(UPS_stats.group('n100')) 2009 stats['UPS'][channel_name][9] += int(UPS_stats.group('n10')) 2010 except KeyError: 2011 stats['UPS'][channel_name] = [int(UPS_stats.group('ntot')), 2012 int(UPS_stats.group('nsun')),int(UPS_stats.group('nsps')), 2013 int(UPS_stats.group('nups')),int(UPS_stats.group('neps')), 2014 int(UPS_stats.group('nddp')),int(UPS_stats.group('nqdp')), 2015 int(UPS_stats.group('nini')),int(UPS_stats.group('n100')), 2016 int(UPS_stats.group('n10')),[0]*10] 2017 debug_msg = "" 2018 if len(stats['UPS'].keys())>0: 2019 nTotPS = sum([chan[0] for chan in stats['UPS'].values()],0) 2020 nTotsun = sum([chan[1] for chan in stats['UPS'].values()],0) 2021 nTotsps = sum([chan[2] for chan in stats['UPS'].values()],0) 2022 nTotups = sum([chan[3] for chan in stats['UPS'].values()],0) 2023 nToteps = sum([chan[4] for chan in stats['UPS'].values()],0) 2024 nTotddp = sum([chan[5] for chan in stats['UPS'].values()],0) 2025 nTotqdp = sum([chan[6] for chan in stats['UPS'].values()],0) 2026 nTotini = sum([chan[7] for chan in stats['UPS'].values()],0) 2027 nTot100 = sum([chan[8] for chan in stats['UPS'].values()],0) 2028 nTot10 = sum([chan[9] for chan in 
stats['UPS'].values()],0) 2029 nTot1 = [sum([chan[10][i] for chan in stats['UPS'].values()],0) \ 2030 for i in range(10)] 2031 UPSfracs = [(chan[0] , 0.0 if chan[1][0]==0 else \ 2032 float(chan[1][4]*100)/chan[1][0]) for chan in stats['UPS'].items()] 2033 maxUPS = max(UPSfracs, key = lambda w: w[1]) 2034 2035 tmpStr = "" 2036 tmpStr += '\n Number of loop ME evaluations (by MadLoop): %d'%nTotPS 2037 tmpStr += '\n Stability unknown: %d'%nTotsun 2038 tmpStr += '\n Stable PS point: %d'%nTotsps 2039 tmpStr += '\n Unstable PS point (and rescued): %d'%nTotups 2040 tmpStr += '\n Unstable PS point (and not rescued): %d'%nToteps 2041 tmpStr += '\n Only double precision used: %d'%nTotddp 2042 tmpStr += '\n Quadruple precision used: %d'%nTotqdp 2043 tmpStr += '\n Initialization phase-space points: %d'%nTotini 2044 tmpStr += '\n Reduction methods used:' 2045 red_methods = [(unit_code_meaning[i],nTot1[i]) for i in \ 2046 unit_code_meaning.keys() if nTot1[i]>0] 2047 for method, n in sorted(red_methods, key= lambda l: l[1], reverse=True): 2048 tmpStr += '\n > %s%s%s'%(method,' '*(33-len(method)),n) 2049 if nTot100 != 0: 2050 debug_msg += '\n Unknown return code (100): %d'%nTot100 2051 if nTot10 != 0: 2052 debug_msg += '\n Unknown return code (10): %d'%nTot10 2053 nUnknownUnit = sum(nTot1[u] for u in range(10) if u \ 2054 not in unit_code_meaning.keys()) 2055 if nUnknownUnit != 0: 2056 debug_msg += '\n Unknown return code (1): %d'\ 2057 %nUnknownUnit 2058 2059 if maxUPS[1]>0.001: 2060 message += tmpStr 2061 message += '\n Total number of unstable PS point detected:'+\ 2062 ' %d (%4.2f%%)'%(nToteps,float(100*nToteps)/nTotPS) 2063 message += '\n Maximum fraction of UPS points in '+\ 2064 'channel %s (%4.2f%%)'%maxUPS 2065 message += '\n Please report this to the authors while '+\ 2066 'providing the file' 2067 message += '\n %s'%str(pjoin(os.path.dirname(self.me_dir), 2068 maxUPS[0],'UPS.log')) 2069 else: 2070 debug_msg += tmpStr 2071 2072 2073 # 
==================================================== 2074 # == aMC@NLO virtual integration optimization stats == 2075 # ==================================================== 2076 2077 virt_tricks_finder = re.compile( 2078 r"accumulated results Virtual ratio\s*=\s*-?(?P<v_ratio>[\d\+-Eed\.]*)"+\ 2079 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_ratio_err>[\d\+-Eed\.]*)\s*\%\)\s*\n"+\ 2080 r"accumulated results ABS virtual\s*=\s*-?(?P<v_abs_contr>[\d\+-Eed\.]*)"+\ 2081 r"\s*\+/-\s*-?[\d\+-Eed\.]*\s*\(\s*-?(?P<v_abs_contr_err>[\d\+-Eed\.]*)\s*\%\)") 2082 2083 virt_frac_finder = re.compile(r"update virtual fraction to\s*:\s*"+\ 2084 "-?(?P<v_frac>[\d\+-Eed\.]*)\s*-?(?P<v_average>[\d\+-Eed\.]*)") 2085 2086 channel_contr_finder = re.compile(r"Final result \[ABS\]\s*:\s*-?(?P<v_contr>[\d\+-Eed\.]*)") 2087 2088 channel_contr_list = {} 2089 for gv_log in log_GV_files: 2090 logfile=open(gv_log,'r') 2091 log = logfile.read() 2092 logfile.close() 2093 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 2094 vf_stats = None 2095 for vf_stats in re.finditer(virt_frac_finder, log): 2096 pass 2097 if not vf_stats is None: 2098 v_frac = float(vf_stats.group('v_frac')) 2099 v_average = float(vf_stats.group('v_average')) 2100 try: 2101 if v_frac < stats['virt_stats']['v_frac_min'][0]: 2102 stats['virt_stats']['v_frac_min']=(v_frac,channel_name) 2103 if v_frac > stats['virt_stats']['v_frac_max'][0]: 2104 stats['virt_stats']['v_frac_max']=(v_frac,channel_name) 2105 stats['virt_stats']['v_frac_avg'][0] += v_frac 2106 stats['virt_stats']['v_frac_avg'][1] += 1 2107 except KeyError: 2108 stats['virt_stats']['v_frac_min']=[v_frac,channel_name] 2109 stats['virt_stats']['v_frac_max']=[v_frac,channel_name] 2110 stats['virt_stats']['v_frac_avg']=[v_frac,1] 2111 2112 2113 ccontr_stats = None 2114 for ccontr_stats in re.finditer(channel_contr_finder, log): 2115 pass 2116 if not ccontr_stats is None: 2117 contrib = float(ccontr_stats.group('v_contr')) 2118 try: 2119 if 
contrib>channel_contr_list[channel_name]: 2120 channel_contr_list[channel_name]=contrib 2121 except KeyError: 2122 channel_contr_list[channel_name]=contrib 2123 2124 2125 # Now build the list of relevant virt log files to look for the maxima 2126 # of virt fractions and such. 2127 average_contrib = 0.0 2128 for value in channel_contr_list.values(): 2129 average_contrib += value 2130 if len(channel_contr_list.values()) !=0: 2131 average_contrib = average_contrib / len(channel_contr_list.values()) 2132 2133 relevant_log_GV_files = [] 2134 excluded_channels = set([]) 2135 all_channels = set([]) 2136 for log_file in log_GV_files: 2137 channel_name = '/'.join(log_file.split('/')[-3:-1]) 2138 all_channels.add(channel_name) 2139 try: 2140 if channel_contr_list[channel_name] > (0.1*average_contrib): 2141 relevant_log_GV_files.append(log_file) 2142 else: 2143 excluded_channels.add(channel_name) 2144 except KeyError: 2145 relevant_log_GV_files.append(log_file) 2146 2147 # Now we want to use the latest occurence of accumulated result in the log file 2148 for gv_log in relevant_log_GV_files: 2149 logfile=open(gv_log,'r') 2150 log = logfile.read() 2151 logfile.close() 2152 channel_name = '/'.join(gv_log.split('/')[-3:-1]) 2153 2154 vt_stats = None 2155 for vt_stats in re.finditer(virt_tricks_finder, log): 2156 pass 2157 if not vt_stats is None: 2158 vt_stats_group = vt_stats.groupdict() 2159 v_ratio = float(vt_stats.group('v_ratio')) 2160 v_ratio_err = float(vt_stats.group('v_ratio_err')) 2161 v_contr = float(vt_stats.group('v_abs_contr')) 2162 v_contr_err = float(vt_stats.group('v_abs_contr_err')) 2163 try: 2164 if v_ratio < stats['virt_stats']['v_ratio_min'][0]: 2165 stats['virt_stats']['v_ratio_min']=(v_ratio,channel_name) 2166 if v_ratio > stats['virt_stats']['v_ratio_max'][0]: 2167 stats['virt_stats']['v_ratio_max']=(v_ratio,channel_name) 2168 if v_ratio < stats['virt_stats']['v_ratio_err_min'][0]: 2169 stats['virt_stats']['v_ratio_err_min']=(v_ratio_err,channel_name) 2170 
if v_ratio > stats['virt_stats']['v_ratio_err_max'][0]: 2171 stats['virt_stats']['v_ratio_err_max']=(v_ratio_err,channel_name) 2172 if v_contr < stats['virt_stats']['v_contr_min'][0]: 2173 stats['virt_stats']['v_contr_min']=(v_contr,channel_name) 2174 if v_contr > stats['virt_stats']['v_contr_max'][0]: 2175 stats['virt_stats']['v_contr_max']=(v_contr,channel_name) 2176 if v_contr_err < stats['virt_stats']['v_contr_err_min'][0]: 2177 stats['virt_stats']['v_contr_err_min']=(v_contr_err,channel_name) 2178 if v_contr_err > stats['virt_stats']['v_contr_err_max'][0]: 2179 stats['virt_stats']['v_contr_err_max']=(v_contr_err,channel_name) 2180 except KeyError: 2181 stats['virt_stats']['v_ratio_min']=[v_ratio,channel_name] 2182 stats['virt_stats']['v_ratio_max']=[v_ratio,channel_name] 2183 stats['virt_stats']['v_ratio_err_min']=[v_ratio_err,channel_name] 2184 stats['virt_stats']['v_ratio_err_max']=[v_ratio_err,channel_name] 2185 stats['virt_stats']['v_contr_min']=[v_contr,channel_name] 2186 stats['virt_stats']['v_contr_max']=[v_contr,channel_name] 2187 stats['virt_stats']['v_contr_err_min']=[v_contr_err,channel_name] 2188 stats['virt_stats']['v_contr_err_max']=[v_contr_err,channel_name] 2189 2190 vf_stats = None 2191 for vf_stats in re.finditer(virt_frac_finder, log): 2192 pass 2193 if not vf_stats is None: 2194 v_frac = float(vf_stats.group('v_frac')) 2195 v_average = float(vf_stats.group('v_average')) 2196 try: 2197 if v_average < stats['virt_stats']['v_average_min'][0]: 2198 stats['virt_stats']['v_average_min']=(v_average,channel_name) 2199 if v_average > stats['virt_stats']['v_average_max'][0]: 2200 stats['virt_stats']['v_average_max']=(v_average,channel_name) 2201 stats['virt_stats']['v_average_avg'][0] += v_average 2202 stats['virt_stats']['v_average_avg'][1] += 1 2203 except KeyError: 2204 stats['virt_stats']['v_average_min']=[v_average,channel_name] 2205 stats['virt_stats']['v_average_max']=[v_average,channel_name] 2206 
stats['virt_stats']['v_average_avg']=[v_average,1] 2207 2208 try: 2209 debug_msg += '\n\n Statistics on virtual integration optimization : ' 2210 2211 debug_msg += '\n Maximum virt fraction computed %.3f (%s)'\ 2212 %tuple(stats['virt_stats']['v_frac_max']) 2213 debug_msg += '\n Minimum virt fraction computed %.3f (%s)'\ 2214 %tuple(stats['virt_stats']['v_frac_min']) 2215 debug_msg += '\n Average virt fraction computed %.3f'\ 2216 %float(stats['virt_stats']['v_frac_avg'][0]/float(stats['virt_stats']['v_frac_avg'][1])) 2217 debug_msg += '\n Stats below exclude negligible channels (%d excluded out of %d)'%\ 2218 (len(excluded_channels),len(all_channels)) 2219 debug_msg += '\n Maximum virt ratio used %.2f (%s)'\ 2220 %tuple(stats['virt_stats']['v_average_max']) 2221 debug_msg += '\n Maximum virt ratio found from grids %.2f (%s)'\ 2222 %tuple(stats['virt_stats']['v_ratio_max']) 2223 tmpStr = '\n Max. MC err. on virt ratio from grids %.1f %% (%s)'\ 2224 %tuple(stats['virt_stats']['v_ratio_err_max']) 2225 debug_msg += tmpStr 2226 # After all it was decided that it is better not to alarm the user unecessarily 2227 # with such printout of the statistics. 2228 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0 or \ 2229 # stats['virt_stats']['v_ratio_err_max'][0]>100.0: 2230 # message += "\n Suspiciously large MC error in :" 2231 # if stats['virt_stats']['v_ratio_err_max'][0]>100.0: 2232 # message += tmpStr 2233 2234 tmpStr = '\n Maximum MC error on abs virt %.1f %% (%s)'\ 2235 %tuple(stats['virt_stats']['v_contr_err_max']) 2236 debug_msg += tmpStr 2237 # if stats['virt_stats']['v_contr_err_max'][0]>100.0: 2238 # message += tmpStr 2239 2240 2241 except KeyError: 2242 debug_msg += '\n Could not find statistics on the integration optimization. 
' 2243 2244 # ======================================= 2245 # == aMC@NLO timing profile statistics == 2246 # ======================================= 2247 2248 timing_stat_finder = re.compile(r"\s*Time spent in\s*(?P<name>\w*)\s*:\s*"+\ 2249 "(?P<time>[\d\+-Eed\.]*)\s*") 2250 2251 for logf in log_GV_files: 2252 logfile=open(logf,'r') 2253 log = logfile.read() 2254 logfile.close() 2255 channel_name = '/'.join(logf.split('/')[-3:-1]) 2256 mint = re.search(mint_search,logf) 2257 if not mint is None: 2258 channel_name = channel_name+' [step %s]'%mint.group('ID') 2259 2260 for time_stats in re.finditer(timing_stat_finder, log): 2261 try: 2262 stats['timings'][time_stats.group('name')][channel_name]+=\ 2263 float(time_stats.group('time')) 2264 except KeyError: 2265 if time_stats.group('name') not in stats['timings'].keys(): 2266 stats['timings'][time_stats.group('name')] = {} 2267 stats['timings'][time_stats.group('name')][channel_name]=\ 2268 float(time_stats.group('time')) 2269 2270 # useful inline function 2271 Tstr = lambda secs: str(datetime.timedelta(seconds=int(secs))) 2272 try: 2273 totTimeList = [(time, chan) for chan, time in \ 2274 stats['timings']['Total'].items()] 2275 except KeyError: 2276 totTimeList = [] 2277 2278 totTimeList.sort() 2279 if len(totTimeList)>0: 2280 debug_msg += '\n\n Inclusive timing profile :' 2281 debug_msg += '\n Overall slowest channel %s (%s)'%\ 2282 (Tstr(totTimeList[-1][0]),totTimeList[-1][1]) 2283 debug_msg += '\n Average channel running time %s'%\ 2284 Tstr(sum([el[0] for el in totTimeList])/len(totTimeList)) 2285 debug_msg += '\n Aggregated total running time %s'%\ 2286 Tstr(sum([el[0] for el in totTimeList])) 2287 else: 2288 debug_msg += '\n\n Inclusive timing profile non available.' 
2289 2290 sorted_keys = sorted(stats['timings'].keys(), key= lambda stat: \ 2291 sum(stats['timings'][stat].values()), reverse=True) 2292 for name in sorted_keys: 2293 if name=='Total': 2294 continue 2295 if sum(stats['timings'][name].values())<=0.0: 2296 debug_msg += '\n Zero time record for %s.'%name 2297 continue 2298 try: 2299 TimeList = [((100.0*time/stats['timings']['Total'][chan]), 2300 chan) for chan, time in stats['timings'][name].items()] 2301 except KeyError, ZeroDivisionError: 2302 debug_msg += '\n\n Timing profile for %s unavailable.'%name 2303 continue 2304 TimeList.sort() 2305 debug_msg += '\n Timing profile for <%s> :'%name 2306 try: 2307 debug_msg += '\n Overall fraction of time %.3f %%'%\ 2308 float((100.0*(sum(stats['timings'][name].values())/ 2309 sum(stats['timings']['Total'].values())))) 2310 except KeyError, ZeroDivisionError: 2311 debug_msg += '\n Overall fraction of time unavailable.' 2312 debug_msg += '\n Largest fraction of time %.3f %% (%s)'%\ 2313 (TimeList[-1][0],TimeList[-1][1]) 2314 debug_msg += '\n Smallest fraction of time %.3f %% (%s)'%\ 2315 (TimeList[0][0],TimeList[0][1]) 2316 2317 # ============================= 2318 # == log file eror detection == 2319 # ============================= 2320 2321 # Find the number of potential errors found in all log files 2322 # This re is a simple match on a case-insensitve 'error' but there is 2323 # also some veto added for excluding the sentence 2324 # "See Section 6 of paper for error calculation." 2325 # which appear in the header of lhapdf in the logs. 
2326 err_finder = re.compile(\ 2327 r"(?<!of\spaper\sfor\s)\bERROR\b(?!\scalculation\.)",re.IGNORECASE) 2328 for log in all_log_files: 2329 logfile=open(log,'r') 2330 nErrors = len(re.findall(err_finder, logfile.read())) 2331 logfile.close() 2332 if nErrors != 0: 2333 stats['Errors'].append((str(log),nErrors)) 2334 2335 nErrors = sum([err[1] for err in stats['Errors']],0) 2336 if nErrors != 0: 2337 debug_msg += '\n WARNING:: A total of %d error%s ha%s been '\ 2338 %(nErrors,'s' if nErrors>1 else '','ve' if nErrors>1 else 's')+\ 2339 'found in the following log file%s:'%('s' if \ 2340 len(stats['Errors'])>1 else '') 2341 for error in stats['Errors'][:3]: 2342 log_name = '/'.join(error[0].split('/')[-5:]) 2343 debug_msg += '\n > %d error%s in %s'%\ 2344 (error[1],'s' if error[1]>1 else '',log_name) 2345 if len(stats['Errors'])>3: 2346 nRemainingErrors = sum([err[1] for err in stats['Errors']][3:],0) 2347 nRemainingLogs = len(stats['Errors'])-3 2348 debug_msg += '\n And another %d error%s in %d other log file%s'%\ 2349 (nRemainingErrors, 's' if nRemainingErrors>1 else '', 2350 nRemainingLogs, 's ' if nRemainingLogs>1 else '') 2351 2352 return message, debug_msg
2353 2354
2355 - def reweight_and_collect_events(self, options, mode, nevents, event_norm):
2356 """this function calls the reweighting routines and creates the event file in the 2357 Event dir. Return the name of the event file created 2358 """ 2359 scale_pdf_info={} 2360 if self.run_card['reweight_scale'] or self.run_card['reweight_PDF'] : 2361 scale_pdf_info = self.run_reweight(options['reweightonly']) 2362 2363 self.update_status('Collecting events', level='parton', update_results=True) 2364 misc.compile(['collect_events'], 2365 cwd=pjoin(self.me_dir, 'SubProcesses')) 2366 p = misc.Popen(['./collect_events'], cwd=pjoin(self.me_dir, 'SubProcesses'), 2367 stdin=subprocess.PIPE, 2368 stdout=open(pjoin(self.me_dir, 'collect_events.log'), 'w')) 2369 if event_norm.lower() == 'sum': 2370 p.communicate(input = '1\n') 2371 elif event_norm.lower() == 'unity': 2372 p.communicate(input = '3\n') 2373 else: 2374 p.communicate(input = '2\n') 2375 2376 #get filename from collect events 2377 filename = open(pjoin(self.me_dir, 'collect_events.log')).read().split()[-1] 2378 2379 if not os.path.exists(pjoin(self.me_dir, 'SubProcesses', filename)): 2380 raise aMCatNLOError('An error occurred during event generation. ' + \ 2381 'The event file has not been created. Check collect_events.log') 2382 evt_file = pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz') 2383 misc.gzip(pjoin(self.me_dir, 'SubProcesses', filename), stdout=evt_file) 2384 if not options['reweightonly']: 2385 self.print_summary(options, 2, mode, scale_pdf_info) 2386 logger.info('The %s file has been generated.\n' % (evt_file)) 2387 self.results.add_detail('nb_event', nevents) 2388 self.update_status('Events generated', level='parton', update_results=True) 2389 return evt_file[:-3]
2390 2391
2392 - def run_mcatnlo(self, evt_file):
2393 """runs mcatnlo on the generated event file, to produce showered-events 2394 """ 2395 logger.info('Preparing MCatNLO run') 2396 try: 2397 misc.gunzip(evt_file) 2398 except Exception: 2399 pass 2400 2401 self.banner = banner_mod.Banner(evt_file) 2402 shower = self.banner.get_detail('run_card', 'parton_shower').upper() 2403 2404 #check that the number of split event files divides the number of 2405 # events, otherwise set it to 1 2406 if int(int(self.banner.get_detail('run_card', 'nevents')) / \ 2407 self.shower_card['nsplit_jobs']) * self.shower_card['nsplit_jobs'] \ 2408 != int(self.banner.get_detail('run_card', 'nevents')): 2409 logger.warning(\ 2410 'nsplit_jobs in the shower card is not a divisor of the number of events.\n' + \ 2411 'Setting it to 1.') 2412 self.shower_card['nsplit_jobs'] = 1 2413 2414 # don't split jobs if the user asks to shower only a part of the events 2415 if self.shower_card['nevents'] > 0 and \ 2416 self.shower_card['nevents'] < int(self.banner.get_detail('run_card', 'nevents')) and \ 2417 self.shower_card['nsplit_jobs'] != 1: 2418 logger.warning(\ 2419 'Only a part of the events will be showered.\n' + \ 2420 'Setting nsplit_jobs in the shower_card to 1.') 2421 self.shower_card['nsplit_jobs'] = 1 2422 2423 self.banner_to_mcatnlo(evt_file) 2424 2425 # if fastjet has to be linked (in extralibs) then 2426 # add lib /include dirs for fastjet if fastjet-config is present on the 2427 # system, otherwise add fjcore to the files to combine 2428 if 'fastjet' in self.shower_card['extralibs']: 2429 #first, check that stdc++ is also linked 2430 if not 'stdc++' in self.shower_card['extralibs']: 2431 logger.warning('Linking FastJet: adding stdc++ to EXTRALIBS') 2432 self.shower_card['extralibs'] += ' stdc++' 2433 # then check if options[fastjet] corresponds to a valid fj installation 2434 try: 2435 #this is for a complete fj installation 2436 p = subprocess.Popen([self.options['fastjet'], '--prefix'], \ 2437 stdout=subprocess.PIPE, 
stderr=subprocess.PIPE) 2438 output, error = p.communicate() 2439 #remove the line break from output (last character) 2440 output = output[:-1] 2441 # add lib/include paths 2442 if not pjoin(output, 'lib') in self.shower_card['extrapaths']: 2443 logger.warning('Linking FastJet: updating EXTRAPATHS') 2444 self.shower_card['extrapaths'] += ' ' + pjoin(output, 'lib') 2445 if not pjoin(output, 'include') in self.shower_card['includepaths']: 2446 logger.warning('Linking FastJet: updating INCLUDEPATHS') 2447 self.shower_card['includepaths'] += ' ' + pjoin(output, 'include') 2448 # to be changed in the fortran wrapper 2449 include_line = '#include "fastjet/ClusterSequence.hh"//INCLUDE_FJ' 2450 namespace_line = 'namespace fj = fastjet;//NAMESPACE_FJ' 2451 except Exception: 2452 logger.warning('Linking FastJet: using fjcore') 2453 # this is for FJcore, so no FJ library has to be linked 2454 self.shower_card['extralibs'] = self.shower_card['extralibs'].replace('fastjet', '') 2455 if not 'fjcore.o' in self.shower_card['analyse']: 2456 self.shower_card['analyse'] += ' fjcore.o' 2457 # to be changed in the fortran wrapper 2458 include_line = '#include "fjcore.hh"//INCLUDE_FJ' 2459 namespace_line = 'namespace fj = fjcore;//NAMESPACE_FJ' 2460 # change the fortran wrapper with the correct namespaces/include 2461 fjwrapper_lines = open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc')).read().split('\n') 2462 for line in fjwrapper_lines: 2463 if '//INCLUDE_FJ' in line: 2464 fjwrapper_lines[fjwrapper_lines.index(line)] = include_line 2465 if '//NAMESPACE_FJ' in line: 2466 fjwrapper_lines[fjwrapper_lines.index(line)] = namespace_line 2467 open(pjoin(self.me_dir, 'MCatNLO', 'srcCommon', 'myfastjetfortran.cc'), 'w').write(\ 2468 '\n'.join(fjwrapper_lines) + '\n') 2469 2470 extrapaths = self.shower_card['extrapaths'].split() 2471 2472 # check that the path needed by HW++ and PY8 are set if one uses these shower 2473 if shower in ['HERWIGPP', 'PYTHIA8']: 2474 path_dict = 
{'HERWIGPP': ['hepmc_path', 2475 'thepeg_path', 2476 'hwpp_path'], 2477 'PYTHIA8': ['pythia8_path']} 2478 2479 if not all([self.options[ppath] for ppath in path_dict[shower]]): 2480 raise aMCatNLOError('Some paths are missing in the configuration file.\n' + \ 2481 ('Please make sure you have set these variables: %s' % ', '.join(path_dict[shower]))) 2482 2483 if shower == 'HERWIGPP': 2484 extrapaths.append(pjoin(self.options['hepmc_path'], 'lib')) 2485 2486 if shower == 'PYTHIA8' and not os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 2487 extrapaths.append(pjoin(self.options['pythia8_path'], 'lib')) 2488 2489 if 'LD_LIBRARY_PATH' in os.environ.keys(): 2490 ldlibrarypath = os.environ['LD_LIBRARY_PATH'] 2491 else: 2492 ldlibrarypath = '' 2493 ldlibrarypath += ':' + ':'.join(extrapaths) 2494 os.putenv('LD_LIBRARY_PATH', ldlibrarypath) 2495 2496 shower_card_path = pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat') 2497 self.shower_card.write_card(shower, shower_card_path) 2498 2499 # overwrite if shower_card_set.dat exists in MCatNLO 2500 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat')): 2501 files.mv(pjoin(self.me_dir, 'MCatNLO', 'shower_card_set.dat'), 2502 pjoin(self.me_dir, 'MCatNLO', 'shower_card.dat')) 2503 2504 mcatnlo_log = pjoin(self.me_dir, 'mcatnlo.log') 2505 self.update_status('Compiling MCatNLO for %s...' % shower, level='shower') 2506 misc.call(['./MCatNLO_MadFKS.inputs'], stdout=open(mcatnlo_log, 'w'), 2507 stderr=open(mcatnlo_log, 'w'), 2508 cwd=pjoin(self.me_dir, 'MCatNLO')) 2509 2510 exe = 'MCATNLO_%s_EXE' % shower 2511 if not os.path.exists(pjoin(self.me_dir, 'MCatNLO', exe)) and \ 2512 not os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe')): 2513 print open(mcatnlo_log).read() 2514 raise aMCatNLOError('Compilation failed, check %s for details' % mcatnlo_log) 2515 logger.info(' ... 
done') 2516 2517 # create an empty dir where to run 2518 count = 1 2519 while os.path.isdir(pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 2520 (shower, count))): 2521 count += 1 2522 rundir = pjoin(self.me_dir, 'MCatNLO', 'RUN_%s_%d' % \ 2523 (shower, count)) 2524 os.mkdir(rundir) 2525 files.cp(shower_card_path, rundir) 2526 2527 #look for the event files (don't resplit if one asks for the 2528 # same number of event files as in the previous run) 2529 event_files = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 2530 'events_*.lhe')) 2531 if max(len(event_files), 1) != self.shower_card['nsplit_jobs']: 2532 logger.info('Cleaning old files and splitting the event file...') 2533 #clean the old files 2534 files.rm([f for f in event_files if 'events.lhe' not in f]) 2535 if self.shower_card['nsplit_jobs'] > 1: 2536 misc.compile(['split_events'], cwd = pjoin(self.me_dir, 'Utilities')) 2537 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'split_events')], 2538 stdin=subprocess.PIPE, 2539 stdout=open(pjoin(self.me_dir, 'Events', self.run_name, 'split_events.log'), 'w'), 2540 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2541 p.communicate(input = 'events.lhe\n%d\n' % self.shower_card['nsplit_jobs']) 2542 logger.info('Splitting done.') 2543 event_files = glob.glob(pjoin(self.me_dir, 'Events', self.run_name, 2544 'events_*.lhe')) 2545 2546 event_files.sort() 2547 2548 self.update_status('Showering events...', level='shower') 2549 logger.info('(Running in %s)' % rundir) 2550 if shower != 'PYTHIA8': 2551 files.mv(pjoin(self.me_dir, 'MCatNLO', exe), rundir) 2552 files.mv(pjoin(self.me_dir, 'MCatNLO', 'MCATNLO_%s_input' % shower), rundir) 2553 else: 2554 # special treatment for pythia8 2555 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.cmd'), rundir) 2556 files.mv(pjoin(self.me_dir, 'MCatNLO', 'Pythia8.exe'), rundir) 2557 if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')): 2558 files.ln(pjoin(self.options['pythia8_path'], 'examples', 'config.sh'), rundir) 
2559 files.ln(pjoin(self.options['pythia8_path'], 'xmldoc'), rundir) 2560 else: 2561 files.ln(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'), rundir) 2562 #link the hwpp exe in the rundir 2563 if shower == 'HERWIGPP': 2564 try: 2565 files.ln(pjoin(self.options['hwpp_path'], 'bin', 'Herwig++'), rundir) 2566 except Exception: 2567 raise aMCatNLOError('The Herwig++ path set in the configuration file is not valid.') 2568 2569 if os.path.exists(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so')): 2570 files.cp(pjoin(self.me_dir, 'MCatNLO', 'HWPPAnalyzer', 'HepMCFortran.so'), rundir) 2571 2572 files.ln(evt_file, rundir, 'events.lhe') 2573 for i, f in enumerate(event_files): 2574 files.ln(f, rundir,'events_%d.lhe' % (i + 1)) 2575 2576 if not self.shower_card['analyse']: 2577 # an hep/hepmc file as output 2578 out_id = 'HEP' 2579 else: 2580 # one or more .top file(s) as output 2581 if "HwU" in self.shower_card['analyse']: 2582 out_id = 'HWU' 2583 else: 2584 out_id = 'TOP' 2585 2586 # write the executable 2587 open(pjoin(rundir, 'shower.sh'), 'w').write(\ 2588 open(pjoin(self.me_dir, 'MCatNLO', 'shower_template.sh')).read() \ 2589 % {'extralibs': ':'.join(extrapaths)}) 2590 subprocess.call(['chmod', '+x', pjoin(rundir, 'shower.sh')]) 2591 2592 if event_files: 2593 arg_list = [[shower, out_id, self.run_name, '%d' % (i + 1)] \ 2594 for i in range(len(event_files))] 2595 else: 2596 arg_list = [[shower, out_id, self.run_name]] 2597 2598 self.run_all({rundir: 'shower.sh'}, arg_list, 'shower') 2599 self.njobs = 1 2600 self.wait_for_complete('shower') 2601 2602 # now collect the results 2603 message = '' 2604 warning = '' 2605 to_gzip = [evt_file] 2606 if out_id == 'HEP': 2607 #copy the showered stdhep/hepmc file back in events 2608 if shower in ['PYTHIA8', 'HERWIGPP']: 2609 hep_format = 'HEPMC' 2610 ext = 'hepmc' 2611 else: 2612 hep_format = 'StdHEP' 2613 ext = 'hep' 2614 2615 hep_file = '%s_%s_0.%s.gz' % \ 2616 (pjoin(os.path.dirname(evt_file), 
'events'), shower, ext) 2617 count = 0 2618 2619 # find the first available name for the output: 2620 # check existing results with or without event splitting 2621 while os.path.exists(hep_file) or \ 2622 os.path.exists(hep_file.replace('.%s.gz' % ext, '__1.%s.gz' % ext)) : 2623 count +=1 2624 hep_file = '%s_%s_%d.%s.gz' % \ 2625 (pjoin(os.path.dirname(evt_file), 'events'), shower, count, ext) 2626 2627 try: 2628 if self.shower_card['nsplit_jobs'] == 1: 2629 files.mv(os.path.join(rundir, 'events.%s.gz' % ext), hep_file) 2630 message = ('The file %s has been generated. \nIt contains showered' + \ 2631 ' and hadronized events in the %s format obtained' + \ 2632 ' showering the parton-level event file %s.gz with %s') % \ 2633 (hep_file, hep_format, evt_file, shower) 2634 else: 2635 hep_list = [] 2636 for i in range(self.shower_card['nsplit_jobs']): 2637 hep_list.append(hep_file.replace('.%s.gz' % ext, '__%d.%s.gz' % (i + 1, ext))) 2638 files.mv(os.path.join(rundir, 'events_%d.%s.gz' % (i + 1, ext)), hep_list[-1]) 2639 message = ('The following files have been generated:\n %s\nThey contain showered' + \ 2640 ' and hadronized events in the %s format obtained' + \ 2641 ' showering the (split) parton-level event file %s.gz with %s') % \ 2642 ('\n '.join(hep_list), hep_format, evt_file, shower) 2643 2644 except OSError, IOError: 2645 raise aMCatNLOError('No file has been generated, an error occurred.'+\ 2646 ' More information in %s' % pjoin(os.getcwd(), 'amcatnlo_run.log')) 2647 2648 # run the plot creation in a secure way 2649 if hep_format == 'StdHEP': 2650 try: 2651 self.do_plot('%s -f' % self.run_name) 2652 except Exception, error: 2653 logger.info("Fail to make the plot. 
Continue...") 2654 pass 2655 2656 elif out_id == 'TOP' or out_id == 'HWU': 2657 #copy the topdrawer or HwU file(s) back in events 2658 if out_id=='TOP': 2659 ext='top' 2660 elif out_id=='HWU': 2661 ext='HwU' 2662 topfiles = [] 2663 top_tars = [tarfile.TarFile(f) for f in glob.glob(pjoin(rundir, 'histfile*.tar'))] 2664 for top_tar in top_tars: 2665 topfiles.extend(top_tar.getnames()) 2666 2667 # safety check 2668 if len(top_tars) != self.shower_card['nsplit_jobs']: 2669 raise aMCatNLOError('%d job(s) expected, %d file(s) found' % \ 2670 (self.shower_card['nsplit_jobs'], len(top_tars))) 2671 2672 # find the first available name for the output: 2673 # check existing results with or without event splitting 2674 filename = 'plot_%s_%d_' % (shower, 1) 2675 count = 1 2676 while os.path.exists(pjoin(self.me_dir, 'Events', 2677 self.run_name, '%s0.%s' % (filename,ext))) or \ 2678 os.path.exists(pjoin(self.me_dir, 'Events', 2679 self.run_name, '%s0__1.%s' % (filename,ext))): 2680 count += 1 2681 filename = 'plot_%s_%d_' % (shower, count) 2682 2683 if out_id=='TOP': 2684 hist_format='TopDrawer format' 2685 elif out_id=='HWU': 2686 hist_format='HwU and GnuPlot formats' 2687 2688 if not topfiles: 2689 # if no topfiles are found just warn the user 2690 warning = 'No .top file has been generated. 
For the results of your ' +\ 2691 'run, please check inside %s' % rundir 2692 elif self.shower_card['nsplit_jobs'] == 1: 2693 # only one job for the shower 2694 top_tars[0].extractall(path = rundir) 2695 plotfiles = [] 2696 for i, file in enumerate(topfiles): 2697 if out_id=='TOP': 2698 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 2699 '%s%d.top' % (filename, i)) 2700 files.mv(pjoin(rundir, file), plotfile) 2701 elif out_id=='HWU': 2702 histogram_list=histograms.HwUList(pjoin(rundir,file)) 2703 histogram_list.output(pjoin(self.me_dir,'Events',self.run_name, 2704 '%s%d'% (filename,i)),format = 'gnuplot') 2705 try: 2706 misc.call(['gnuplot','%s%d.gnuplot' % (filename,i)],\ 2707 stdout=os.open(os.devnull, os.O_RDWR),\ 2708 stderr=os.open(os.devnull, os.O_RDWR),\ 2709 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2710 except Exception: 2711 pass 2712 plotfile=pjoin(self.me_dir,'Events',self.run_name, 2713 '%s%d.HwU'% (filename,i)) 2714 plotfiles.append(plotfile) 2715 2716 ffiles = 'files' 2717 have = 'have' 2718 if len(plotfiles) == 1: 2719 ffiles = 'file' 2720 have = 'has' 2721 2722 message = ('The %s %s %s been generated, with histograms in the' + \ 2723 ' %s, obtained by showering the parton-level' + \ 2724 ' file %s.gz with %s.') % (ffiles, ', '.join(plotfiles), have, \ 2725 hist_format, evt_file, shower) 2726 else: 2727 # many jobs for the shower have been run 2728 topfiles_set = set(topfiles) 2729 plotfiles = [] 2730 for j, top_tar in enumerate(top_tars): 2731 top_tar.extractall(path = rundir) 2732 for i, file in enumerate(topfiles_set): 2733 plotfile = pjoin(self.me_dir, 'Events', self.run_name, 2734 '%s%d__%d.%s' % (filename, i, j + 1,ext)) 2735 files.mv(pjoin(rundir, file), plotfile) 2736 plotfiles.append(plotfile) 2737 2738 # check if the user asked to combine the .top into a single file 2739 if self.shower_card['combine_td']: 2740 misc.compile(['sum_plots'], cwd = pjoin(self.me_dir, 'Utilities')) 2741 2742 if self.banner.get('run_card', 
'event_norm').lower() == 'sum': 2743 norm = 1. 2744 elif self.banner.get('run_card', 'event_norm').lower() == 'average': 2745 norm = 1./float(self.shower_card['nsplit_jobs']) 2746 2747 plotfiles2 = [] 2748 for i, file in enumerate(topfiles_set): 2749 filelist = ['%s%d__%d.%s' % (filename, i, j + 1,ext) \ 2750 for j in range(self.shower_card['nsplit_jobs'])] 2751 if out_id=='TOP': 2752 infile="%d\n%s\n%s\n" % \ 2753 (self.shower_card['nsplit_jobs'], 2754 '\n'.join(filelist), 2755 '\n'.join([str(norm)] * self.shower_card['nsplit_jobs'])) 2756 p = misc.Popen([pjoin(self.me_dir, 'Utilities', 'sum_plots')], 2757 stdin=subprocess.PIPE, 2758 stdout=os.open(os.devnull, os.O_RDWR), 2759 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2760 p.communicate(input = infile) 2761 files.mv(pjoin(self.me_dir, 'Events', self.run_name, 'sum.top'), 2762 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.top' % (filename, i))) 2763 elif out_id=='HWU': 2764 histogram_list=histograms.HwUList(plotfiles[0]) 2765 for ii, histo in enumerate(histogram_list): 2766 histogram_list[ii] = histo*norm 2767 for histo_path in plotfiles[1:]: 2768 for ii, histo in enumerate(histograms.HwUList(histo_path)): 2769 # First make sure the plots have the same weight labels and such 2770 histo.test_plot_compability(histogram_list[ii]) 2771 # Now let the histogram module do the magic and add them. 
2772 histogram_list[ii] += histo*norm 2773 # And now output the finalized list 2774 histogram_list.output(pjoin(self.me_dir,'Events',self.run_name,'%s%d'% (filename, i)), 2775 format = 'gnuplot') 2776 try: 2777 misc.call(['gnuplot','%s%d.gnuplot' % (filename, i)],\ 2778 stdout=os.open(os.devnull, os.O_RDWR),\ 2779 stderr=os.open(os.devnull, os.O_RDWR),\ 2780 cwd=pjoin(self.me_dir, 'Events', self.run_name)) 2781 except Exception: 2782 pass 2783 2784 plotfiles2.append(pjoin(self.me_dir, 'Events', self.run_name, '%s%d.%s' % (filename, i,ext))) 2785 tar = tarfile.open( 2786 pjoin(self.me_dir, 'Events', self.run_name, '%s%d.tar.gz' % (filename, i)), 'w:gz') 2787 for f in filelist: 2788 tar.add(pjoin(self.me_dir, 'Events', self.run_name, f), arcname=f) 2789 files.rm([pjoin(self.me_dir, 'Events', self.run_name, f) for f in filelist]) 2790 2791 tar.close() 2792 2793 ffiles = 'files' 2794 have = 'have' 2795 if len(plotfiles2) == 1: 2796 ffiles = 'file' 2797 have = 'has' 2798 2799 message = ('The %s %s %s been generated, with histograms in the' + \ 2800 ' %s, obtained by showering the parton-level' + \ 2801 ' file %s.gz with %s.\n' + \ 2802 'The files from the different shower ' + \ 2803 'jobs (before combining them) can be found inside %s.') % \ 2804 (ffiles, ', '.join(plotfiles2), have, hist_format,\ 2805 evt_file, shower, 2806 ', '.join([f.replace('%s' % ext, 'tar.gz') for f in plotfiles2])) 2807 2808 else: 2809 message = ('The following files have been generated:\n %s\n' + \ 2810 'They contain histograms in the' + \ 2811 ' %s, obtained by showering the parton-level' + \ 2812 ' file %s.gz with %s.') % ('\n '.join(plotfiles), \ 2813 hist_format, evt_file, shower) 2814 2815 # Now arxiv the shower card used if RunMaterial is present 2816 run_dir_path = pjoin(rundir, self.run_name) 2817 if os.path.exists(pjoin(run_dir_path,'RunMaterial.tar.gz')): 2818 misc.call(['tar','-xzpf','RunMaterial.tar.gz'],cwd=run_dir_path) 2819 files.cp(pjoin(self.me_dir,'Cards','shower_card.dat'), 
2820 pjoin(run_dir_path,'RunMaterial','shower_card_for_%s_%d.dat'\ 2821 %(shower, count))) 2822 misc.call(['tar','-czpf','RunMaterial.tar.gz','RunMaterial'], 2823 cwd=run_dir_path) 2824 shutil.rmtree(pjoin(run_dir_path,'RunMaterial')) 2825 # end of the run, gzip files and print out the message/warning 2826 for f in to_gzip: 2827 misc.gzip(f) 2828 if message: 2829 logger.info(message) 2830 if warning: 2831 logger.warning(warning) 2832 2833 self.update_status('Run complete', level='shower', update_results=True)
2834 2835 2836 ############################################################################
def set_run_name(self, name, tag=None, level='parton', reload_card=False):
    """Define the run name, the run_tag, the banner and the results.

    Parameters:
        name -- the run name to switch to (may be the current one).
        tag -- optional explicit run tag; when None a tag is chosen
               automatically (reusing or upgrading the last one).
        level -- which stage is about to be (re)run ('parton', 'pythia',
                 'shower', 'pgs', 'delphes', 'plot'); controls when a new
                 tag must be allocated so existing results are not clobbered.
        reload_card -- when True and the run name is unchanged, re-read
                       Cards/run_card.dat.

    Returns None for 'parton' level; otherwise the tag of the previous run
    that holds the data required by this level (for 'pythia' the first tag
    of the run, else the most recent tag with pythia output).
    """

    # when are we force to change the tag new_run:previous run requiring changes
    # (i.e. rerunning LEVEL invalidates the stored results of these levels)
    upgrade_tag = {'parton': ['parton','pythia','pgs','delphes','shower'],
                   'pythia': ['pythia','pgs','delphes'],
                   'shower': ['shower'],
                   'pgs': ['pgs'],
                   'delphes':['delphes'],
                   'plot':[]}



    # Same run name as the current one: only refresh the card/tag bookkeeping.
    if name == self.run_name:
        if reload_card:
            run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
            self.run_card = banner_mod.RunCardNLO(run_card)

        #check if we need to change the tag
        if tag:
            # Caller imposed a tag: record it as-is.
            self.run_card['run_tag'] = tag
            self.run_tag = tag
            self.results.add_run(self.run_name, self.run_card)
        else:
            # If the last stored tag already has results for any level that
            # this LEVEL would overwrite, allocate a fresh tag instead.
            # (NOTE: the loop variable deliberately rebinds `tag`.)
            for tag in upgrade_tag[level]:
                if getattr(self.results[self.run_name][-1], tag):
                    tag = self.get_available_tag()
                    self.run_card['run_tag'] = tag
                    self.run_tag = tag
                    self.results.add_run(self.run_name, self.run_card)
                    break
        return # Nothing to do anymore

    # save/clean previous run
    if self.run_name:
        self.store_result()
    # store new name
    self.run_name = name

    # Read run_card
    run_card = pjoin(self.me_dir, 'Cards','run_card.dat')
    self.run_card = banner_mod.RunCardNLO(run_card)

    new_tag = False
    # First call for this run -> set the banner
    self.banner = banner_mod.recover_banner(self.results, level, self.run_name, tag)
    if tag:
        self.run_card['run_tag'] = tag
        new_tag = True
    elif not self.run_name in self.results and level =='parton':
        pass # No results yet, so current tag is fine
    elif not self.run_name in self.results:
        #This is only for case when you want to trick the interface
        logger.warning('Trying to run data on unknown run.')
        self.results.add_run(name, self.run_card)
        self.results.update('add run %s' % name, 'all', makehtml=True)
    else:
        # The run already has stored results: decide whether the tag must move.
        for tag in upgrade_tag[level]:
            if getattr(self.results[self.run_name][-1], tag):
                # LEVEL is already define in the last tag -> need to switch tag
                tag = self.get_available_tag()
                self.run_card['run_tag'] = tag
                new_tag = True
                break
        if not new_tag:
            # We can add the results to the current run
            tag = self.results[self.run_name][-1]['tag']
            self.run_card['run_tag'] = tag # ensure that run_tag is correct


    # Register the run in the results database (reuse the entry when the tag
    # did not change, otherwise create a new one).
    if name in self.results and not new_tag:
        self.results.def_current(self.run_name)
    else:
        self.results.add_run(self.run_name, self.run_card)

    self.run_tag = self.run_card['run_tag']

    # Return the tag of the previous run having the required data for this
    # tag/run to working wel.
    if level == 'parton':
        return
    elif level == 'pythia':
        return self.results[self.run_name][0]['tag']
    else:
        # Walk the stored tags from most recent to oldest and return the
        # first one that has pythia output available.
        for i in range(-1,-len(self.results[self.run_name])-1,-1):
            tagRun = self.results[self.run_name][i]
            if tagRun.pythia:
                return tagRun['tag']
def store_result(self):
    """Persist the results of the current run.

    Saves the in-memory results database to disk and clears the list of
    pending items to store.  Does nothing when no run is active or when
    there is nothing pending.  (The docstring of the previous version
    mentioned tarring the pythia output; only this bookkeeping remains.)
    """
    # No active run -> nothing to persist.
    if not self.run_name:
        return

    self.results.save()

    # Nothing pending to store.
    if not self.to_store:
        return

    # NOTE(review): a dead `tag = self.run_card['run_tag']` lookup was
    # removed here; the value was never used and the lookup could only
    # raise a spurious KeyError.
    self.to_store = []
def get_init_dict(self, evt_file):
    """Read the <init> block of a Les Houches event file and return its
    header numbers in a dictionary.

    Parameters:
        evt_file -- path to the .lhe file to parse.

    Returns a dict with keys idbmup1/2 (beam PDG ids), ebmup1/2 (beam
    energies), pdfgup1/2, pdfsup1/2 (PDF group/set ids), idwtup and nprup,
    i.e. the first line of the <init> block as defined by the LHE standard.
    The per-process lines XSECUP/XERRUP/XMAXUP/LPRUP are not (so far)
    included.

    Raises aMCatNLOError if no valid <init> block can be found.
    """
    init = ""
    found = False
    ev_file = open(evt_file)
    try:
        while True:
            line = ev_file.readline()
            # EOF guard: the previous version looped forever on files
            # without an <init> block (readline() returns '' at EOF).
            if not line:
                break
            if "<init>" in line:
                found = True
            elif found and not line.startswith('#'):
                # Collect everything after the opening tag, skipping
                # comment lines (the closing tag line is harmlessly
                # included; it only appears after the numbers we read).
                init += line
            if "</init>" in line or "<event>" in line:
                break
    finally:
        ev_file.close()

    # IDBMUP(1),IDBMUP(2),EBMUP(1),EBMUP(2), PDFGUP(1),PDFGUP(2),
    # PDFSUP(1),PDFSUP(2),IDWTUP,NPRUP
    values = init.split()
    if not found or len(values) < 10:
        raise aMCatNLOError('No valid <init> block found in %s' % evt_file)

    init_dict = {}
    init_dict['idbmup1'] = int(values[0])
    init_dict['idbmup2'] = int(values[1])
    init_dict['ebmup1'] = float(values[2])
    init_dict['ebmup2'] = float(values[3])
    init_dict['pdfgup1'] = int(values[4])
    init_dict['pdfgup2'] = int(values[5])
    init_dict['pdfsup1'] = int(values[6])
    init_dict['pdfsup2'] = int(values[7])
    init_dict['idwtup'] = int(values[8])
    init_dict['nprup'] = int(values[9])

    return init_dict
def banner_to_mcatnlo(self, evt_file):
    """Create the MCatNLO input script (MCatNLO/banner.dat) using the values
    set in the header of the event file.

    Reads the run/param cards stored in self.banner and the shower card,
    writes a KEY=VALUE shell-style file consumed by the MCatNLO inputs
    script, and decides whether LHAPDF must be linked for the shower.

    Parameters:
        evt_file -- path of the parton-level .lhe file; its basename and
                    its <init> block (via get_init_dict) are used.

    Returns the shower name (upper-cased parton_shower from the run card).
    """
    shower = self.banner.get('run_card', 'parton_shower').upper()
    pdlabel = self.banner.get('run_card', 'pdlabel')
    itry = 0  # NOTE(review): never used below
    nevents = self.shower_card['nevents']
    init_dict = self.get_init_dict(evt_file)

    # Clamp the requested number of events to what was actually generated
    # (negative means "all").
    if nevents < 0 or \
       nevents > int(self.banner.get_detail('run_card', 'nevents')):
        nevents = int(self.banner.get_detail('run_card', 'nevents'))

    # Events per split job.  NOTE(review): with `from __future__ import
    # division` at the top of this module this is a true division; the
    # '%d' formatting below truncates it — confirm this rounding is wanted.
    nevents = nevents / self.shower_card['nsplit_jobs']

    # Monte-Carlo masses recorded in the banner, keyed by PDG id.
    mcmass_dict = {}
    for line in [l for l in self.banner['montecarlomasses'].split('\n') if l]:
        pdg = int(line.split()[0])
        mass = float(line.split()[1])
        mcmass_dict[pdg] = mass

    # Build the KEY=VALUE content line by line.
    content = 'EVPREFIX=%s\n' % pjoin(os.path.split(evt_file)[1])
    content += 'NEVENTS=%d\n' % nevents
    content += 'NEVENTS_TOT=%d\n' % (int(self.banner.get_detail('run_card', 'nevents')) /\
                                         self.shower_card['nsplit_jobs'])
    content += 'MCMODE=%s\n' % shower
    content += 'PDLABEL=%s\n' % pdlabel
    content += 'ALPHAEW=%s\n' % self.banner.get_detail('param_card', 'sminputs', 1).value
    #content += 'PDFSET=%s\n' % self.banner.get_detail('run_card', 'lhaid')
    #content += 'PDFSET=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
    # Heavy-particle masses/widths from the param card (PDG 6/23/24/25).
    content += 'TMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 6).value
    content += 'TWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 6).value
    content += 'ZMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 23).value
    content += 'ZWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 23).value
    content += 'WMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 24).value
    content += 'WWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 24).value
    try:
        content += 'HGGMASS=%s\n' % self.banner.get_detail('param_card', 'mass', 25).value
        content += 'HGGWIDTH=%s\n' % self.banner.get_detail('param_card', 'decay', 25).value
    except KeyError:
        # Model without a PDG-25 Higgs entry: fall back to hard-coded values.
        content += 'HGGMASS=120.\n'
        content += 'HGGWIDTH=0.00575308848\n'
    content += 'beammom1=%s\n' % self.banner.get_detail('run_card', 'ebeam1')
    content += 'beammom2=%s\n' % self.banner.get_detail('run_card', 'ebeam2')
    content += 'BEAM1=%s\n' % self.banner.get_detail('run_card', 'lpp1')
    content += 'BEAM2=%s\n' % self.banner.get_detail('run_card', 'lpp2')
    content += 'DMASS=%s\n' % mcmass_dict[1]
    content += 'UMASS=%s\n' % mcmass_dict[2]
    content += 'SMASS=%s\n' % mcmass_dict[3]
    content += 'CMASS=%s\n' % mcmass_dict[4]
    content += 'BMASS=%s\n' % mcmass_dict[5]
    try:
        content += 'EMASS=%s\n' % mcmass_dict[11]
        content += 'MUMASS=%s\n' % mcmass_dict[13]
        content += 'TAUMASS=%s\n' % mcmass_dict[15]
    except KeyError:
        # this is for backward compatibility: older banners do not record
        # lepton MC masses, so read them from the shower-specific include
        # file (Fortran 'd' exponents are converted to 'e').
        mcmass_lines = [l for l in \
                open(pjoin(self.me_dir, 'SubProcesses', 'MCmasses_%s.inc' % shower.upper())
                     ).read().split('\n') if l]
        new_mcmass_dict = {}
        for l in mcmass_lines:
            key, val = l.split('=')
            new_mcmass_dict[key.strip()] = val.replace('d', 'e').strip()
        content += 'EMASS=%s\n' % new_mcmass_dict['mcmass(11)']
        content += 'MUMASS=%s\n' % new_mcmass_dict['mcmass(13)']
        content += 'TAUMASS=%s\n' % new_mcmass_dict['mcmass(15)']

    content += 'GMASS=%s\n' % mcmass_dict[21]
    content += 'EVENT_NORM=%s\n' % self.banner.get_detail('run_card', 'event_norm').lower()
    # check if need to link lhapdf
    if int(self.shower_card['pdfcode']) > 1 or \
       (pdlabel=='lhapdf' and int(self.shower_card['pdfcode'])==1):
        # Use LHAPDF (should be correctly installed, because
        # either events were already generated with them, or the
        # user explicitly gives an LHAPDF number in the
        # shower_card).
        self.link_lhapdf(pjoin(self.me_dir, 'lib'))
        lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                                      stdout = subprocess.PIPE).stdout.read().strip()
        content += 'LHAPDFPATH=%s\n' % lhapdfpath
        pdfsetsdir = self.get_lhapdf_pdfsetsdir()
        if self.shower_card['pdfcode']==1:
            # pdfcode 1 means "same PDF as in the generation": take the set
            # id from the event file's <init> block.
            lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])]
            content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
        else:
            lhaid_list = [abs(int(self.shower_card['pdfcode']))]
            content += 'PDFCODE=%s\n' % self.shower_card['pdfcode']
        self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
    elif int(self.shower_card['pdfcode'])==1:
        # Try to use LHAPDF because user wants to use the same PDF
        # as was used for the event generation. However, for the
        # event generation, LHAPDF was not used, so non-trivial to
        # see if if LHAPDF is available with the corresponding PDF
        # set. If not found, give a warning and use build-in PDF
        # set instead.
        try:
            lhapdfpath = subprocess.Popen([self.options['lhapdf'], '--prefix'],
                                          stdout = subprocess.PIPE).stdout.read().strip()
            self.link_lhapdf(pjoin(self.me_dir, 'lib'))
            content += 'LHAPDFPATH=%s\n' % lhapdfpath
            pdfsetsdir = self.get_lhapdf_pdfsetsdir()
            lhaid_list = [max([init_dict['pdfsup1'],init_dict['pdfsup2']])]
            content += 'PDFCODE=%s\n' % max([init_dict['pdfsup1'],init_dict['pdfsup2']])
            self.copy_lhapdf_set(lhaid_list, pdfsetsdir)
        except Exception:
            logger.warning('Trying to shower events using the same PDF in the shower as used in the generation'+\
                           ' of the events using LHAPDF. However, no valid LHAPDF installation found with the'+\
                           ' needed PDF set. Will use default internal PDF for the shower instead. To use the'+\
                           ' same set as was used in the event generation install LHAPDF and set the path using'+\
                           ' "set /path_to_lhapdf/bin/lhapdf-config" from the MadGraph5_aMC@NLO python shell')
            content += 'LHAPDFPATH=\n'
            content += 'PDFCODE=0\n'
    else:
        # No LHAPDF requested: empty path + code 0 selects the internal PDF.
        content += 'LHAPDFPATH=\n'
        content += 'PDFCODE=0\n'

    content += 'ICKKW=%s\n' % self.banner.get_detail('run_card', 'ickkw')
    content += 'PTJCUT=%s\n' % self.banner.get_detail('run_card', 'ptj')
    # add the pythia8/hwpp path(s)
    if self.options['pythia8_path']:
        content+='PY8PATH=%s\n' % self.options['pythia8_path']
    if self.options['hwpp_path']:
        content+='HWPPPATH=%s\n' % self.options['hwpp_path']
    if self.options['thepeg_path']:
        content+='THEPEGPATH=%s\n' % self.options['thepeg_path']
    if self.options['hepmc_path']:
        content+='HEPMCPATH=%s\n' % self.options['hepmc_path']

    # Write everything out for the MCatNLO inputs script.
    output = open(pjoin(self.me_dir, 'MCatNLO', 'banner.dat'), 'w')
    output.write(content)
    output.close()
    return shower
def run_reweight(self, only):
    """Run the reweight_xsec_events executables on each sub-event file
    generated, to compute on-the-fly scale and/or PDF uncertainties.

    Parameters:
        only -- True when only the reweighting step is being (re)run; in
                that case the saved nevents_unweighted.orig file is
                restored first (raises aMCatNLOError when missing).

    Returns the scale/PDF uncertainty dictionary produced by
    pdf_scale_from_reweighting.
    """
    logger.info(' Doing reweight')

    nev_unw = pjoin(self.me_dir, 'SubProcesses', 'nevents_unweighted')
    # if only doing reweight, copy back the nevents_unweighted file
    if only:
        if os.path.exists(nev_unw + '.orig'):
            files.cp(nev_unw + '.orig', nev_unw)
        else:
            raise aMCatNLOError('Cannot find event file information')

    #read the nevents_unweighted file to get the list of event files
    file = open(nev_unw)
    lines = file.read().split('\n')
    file.close()
    # make copy of the original nevent_unweighted file
    files.cp(nev_unw, nev_unw + '.orig')
    # loop over lines (all but the last one whith is empty) and check that the
    # number of events is not 0
    evt_files = [line.split()[0] for line in lines[:-1] if line.split()[1] != '0']
    #prepare the job_dict: link the reweight executable into each P*/G* folder
    job_dict = {}
    exe = 'reweight_xsec_events.local'
    for i, evt_file in enumerate(evt_files):
        path, evt = os.path.split(evt_file)
        files.ln(pjoin(self.me_dir, 'SubProcesses', exe), \
                 pjoin(self.me_dir, 'SubProcesses', path))
        job_dict[path] = [exe]

    # NOTE(review): `evt` here is the basename from the LAST loop iteration;
    # the same argument is passed to every folder's job, so this assumes all
    # event files share one basename (e.g. 'events.lhe') — confirm.
    self.run_all(job_dict, [[evt, '1']], 'Running reweight')

    #check that the new event files are complete: a well-formed reweighted
    # file must end with the LHE closing tag.
    for evt_file in evt_files:
        last_line = subprocess.Popen(['tail', '-n1', '%s.rwgt' % \
                    pjoin(self.me_dir, 'SubProcesses', evt_file)], \
                stdout = subprocess.PIPE).stdout.read().strip()
        if last_line != "</LesHouchesEvents>":
            raise aMCatNLOError('An error occurred during reweight. Check the' + \
                    '\'reweight_xsec_events.output\' files inside the ' + \
                    '\'SubProcesses/P*/G*/ directories for details')

    #update file name in nevents_unweighted: point each entry at the
    # newly produced *.rwgt file
    newfile = open(nev_unw, 'w')
    for line in lines:
        if line:
            newfile.write(line.replace(line.split()[0], line.split()[0] + '.rwgt') + '\n')
    newfile.close()

    return self.pdf_scale_from_reweighting(evt_files)
def pdf_scale_from_reweighting(self, evt_files):
    """This function takes the files with the scale and pdf values
    written by the reweight_xsec_events.f code
    (P*/G*/pdf_scale_dependence.dat) and computes the overall
    scale and PDF uncertainty (the latter is computed using the
    Hessian method (if lhaid<90000) or Gaussian (if lhaid>90000))
    and returns it in percents.  The expected format of the file
    is: n_scales xsec_scale_central xsec_scale1 ... n_pdf
    xsec_pdf0 xsec_pdf1 ....

    Returns a dict with (depending on what was computed) keys
    'scale_upp'/'scale_low', 'scale_upp_quad'/'scale_low_quad' and
    'pdf_upp'/'pdf_low', all in percent of the central value.
    """
    scale_pdf_info={}
    scales=[]
    pdfs=[]
    numofpdf = 0
    numofscales = 0
    # Sum the per-channel cross sections over all event files; the
    # counts are taken from the first file and every other file must agree.
    for evt_file in evt_files:
        path, evt=os.path.split(evt_file)
        # Fortran writes 'D' exponents; convert before parsing floats.
        data_file=open(pjoin(self.me_dir, 'SubProcesses', path, 'scale_pdf_dependence.dat')).read()
        lines = data_file.replace("D", "E").split("\n")
        if not numofscales:
            numofscales = int(lines[0])
        if not numofpdf:
            numofpdf = int(lines[2])
        scales_this = [float(val) for val in lines[1].split()]
        pdfs_this = [float(val) for val in lines[3].split()]

        if numofscales != len(scales_this) or numofpdf !=len(pdfs_this):
            # the +1 takes the 0th (central) set into account
            logger.info(data_file)
            logger.info((' Expected # of scales: %d\n'+
                         ' Found # of scales: %d\n'+
                         ' Expected # of pdfs: %d\n'+
                         ' Found # of pdfs: %d\n') %
                        (numofscales, len(scales_this), numofpdf, len(pdfs_this)))
            raise aMCatNLOError('inconsistent scale_pdf_dependence.dat')
        if not scales:
            scales = [0.] * numofscales
        if not pdfs:
            pdfs = [0.] * numofpdf

        scales = [a + b for a, b in zip(scales, scales_this)]
        pdfs = [a + b for a, b in zip(pdfs, pdfs_this)]

    # get the central value (index 0 of each list); when both scale and
    # PDF variations exist their central values must agree.
    if numofscales>0 and numofpdf==0:
        cntrl_val=scales[0]
    elif numofpdf>0 and numofscales==0:
        cntrl_val=pdfs[0]
    elif numofpdf>0 and numofscales>0:
        if abs(1-scales[0]/pdfs[0])>0.0001:
            raise aMCatNLOError('Central values for scale and PDF variation not identical')
        else:
            cntrl_val=scales[0]

    # get the scale uncertainty in percent
    if numofscales>0:
        if cntrl_val != 0.0:
            # max and min of the full envelope
            scale_pdf_info['scale_upp'] = (max(scales)/cntrl_val-1)*100
            scale_pdf_info['scale_low'] = (1-min(scales)/cntrl_val)*100
            # ren and fac scale dependence added in quadrature
            # NOTE(review): the fixed indices 0-3 and 6 assume the standard
            # ordering of the (mu_R, mu_F) variation points — confirm this
            # matches what reweight_xsec_events.f writes.
            scale_pdf_info['scale_upp_quad'] = ((cntrl_val+math.sqrt(math.pow(max(scales[0]-cntrl_val,scales[1]-cntrl_val,scales[2]-cntrl_val),2)+math.pow(max(scales[0]-cntrl_val,scales[3]-cntrl_val,scales[6]-cntrl_val),2)))/cntrl_val-1)*100
            scale_pdf_info['scale_low_quad'] = (1-(cntrl_val-math.sqrt(math.pow(min(scales[0]-cntrl_val,scales[1]-cntrl_val,scales[2]-cntrl_val),2)+math.pow(min(scales[0]-cntrl_val,scales[3]-cntrl_val,scales[6]-cntrl_val),2)))/cntrl_val)*100
        else:
            scale_pdf_info['scale_upp'] = 0.0
            scale_pdf_info['scale_low'] = 0.0

    # get the pdf uncertainty in percent (according to the Hessian method)
    lhaid=int(self.run_card['lhaid'])
    pdf_upp=0.0
    pdf_low=0.0
    if lhaid <= 90000:
        # use Hessian method (CTEQ & MSTW): error members come in +/- pairs.
        if numofpdf>1:
            for i in range(int(numofpdf/2)):
                pdf_upp=pdf_upp+math.pow(max(0.0,pdfs[2*i+1]-cntrl_val,pdfs[2*i+2]-cntrl_val),2)
                pdf_low=pdf_low+math.pow(max(0.0,cntrl_val-pdfs[2*i+1],cntrl_val-pdfs[2*i+2]),2)
            if cntrl_val != 0.0:
                scale_pdf_info['pdf_upp'] = math.sqrt(pdf_upp)/cntrl_val*100
                scale_pdf_info['pdf_low'] = math.sqrt(pdf_low)/cntrl_val*100
            else:
                scale_pdf_info['pdf_upp'] = 0.0
                scale_pdf_info['pdf_low'] = 0.0

    else:
        # use Gaussian method (NNPDF): standard deviation of the replicas.
        # NOTE(review): the divisor numofpdf-2 (not numofpdf-1) looks odd
        # for a sample standard deviation over numofpdf-1 replicas — confirm.
        pdf_stdev=0.0
        for i in range(int(numofpdf-1)):
            pdf_stdev = pdf_stdev + pow(pdfs[i+1] - cntrl_val,2)
        pdf_stdev = math.sqrt(pdf_stdev/int(numofpdf-2))
        if cntrl_val != 0.0:
            scale_pdf_info['pdf_upp'] = pdf_stdev/cntrl_val*100
        else:
            scale_pdf_info['pdf_upp'] = 0.0
        scale_pdf_info['pdf_low'] = scale_pdf_info['pdf_upp']
    return scale_pdf_info
def wait_for_complete(self, run_type):
    """Block until all submitted cluster jobs have finished.

    While waiting, the run status shown to the user is periodically
    refreshed with the idle/running/finished job counters.  If the wait
    is interrupted for any reason, the submitted jobs are removed from
    the cluster before the exception is propagated.
    """
    t0 = time.time()

    def refresh(idle, running, finished):
        # keep the user informed about the job counters
        self.update_status((idle, running, finished, run_type),
                           starttime=t0, level='parton',
                           update_results=True)

    try:
        self.cluster.wait(self.me_dir, refresh)
    except:
        # clean up leftover jobs, then let the caller see the error
        self.cluster.remove()
        raise
3276
def run_all(self, job_dict, arg_list, run_type='monitor', split_jobs = False):
    """Submit every job listed in job_dict once per argument set.

    job_dict maps a folder to a list of executables and arg_list holds
    the argument sets.  For non-shower runs every (args, folder, job)
    combination is launched, optionally split into several sub-jobs for
    event generation; for shower runs job_dict contains a single
    (cwd, exe) pair.  Returns once all jobs have completed.
    """
    njob_split = 0
    self.ijob = 0

    # bookkeeping for split event generation: remember folders/args so
    # that failed jobs can be resubmitted later
    self.split_folders = {}

    if run_type != 'shower':
        self.njobs = sum(len(jobs) for jobs in job_dict.values()) * len(arg_list)
        for args in arg_list:
            for Pdir, jobs in job_dict.items():
                workdir = pjoin(self.me_dir, 'SubProcesses', Pdir)
                for job in jobs:
                    if split_jobs:
                        for n in self.find_jobs_to_split(Pdir, job, args[1]):
                            self.run_exe(job, args + [n], run_type, cwd=workdir)
                            njob_split += 1
                    else:
                        self.run_exe(job, args, run_type, cwd=workdir)
        # print some statistics if running serially
        if self.cluster_mode == 2:
            time.sleep(1)  # security to allow all jobs to be launched
        if njob_split > 0:
            self.njobs = njob_split
    else:
        self.njobs = len(arg_list)
        for args in arg_list:
            # shower mode: job_dict holds exactly one (cwd, exe) pair
            [(cwd, exe)] = job_dict.items()
            self.run_exe(exe, args, run_type, cwd)

    self.wait_for_complete(run_type)
3309 3310 3311
def check_event_files(self):
    """Verify that every split event file terminates properly and
    resubmit the jobs whose output is truncated."""
    broken = []
    for folder in self.split_folders.keys():
        tail = ''
        try:
            # read only the last line; 'tail' avoids loading huge files
            tail = subprocess.Popen(
                ['tail', '-n1', pjoin(folder, 'events.lhe')],
                stdout=subprocess.PIPE).stdout.read().strip()
        except IOError:
            pass

        # a healthy LHE file always closes its root tag
        if tail != "</LesHouchesEvents>":
            broken.append(folder)

    self.njobs = 0
    if broken:
        run_type = 'Resubmitting broken jobs'
        logger.info('Some event files are broken, corresponding jobs will be resubmitted.')
        logger.debug('Resubmitting\n' + '\n'.join(broken) + '\n')
        for folder in broken:
            files.rm([folder])
            job = self.split_folders[folder][0]
            args = self.split_folders[folder][1:]
            run_type = 'monitor'
            self.run_exe(job, args, run_type, cwd=os.path.split(folder)[0])
            self.njobs += 1

        self.wait_for_complete(run_type)
3343 3344
def find_jobs_to_split(self, pdir, job, arg):
    """Return the list of split-channel suffixes needed for (pdir, job).

    The integration channel number is read from the job script itself;
    the nevents_unweighted_splitted file then tells how many sub-jobs
    (G<arg><channel>_YY folders) must be generated.  arg is F, B or V.
    """
    # extract the integration channel number from the job script
    script = open(pjoin(self.me_dir, 'SubProcesses', pdir, job)).read()
    channel = re.search(re.compile('for i in (\d+) ; do'), script).groups()[0]
    # look up how many splittings are requested; channels with zero
    # events appear as plain GFXX (no _YY suffix) and so never match
    splitted = open(pjoin(self.me_dir, 'SubProcesses',
                          'nevents_unweighted_splitted')).read()
    pattern = re.compile(r"%s_(\d+)/events.lhe" %
                         pjoin(pdir, 'G%s%s' % (arg, channel)))
    return re.findall(pattern, splitted)
3365 3366
def run_exe(self, exe, args, run_type, cwd=None):
    """Launch exe with args, either locally or on the cluster/multicore.

    The submission strategy depends on self.cluster_mode and on the
    kind of executable (reweighting, standard 'ajob', shower or
    generic); each kind declares its own input/output files so that
    cluster nodes with local disks can be used.
    Raises aMCatNLOError when the executable cannot be found or when a
    shower job is given an unknown output format.
    """
    # first test that exe exists:
    execpath = None
    if cwd and os.path.exists(pjoin(cwd, exe)):
        execpath = pjoin(cwd, exe)
    elif not cwd and os.path.exists(exe):
        execpath = exe
    else:
        raise aMCatNLOError('Cannot find executable %s in %s' \
            % (exe, os.getcwd()))
    # check that the executable has exec permissions
    if self.cluster_mode == 1 and not os.access(execpath, os.X_OK):
        subprocess.call(['chmod', '+x', exe], cwd=cwd)
    # finally run it
    if self.cluster_mode == 0:
        #this is for the serial run
        misc.call(['./'+exe] + args, cwd=cwd)
        self.ijob += 1
        self.update_status((max([self.njobs - self.ijob - 1, 0]),
                            min([1, self.njobs - self.ijob]),
                            self.ijob, run_type), level='parton')

    #this is for the cluster/multicore run
    elif 'reweight' in exe:
        # a reweight run
        # Find the correct PDF input file
        input_files, output_files = [], []
        pdfinput = self.get_pdf_input_filename()
        if os.path.exists(pdfinput):
            input_files.append(pdfinput)
        input_files.append(pjoin(os.path.dirname(exe), os.path.pardir, 'reweight_xsec_events'))
        input_files.append(pjoin(cwd, os.path.pardir, 'leshouche_info.dat'))
        input_files.append(args[0])
        output_files.append('%s.rwgt' % os.path.basename(args[0]))
        output_files.append('reweight_xsec_events.output')
        output_files.append('scale_pdf_dependence.dat')

        return self.cluster.submit2(exe, args, cwd=cwd,
                input_files=input_files, output_files=output_files,
                required_output=output_files)

    elif 'ajob' in exe:
        # the 'standard' amcatnlo job
        # check if args is a list of string
        if type(args[0]) == str:
            input_files, output_files, required_output, args = self.getIO_ajob(exe, cwd, args)
            #submitting
            self.cluster.submit2(exe, args, cwd=cwd,
                    input_files=input_files, output_files=output_files,
                    required_output=required_output)

            # keep track of folders and arguments for splitted evt gen
            subfolder = output_files[-1].split('/')[0]
            if len(args) == 4 and '_' in subfolder:
                self.split_folders[pjoin(cwd, subfolder)] = [exe] + args

    elif 'shower' in exe:
        # a shower job
        # args are [shower, output(HEP or TOP), run_name]
        # cwd is the shower rundir, where the executable are found
        input_files, output_files = [], []
        shower = args[0]
        # the input files
        if shower == 'PYTHIA8':
            input_files.append(pjoin(cwd, 'Pythia8.exe'))
            input_files.append(pjoin(cwd, 'Pythia8.cmd'))
            if os.path.exists(pjoin(self.options['pythia8_path'], 'xmldoc')):
                input_files.append(pjoin(cwd, 'config.sh'))
                input_files.append(pjoin(self.options['pythia8_path'], 'xmldoc'))
            else:
                input_files.append(pjoin(self.options['pythia8_path'], 'share/Pythia8/xmldoc'))
        else:
            input_files.append(pjoin(cwd, 'MCATNLO_%s_EXE' % shower))
            input_files.append(pjoin(cwd, 'MCATNLO_%s_input' % shower))
            if shower == 'HERWIGPP':
                input_files.append(pjoin(cwd, 'Herwig++'))
                input_files.append(pjoin(cwd, 'HepMCFortran.so'))
        if len(args) == 3:
            if os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz')):
                input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe.gz'))
            elif os.path.exists(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe')):
                input_files.append(pjoin(self.me_dir, 'Events', self.run_name, 'events.lhe'))
            else:
                # fixed: modern raise syntax (the "raise X, y" form is
                # Python-2 only)
                raise aMCatNLOError('Event file not present in %s' % \
                        pjoin(self.me_dir, 'Events', self.run_name))
        else:
            input_files.append(pjoin(cwd, 'events_%s.lhe' % args[3]))
        # the output files
        if len(args) == 3:
            output_files.append('mcatnlo_run.log')
        else:
            output_files.append('mcatnlo_run_%s.log' % args[3])
        if args[1] == 'HEP':
            if len(args) == 3:
                fname = 'events'
            else:
                fname = 'events_%s' % args[3]
            if shower in ['PYTHIA8', 'HERWIGPP']:
                output_files.append(fname + '.hepmc.gz')
            else:
                output_files.append(fname + '.hep.gz')
        elif args[1] == 'TOP' or args[1] == 'HWU':
            if len(args) == 3:
                fname = 'histfile'
            else:
                fname = 'histfile_%s' % args[3]
            output_files.append(fname + '.tar')
        else:
            # fixed: args[1] is a string ('HEP'/'TOP'/'HWU'), so the old
            # '%d' placeholder raised a TypeError instead of showing the
            # intended message; also use modern raise syntax
            raise aMCatNLOError('Not a valid output argument for shower job : %s' % args[1])
        #submitting
        self.cluster.submit2(exe, args, cwd=cwd,
                input_files=input_files, output_files=output_files)

    else:
        return self.cluster.submit(exe, args, cwd=cwd)
3485
def getIO_ajob(self, exe, cwd, args):
    """Build the input/output file lists for a standard 'ajob' submission.

    Returns (input_files, output_files, required_output, args).  The
    fourth element of args is dropped unless it is needed (split event
    generation).  Knowing the exact input/output files allows the
    cluster to use the nodes' local disks.
    Raises aMCatNLOError when args[0] is not a recognised run mode.
    """
    keep_fourth_arg = False
    output_files = []
    required_output = []
    input_files = [pjoin(self.me_dir, 'SubProcesses', 'randinit'),
                   pjoin(cwd, 'symfact.dat'),
                   pjoin(cwd, 'iproc.dat'),
                   pjoin(cwd, 'initial_states_map.dat'),
                   pjoin(cwd, 'configs_and_props_info.dat'),
                   pjoin(cwd, 'leshouche_info.dat'),
                   pjoin(cwd, 'FKS_params.dat')]

    # For GoSam interface, we must copy the SLHA card as well
    if os.path.exists(pjoin(self.me_dir, 'OLP_virtuals', 'gosam.rc')):
        input_files.append(pjoin(self.me_dir, 'Cards', 'param_card.dat'))

    if os.path.exists(pjoin(cwd, 'nevents.tar')):
        input_files.append(pjoin(cwd, 'nevents.tar'))

    if os.path.exists(pjoin(self.me_dir, 'SubProcesses', 'OLE_order.olc')):
        input_files.append(pjoin(cwd, 'OLE_order.olc'))

    # File for the loop (might not be present if MadLoop is not used)
    if os.path.exists(pjoin(cwd, 'MadLoop5_resources')) and \
            cluster.need_transfer(self.options):
        input_files.append(pjoin(cwd, 'MadLoop5_resources.tar.gz'))
        tf = tarfile.open(pjoin(cwd, 'MadLoop5_resources.tar.gz'), 'w:gz',
                          dereference=True)
        tf.add(pjoin(cwd, 'MadLoop5_resources'), arcname='MadLoop5_resources')
        tf.close()

    # the integration channels this script loops over ("for i in ... ; do")
    Ire = re.compile("for i in ([\d\s]*) ; do")
    try:
        fsock = open(exe)
    except IOError:
        fsock = open(pjoin(cwd, exe))
    text = fsock.read()
    data = Ire.findall(text)
    subdir = ' '.join(data).split()

    if args[0] == '0':
        # MADEVENT MINT FO MODE
        input_files.append(pjoin(cwd, 'madevent_mintFO'))
        input_files.append(pjoin(self.me_dir, 'SubProcesses', 'madin.%s' % args[1]))
        #j=$2\_G$i
        for i in subdir:
            current = '%s_G%s' % (args[1], i)
            if os.path.exists(pjoin(cwd, current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)

            required_output.append('%s/results.dat' % current)
            required_output.append('%s/log.txt' % current)
            required_output.append('%s/mint_grids' % current)
            required_output.append('%s/grid.MC_integer' % current)
            if len(args) == 4:
                required_output.append('%s/scale_pdf_dependence.dat' % current)
                args[2] = '-1'
                # use a grid train on another part
                base = '%s_G%s' % (args[3], i)
                if args[0] == '0':
                    to_move = ['grid.MC_integer', 'mint_grids']
                elif args[0] == '1':
                    # NOTE(review): unreachable here (the enclosing branch
                    # requires args[0] == '0'); kept for historical reasons
                    to_move = ['mint_grids', 'grid.MC_integer']
                else:
                    to_move = []
                if self.run_card['iappl'] == 2:
                    for grid in glob.glob(pjoin(cwd, base, 'grid_obs_*_in.root')):
                        to_move.append(grid)
                if not os.path.exists(pjoin(cwd, current)):
                    os.mkdir(pjoin(cwd, current))
                    input_files.append(pjoin(cwd, current))
                for name in to_move:
                    files.cp(pjoin(cwd, base, name),
                             pjoin(cwd, current))
                files.cp(pjoin(cwd, base, 'grid.MC_integer'),
                         pjoin(cwd, current))

    elif args[0] == '2':
        # MINTMC MODE
        input_files.append(pjoin(cwd, 'madevent_mintMC'))
        if args[2] in ['0', '2']:
            input_files.append(pjoin(self.me_dir, 'SubProcesses', 'madinMMC_%s.2' % args[1]))

        for i in subdir:
            current = 'G%s%s' % (args[1], i)
            if os.path.exists(pjoin(cwd, current)):
                input_files.append(pjoin(cwd, current))
            output_files.append(current)
            if len(args) == 4 and args[3] in ['H', 'S', 'V', 'B', 'F']:
                # use a grid train on another part
                base = '%s_%s' % (args[3], i)
                files.ln(pjoin(cwd, base, 'mint_grids'), name='preset_mint_grids',
                         starting_dir=pjoin(cwd, current))
                files.ln(pjoin(cwd, base, 'grid.MC_integer'),
                         starting_dir=pjoin(cwd, current))
            elif len(args) == 4:
                keep_fourth_arg = True
                # this is for the split event generation
                output_files.append('G%s%s_%s' % (args[1], i, args[3]))
                required_output.append('G%s%s_%s/log_MINT%s.txt' % (args[1], i, args[3], args[2]))

            else:
                required_output.append('%s/log_MINT%s.txt' % (current, args[2]))
                if args[2] in ['0', '1']:
                    required_output.append('%s/results.dat' % current)
                if args[2] == '1':
                    output_files.append('%s/results.dat' % current)

    else:
        # fixed: modern raise syntax (the "raise X, y" form is Python-2 only)
        raise aMCatNLOError('not valid arguments: %s' % (', '.join(args)))

    #Find the correct PDF input file
    pdfinput = self.get_pdf_input_filename()
    if os.path.exists(pdfinput):
        input_files.append(pdfinput)

    if len(args) == 4 and not keep_fourth_arg:
        args = args[:3]

    return input_files, output_files, required_output, args
3610
def write_madinMMC_file(self, path, run_mode, mint_mode):
    """Write the madinMMC_?.2 input card used by madevent_mintMC.

    path: directory where the card is created.
    run_mode: one of born/virt/novi/all/viSB/novB; it determines the
        B/V/F suffix of the file name.
    mint_mode: MINT integration mode (0 grid setup, 1 integration,
        2 event generation).
    Raises aMCatNLOError for an invalid run_mode or mint_mode.
    """
    #check the validity of the arguments
    run_modes = ['born', 'virt', 'novi', 'all', 'viSB', 'novB']
    if run_mode not in run_modes:
        raise aMCatNLOError('%s is not a valid mode for run. Please use one of the following: %s' \
                % (run_mode, ', '.join(run_modes)))
    mint_modes = [0, 1, 2]
    if mint_mode not in mint_modes:
        # fixed: str.join() needs strings — joining the integer modes
        # directly raised a TypeError instead of the intended message
        raise aMCatNLOError('%s is not a valid mode for mintMC. Please use one of the following: %s' \
                % (mint_mode, ', '.join(str(m) for m in mint_modes)))
    if run_mode in ['born']:
        name_suffix = 'B'
    elif run_mode in ['virt', 'viSB']:
        name_suffix = 'V'
    else:
        name_suffix = 'F'

    content = \
"""-1 12 ! points, iterations
0.03 ! desired fractional accuracy
1 -0.1 ! alpha, beta for Gsoft
-1 -0.1 ! alpha, beta for Gazi
1 ! Suppress amplitude (0 no, 1 yes)?
1 ! Exact helicity sum (0 yes, n = number/event)?
1 ! Enter Configuration Number:
%1d ! MINT imode: 0 to set-up grids, 1 to perform integral, 2 generate events
1 1 1 ! if imode is 1: Folding parameters for xi_i, phi_i and y_ij
%s ! all, born, real, virt
""" \
        % (mint_mode, run_mode)
    # renamed from 'file' to avoid shadowing the builtin
    card = open(pjoin(path, 'madinMMC_%s.2' % name_suffix), 'w')
    card.write(content)
    card.close()
3645
def write_madin_file(self, path, run_mode, vegas_mode, npoints, niters, accuracy='0'):
    """Create the madin.<run_mode> input card used by madevent_mintFO.

    Raises aMCatNLOError when run_mode is not one of the modes
    understood by the Fortran code.
    """
    # only a fixed set of running modes is understood downstream
    valid_modes = ['born', 'virt', 'novi', 'all', 'viSB', 'novB', 'grid']
    if run_mode not in valid_modes:
        raise aMCatNLOError('%s is not a valid mode for run. Please use one of the following: %s' \
                % (run_mode, ', '.join(valid_modes)))

    card_text = \
"""%s %s ! points, iterations
%s ! accuracy
2 ! 0 fixed grid 2 adjust
1 ! 1 suppress amp, 0 doesnt
1 ! 0 for exact hel sum
1 ! hel configuration numb
'test'
1 ! 1 to save grids
%s ! 0 to exclude, 1 for new run, 2 to restart, 3 to reset w/ keeping grid
%s ! all, born, real, virt
""" \
        % (npoints, niters, accuracy, vegas_mode, run_mode)

    # the card is named after the running mode itself
    with open(pjoin(path, 'madin.%s' % run_mode), 'w') as card:
        card.write(card_text)
3671
def compile(self, mode, options):
    """compiles aMC@NLO to compute either NLO or NLO matched to shower, as
    specified in mode.

    Creates the run directory and banner, prepares the cards, links or
    copies the PDF sets, optionally sets up APPLgrid/aMCfast and
    fastjet, builds Source and the internal dependencies (StdHEP,
    CutTools, IREGI), then compiles every P* directory in parallel and
    checks the test logs.
    Raises aMCatNLOError when any compilation step fails.
    """

    os.mkdir(pjoin(self.me_dir, 'Events', self.run_name))

    self.banner.write(pjoin(self.me_dir, 'Events', self.run_name,
                '%s_%s_banner.txt' % (self.run_name, self.run_tag)))

    self.get_characteristics(pjoin(self.me_dir,
                'SubProcesses', 'proc_characteristics'))

    #define a bunch of log files
    amcatnlo_log = pjoin(self.me_dir, 'compile_amcatnlo.log')
    madloop_log = pjoin(self.me_dir, 'compile_madloop.log')
    reweight_log = pjoin(self.me_dir, 'compile_reweight.log')
    test_log = pjoin(self.me_dir, 'test.log')

    self.update_status('Compiling the code', level=None, update_results=True)

    libdir = pjoin(self.me_dir, 'lib')
    sourcedir = pjoin(self.me_dir, 'Source')

    #clean files
    files.rm([amcatnlo_log, madloop_log, reweight_log, test_log])
    #define which executable/tests to compile
    if '+' in mode:
        mode = mode.split('+')[0]
    if mode in ['NLO', 'LO']:
        exe = 'madevent_mintFO'
        tests = ['test_ME']
        self.analyse_card.write_card(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'))
    elif mode in ['aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']:
        exe = 'madevent_mintMC'
        tests = ['test_ME', 'test_MC']
        # write an analyse_opts with a dummy analysis so that compilation goes through
        open(pjoin(self.me_dir, 'SubProcesses', 'analyse_opts'), 'w').write('FO_ANALYSE=analysis_dummy.o dbook.o open_output_files_dummy.o HwU_dummy.o\n')

    #directory where to compile exe
    p_dirs = [d for d in \
            open(pjoin(self.me_dir, 'SubProcesses', 'subproc.mg')).read().split('\n') if d]
    # create param_card.inc and run_card.inc
    self.do_treatcards('', amcatnlo=True)
    # if --nocompile option is specified, check here that all exes exists.
    # If they exists, return
    if all([os.path.exists(pjoin(self.me_dir, 'SubProcesses', p_dir, exe)) \
            for p_dir in p_dirs]) and options['nocompile']:
        return

    # rm links to lhapdflib/ PDFsets if exist
    if os.path.exists(pjoin(libdir, 'PDFsets')):
        files.rm(pjoin(libdir, 'PDFsets'))

    # read the run_card to find if lhapdf is used or not
    if self.run_card['pdlabel'] == 'lhapdf' and \
            (self.banner.get_detail('run_card', 'lpp1') != 0 or \
             self.banner.get_detail('run_card', 'lpp2') != 0):

        self.link_lhapdf(libdir, [pjoin('SubProcesses', p) for p in p_dirs])
        pdfsetsdir = self.get_lhapdf_pdfsetsdir()
        lhaid_list = [int(self.run_card['lhaid'])]
        if self.run_card['reweight_PDF']:
            lhaid_list.append(int(self.run_card['PDF_set_min']))
            lhaid_list.append(int(self.run_card['PDF_set_max']))
        self.copy_lhapdf_set(lhaid_list, pdfsetsdir)

    else:
        if self.run_card['lpp1'] == 1 == self.run_card['lpp2']:
            logger.info('Using built-in libraries for PDFs')
        if self.run_card['lpp1'] == 0 == self.run_card['lpp2']:
            logger.info('Lepton-Lepton collision: Ignoring \'pdlabel\' and \'lhaid\' in the run_card.')
        try:
            del os.environ['lhapdf']
        except KeyError:
            pass

    # read the run_card to find if applgrid is used or not
    if self.run_card['iappl'] != 0:
        os.environ['applgrid'] = 'True'
        # check versions of applgrid and amcfast
        for code in ['applgrid', 'amcfast']:
            try:
                p = subprocess.Popen([self.options[code], '--version'], \
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            except OSError:
                raise aMCatNLOError(('No valid %s installation found. \n' + \
                    'Please set the path to %s-config by using \n' + \
                    'MG5_aMC> set <absolute-path-to-%s>/bin/%s-config \n') % (code, code, code, code))
            else:
                output, _ = p.communicate()
                # fixed: compare string values with '==', not the
                # identity operator 'is' (interning-dependent behaviour)
                if code == 'applgrid' and output < '1.4.63':
                    raise aMCatNLOError('Version of APPLgrid is too old. Use 1.4.69 or later.'\
                            + ' You are using %s', output)
                if code == 'amcfast' and output < '1.1.1':
                    raise aMCatNLOError('Version of aMCfast is too old. Use 1.1.1 or later.'\
                            + ' You are using %s', output)

        # set-up the Source/make_opts with the correct applgrid-config file
        appllibs = " APPLLIBS=$(shell %s --ldflags) $(shell %s --ldcflags) \n" \
                % (self.options['amcfast'], self.options['applgrid'])
        text = open(pjoin(self.me_dir, 'Source', 'make_opts'), 'r').readlines()
        text_out = []
        for line in text:
            if line.strip().startswith('APPLLIBS=$'):
                line = appllibs
            text_out.append(line)
        open(pjoin(self.me_dir, 'Source', 'make_opts'), 'w').writelines(text_out)
    else:
        try:
            del os.environ['applgrid']
        except KeyError:
            pass

    try:
        os.environ['fastjet_config'] = self.options['fastjet']
    except (TypeError, KeyError):
        if 'fastjet_config' in os.environ:
            del os.environ['fastjet_config']
        os.unsetenv('fastjet_config')

    # make Source
    self.update_status('Compiling source...', level=None)
    misc.compile(['clean4pdf'], cwd=sourcedir)
    misc.compile(cwd=sourcedir)
    if os.path.exists(pjoin(libdir, 'libdhelas.a')) \
            and os.path.exists(pjoin(libdir, 'libgeneric.a')) \
            and os.path.exists(pjoin(libdir, 'libmodel.a')) \
            and os.path.exists(pjoin(libdir, 'libpdf.a')):
        logger.info(' ...done, continuing with P* directories')
    else:
        raise aMCatNLOError('Compilation failed')

    # make StdHep (only necessary with MG option output_dependencies='internal')
    MCatNLO_libdir = pjoin(self.me_dir, 'MCatNLO', 'lib')
    if not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libstdhep.a'))) or \
            not os.path.exists(os.path.realpath(pjoin(MCatNLO_libdir, 'libFmcfio.a'))):
        if os.path.exists(pjoin(sourcedir, 'StdHEP')):
            logger.info('Compiling StdHEP (can take a couple of minutes) ...')
            misc.compile(['StdHEP'], cwd=sourcedir)
            logger.info(' ...done.')
        else:
            raise aMCatNLOError('Could not compile StdHEP because its'+\
                    ' source directory could not be found in the SOURCE folder.\n'+\
                    " Check the MG5_aMC option 'output_dependencies.'")

    # make CutTools (only necessary with MG option output_dependencies='internal')
    if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
            not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
        if os.path.exists(pjoin(sourcedir, 'CutTools')):
            logger.info('Compiling CutTools (can take a couple of minutes) ...')
            misc.compile(['CutTools'], cwd=sourcedir)
            logger.info(' ...done.')
        else:
            raise aMCatNLOError('Could not compile CutTools because its'+\
                    ' source directory could not be found in the SOURCE folder.\n'+\
                    " Check the MG5_aMC option 'output_dependencies.'")
    if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
            not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
        raise aMCatNLOError('CutTools compilation failed.')

    # Verify compatibility between current compiler and the one which was
    # used when last compiling CutTools (if specified).
    compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
            libdir, 'libcts.a')))), 'compiler_version.log')
    if os.path.exists(compiler_log_path):
        compiler_version_used = open(compiler_log_path, 'r').read()
        if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
                pjoin(sourcedir, 'make_opts')))) in compiler_version_used:
            if os.path.exists(pjoin(sourcedir, 'CutTools')):
                logger.info('CutTools was compiled with a different fortran'+\
                        ' compiler. Re-compiling it now...')
                misc.compile(['cleanCT'], cwd=sourcedir)
                misc.compile(['CutTools'], cwd=sourcedir)
                logger.info(' ...done.')
            else:
                raise aMCatNLOError("CutTools installation in %s"\
                        % os.path.realpath(pjoin(libdir, 'libcts.a'))+\
                        " seems to have been compiled with a different compiler than"+\
                        " the one specified in MG5_aMC. Please recompile CutTools.")

    # make IREGI (only necessary with MG option output_dependencies='internal')
    if not os.path.exists(os.path.realpath(pjoin(libdir, 'libiregi.a'))) \
            and os.path.exists(pjoin(sourcedir, 'IREGI')):
        logger.info('Compiling IREGI (can take a couple of minutes) ...')
        misc.compile(['IREGI'], cwd=sourcedir)
        logger.info(' ...done.')

    if os.path.exists(pjoin(libdir, 'libiregi.a')):
        # Verify compatibility between current compiler and the one which was
        # used when last compiling IREGI (if specified).
        compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
                libdir, 'libiregi.a')))), 'compiler_version.log')
        if os.path.exists(compiler_log_path):
            compiler_version_used = open(compiler_log_path, 'r').read()
            if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
                    pjoin(sourcedir, 'make_opts')))) in compiler_version_used:
                if os.path.exists(pjoin(sourcedir, 'IREGI')):
                    logger.info('IREGI was compiled with a different fortran'+\
                            ' compiler. Re-compiling it now...')
                    misc.compile(['cleanIR'], cwd=sourcedir)
                    misc.compile(['IREGI'], cwd=sourcedir)
                    logger.info(' ...done.')
                else:
                    raise aMCatNLOError("IREGI installation in %s"\
                            % os.path.realpath(pjoin(libdir, 'libiregi.a'))+\
                            " seems to have been compiled with a different compiler than"+\
                            " the one specified in MG5_aMC. Please recompile IREGI.")

    # check if MadLoop virtuals have been generated
    if self.proc_characteristics['has_loops'] and \
            not os.path.exists(pjoin(self.me_dir, 'OLP_virtuals')):
        os.environ['madloop'] = 'true'
        if mode in ['NLO', 'aMC@NLO', 'noshower']:
            tests.append('check_poles')
    else:
        os.unsetenv('madloop')

    # make and run tests (if asked for), gensym and make madevent in each dir
    self.update_status('Compiling directories...', level=None)

    for test in tests:
        self.write_test_input(test)

    try:
        import multiprocessing
        if not self.nb_core:
            try:
                self.nb_core = int(self.options['nb_core'])
            except TypeError:
                self.nb_core = multiprocessing.cpu_count()
    except ImportError:
        self.nb_core = 1

    compile_options = copy.copy(self.options)
    compile_options['nb_core'] = self.nb_core
    compile_cluster = cluster.MultiCore(**compile_options)
    logger.info('Compiling on %d cores' % self.nb_core)

    update_status = lambda i, r, f: self.donothing(i, r, f)
    for p_dir in p_dirs:
        compile_cluster.submit(prog=compile_dir,
                argument=[self.me_dir, p_dir, mode, options,
                          tests, exe, self.options['run_mode']])
    try:
        compile_cluster.wait(self.me_dir, update_status)
    # fixed: "except Exception, error" is Python-2-only syntax; the
    # 'as' form works on Python 2.6+ and Python 3
    except Exception as error:
        logger.warning("Fail to compile the Subprocesses")
        if __debug__:
            raise
        compile_cluster.remove()
        self.do_quit('')

    logger.info('Checking test output:')
    for p_dir in p_dirs:
        logger.info(p_dir)
        for test in tests:
            logger.info(' Result for %s:' % test)

            this_dir = pjoin(self.me_dir, 'SubProcesses', p_dir)
            #check that none of the tests failed
            self.check_tests(test, this_dir)
3934 3935
def donothing(*args):
    """No-op status callback: accept any arguments and do nothing."""
    return None
3938 3939
def check_tests(self, test, dir):
    """Dispatch the log of a given test to its dedicated parser.

    ME/MC tests go to parse_test_mx_log; check_poles goes to
    parse_check_poles_log, except for LOonly folders (identified by the
    presence of parton_lum_0.f), which are skipped.
    """
    logfile = pjoin(dir, '%s.log' % test)
    if test == 'test_ME' or test == 'test_MC':
        return self.parse_test_mx_log(logfile)
    if test == 'check_poles' and not os.path.exists(pjoin(dir, 'parton_lum_0.f')):
        return self.parse_check_poles_log(logfile)
3947 3948
def parse_test_mx_log(self, log):
    """Inspect a test_ME/test_MC log file and abort the run on failure."""
    text = open(log).read()
    if 'FAILED' in text:
        # show the offending output, then stop the whole run
        logger.info('Output of the failing test:\n'+text[:-1],'$MG:color:BLACK')
        raise aMCatNLOError('Some tests failed, run cannot continue.\n' + \
            'Please check that widths of final state particles (e.g. top) have been' + \
            ' set to 0 in the param_card.dat.')
    passed = [l for l in text.split('\n') if 'PASSED' in l]
    logger.info(' Passed.')
    logger.debug('\n'+'\n'.join(passed))
3961 3962
def parse_check_poles_log(self, log):
    """Read a check_poles log and verify that the IR poles cancel.

    More than 10% failing points aborts the run; zero tried points only
    produces a warning.
    """
    npass, nfail, tolerance = 0, 0, 0.0
    for line in open(log).read().split('\n'):
        if 'PASSED' in line:
            npass += 1
            tolerance = float(line.split()[1])
        if 'FAILED' in line:
            nfail += 1
            tolerance = float(line.split()[1])

    total = nfail + npass
    if total == 0:
        logger.warning('0 points have been tried')
        return

    if float(nfail)/float(total) > 0.1:
        raise aMCatNLOError('Poles do not cancel, run cannot continue')
    logger.info(' Poles successfully cancel for %d points over %d (tolerance=%2.1e)' \
            %(npass, total, tolerance))
3985 3986
3987 - def write_test_input(self, test):
3988 """write the input files to run test_ME/MC or check_poles""" 3989 if test in ['test_ME', 'test_MC']: 3990 content = "-2 -2\n" #generate randomly energy/angle 3991 content+= "100 100\n" #run 100 points for soft and collinear tests 3992 content+= "0\n" #sum over helicities 3993 content+= "0\n" #all FKS configs 3994 content+= '\n'.join(["-1"] * 50) #random diagram 3995 elif test == 'check_poles': 3996 content = '20 \n -1\n' 3997 3998 file = open(pjoin(self.me_dir, '%s_input.txt' % test), 'w') 3999 if test == 'test_MC': 4000 shower = self.run_card['parton_shower'] 4001 MC_header = "%s\n " % shower + \ 4002 "1 \n1 -0.1\n-1 -0.1\n" 4003 file.write(MC_header + content) 4004 else: 4005 file.write(content) 4006 file.close()
4007 4008 4009 4010 ############################################################################
def find_model_name(self):
    """Return the model name extracted from the proc_card.

    Falls back to 'sm' when no model import line is found.  As a side
    effect, self.model and self.process (the list of generated
    processes) are refreshed from the card.
    """
    if hasattr(self, 'model_name'):
        return self.model_name

    card = os.path.join(self.me_dir, 'Cards', 'proc_card_mg5.dat')
    name = 'sm'
    processes = []
    for raw in open(card):
        stripped = raw.split('#')[0]   # drop trailing comments
        #line = line.split('=')[0]
        if stripped.startswith('import') and 'model' in stripped:
            name = stripped.split()[2]
            processes = []             # a new model resets the process list
        elif stripped.startswith('generate'):
            processes.append(stripped.split(None, 1)[1])
        elif stripped.startswith('add process'):
            processes.append(stripped.split(None, 2)[2])

    self.model = name
    self.process = processes
    return name
4032 4033 4034 4035 ############################################################################
4036 - def ask_run_configuration(self, mode, options, switch={}):
        """Ask the question when launching generate_events/multi_run.

        Interactively (unless ``self.force``) lets the user toggle the run
        switches (order, fixed_order, shower, madspin, reweight), selects the
        cards needed for the chosen configuration, stores them in the banner,
        and returns the resulting run mode string, one of: 'LO', 'NLO',
        'aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO' or 'onlyshower'.
        """

        # Default the optional flags that the rest of this routine reads.
        if 'parton' not in options:
            options['parton'] = False
        if 'reweightonly' not in options:
            options['reweightonly'] = False

        # Sentinel used for switches whose backing tool is not installed.
        void = 'NOT INSTALLED'
        switch_order = ['order', 'fixed_order', 'shower','madspin', 'reweight']
        # NOTE(review): 'reweight' has no entry here; it is always set further
        # down (installed check), so the omission appears intentional.
        switch_default = {'order': 'NLO', 'fixed_order': 'OFF', 'shower': void,
                          'madspin': void}
        if not switch:
            switch = switch_default
        else:
            # NOTE(review): 'value' is not defined in this generator expression
            # ('v' is) -- looks like a typo for 'v' that would raise NameError
            # whenever a partial switch dict is passed in. Confirm and fix.
            switch.update(dict((k,value) for k,v in switch_default.items() if k not in switch))

        default_switch = ['ON', 'OFF']
        # Legal values the user may assign to each switch.
        allowed_switch_value = {'order': ['LO', 'NLO'],
                                'fixed_order': default_switch,
                                'shower': default_switch,
                                'madspin': default_switch,
                                'reweight': default_switch}

        # One-line description printed next to each switch in the question.
        description = {'order': 'Perturbative order of the calculation:',
                       'fixed_order': 'Fixed order (no event generation and no MC@[N]LO matching):',
                       'shower': 'Shower the generated events:',
                       'madspin': 'Decay particles with the MadSpin module:',
                       'reweight': 'Add weights to the events based on changing model parameters:'}

        # Setting the key of a pair forces the listed other switches, to keep
        # the overall configuration coherent (e.g. shower=ON => fixed_order=OFF).
        force_switch = {('shower', 'ON'): {'fixed_order': 'OFF'},
                        ('madspin', 'ON'): {'fixed_order':'OFF'},
                        ('reweight', 'ON'): {'fixed_order':'OFF'},
                        ('fixed_order', 'ON'): {'shower': 'OFF', 'madspin': 'OFF', 'reweight':'OFF'}
                        }
        # Shortcut answers that set several switches at once.
        special_values = ['LO', 'NLO', 'aMC@NLO', 'aMC@LO', 'noshower', 'noshowerLO']

        # Assign a switch only if its tool is installed (keeps the void marker).
        assign_switch = lambda key, value: switch.__setitem__(key, value if switch[key] != void else void )

        if mode == 'auto':
            mode = None
        if not mode and (options['parton'] or options['reweightonly']):
            mode = 'noshower'

        # Init the switch value according to the current status
        available_mode = ['0', '1', '2']
        available_mode.append('3')
        # The presence of a card file determines the initial ON/OFF state.
        if os.path.exists(pjoin(self.me_dir, 'Cards', 'shower_card.dat')):
            switch['shower'] = 'ON'
        else:
            switch['shower'] = 'OFF'

        # MadSpin/reweight need a MG5 installation; reweight additionally
        # needs f2py (NumPy).
        if not aMCatNLO or self.options['mg5_path']:
            available_mode.append('4')
            if os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')):
                switch['madspin'] = 'ON'
            else:
                switch['madspin'] = 'OFF'
            if misc.has_f2py() or self.options['f2py_compiler']:
                available_mode.append('5')
                if os.path.exists(pjoin(self.me_dir,'Cards','reweight_card.dat')):
                    switch['reweight'] = 'ON'
                else:
                    switch['reweight'] = 'OFF'
            else:
                switch['reweight'] = 'Not available (requires NumPy)'

        # Honour the -R/-M command line flags (see _launch_parser options).
        if 'do_reweight' in options and options['do_reweight']:
            if switch['reweight'] == "OFF":
                switch['reweight'] = "ON"
            elif switch['reweight'] != "ON":
                logger.critical("Cannot run REWEIGHT: %s" % switch['reweight'])
        if 'do_madspin' in options and options['do_madspin']:
            if switch['madspin'] == "OFF":
                switch['madspin'] = 'ON'
            elif switch['madspin'] != "ON":
                # NOTE(review): this message reports switch['reweight'] but the
                # branch concerns MadSpin -- probably meant switch['madspin'].
                logger.critical("Cannot run MadSpin module: %s" % switch['reweight'])

        # Build the list of accepted answers plus lower-case aliases for ON/OFF.
        answers = list(available_mode) + ['auto', 'done']
        alias = {}
        for id, key in enumerate(switch_order):
            if switch[key] != void and switch[key] in allowed_switch_value[key]:
                answers += ['%s=%s' % (key, s) for s in allowed_switch_value[key]]
                #allow lower case for on/off
                alias.update(dict(('%s=%s' % (key, s.lower()), '%s=%s' % (key, s))
                                  for s in allowed_switch_value[key]))
        answers += special_values

        def create_question(switch):
            # Render the numbered switch menu shown to the user.
            switch_format = " %i %-61s %12s=%s\n"
            question = "The following switches determine which operations are executed:\n"
            for id, key in enumerate(switch_order):
                question += switch_format % (id+1, description[key], key, switch[key])
            question += ' Either type the switch number (1 to %s) to change its default setting,\n' % (id+1)
            question += ' or set any switch explicitly (e.g. type \'order=LO\' at the prompt)\n'
            question += ' Type \'0\', \'auto\', \'done\' or just press enter when you are done.\n'
            return question

        def modify_switch(mode, answer, switch):
            # Apply one user answer to the switch dict. Returns None when the
            # question loop should stop, the (updated) switch dict otherwise.
            if '=' in answer:
                key, status = answer.split('=')
                switch[key] = status
                # Propagate the forced values needed for a coherent setup.
                if (key, status) in force_switch:
                    for key2, status2 in force_switch[(key, status)].items():
                        if switch[key2] not in [status2, void]:
                            logger.info('For coherence \'%s\' is set to \'%s\''
                                        % (key2, status2), '$MG:color:BLACK')
                            switch[key2] = status2
            elif answer in ['0', 'auto', 'done']:
                return
            elif answer in special_values:
                logger.info('Enter mode value: Go to the related mode', '$MG:color:BLACK')
                #assign_switch('reweight', 'OFF')
                #assign_switch('madspin', 'OFF')
                if answer == 'LO':
                    switch['order'] = 'LO'
                    switch['fixed_order'] = 'ON'
                    assign_switch('shower', 'OFF')
                elif answer == 'NLO':
                    switch['order'] = 'NLO'
                    switch['fixed_order'] = 'ON'
                    assign_switch('shower', 'OFF')
                elif answer == 'aMC@NLO':
                    switch['order'] = 'NLO'
                    switch['fixed_order'] = 'OFF'
                    assign_switch('shower', 'ON')
                elif answer == 'aMC@LO':
                    switch['order'] = 'LO'
                    switch['fixed_order'] = 'OFF'
                    assign_switch('shower', 'ON')
                elif answer == 'noshower':
                    switch['order'] = 'NLO'
                    switch['fixed_order'] = 'OFF'
                    assign_switch('shower', 'OFF')
                elif answer == 'noshowerLO':
                    switch['order'] = 'LO'
                    switch['fixed_order'] = 'OFF'
                    assign_switch('shower', 'OFF')
                # A shortcut answer combined with an explicit mode ends the loop.
                if mode:
                    return
            return switch

        # Seed the switches from the previous run's answer, and re-enable
        # MadSpin if its card is present on disk.
        modify_switch(mode, self.last_mode, switch)
        if switch['madspin'] == 'OFF' and os.path.exists(pjoin(self.me_dir,'Cards','madspin_card.dat')):
            assign_switch('madspin', 'ON')

        if not self.force:
            answer = ''
            while answer not in ['0', 'done', 'auto', 'onlyshower']:
                question = create_question(switch)
                if mode:
                    answer = mode
                else:
                    answer = self.ask(question, '0', answers, alias=alias)
                # A bare switch number toggles that switch between its two values.
                if answer.isdigit() and answer != '0':
                    key = switch_order[int(answer) - 1]
                    opt1 = allowed_switch_value[key][0]
                    opt2 = allowed_switch_value[key][1]
                    answer = '%s=%s' % (key, opt1 if switch[key] == opt2 else opt2)

                if not modify_switch(mode, answer, switch):
                    break

        #assign the mode depending of the switch
        if not mode or mode == 'auto':
            if switch['order'] == 'LO':
                if switch['shower'] == 'ON':
                    mode = 'aMC@LO'
                elif switch['fixed_order'] == 'ON':
                    mode = 'LO'
                else:
                    mode = 'noshowerLO'
            elif switch['order'] == 'NLO':
                if switch['shower'] == 'ON':
                    mode = 'aMC@NLO'
                elif switch['fixed_order'] == 'ON':
                    mode = 'NLO'
                else:
                    mode = 'noshower'
        logger.info('will run in mode: %s' % mode)

        if mode == 'noshower':
            logger.warning("""You have chosen not to run a parton shower. NLO events without showering are NOT physical.
Please, shower the Les Houches events before using them for physics analyses.""")

        # specify the cards which are needed for this run.
        cards = ['param_card.dat', 'run_card.dat']
        ignore = []
        if mode in ['LO', 'NLO']:
            options['parton'] = True
            ignore = ['shower_card.dat', 'madspin_card.dat']
            cards.append('FO_analyse_card.dat')
        else:
            if switch['madspin'] == 'ON':
                cards.append('madspin_card.dat')
            if switch['reweight'] == 'ON':
                cards.append('reweight_card.dat')
        if 'aMC@' in mode:
            cards.append('shower_card.dat')
        if mode == 'onlyshower':
            cards = ['shower_card.dat']
        if options['reweightonly']:
            cards = ['run_card.dat']

        self.keep_cards(cards, ignore)

        if mode =='onlyshower':
            cards = ['shower_card.dat']

        if not options['force'] and not self.force:
            self.ask_edit_cards(cards, plot=False)

        self.banner = banner_mod.Banner()

        # store the cards in the banner
        for card in cards:
            self.banner.add(pjoin(self.me_dir, 'Cards', card))
        # and the run settings
        run_settings = '\n'.join(['%s = %s' % (k, v) for (k, v) in switch.items()])
        self.banner.add_text('run_settings', run_settings)

        if not mode =='onlyshower':
            self.run_card = self.banner.charge_card('run_card')
            self.run_tag = self.run_card['run_tag']
            #this is if the user did not provide a name for the current run
            if not hasattr(self, 'run_name') or not self.run_name:
                self.run_name = self.find_available_run_name(self.me_dir)
                #add a tag in the run_name for distinguish run_type
                if self.run_name.startswith('run_'):
                    if mode in ['LO','aMC@LO','noshowerLO']:
                        self.run_name += '_LO'
            self.set_run_name(self.run_name, self.run_tag, 'parton')
            # FxFx merging (ickkw=3) sanity checks against the selected mode.
            if int(self.run_card['ickkw']) == 3 and mode in ['LO', 'aMC@LO', 'noshowerLO']:
                raise self.InvalidCmd("""FxFx merging (ickkw=3) not allowed at LO""")
            elif int(self.run_card['ickkw']) == 3 and mode in ['aMC@NLO', 'noshower']:
                logger.warning("""You are running with FxFx merging enabled. To be able to merge
    samples of various multiplicities without double counting, you
    have to remove some events after showering 'by hand'. Please
    read http://amcatnlo.cern.ch/FxFx_merging.htm for more details.""")
                if self.run_card['parton_shower'].upper() == 'PYTHIA6Q':
                    raise self.InvalidCmd("""FxFx merging does not work with Q-squared ordered showers.""")
                elif self.run_card['parton_shower'].upper() != 'HERWIG6' and self.run_card['parton_shower'].upper() != 'PYTHIA8':
                    question="FxFx merging not tested for %s shower. Do you want to continue?\n" % self.run_card['parton_shower'] + \
                        "Type \'n\' to stop or \'y\' to continue"
                    answers = ['n','y']
                    answer = self.ask(question, 'n', answers, alias=alias)
                    if answer == 'n':
                        # NOTE(review): 'error' (typo: "opertation") is assigned
                        # but never used since the raise below is commented out;
                        # the question is simply asked again. Confirm intent.
                        error = '''Stop opertation'''
                        self.ask_run_configuration(mode, options)
                        # raise aMCatNLOError(error)
            elif int(self.run_card['ickkw']) == -1 and mode in ['aMC@NLO', 'noshower']:
                # NNLL+NLO jet-veto only possible for LO event generation or fNLO runs.
                raise self.InvalidCmd("""NNLL+NLO jet veto runs (ickkw=-1) only possible for fNLO or LO.""")
        if 'aMC@' in mode or mode == 'onlyshower':
            self.shower_card = self.banner.charge_card('shower_card')
        elif mode in ['LO', 'NLO']:
            # NOTE(review): analyse_card_path is assigned but never used here.
            analyse_card_path = pjoin(self.me_dir, 'Cards','FO_analyse_card.dat')
            self.analyse_card = self.banner.charge_card('FO_analyse_card')

        return mode


#===============================================================================
# aMCatNLOCmd
#===============================================================================
# Interactive-shell flavour of the aMC@NLO run interface: all behaviour comes
# from the two bases, so the class body is just the docstring.
class aMCatNLOCmdShell(aMCatNLOCmd, cmd.CmdShell):
    """The command line processor of MadGraph: combines the aMC@NLO run
    commands (aMCatNLOCmd) with the interactive shell front-end
    (cmd.CmdShell)."""
# ------------------------------------------------------------------------------
# Usage strings and optparse parsers for the shell-level commands
# (compile, launch, generate_events, calculate_xsect, shower).
# They are resolved by name at call time from the command methods.
# ------------------------------------------------------------------------------

# 'compile' command: build the code without running it.
_compile_usage = "compile [MODE] [options]\n" + \
    "-- compiles aMC@NLO \n" + \
    " MODE can be either FO, for fixed-order computations, \n" + \
    " or MC for matching with parton-shower monte-carlos. \n" + \
    " (if omitted, it is set to MC)\n"
_compile_parser = misc.OptionParser(usage=_compile_usage)
_compile_parser.add_option("-f", "--force", default=False, action='store_true',
                           help="Use the card present in the directory for the launch, without editing them")

# 'launch' command: full run (cross-section, event generation, shower, ...).
_launch_usage = "launch [MODE] [options]\n" + \
    "-- execute aMC@NLO \n" + \
    " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
    " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
    " computation of the total cross-section and the filling of parton-level histograms \n" + \
    " specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
    " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
    " event file is generated which will be showered with the MonteCarlo specified \n" + \
    " in the run_card.dat\n"

_launch_parser = misc.OptionParser(usage=_launch_usage)
_launch_parser.add_option("-f", "--force", default=False, action='store_true',
                          help="Use the card present in the directory for the launch, without editing them")
_launch_parser.add_option("-c", "--cluster", default=False, action='store_true',
                          help="Submit the jobs on the cluster")
_launch_parser.add_option("-m", "--multicore", default=False, action='store_true',
                          help="Submit the jobs on multicore mode")
_launch_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                          help="Skip compilation. Ignored if no executable is found")
_launch_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                          help="Skip integration and event generation, just run reweight on the" + \
                          " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_launch_parser.add_option("-p", "--parton", default=False, action='store_true',
                          help="Stop the run after the parton level file generation (you need " + \
                          "to shower the file in order to get physical results)")
_launch_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                          help="Skip grid set up, just generate events starting from " + \
                          "the last available results")
_launch_parser.add_option("-n", "--name", default=False, dest='run_name',
                          help="Provide a name to the run")
_launch_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                          help="For use with APPLgrid only: start from existing grids")
# Fixed help text: the original read "(reweighting by different model parameter"
# (unbalanced parenthesis, singular).
_launch_parser.add_option("-R", "--reweight", default=False, dest='do_reweight', action='store_true',
                          help="Run the reweight module (reweighting by different model parameters)")
_launch_parser.add_option("-M", "--madspin", default=False, dest='do_madspin', action='store_true',
                          help="Run the madspin package")

# 'generate_events' command: same as launch, minus reweight/madspin flags.
_generate_events_usage = "generate_events [MODE] [options]\n" + \
    "-- execute aMC@NLO \n" + \
    " MODE can be either LO, NLO, aMC@NLO or aMC@LO (if omitted, it is asked in a separate question)\n" + \
    " If mode is set to LO/NLO, no event generation will be performed, but only the \n" + \
    " computation of the total cross-section and the filling of parton-level histograms \n" + \
    " specified in the DIRPATH/SubProcesses/madfks_plot.f file.\n" + \
    " If mode is set to aMC@LO/aMC@NLO, after the cross-section computation, a .lhe \n" + \
    " event file is generated which will be showered with the MonteCarlo specified \n" + \
    " in the run_card.dat\n"

_generate_events_parser = misc.OptionParser(usage=_generate_events_usage)
_generate_events_parser.add_option("-f", "--force", default=False, action='store_true',
                                   help="Use the card present in the directory for the generate_events, without editing them")
_generate_events_parser.add_option("-c", "--cluster", default=False, action='store_true',
                                   help="Submit the jobs on the cluster")
_generate_events_parser.add_option("-m", "--multicore", default=False, action='store_true',
                                   help="Submit the jobs on multicore mode")
_generate_events_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                                   help="Skip compilation. Ignored if no executable is found")
_generate_events_parser.add_option("-r", "--reweightonly", default=False, action='store_true',
                                   help="Skip integration and event generation, just run reweight on the" + \
                                   " latest generated event files (see list in SubProcesses/nevents_unweighted)")
_generate_events_parser.add_option("-p", "--parton", default=False, action='store_true',
                                   help="Stop the run after the parton level file generation (you need " + \
                                   "to shower the file in order to get physical results)")
_generate_events_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                                   help="Skip grid set up, just generate events starting from " + \
                                   "the last available results")
_generate_events_parser.add_option("-n", "--name", default=False, dest='run_name',
                                   help="Provide a name to the run")

# 'calculate_xsect' command: fixed-order cross-section only, no events.
_calculate_xsect_usage = "calculate_xsect [ORDER] [options]\n" + \
    "-- calculate cross-section up to ORDER.\n" + \
    " ORDER can be either LO or NLO (if omitted, it is set to NLO). \n"

_calculate_xsect_parser = misc.OptionParser(usage=_calculate_xsect_usage)
_calculate_xsect_parser.add_option("-f", "--force", default=False, action='store_true',
                                   help="Use the card present in the directory for the launch, without editing them")
_calculate_xsect_parser.add_option("-c", "--cluster", default=False, action='store_true',
                                   help="Submit the jobs on the cluster")
_calculate_xsect_parser.add_option("-m", "--multicore", default=False, action='store_true',
                                   help="Submit the jobs on multicore mode")
_calculate_xsect_parser.add_option("-x", "--nocompile", default=False, action='store_true',
                                   help="Skip compilation. Ignored if no executable is found")
_calculate_xsect_parser.add_option("-n", "--name", default=False, dest='run_name',
                                   help="Provide a name to the run")
_calculate_xsect_parser.add_option("-a", "--appl_start_grid", default=False, dest='appl_start_grid',
                                   help="For use with APPLgrid only: start from existing grids")
_calculate_xsect_parser.add_option("-o", "--only_generation", default=False, action='store_true',
                                   help="Skip grid set up, just generate events starting from " + \
                                   "the last available results")

# 'shower' command: shower/hadronize an already generated parton-level file.
_shower_usage = 'shower run_name [options]\n' + \
    '-- do shower/hadronization on parton-level file generated for run run_name\n' + \
    ' all the information (e.g. number of events, MonteCarlo, ...\n' + \
    ' are directly read from the header of the event file\n'
_shower_parser = misc.OptionParser(usage=_shower_usage)
_shower_parser.add_option("-f", "--force", default=False, action='store_true',
                          help="Use the shower_card present in the directory for the launch, without editing")