
Source Code for Module madgraph.iolibs.export_fks

################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Methods and classes to export matrix elements to fks format."""

from distutils import dir_util
import glob
import logging
import os
import re
import shutil
import subprocess
import string
import copy
import platform

import madgraph.core.color_algebra as color
import madgraph.core.helas_objects as helas_objects
import madgraph.core.base_objects as base_objects
import madgraph.fks.fks_helas_objects as fks_helas_objects
import madgraph.fks.fks_base as fks
import madgraph.fks.fks_common as fks_common
import madgraph.iolibs.drawing_eps as draw
import madgraph.iolibs.gen_infohtml as gen_infohtml
import madgraph.iolibs.files as files
import madgraph.various.misc as misc
import madgraph.iolibs.file_writers as writers
import madgraph.iolibs.template_files as template_files
import madgraph.iolibs.ufo_expression_parsers as parsers
import madgraph.iolibs.export_v4 as export_v4
import madgraph.loop.loop_exporters as loop_exporters
import madgraph.various.q_polynomial as q_polynomial
import madgraph.various.banner as banner_mod

import aloha.create_aloha as create_aloha

import models.write_param_card as write_param_card
import models.check_param_card as check_param_card
from madgraph import MadGraph5Error, MG5DIR, InvalidCmd
from madgraph.iolibs.files import cp, ln, mv

pjoin = os.path.join

_file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/'
logger = logging.getLogger('madgraph.export_fks')

def make_jpeg_async(args):
    """Run the gen_jpeg-pl script inside the P* directory Pdir;
    args = (Pdir, old_pos, dir_path)."""
    Pdir = args[0]
    old_pos = args[1]
    dir_path = args[2]

    devnull = os.open(os.devnull, os.O_RDWR)

    os.chdir(Pdir)
    subprocess.call([os.path.join(old_pos, dir_path, 'bin', 'internal', 'gen_jpeg-pl')],
                    stdout=devnull)
    os.chdir(os.path.pardir)
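Since the arguments come packed in a single tuple, the helper can be dispatched over several P* directories at once. The sketch below is purely illustrative and is not how this module itself calls it: the process directory name 'PROCNLO' and the multiprocessing-based dispatch are assumptions.

# Illustrative sketch only; 'PROCNLO' and the pool-based dispatch are assumptions.
import multiprocessing

old_pos = os.getcwd()
dir_path = 'PROCNLO'
subproc_path = os.path.join(dir_path, 'SubProcesses')
p_dirs = [d for d in os.listdir(subproc_path)
          if d.startswith('P') and os.path.isdir(os.path.join(subproc_path, d))]
pool = multiprocessing.Pool()
pool.map(make_jpeg_async,
         [(os.path.join(subproc_path, d), old_pos, dir_path) for d in p_dirs])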


#=================================================================================
# Class used for the (non-optimized) loop process
#=================================================================================
class ProcessExporterFortranFKS(loop_exporters.LoopProcessExporterFortranSA):
    """Class to take care of exporting a set of matrix elements to
    Fortran (v4) format."""

    #===============================================================================
    # copy the Template in a new directory.
    #===============================================================================
82 - def copy_fkstemplate(self):
83 """create the directory run_name as a copy of the MadEvent 84 Template, and clean the directory 85 For now it is just the same as copy_v4template, but it will be modified 86 """ 87 88 mgme_dir = self.mgme_dir 89 dir_path = self.dir_path 90 clean =self.opt['clean'] 91 92 #First copy the full template tree if dir_path doesn't exit 93 if not os.path.isdir(dir_path): 94 if not mgme_dir: 95 raise MadGraph5Error, \ 96 "No valid MG_ME path given for MG4 run directory creation." 97 logger.info('initialize a new directory: %s' % \ 98 os.path.basename(dir_path)) 99 shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True) 100 # distutils.dir_util.copy_tree since dir_path already exists 101 dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'),dir_path) 102 # Copy plot_card 103 for card in ['plot_card']: 104 if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')): 105 try: 106 shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'), 107 pjoin(self.dir_path, 'Cards', card + '_default.dat')) 108 except IOError: 109 logger.warning("Failed to move " + card + ".dat to default") 110 111 elif not os.path.isfile(os.path.join(dir_path, 'TemplateVersion.txt')): 112 if not mgme_dir: 113 raise MadGraph5Error, \ 114 "No valid MG_ME path given for MG4 run directory creation." 115 try: 116 shutil.copy(os.path.join(mgme_dir, 'MGMEVersion.txt'), dir_path) 117 except IOError: 118 MG5_version = misc.get_pkg_info() 119 open(os.path.join(dir_path, 'MGMEVersion.txt'), 'w').write( \ 120 "5." + MG5_version['version']) 121 122 #Ensure that the Template is clean 123 if clean: 124 logger.info('remove old information in %s' % os.path.basename(dir_path)) 125 if os.environ.has_key('MADGRAPH_BASE'): 126 subprocess.call([os.path.join('bin', 'internal', 'clean_template'), 127 '--web'],cwd=dir_path) 128 else: 129 try: 130 subprocess.call([os.path.join('bin', 'internal', 'clean_template')], \ 131 cwd=dir_path) 132 except Exception, why: 133 raise MadGraph5Error('Failed to clean correctly %s: \n %s' \ 134 % (os.path.basename(dir_path),why)) 135 #Write version info 136 MG_version = misc.get_pkg_info() 137 open(os.path.join(dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write( 138 MG_version['version']) 139 140 # We must link the CutTools to the Library folder of the active Template 141 self.link_CutTools(dir_path) 142 143 link_tir_libs=[] 144 tir_libs=[] 145 os.remove(os.path.join(self.dir_path,'SubProcesses','makefile_loop.inc')) 146 dirpath = os.path.join(self.dir_path, 'SubProcesses') 147 filename = pjoin(self.dir_path, 'SubProcesses','makefile_loop') 148 calls = self.write_makefile_TIR(writers.MakefileWriter(filename), 149 link_tir_libs,tir_libs) 150 os.remove(os.path.join(self.dir_path,'Source','make_opts.inc')) 151 filename = pjoin(self.dir_path, 'Source','make_opts') 152 calls = self.write_make_opts(writers.MakefileWriter(filename), 153 link_tir_libs,tir_libs) 154 155 # Duplicate run_card and FO_analyse_card 156 for card in ['FO_analyse_card', 'shower_card']: 157 try: 158 shutil.copy(pjoin(self.dir_path, 'Cards', 159 card + '.dat'), 160 pjoin(self.dir_path, 'Cards', 161 card + '_default.dat')) 162 except IOError: 163 logger.warning("Failed to copy " + card + ".dat to default") 164 165 cwd = os.getcwd() 166 dirpath = os.path.join(self.dir_path, 'SubProcesses') 167 try: 168 os.chdir(dirpath) 169 except os.error: 170 logger.error('Could not cd to directory %s' % dirpath) 171 return 0 172 173 # We add here the user-friendly MadLoop option setter. 
174 cpfiles= ["SubProcesses/MadLoopParamReader.f", 175 "Cards/MadLoopParams.dat", 176 "SubProcesses/MadLoopParams.inc"] 177 178 for file in cpfiles: 179 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 180 os.path.join(self.dir_path, file)) 181 182 shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), 183 pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat')) 184 185 if os.path.exists(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')): 186 self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.dir_path, 187 'Cards', 'MadLoopParams.dat')) 188 # write the output file 189 self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses", 190 "MadLoopParams.dat")) 191 192 # We need minimal editing of MadLoopCommons.f 193 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 194 "SubProcesses","MadLoopCommons.inc")).read() 195 writer = writers.FortranWriter(os.path.join(self.dir_path, 196 "SubProcesses","MadLoopCommons.f")) 197 writer.writelines(MadLoopCommon%{ 198 'print_banner_commands':self.MadLoop_banner}, 199 context={'collier_available':False}) 200 writer.close() 201 202 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 203 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 204 writers.FortranWriter('cts_mpc.h')) 205 206 207 # Finally make sure to turn off MC over Hel for the default mode. 208 FKS_card_path = pjoin(self.dir_path,'Cards','FKS_params.dat') 209 FKS_card_file = open(FKS_card_path,'r') 210 FKS_card = FKS_card_file.read() 211 FKS_card_file.close() 212 FKS_card = re.sub(r"#NHelForMCoverHels\n-?\d+", 213 "#NHelForMCoverHels\n-1", FKS_card) 214 FKS_card_file = open(FKS_card_path,'w') 215 FKS_card_file.write(FKS_card) 216 FKS_card_file.close() 217 218 # Return to original PWD 219 os.chdir(cwd) 220 # Copy the different python files in the Template 221 self.copy_python_files() 222 223 # We need to create the correct open_data for the pdf 224 self.write_pdf_opendata()

    # This is kept here, and not in the optimized exporter, because we want to
    # use the same makefile_loop.inc. Also, we overload this function (i.e. it
    # is already defined in LoopProcessExporterFortranSA) because the path of
    # the template makefile is different.
    def write_makefile_TIR(self, writer, link_tir_libs, tir_libs, tir_include=[]):
        """ Create the file makefile_loop which links to the TIR libraries."""

        file = open(os.path.join(self.mgme_dir, 'Template', 'NLO',
                                 'SubProcesses', 'makefile_loop.inc')).read()
        replace_dict = {}
        replace_dict['link_tir_libs'] = ' '.join(link_tir_libs)
        replace_dict['tir_libs'] = ' '.join(tir_libs)
        replace_dict['dotf'] = '%.f'
        replace_dict['doto'] = '%.o'
        replace_dict['tir_include'] = ' '.join(tir_include)
        file = file % replace_dict
        if writer:
            writer.writelines(file)
        else:
            return file

    # This is kept here, and not in the optimized exporter, because we want to
    # use the same make_opts.inc.
    def write_make_opts(self, writer, link_tir_libs, tir_libs):
        """ Create the file make_opts which links to the TIR libraries."""
        file = open(os.path.join(self.mgme_dir, 'Template', 'NLO',
                                 'Source', 'make_opts.inc')).read()
        replace_dict = {}
        replace_dict['link_tir_libs'] = ' '.join(link_tir_libs)
        replace_dict['tir_libs'] = ' '.join(tir_libs)
        replace_dict['dotf'] = '%.f'
        replace_dict['doto'] = '%.o'
        file = file % replace_dict
        if writer:
            writer.writelines(file)
        else:
            return file
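Both helpers follow the same pattern: pass a MakefileWriter to write the file directly, or pass None to get the formatted text back. A condensed sketch of how copy_fkstemplate above drives them, with empty library lists as placeholders:

# Placeholder values; in copy_fkstemplate these come from the TIR setup.
link_tir_libs, tir_libs = [], []
filename = pjoin(self.dir_path, 'SubProcesses', 'makefile_loop')
self.write_makefile_TIR(writers.MakefileWriter(filename), link_tir_libs, tir_libs)
filename = pjoin(self.dir_path, 'Source', 'make_opts')
self.write_make_opts(writers.MakefileWriter(filename), link_tir_libs, tir_libs)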

    #===========================================================================
    # copy_python_files
    #===========================================================================
    def copy_python_files(self):
        """copy python files required for the Template"""

        files_to_copy = [ \
            pjoin('interface', 'amcatnlo_run_interface.py'),
            pjoin('interface', 'extended_cmd.py'),
            pjoin('interface', 'common_run_interface.py'),
            pjoin('interface', 'coloring_logging.py'),
            pjoin('various', 'misc.py'),
            pjoin('various', 'shower_card.py'),
            pjoin('various', 'FO_analyse_card.py'),
            pjoin('various', 'histograms.py'),
            pjoin('various', 'banner.py'),
            pjoin('various', 'cluster.py'),
            pjoin('various', 'systematics.py'),
            pjoin('various', 'lhe_parser.py'),
            pjoin('madevent', 'sum_html.py'),
            pjoin('madevent', 'gen_crossxhtml.py'),
            pjoin('iolibs', 'files.py'),
            pjoin('iolibs', 'save_load_object.py'),
            pjoin('iolibs', 'file_writers.py'),
            pjoin('..', 'models', 'check_param_card.py'),
            pjoin('__init__.py')
            ]
        cp(_file_path + '/interface/.mg5_logging.conf',
           self.dir_path + '/bin/internal/me5_logging.conf')

        for cp_file in files_to_copy:
            cp(pjoin(_file_path, cp_file),
               pjoin(self.dir_path, 'bin', 'internal', os.path.basename(cp_file)))

    def convert_model(self, model, wanted_lorentz=[],
                      wanted_couplings=[]):

        super(ProcessExporterFortranFKS, self).convert_model(model,
                                               wanted_lorentz, wanted_couplings)

        IGNORE_PATTERNS = ('*.pyc', '*.dat', '*.py~')
        try:
            shutil.rmtree(pjoin(self.dir_path, 'bin', 'internal', 'ufomodel'))
        except OSError as error:
            pass
        model_path = model.get('modelpath')
        shutil.copytree(model_path,
                        pjoin(self.dir_path, 'bin', 'internal', 'ufomodel'),
                        ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))
        if hasattr(model, 'restrict_card'):
            out_path = pjoin(self.dir_path, 'bin', 'internal', 'ufomodel',
                             'restrict_default.dat')
            if isinstance(model.restrict_card, check_param_card.ParamCard):
                model.restrict_card.write(out_path)
            else:
                files.cp(model.restrict_card, out_path)



    #===========================================================================
    # write_maxparticles_file
    #===========================================================================
    def write_maxparticles_file(self, writer, maxparticles):
        """Write the maxparticles.inc file for MadEvent"""

        lines = "integer max_particles, max_branch\n"
        lines += "parameter (max_particles=%d) \n" % maxparticles
        lines += "parameter (max_branch=max_particles-1)"

        # Write the file
        writer.writelines(lines)

        return True


    #===========================================================================
    # write_maxconfigs_file
    #===========================================================================
    def write_maxconfigs_file(self, writer, maxconfigs):
        """Write the maxconfigs.inc file for MadEvent"""

        lines = "integer lmaxconfigs\n"
        lines += "parameter (lmaxconfigs=%d)" % maxconfigs

        # Write the file
        writer.writelines(lines)

        return True
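Both writers are driven from finalize() below through a FortranWriter, so the resulting include files come out as properly formatted Fortran. A minimal sketch of those calls; the maxima (8 particles, 16 configurations) are made-up values, which finalize() instead takes from the matrix elements:

# Illustrative values; finalize() uses matrix_elements.get_max_particles() etc.
filename = os.path.join(self.dir_path, 'Source', 'maxparticles.inc')
self.write_maxparticles_file(writers.FortranWriter(filename), 8)
filename = os.path.join(self.dir_path, 'Source', 'maxconfigs.inc')
self.write_maxconfigs_file(writers.FortranWriter(filename), 16)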


    #===============================================================================
    # write a procdef_mg5 (an equivalent of the MG4 proc_card.dat)
    #===============================================================================
    def write_procdef_mg5(self, file_pos, modelname, process_str):
        """Write an equivalent of the MG4 proc_card so that all the Perl scripts
        of MadEvent4 keep working properly for a pure MG5 run."""

        proc_card_template = template_files.mg4_proc_card.mg4_template
        process_template = template_files.mg4_proc_card.process_template
        process_text = ''
        coupling = ''
        new_process_content = []

        # First find the couplings and remove them from process_str,
        # but make sure beforehand that the couplings are defined without spaces:
        process_str = process_str.replace(' =', '=')
        process_str = process_str.replace('= ', '=')
        process_str = process_str.replace(',', ' , ')
        # now loop over the elements and treat all the couplings
        for info in process_str.split():
            if '=' in info:
                coupling += info + '\n'
            else:
                new_process_content.append(info)
        # Recombine the process_str (which is the input process_str without the
        # coupling info)
        process_str = ' '.join(new_process_content)

        # format the SubProcess
        process_text += process_template.substitute({'process': process_str, \
                                                     'coupling': coupling})

        text = proc_card_template.substitute({'process': process_text,
                                              'model': modelname,
                                              'multiparticle': ''})
        ff = open(file_pos, 'w')
        ff.write(text)
        ff.close()


    #===============================================================================
    # write an initial-states map, useful for the fast PDF NLO interface
    #===============================================================================
    def write_init_map(self, file_pos, initial_states):
        """ Write an initial-state process map. Each possible PDF
        combination gets a unique identifier."""

        text = ''
        for i, e in enumerate(initial_states):
            text = text + str(i + 1) + ' ' + str(len(e))
            for t in e:
                text = text + ' '
                try:
                    for p in t:
                        text = text + ' ' + str(p)
                except TypeError:
                    text = text + ' ' + str(t)
            text = text + '\n'

        ff = open(file_pos, 'w')
        ff.write(text)
        ff.close()

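A small illustration of the resulting file format; the PDG combinations are made up for the example and 'exporter' stands for an instance of this class:

# Hypothetical input: two map entries, the second grouping two PDF combinations.
initial_states = [[(21, 21)], [(2, -2), (1, -1)]]
exporter.write_init_map('initial_states_map.dat', initial_states)
# The written file then contains one line per identifier:
# 1 1  21 21
# 2 2  2 -2  1 -1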
    def get_ME_identifier(self, matrix_element, *args, **opts):
        """ A function returning a string uniquely identifying the matrix
        element given in argument, so that it can be used as a prefix to all
        MadLoop5 subroutines and common blocks related to it. This allows one
        to compile several processes into one library, as requested by the
        BLHA (Binoth Les Houches Accord) guidelines. The MadFKS design
        requires that there is no process prefix."""

        return ''

    #===============================================================================
    # write_coef_specs
    #===============================================================================
    def write_coef_specs_file(self, virt_me_list):
        """writes the coef_specs.inc in the DHELAS folder. Should not be called in the
        non-optimized mode"""
        raise fks_common.FKSProcessError(
            "write_coef_specs should be called only in the loop-optimized mode")


    #===============================================================================
    # generate_directories_fks
    #===============================================================================
439 - def generate_directories_fks(self, matrix_element, fortran_model, me_number, 440 me_ntot, path=os.getcwd(),OLP='MadLoop'):
441 """Generate the Pxxxxx_i directories for a subprocess in MadFKS, 442 including the necessary matrix.f and various helper files""" 443 proc = matrix_element.born_matrix_element['processes'][0] 444 445 if not self.model: 446 self.model = matrix_element.get('processes')[0].get('model') 447 448 cwd = os.getcwd() 449 try: 450 os.chdir(path) 451 except OSError, error: 452 error_msg = "The directory %s should exist in order to be able " % path + \ 453 "to \"export\" in it. If you see this error message by " + \ 454 "typing the command \"export\" please consider to use " + \ 455 "instead the command \"output\". " 456 raise MadGraph5Error, error_msg 457 458 calls = 0 459 460 self.fksdirs = [] 461 #first make and cd the direcrory corresponding to the born process: 462 borndir = "P%s" % \ 463 (matrix_element.get('processes')[0].shell_string()) 464 os.mkdir(borndir) 465 os.chdir(borndir) 466 logger.info('Writing files in %s (%d / %d)' % (borndir, me_number + 1, me_ntot)) 467 468 ## write the files corresponding to the born process in the P* directory 469 self.generate_born_fks_files(matrix_element, 470 fortran_model, me_number, path) 471 472 # With NJET you want to generate the order file per subprocess and most 473 # likely also generate it for each subproc. 474 if OLP=='NJET': 475 filename = 'OLE_order.lh' 476 self.write_lh_order(filename, [matrix_element.born_matrix_element.get('processes')[0]], OLP) 477 478 if matrix_element.virt_matrix_element: 479 calls += self.generate_virt_directory( \ 480 matrix_element.virt_matrix_element, \ 481 fortran_model, \ 482 os.path.join(path, borndir)) 483 484 #write the infortions for the different real emission processes 485 486 self.write_real_matrix_elements(matrix_element, fortran_model) 487 488 self.write_pdf_calls(matrix_element, fortran_model) 489 490 filename = 'nFKSconfigs.inc' 491 self.write_nfksconfigs_file(writers.FortranWriter(filename), 492 matrix_element, 493 fortran_model) 494 495 filename = 'iproc.dat' 496 self.write_iproc_file(writers.FortranWriter(filename), 497 me_number) 498 499 filename = 'fks_info.inc' 500 self.write_fks_info_file(writers.FortranWriter(filename), 501 matrix_element, 502 fortran_model) 503 504 filename = 'leshouche_info.dat' 505 nfksconfs,maxproc,maxflow,nexternal=\ 506 self.write_leshouche_info_file(filename,matrix_element) 507 508 # if no corrections are generated ([LOonly] mode), get 509 # these variables from the born 510 if nfksconfs == maxproc == maxflow == 0: 511 nfksconfs = 1 512 (dummylines, maxproc, maxflow) = self.get_leshouche_lines( 513 matrix_element.born_matrix_element, 1) 514 515 filename = 'leshouche_decl.inc' 516 self.write_leshouche_info_declarations( 517 writers.FortranWriter(filename), 518 nfksconfs,maxproc,maxflow,nexternal, 519 fortran_model) 520 filename = 'genps.inc' 521 ngraphs = matrix_element.born_matrix_element.get_number_of_amplitudes() 522 ncolor = max(1,len(matrix_element.born_matrix_element.get('color_basis'))) 523 self.write_genps(writers.FortranWriter(filename),maxproc,ngraphs,\ 524 ncolor,maxflow,fortran_model) 525 526 filename = 'configs_and_props_info.dat' 527 nconfigs,max_leg_number,nfksconfs=self.write_configs_and_props_info_file( 528 filename, 529 matrix_element) 530 531 filename = 'configs_and_props_decl.inc' 532 self.write_configs_and_props_info_declarations( 533 writers.FortranWriter(filename), 534 nconfigs,max_leg_number,nfksconfs, 535 fortran_model) 536 537 filename = 'real_from_born_configs.inc' 538 self.write_real_from_born_configs( 539 writers.FortranWriter(filename), 540 
matrix_element, 541 fortran_model) 542 543 filename = 'ngraphs.inc' 544 self.write_ngraphs_file(writers.FortranWriter(filename), 545 nconfigs) 546 547 #write the wrappers 548 filename = 'real_me_chooser.f' 549 self.write_real_me_wrapper(writers.FortranWriter(filename), 550 matrix_element, 551 fortran_model) 552 553 filename = 'parton_lum_chooser.f' 554 self.write_pdf_wrapper(writers.FortranWriter(filename), 555 matrix_element, 556 fortran_model) 557 558 filename = 'get_color.f' 559 self.write_colors_file(writers.FortranWriter(filename), 560 matrix_element) 561 562 filename = 'nexternal.inc' 563 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 564 self.write_nexternal_file(writers.FortranWriter(filename), 565 nexternal, ninitial) 566 self.proc_characteristic['ninitial'] = ninitial 567 self.proc_characteristic['nexternal'] = max(self.proc_characteristic['nexternal'], nexternal) 568 569 filename = 'pmass.inc' 570 try: 571 self.write_pmass_file(writers.FortranWriter(filename), 572 matrix_element.real_processes[0].matrix_element) 573 except IndexError: 574 self.write_pmass_file(writers.FortranWriter(filename), 575 matrix_element.born_matrix_element) 576 577 #draw the diagrams 578 self.draw_feynman_diagrams(matrix_element) 579 580 linkfiles = ['BinothLHADummy.f', 581 'check_poles.f', 582 'MCmasses_HERWIG6.inc', 583 'MCmasses_HERWIGPP.inc', 584 'MCmasses_PYTHIA6Q.inc', 585 'MCmasses_PYTHIA6PT.inc', 586 'MCmasses_PYTHIA8.inc', 587 'add_write_info.f', 588 'coupl.inc', 589 'cuts.f', 590 'FKS_params.dat', 591 'initial_states_map.dat', 592 'OLE_order.olc', 593 'FKSParams.inc', 594 'FKSParamReader.f', 595 'cuts.inc', 596 'unlops.inc', 597 'pythia_unlops.f', 598 'driver_mintMC.f', 599 'driver_mintFO.f', 600 'appl_interface.cc', 601 'appl_interface_dummy.f', 602 'appl_common.inc', 603 'reweight_appl.inc', 604 'fastjetfortran_madfks_core.cc', 605 'fastjetfortran_madfks_full.cc', 606 'fjcore.cc', 607 'fastjet_wrapper.f', 608 'fjcore.hh', 609 'fks_Sij.f', 610 'fks_powers.inc', 611 'fks_singular.f', 612 'veto_xsec.f', 613 'veto_xsec.inc', 614 'weight_lines.f', 615 'fks_inc_chooser.f', 616 'leshouche_inc_chooser.f', 617 'configs_and_props_inc_chooser.f', 618 'genps_fks.f', 619 'boostwdir2.f', 620 'madfks_mcatnlo.inc', 621 'open_output_files.f', 622 'open_output_files_dummy.f', 623 'HwU_dummy.f', 624 'madfks_plot.f', 625 'analysis_dummy.f', 626 'analysis_lhe.f', 627 'mint-integrator2.f', 628 'MC_integer.f', 629 'mint.inc', 630 'montecarlocounter.f', 631 'q_es.inc', 632 'recluster.cc', 633 'Boosts.h', 634 'reweight_xsec.f', 635 'reweight_xsec_events.f', 636 'reweight_xsec_events_pdf_dummy.f', 637 'iproc_map.f', 638 'run.inc', 639 'run_card.inc', 640 'setcuts.f', 641 'setscales.f', 642 'test_soft_col_limits.f', 643 'symmetry_fks_v3.f', 644 'vegas2.for', 645 'write_ajob.f', 646 'handling_lhe_events.f', 647 'write_event.f', 648 'fill_MC_mshell.f', 649 'maxparticles.inc', 650 'message.inc', 651 'initcluster.f', 652 'cluster.inc', 653 'cluster.f', 654 'reweight.f', 655 'randinit', 656 'sudakov.inc', 657 'maxconfigs.inc', 658 'timing_variables.inc'] 659 660 for file in linkfiles: 661 ln('../' + file , '.') 662 os.system("ln -s ../../Cards/param_card.dat .") 663 664 #copy the makefile 665 os.system("ln -s ../makefile_fks_dir ./makefile") 666 if matrix_element.virt_matrix_element: 667 os.system("ln -s ../BinothLHA.f ./BinothLHA.f") 668 elif OLP!='MadLoop': 669 os.system("ln -s ../BinothLHA_OLP.f ./BinothLHA.f") 670 else: 671 os.system("ln -s ../BinothLHA_user.f ./BinothLHA.f") 672 673 674 #import 
nexternal/leshouches in Source 675 # ln('nexternal.inc', '../../Source', log=False) 676 # ln('born_leshouche.inc', '../../Source', log=False) 677 678 679 # Return to SubProcesses dir 680 os.chdir(os.path.pardir) 681 # Add subprocess to subproc.mg 682 filename = 'subproc.mg' 683 files.append_to_file(filename, 684 self.write_subproc, 685 borndir) 686 687 688 os.chdir(cwd) 689 # Generate info page 690 gen_infohtml.make_info_html_nlo(self.dir_path) 691 692 693 return calls

    #===========================================================================
    # create the run_card
    #===========================================================================
    def create_run_card(self, processes, history):
        """Create run_card.dat and run_card_default.dat in the Cards directory."""

        run_card = banner_mod.RunCardNLO()

        run_card.create_default_for_process(self.proc_characteristic,
                                            history,
                                            processes)

        run_card.write(pjoin(self.dir_path, 'Cards', 'run_card_default.dat'))
        run_card.write(pjoin(self.dir_path, 'Cards', 'run_card.dat'))


    def pass_information_from_cmd(self, cmd):
        """pass information from the command interface to the exporter.
        Please do not modify any object of the interface from the exporter.
        """
        self.proc_defs = cmd._curr_proc_defs
        if hasattr(cmd, 'born_processes'):
            self.born_processes = cmd.born_processes
        else:
            self.born_processes = []
        return

722 - def finalize(self, matrix_elements, history, mg5options, flaglist):
723 """Finalize FKS directory by creating jpeg diagrams, html 724 pages,proc_card_mg5.dat and madevent.tar.gz and create the MA5 card if 725 necessary.""" 726 727 devnull = os.open(os.devnull, os.O_RDWR) 728 try: 729 res = misc.call([mg5options['lhapdf'], '--version'], \ 730 stdout=subprocess.PIPE, stderr=subprocess.PIPE) 731 except Exception: 732 res = 1 733 if res != 0: 734 logger.info('The value for lhapdf in the current configuration does not ' + \ 735 'correspond to a valid executable.\nPlease set it correctly either in ' + \ 736 'input/mg5_configuration or with "set lhapdf /path/to/lhapdf-config" ' + \ 737 'and regenrate the process. \nTo avoid regeneration, edit the ' + \ 738 ('%s/Cards/amcatnlo_configuration.txt file.\n' % self.dir_path ) + \ 739 'Note that you can still compile and run aMC@NLO with the built-in PDFs\n') 740 741 compiler_dict = {'fortran': mg5options['fortran_compiler'], 742 'cpp': mg5options['cpp_compiler'], 743 'f2py': mg5options['f2py_compiler']} 744 745 if 'nojpeg' in flaglist: 746 makejpg = False 747 else: 748 makejpg = True 749 output_dependencies = mg5options['output_dependencies'] 750 751 752 self.proc_characteristic['grouped_matrix'] = False 753 self.proc_characteristic['complex_mass_scheme'] = mg5options['complex_mass_scheme'] 754 755 self.create_proc_charac() 756 757 self.create_run_card(matrix_elements.get_processes(), history) 758 # modelname = self.model.get('name') 759 # if modelname == 'mssm' or modelname.startswith('mssm-'): 760 # param_card = os.path.join(self.dir_path, 'Cards','param_card.dat') 761 # mg5_param = os.path.join(self.dir_path, 'Source', 'MODEL', 'MG5_param.dat') 762 # check_param_card.convert_to_mg5card(param_card, mg5_param) 763 # check_param_card.check_valid_param_card(mg5_param) 764 765 # # write the model functions get_mass/width_from_id 766 filename = os.path.join(self.dir_path,'Source','MODEL','get_mass_width_fcts.f') 767 makeinc = os.path.join(self.dir_path,'Source','MODEL','makeinc.inc') 768 self.write_get_mass_width_file(writers.FortranWriter(filename), makeinc, self.model) 769 770 # # Write maxconfigs.inc based on max of ME's/subprocess groups 771 772 filename = os.path.join(self.dir_path,'Source','maxconfigs.inc') 773 self.write_maxconfigs_file(writers.FortranWriter(filename), 774 matrix_elements.get_max_configs()) 775 776 # # Write maxparticles.inc based on max of ME's/subprocess groups 777 filename = os.path.join(self.dir_path,'Source','maxparticles.inc') 778 self.write_maxparticles_file(writers.FortranWriter(filename), 779 matrix_elements.get_max_particles()) 780 781 # Touch "done" file 782 os.system('touch %s/done' % os.path.join(self.dir_path,'SubProcesses')) 783 784 # Check for compiler 785 fcompiler_chosen = self.set_fortran_compiler(compiler_dict) 786 ccompiler_chosen = self.set_cpp_compiler(compiler_dict['cpp']) 787 788 old_pos = os.getcwd() 789 os.chdir(os.path.join(self.dir_path, 'SubProcesses')) 790 P_dir_list = [proc for proc in os.listdir('.') if os.path.isdir(proc) and \ 791 proc[0] == 'P'] 792 793 devnull = os.open(os.devnull, os.O_RDWR) 794 # Convert the poscript in jpg files (if authorize) 795 if makejpg: 796 logger.info("Generate jpeg diagrams") 797 for Pdir in P_dir_list: 798 os.chdir(Pdir) 799 subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_jpeg-pl')], 800 stdout = devnull) 801 os.chdir(os.path.pardir) 802 # 803 logger.info("Generate web pages") 804 # Create the WebPage using perl script 805 806 subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 
'gen_cardhtml-pl')], \ 807 stdout = devnull) 808 809 os.chdir(os.path.pardir) 810 # 811 # obj = gen_infohtml.make_info_html(self.dir_path) 812 # [mv(name, './HTML/') for name in os.listdir('.') if \ 813 # (name.endswith('.html') or name.endswith('.jpg')) and \ 814 # name != 'index.html'] 815 # if online: 816 # nb_channel = obj.rep_rule['nb_gen_diag'] 817 # open(os.path.join('./Online'),'w').write(str(nb_channel)) 818 819 # Write command history as proc_card_mg5 820 if os.path.isdir('Cards'): 821 output_file = os.path.join('Cards', 'proc_card_mg5.dat') 822 history.write(output_file) 823 824 # Duplicate run_card and FO_analyse_card 825 for card in ['run_card', 'FO_analyse_card', 'shower_card']: 826 try: 827 shutil.copy(pjoin(self.dir_path, 'Cards', 828 card + '.dat'), 829 pjoin(self.dir_path, 'Cards', 830 card + '_default.dat')) 831 except IOError: 832 logger.warning("Failed to copy " + card + ".dat to default") 833 834 835 subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], 836 stdout = devnull) 837 838 # Run "make" to generate madevent.tar.gz file 839 if os.path.exists(pjoin('SubProcesses', 'subproc.mg')): 840 if os.path.exists('amcatnlo.tar.gz'): 841 os.remove('amcatnlo.tar.gz') 842 subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'make_amcatnlo_tar')], 843 stdout = devnull) 844 # 845 subprocess.call([os.path.join(old_pos, self.dir_path, 'bin', 'internal', 'gen_cardhtml-pl')], 846 stdout = devnull) 847 848 #return to the initial dir 849 os.chdir(old_pos) 850 851 # Setup stdHep 852 # Find the correct fortran compiler 853 base_compiler= ['FC=g77','FC=gfortran'] 854 855 StdHep_path = pjoin(MG5DIR, 'vendor', 'StdHEP') 856 857 if output_dependencies == 'external': 858 # check if stdhep has to be compiled (only the first time) 859 if not os.path.exists(pjoin(MG5DIR, 'vendor', 'StdHEP', 'lib', 'libstdhep.a')) or \ 860 not os.path.exists(pjoin(MG5DIR, 'vendor', 'StdHEP', 'lib', 'libFmcfio.a')): 861 if 'FC' not in os.environ or not os.environ['FC']: 862 path = os.path.join(StdHep_path, 'src', 'make_opts') 863 text = open(path).read() 864 for base in base_compiler: 865 text = text.replace(base,'FC=%s' % fcompiler_chosen) 866 open(path, 'w').writelines(text) 867 868 logger.info('Compiling StdHEP. 
This has to be done only once.') 869 misc.compile(cwd = pjoin(MG5DIR, 'vendor', 'StdHEP')) 870 logger.info('Done.') 871 #then link the libraries in the exported dir 872 files.ln(pjoin(StdHep_path, 'lib', 'libstdhep.a'), \ 873 pjoin(self.dir_path, 'MCatNLO', 'lib')) 874 files.ln(pjoin(StdHep_path, 'lib', 'libFmcfio.a'), \ 875 pjoin(self.dir_path, 'MCatNLO', 'lib')) 876 877 elif output_dependencies == 'internal': 878 StdHEP_internal_path = pjoin(self.dir_path,'Source','StdHEP') 879 shutil.copytree(StdHep_path,StdHEP_internal_path, symlinks=True) 880 # Create the links to the lib folder 881 linkfiles = ['libstdhep.a', 'libFmcfio.a'] 882 for file in linkfiles: 883 ln(pjoin(os.path.pardir,os.path.pardir,'Source','StdHEP','lib',file), 884 os.path.join(self.dir_path, 'MCatNLO', 'lib')) 885 if 'FC' not in os.environ or not os.environ['FC']: 886 path = pjoin(StdHEP_internal_path, 'src', 'make_opts') 887 text = open(path).read() 888 for base in base_compiler: 889 text = text.replace(base,'FC=%s' % fcompiler_chosen) 890 open(path, 'w').writelines(text) 891 # To avoid compiler version conflicts, we force a clean here 892 misc.compile(['clean'],cwd = StdHEP_internal_path) 893 894 elif output_dependencies == 'environment_paths': 895 # Here the user chose to define the dependencies path in one of 896 # his environmental paths 897 libStdHep = misc.which_lib('libstdhep.a') 898 libFmcfio = misc.which_lib('libFmcfio.a') 899 if not libStdHep is None and not libFmcfio is None: 900 logger.info('MG5_aMC is using StdHep installation found at %s.'%\ 901 os.path.dirname(libStdHep)) 902 ln(pjoin(libStdHep),pjoin(self.dir_path, 'MCatNLO', 'lib'),abspath=True) 903 ln(pjoin(libFmcfio),pjoin(self.dir_path, 'MCatNLO', 'lib'),abspath=True) 904 else: 905 raise InvalidCmd("Could not find the location of the files"+\ 906 " libstdhep.a and libFmcfio.a in you environment paths.") 907 908 else: 909 raise MadGraph5Error, 'output_dependencies option %s not recognized'\ 910 %output_dependencies 911 912 # Create the default MadAnalysis5 cards 913 if 'madanalysis5_path' in self.opt and not \ 914 self.opt['madanalysis5_path'] is None and not self.proc_defs is None: 915 # When using 916 processes = sum([me.get('processes') if not isinstance(me, str) else [] \ 917 for me in matrix_elements.get('matrix_elements')],[]) 918 919 # Try getting the processes from the generation info directly if no ME are 920 # available (as it is the case for parallel generation 921 if len(processes)==0: 922 processes = self.born_processes 923 if len(processes)==0: 924 logger.warning( 925 """MG5aMC could not provide to Madanalysis5 the list of processes generated. 926 As a result, the default card will not be tailored to the process generated. 927 This typically happens when using the 'low_mem_multicore_nlo_generation' NLO generation mode.""") 928 # For now, simply assign all processes to each proc_defs. 929 # That shouldn't really affect the default analysis card created by MA5 930 self.create_default_madanalysis5_cards( 931 history, self.proc_defs, [processes,]*len(self.proc_defs), 932 self.opt['madanalysis5_path'], pjoin(self.dir_path,'Cards'), 933 levels =['hadron'])
934
    def write_real_from_born_configs(self, writer, matrix_element, fortran_model):
        """Writes the real_from_born_configs.inc file that contains the mapping
        from a given born configuration (as used e.g. in the multi-channel
        phase-space integration) to the corresponding real-emission diagram,
        i.e. the real-emission diagram in which the combined parton ij is
        split into i_fks and j_fks."""
        lines = []
        lines2 = []
        max_links = 0
        born_me = matrix_element.born_matrix_element
        for iFKS, conf in enumerate(matrix_element.get_fks_info_list()):
            iFKS = iFKS + 1
            links = conf['fks_info']['rb_links']
            max_links = max(max_links, len(links))
            for i, diags in enumerate(links):
                if not i == diags['born_conf']:
                    print links
                    raise MadGraph5Error, "born_conf should be canonically ordered"
            real_configs = ', '.join(['%d' % int(diags['real_conf'] + 1) for diags in links])
            lines.append("data (real_from_born_conf(irfbc,%d),irfbc=1,%d) /%s/" \
                         % (iFKS, len(links), real_configs))

        lines2.append("integer irfbc")
        lines2.append("integer real_from_born_conf(%d,%d)" \
                      % (max_links, len(matrix_element.get_fks_info_list())))
        # Write the file
        writer.writelines(lines2 + lines)
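For one FKS configuration whose three born configurations map to real-emission diagrams 2, 5 and 7, the data statement built by the format string above would look as follows (the values are invented for the illustration):

# Purely illustrative evaluation of the format string used above.
iFKS = 1
links = [{'born_conf': 0, 'real_conf': 1},
         {'born_conf': 1, 'real_conf': 4},
         {'born_conf': 2, 'real_conf': 6}]
real_configs = ', '.join(['%d' % int(d['real_conf'] + 1) for d in links])
line = "data (real_from_born_conf(irfbc,%d),irfbc=1,%d) /%s/" % (iFKS, len(links), real_configs)
# line == 'data (real_from_born_conf(irfbc,1),irfbc=1,3) /2, 5, 7/'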


    #===============================================================================
    # write_get_mass_width_file
    #===============================================================================
    #test written
    def write_get_mass_width_file(self, writer, makeinc, model):
        """Write the get_mass_width_fcts.f file for MG4.
        Also update the makeinc.inc file
        """
        mass_particles = [p for p in model['particles'] if p['mass'].lower() != 'zero']
        width_particles = [p for p in model['particles'] if p['width'].lower() != 'zero']

        iflines_mass = ''
        iflines_width = ''

        for i, part in enumerate(mass_particles):
            if i == 0:
                ifstring = 'if'
            else:
                ifstring = 'else if'
            if part['self_antipart']:
                iflines_mass += '%s (id.eq.%d) then\n' % \
                        (ifstring, part.get_pdg_code())
            else:
                iflines_mass += '%s (id.eq.%d.or.id.eq.%d) then\n' % \
                        (ifstring, part.get_pdg_code(), part.get_anti_pdg_code())
            iflines_mass += 'get_mass_from_id=abs(%s)\n' % part.get('mass')

        for i, part in enumerate(width_particles):
            if i == 0:
                ifstring = 'if'
            else:
                ifstring = 'else if'
            if part['self_antipart']:
                iflines_width += '%s (id.eq.%d) then\n' % \
                        (ifstring, part.get_pdg_code())
            else:
                iflines_width += '%s (id.eq.%d.or.id.eq.%d) then\n' % \
                        (ifstring, part.get_pdg_code(), part.get_anti_pdg_code())
            iflines_width += 'get_width_from_id=abs(%s)\n' % part.get('width')

        # Make sure it compiles with an if-statement even if the above lists are empty
        if len(mass_particles) == 0:
            iflines_mass = 'if (.True.) then\n'

        if len(width_particles) == 0:
            iflines_width = 'if (.True.) then\n'

        replace_dict = {'iflines_mass': iflines_mass,
                        'iflines_width': iflines_width}

        file = open(os.path.join(_file_path, \
                    'iolibs/template_files/get_mass_width_fcts.inc')).read()
        file = file % replace_dict

        # Write the file
        writer.writelines(file)

        # update the makeinc
        makeinc_content = open(makeinc).read()
        makeinc_content = makeinc_content.replace('MODEL = ', 'MODEL = get_mass_width_fcts.o ')
        open(makeinc, 'w').write(makeinc_content)

        return

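This is how finalize() above invokes it, with the output going through a FortranWriter and the makefile include updated in place:

# Call pattern taken from finalize(); self.model is the currently loaded model.
filename = os.path.join(self.dir_path, 'Source', 'MODEL', 'get_mass_width_fcts.f')
makeinc = os.path.join(self.dir_path, 'Source', 'MODEL', 'makeinc.inc')
self.write_get_mass_width_file(writers.FortranWriter(filename), makeinc, self.model)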
    def write_configs_and_props_info_declarations(self, writer, max_iconfig, max_leg_number, nfksconfs, fortran_model):
        """writes the declarations for the variables relevant for configs_and_props
        """
        lines = []
        lines.append("integer ifr,lmaxconfigs_used,max_branch_used")
        lines.append("parameter (lmaxconfigs_used=%4d)" % max_iconfig)
        lines.append("parameter (max_branch_used =%4d)" % -max_leg_number)
        lines.append("integer mapconfig_d(%3d,0:lmaxconfigs_used)" % nfksconfs)
        lines.append("integer iforest_d(%3d,2,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
        lines.append("integer sprop_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
        lines.append("integer tprid_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
        lines.append("double precision pmass_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
        lines.append("double precision pwidth_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)
        lines.append("integer pow_d(%3d,-max_branch_used:-1,lmaxconfigs_used)" % nfksconfs)

        writer.writelines(lines)

1048 - def write_configs_and_props_info_file(self, filename, matrix_element):
1049 """writes the configs_and_props_info.inc file that cointains 1050 all the (real-emission) configurations (IFOREST) as well as 1051 the masses and widths of intermediate particles""" 1052 lines = [] 1053 lines.append("# C -> MAPCONFIG_D") 1054 lines.append("# F/D -> IFOREST_D") 1055 lines.append("# S -> SPROP_D") 1056 lines.append("# T -> TPRID_D") 1057 lines.append("# M -> PMASS_D/PWIDTH_D") 1058 lines.append("# P -> POW_D") 1059 lines2 = [] 1060 nconfs = len(matrix_element.get_fks_info_list()) 1061 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1062 1063 max_iconfig=0 1064 max_leg_number=0 1065 1066 for iFKS, conf in enumerate(matrix_element.get_fks_info_list()): 1067 iFKS=iFKS+1 1068 iconfig = 0 1069 s_and_t_channels = [] 1070 mapconfigs = [] 1071 fks_matrix_element=matrix_element.real_processes[conf['n_me'] - 1].matrix_element 1072 base_diagrams = fks_matrix_element.get('base_amplitude').get('diagrams') 1073 model = fks_matrix_element.get('base_amplitude').get('process').get('model') 1074 minvert = min([max([len(vert.get('legs')) for vert in \ 1075 diag.get('vertices')]) for diag in base_diagrams]) 1076 1077 lines.append("# ") 1078 lines.append("# nFKSprocess %d" % iFKS) 1079 for idiag, diag in enumerate(base_diagrams): 1080 if any([len(vert.get('legs')) > minvert for vert in 1081 diag.get('vertices')]): 1082 # Only 3-vertices allowed in configs.inc 1083 continue 1084 iconfig = iconfig + 1 1085 helas_diag = fks_matrix_element.get('diagrams')[idiag] 1086 mapconfigs.append(helas_diag.get('number')) 1087 lines.append("# Diagram %d for nFKSprocess %d" % \ 1088 (helas_diag.get('number'),iFKS)) 1089 # Correspondance between the config and the amplitudes 1090 lines.append("C %4d %4d %4d " % (iFKS,iconfig, 1091 helas_diag.get('number'))) 1092 1093 # Need to reorganize the topology so that we start with all 1094 # final state external particles and work our way inwards 1095 schannels, tchannels = helas_diag.get('amplitudes')[0].\ 1096 get_s_and_t_channels(ninitial, model, 990) 1097 1098 s_and_t_channels.append([schannels, tchannels]) 1099 1100 # Write out propagators for s-channel and t-channel vertices 1101 allchannels = schannels 1102 if len(tchannels) > 1: 1103 # Write out tchannels only if there are any non-trivial ones 1104 allchannels = schannels + tchannels 1105 1106 for vert in allchannels: 1107 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]] 1108 last_leg = vert.get('legs')[-1] 1109 lines.append("F %4d %4d %4d %4d" % \ 1110 (iFKS,last_leg.get('number'), iconfig, len(daughters))) 1111 for d in daughters: 1112 lines.append("D %4d" % d) 1113 if vert in schannels: 1114 lines.append("S %4d %4d %4d %10d" % \ 1115 (iFKS,last_leg.get('number'), iconfig, 1116 last_leg.get('id'))) 1117 elif vert in tchannels[:-1]: 1118 lines.append("T %4d %4d %4d %10d" % \ 1119 (iFKS,last_leg.get('number'), iconfig, 1120 abs(last_leg.get('id')))) 1121 1122 # update what the array sizes (mapconfig,iforest,etc) will be 1123 max_leg_number = min(max_leg_number,last_leg.get('number')) 1124 max_iconfig = max(max_iconfig,iconfig) 1125 1126 # Write out number of configs 1127 lines.append("# Number of configs for nFKSprocess %d" % iFKS) 1128 lines.append("C %4d %4d %4d" % (iFKS,0,iconfig)) 1129 1130 # write the props.inc information 1131 lines2.append("# ") 1132 particle_dict = fks_matrix_element.get('processes')[0].get('model').\ 1133 get('particle_dict') 1134 1135 for iconf, configs in enumerate(s_and_t_channels): 1136 for vertex in configs[0] + configs[1][:-1]: 1137 leg = 
vertex.get('legs')[-1] 1138 if leg.get('id') not in particle_dict: 1139 # Fake propagator used in multiparticle vertices 1140 pow_part = 0 1141 else: 1142 particle = particle_dict[leg.get('id')] 1143 1144 pow_part = 1 + int(particle.is_boson()) 1145 1146 lines2.append("M %4d %4d %4d %10d " % \ 1147 (iFKS,leg.get('number'), iconf + 1, leg.get('id'))) 1148 lines2.append("P %4d %4d %4d %4d " % \ 1149 (iFKS,leg.get('number'), iconf + 1, pow_part)) 1150 1151 # Write the file 1152 open(filename,'w').write('\n'.join(lines+lines2)) 1153 1154 return max_iconfig, max_leg_number, nconfs
1155 1156
    def write_leshouche_info_declarations(self, writer, nfksconfs,
                                          maxproc, maxflow, nexternal, fortran_model):
        """writes the declarations for the variables relevant for leshouche_info
        """
        lines = []
        lines.append('integer maxproc_used, maxflow_used')
        lines.append('parameter (maxproc_used = %d)' % maxproc)
        lines.append('parameter (maxflow_used = %d)' % maxflow)
        lines.append('integer idup_d(%d,%d,maxproc_used)' % (nfksconfs, nexternal))
        lines.append('integer mothup_d(%d,%d,%d,maxproc_used)' % (nfksconfs, 2, nexternal))
        lines.append('integer icolup_d(%d,%d,%d,maxflow_used)' % (nfksconfs, 2, nexternal))
        lines.append('integer niprocs_d(%d)' % (nfksconfs))

        writer.writelines(lines)

    def write_genps(self, writer, maxproc, ngraphs, ncolor, maxflow, fortran_model):
        """writes the genps.inc file
        """
        lines = []
        lines.append("include 'maxparticles.inc'")
        lines.append("include 'maxconfigs.inc'")
        lines.append("integer maxproc,ngraphs,ncolor,maxflow")
        lines.append("parameter (maxproc=%d,ngraphs=%d,ncolor=%d,maxflow=%d)" % \
                     (maxproc, ngraphs, ncolor, maxflow))
        writer.writelines(lines)

    def write_leshouche_info_file(self, filename, matrix_element):
        """writes the leshouche_info.dat file which contains
        the LHA information for all the real emission processes
        """
        lines = []
        lines.append("# I -> IDUP_D")
        lines.append("# M -> MOTHUP_D")
        lines.append("# C -> ICOLUP_D")
        nfksconfs = len(matrix_element.get_fks_info_list())
        (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()

        maxproc = 0
        maxflow = 0
        for i, conf in enumerate(matrix_element.get_fks_info_list()):
        # for i, real in enumerate(matrix_element.real_processes):
            (newlines, nprocs, nflows) = self.get_leshouche_lines(
                matrix_element.real_processes[conf['n_me'] - 1].matrix_element, i + 1)
            lines.extend(newlines)
            maxproc = max(maxproc, nprocs)
            maxflow = max(maxflow, nflows)

        # Write the file
        open(filename, 'w').write('\n'.join(lines))

        return nfksconfs, maxproc, maxflow, nexternal

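In generate_directories_fks above, the values returned here are fed straight into write_leshouche_info_declarations, so the Fortran arrays are dimensioned to match the data file. Schematically (the [LOonly] fallback handled in generate_directories_fks is omitted from this sketch):

# Schematic pairing of the two calls, as done in generate_directories_fks.
nfksconfs, maxproc, maxflow, nexternal = \
    self.write_leshouche_info_file('leshouche_info.dat', matrix_element)
self.write_leshouche_info_declarations(
    writers.FortranWriter('leshouche_decl.inc'),
    nfksconfs, maxproc, maxflow, nexternal, fortran_model)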
    def write_pdf_wrapper(self, writer, matrix_element, fortran_model):
        """writes the wrapper which allows one to choose among the parton
        luminosities of the different real-emission processes"""

        file = \
"""double precision function dlum()
implicit none
integer nfksprocess
common/c_nfksprocess/nfksprocess
"""
        if matrix_element.real_processes:
            for n, info in enumerate(matrix_element.get_fks_info_list()):
                file += \
"""if (nfksprocess.eq.%(n)d) then
call dlum_%(n_me)d(dlum)
else""" % {'n': n + 1, 'n_me': info['n_me']}
            file += \
"""
write(*,*) 'ERROR: invalid n in dlum :', nfksprocess
stop
endif
return
end
"""
        else:
            file += \
"""call dlum_0(dlum)
return
end
"""

        # Write the file
        writer.writelines(file)
        return 0

    def write_real_me_wrapper(self, writer, matrix_element, fortran_model):
        """writes the wrapper which allows one to choose among the different real matrix elements"""

        file = \
"""subroutine smatrix_real(p, wgt)
implicit none
include 'nexternal.inc'
double precision p(0:3, nexternal)
double precision wgt
integer nfksprocess
common/c_nfksprocess/nfksprocess
"""
        for n, info in enumerate(matrix_element.get_fks_info_list()):
            file += \
"""if (nfksprocess.eq.%(n)d) then
call smatrix_%(n_me)d(p, wgt)
else""" % {'n': n + 1, 'n_me': info['n_me']}

        if matrix_element.real_processes:
            file += \
"""
write(*,*) 'ERROR: invalid n in real_matrix :', nfksprocess
stop
endif
return
end
"""
        else:
            file += \
"""
wgt=0d0
return
end
"""
        # Write the file
        writer.writelines(file)
        return 0

    def draw_feynman_diagrams(self, matrix_element):
        """Create the ps files containing the feynman diagrams for the born process,
        as well as for all the real emission processes"""

        filename = 'born.ps'
        plot = draw.MultiEpsDiagramDrawer(matrix_element.born_matrix_element.\
                                              get('base_amplitude').get('diagrams'),
                                          filename,
                                          model=matrix_element.born_matrix_element.\
                                              get('processes')[0].get('model'),
                                          amplitude=True, diagram_type='born')
        plot.draw()

        for n, fksreal in enumerate(matrix_element.real_processes):
            filename = 'matrix_%d.ps' % (n + 1)
            plot = draw.MultiEpsDiagramDrawer(fksreal.matrix_element.\
                                                  get('base_amplitude').get('diagrams'),
                                              filename,
                                              model=fksreal.matrix_element.\
                                                  get('processes')[0].get('model'),
                                              amplitude=True, diagram_type='real')
            plot.draw()

    def write_real_matrix_elements(self, matrix_element, fortran_model):
        """writes the matrix_i.f files which contain the real matrix elements"""

        for n, fksreal in enumerate(matrix_element.real_processes):
            filename = 'matrix_%d.f' % (n + 1)
            self.write_matrix_element_fks(writers.FortranWriter(filename),
                                          fksreal.matrix_element, n + 1,
                                          fortran_model)

    def write_pdf_calls(self, matrix_element, fortran_model):
        """writes the parton_lum_i.f files which contain the parton luminosities
        for the real emission processes.
        If no real emission exists, write the one for the born"""

        if matrix_element.real_processes:
            for n, fksreal in enumerate(matrix_element.real_processes):
                filename = 'parton_lum_%d.f' % (n + 1)
                self.write_pdf_file(writers.FortranWriter(filename),
                                    fksreal.matrix_element, n + 1,
                                    fortran_model)
        else:
            filename = 'parton_lum_0.f'
            self.write_pdf_file(writers.FortranWriter(filename),
                                matrix_element.born_matrix_element, 0,
                                fortran_model)

1338 - def generate_born_fks_files(self, matrix_element, fortran_model, me_number, path):
1339 """generates the files needed for the born amplitude in the P* directory, which will 1340 be needed by the P* directories""" 1341 pathdir = os.getcwd() 1342 1343 filename = 'born.f' 1344 calls_born, ncolor_born = \ 1345 self.write_born_fks(writers.FortranWriter(filename),\ 1346 matrix_element, 1347 fortran_model) 1348 1349 filename = 'born_hel.f' 1350 self.write_born_hel(writers.FortranWriter(filename),\ 1351 matrix_element, 1352 fortran_model) 1353 1354 1355 filename = 'born_conf.inc' 1356 nconfigs, mapconfigs, s_and_t_channels = \ 1357 self.write_configs_file( 1358 writers.FortranWriter(filename), 1359 matrix_element.born_matrix_element, 1360 fortran_model) 1361 1362 filename = 'born_props.inc' 1363 self.write_props_file(writers.FortranWriter(filename), 1364 matrix_element.born_matrix_element, 1365 fortran_model, 1366 s_and_t_channels) 1367 1368 filename = 'born_decayBW.inc' 1369 self.write_decayBW_file(writers.FortranWriter(filename), 1370 s_and_t_channels) 1371 1372 filename = 'born_leshouche.inc' 1373 nflows = self.write_leshouche_file(writers.FortranWriter(filename), 1374 matrix_element.born_matrix_element, 1375 fortran_model) 1376 1377 filename = 'born_nhel.inc' 1378 self.write_born_nhel_file(writers.FortranWriter(filename), 1379 matrix_element.born_matrix_element, nflows, 1380 fortran_model, 1381 ncolor_born) 1382 1383 filename = 'born_ngraphs.inc' 1384 self.write_ngraphs_file(writers.FortranWriter(filename), 1385 matrix_element.born_matrix_element.get_number_of_amplitudes()) 1386 1387 filename = 'ncombs.inc' 1388 self.write_ncombs_file(writers.FortranWriter(filename), 1389 matrix_element.born_matrix_element, 1390 fortran_model) 1391 1392 filename = 'born_maxamps.inc' 1393 maxamps = len(matrix_element.get('diagrams')) 1394 maxflows = ncolor_born 1395 self.write_maxamps_file(writers.FortranWriter(filename), 1396 maxamps, 1397 maxflows, 1398 max([len(matrix_element.get('processes')) for me in \ 1399 matrix_element.born_matrix_element]),1) 1400 1401 filename = 'config_subproc_map.inc' 1402 self.write_config_subproc_map_file(writers.FortranWriter(filename), 1403 s_and_t_channels) 1404 1405 filename = 'coloramps.inc' 1406 self.write_coloramps_file(writers.FortranWriter(filename), 1407 mapconfigs, 1408 matrix_element.born_matrix_element, 1409 fortran_model) 1410 1411 #write the sborn_sf.f and the b_sf_files 1412 filename = ['sborn_sf.f', 'sborn_sf_dum.f'] 1413 for i, links in enumerate([matrix_element.color_links, []]): 1414 self.write_sborn_sf(writers.FortranWriter(filename[i]), 1415 links, 1416 fortran_model) 1417 self.color_link_files = [] 1418 for i in range(len(matrix_element.color_links)): 1419 filename = 'b_sf_%3.3d.f' % (i + 1) 1420 self.color_link_files.append(filename) 1421 self.write_b_sf_fks(writers.FortranWriter(filename), 1422 matrix_element, i, 1423 fortran_model)
1424 1425
1426 - def generate_virtuals_from_OLP(self,process_list,export_path, OLP):
1427 """Generates the library for computing the loop matrix elements 1428 necessary for this process using the OLP specified.""" 1429 1430 # Start by writing the BLHA order file 1431 virtual_path = pjoin(export_path,'OLP_virtuals') 1432 if not os.path.exists(virtual_path): 1433 os.makedirs(virtual_path) 1434 filename = os.path.join(virtual_path,'OLE_order.lh') 1435 self.write_lh_order(filename, process_list, OLP) 1436 1437 fail_msg='Generation of the virtuals with %s failed.\n'%OLP+\ 1438 'Please check the virt_generation.log file in %s.'\ 1439 %str(pjoin(virtual_path,'virt_generation.log')) 1440 1441 # Perform some tasks specific to certain OLP's 1442 if OLP=='GoSam': 1443 cp(pjoin(self.mgme_dir,'Template','loop_material','OLP_specifics', 1444 'GoSam','makevirt'),pjoin(virtual_path,'makevirt')) 1445 cp(pjoin(self.mgme_dir,'Template','loop_material','OLP_specifics', 1446 'GoSam','gosam.rc'),pjoin(virtual_path,'gosam.rc')) 1447 ln(pjoin(export_path,'Cards','param_card.dat'),virtual_path) 1448 # Now generate the process 1449 logger.info('Generating the loop matrix elements with %s...'%OLP) 1450 virt_generation_log = \ 1451 open(pjoin(virtual_path,'virt_generation.log'), 'w') 1452 retcode = subprocess.call(['./makevirt'],cwd=virtual_path, 1453 stdout=virt_generation_log, stderr=virt_generation_log) 1454 virt_generation_log.close() 1455 # Check what extension is used for the share libraries on this system 1456 possible_other_extensions = ['so','dylib'] 1457 shared_lib_ext='so' 1458 for ext in possible_other_extensions: 1459 if os.path.isfile(pjoin(virtual_path,'Virtuals','lib', 1460 'libgolem_olp.'+ext)): 1461 shared_lib_ext = ext 1462 1463 # Now check that everything got correctly generated 1464 files_to_check = ['olp_module.mod',str(pjoin('lib', 1465 'libgolem_olp.'+shared_lib_ext))] 1466 if retcode != 0 or any([not os.path.exists(pjoin(virtual_path, 1467 'Virtuals',f)) for f in files_to_check]): 1468 raise fks_common.FKSProcessError(fail_msg) 1469 # link the library to the lib folder 1470 ln(pjoin(virtual_path,'Virtuals','lib','libgolem_olp.'+shared_lib_ext), 1471 pjoin(export_path,'lib')) 1472 1473 # Specify in make_opts the right library necessitated by the OLP 1474 make_opts_content=open(pjoin(export_path,'Source','make_opts')).read() 1475 make_opts=open(pjoin(export_path,'Source','make_opts'),'w') 1476 if OLP=='GoSam': 1477 if platform.system().lower()=='darwin': 1478 # On mac the -rpath is not supported and the path of the dynamic 1479 # library is automatically wired in the executable 1480 make_opts_content=make_opts_content.replace('libOLP=', 1481 'libOLP=-Wl,-lgolem_olp') 1482 else: 1483 # On other platforms the option , -rpath= path to libgolem.so is necessary 1484 # Using a relative path is not ideal because the file libgolem.so is not 1485 # copied on the worker nodes. 1486 # make_opts_content=make_opts_content.replace('libOLP=', 1487 # 'libOLP=-Wl,-rpath=../$(LIBDIR) -lgolem_olp') 1488 # Using the absolute path is working in the case where the disk of the 1489 # front end machine is mounted on all worker nodes as well. 
1490 make_opts_content=make_opts_content.replace('libOLP=', 1491 'libOLP=-Wl,-rpath='+str(pjoin(export_path,'lib'))+' -lgolem_olp') 1492 1493 1494 make_opts.write(make_opts_content) 1495 make_opts.close() 1496 1497 # A priori this is generic to all OLP's 1498 1499 # Parse the contract file returned and propagate the process label to 1500 # the include of the BinothLHA.f file 1501 proc_to_label = self.parse_contract_file( 1502 pjoin(virtual_path,'OLE_order.olc')) 1503 1504 self.write_BinothLHA_inc(process_list,proc_to_label,\ 1505 pjoin(export_path,'SubProcesses')) 1506 1507 # Link the contract file to within the SubProcess directory 1508 ln(pjoin(virtual_path,'OLE_order.olc'),pjoin(export_path,'SubProcesses'))
1509
    def write_BinothLHA_inc(self, processes, proc_to_label, SubProcPath):
        """ Write the file Binoth_proc.inc in each SubProcess directory so as
        to provide the right process_label to use in the OLP call to get the
        loop matrix element evaluation. proc_to_label is a dictionary of the
        format returned by the function parse_contract_file."""

        for proc in processes:
            name = "P%s" % proc.shell_string()
            proc_pdgs = (tuple([leg.get('id') for leg in proc.get('legs') if \
                               not leg.get('state')]),
                         tuple([leg.get('id') for leg in proc.get('legs') if \
                               leg.get('state')]))
            incFile = open(pjoin(SubProcPath, name, 'Binoth_proc.inc'), 'w')
            try:
                incFile.write(
""" INTEGER PROC_LABEL
PARAMETER (PROC_LABEL=%d)""" % (proc_to_label[proc_pdgs]))
            except KeyError:
                raise fks_common.FKSProcessError('Could not find the target' + \
                    ' process %s > %s in ' % (str(proc_pdgs[0]), str(proc_pdgs[1])) + \
                    'the proc_to_label argument in write_BinothLHA_inc.')
            incFile.close()

1533 - def parse_contract_file(self, contract_file_path):
1534 """ Parses the BLHA contract file, make sure all parameters could be 1535 understood by the OLP and return a mapping of the processes (characterized 1536 by the pdg's of the initial and final state particles) to their process 1537 label. The format of the mapping is {((in_pdgs),(out_pdgs)):proc_label}. 1538 """ 1539 1540 proc_def_to_label = {} 1541 1542 if not os.path.exists(contract_file_path): 1543 raise fks_common.FKSProcessError('Could not find the contract file'+\ 1544 ' OLE_order.olc in %s.'%str(contract_file_path)) 1545 1546 comment_re=re.compile(r"^\s*#") 1547 proc_def_re=re.compile( 1548 r"^(?P<in_pdgs>(\s*-?\d+\s*)+)->(?P<out_pdgs>(\s*-?\d+\s*)+)\|"+ 1549 r"\s*(?P<proc_class>\d+)\s*(?P<proc_label>\d+)\s*$") 1550 line_OK_re=re.compile(r"^.*\|\s*OK") 1551 for line in file(contract_file_path): 1552 # Ignore comments 1553 if not comment_re.match(line) is None: 1554 continue 1555 # Check if it is a proc definition line 1556 proc_def = proc_def_re.match(line) 1557 if not proc_def is None: 1558 if int(proc_def.group('proc_class'))!=1: 1559 raise fks_common.FKSProcessError( 1560 'aMCatNLO can only handle loop processes generated by the OLP which have only '+\ 1561 ' process class attribute. Found %s instead in: \n%s'\ 1562 %(proc_def.group('proc_class'),line)) 1563 in_pdgs=tuple([int(in_pdg) for in_pdg in \ 1564 proc_def.group('in_pdgs').split()]) 1565 out_pdgs=tuple([int(out_pdg) for out_pdg in \ 1566 proc_def.group('out_pdgs').split()]) 1567 proc_def_to_label[(in_pdgs,out_pdgs)]=\ 1568 int(proc_def.group('proc_label')) 1569 continue 1570 # For the other types of line, just make sure they end with | OK 1571 if line_OK_re.match(line) is None: 1572 raise fks_common.FKSProcessError( 1573 'The OLP could not process the following line: \n%s'%line) 1574 1575 return proc_def_to_label
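#===============================================================================
# Illustrative sketch (not part of the module): applying the same regular
# expression as parse_contract_file above to a single, hypothetical process
# line of an OLE_order.olc contract.  A real contract file also contains the
# answered option lines, which the method only checks for a trailing '| OK'.
#===============================================================================
import re

proc_def_re = re.compile(
    r"^(?P<in_pdgs>(\s*-?\d+\s*)+)->(?P<out_pdgs>(\s*-?\d+\s*)+)\|"
    r"\s*(?P<proc_class>\d+)\s*(?P<proc_label>\d+)\s*$")

line = "21 21 -> 6 -6 | 1 3"                 # hypothetical: g g -> t t~, label 3
m = proc_def_re.match(line)
in_pdgs = tuple(int(p) for p in m.group('in_pdgs').split())
out_pdgs = tuple(int(p) for p in m.group('out_pdgs').split())
mapping = {(in_pdgs, out_pdgs): int(m.group('proc_label'))}
print(mapping)                               # {((21, 21), (6, -6)): 3}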
1576 1577
1578 - def generate_virt_directory(self, loop_matrix_element, fortran_model, dir_name):
1579 """writes the V**** directory inside the P**** directories specified in 1580 dir_name""" 1581 1582 cwd = os.getcwd() 1583 1584 matrix_element = loop_matrix_element 1585 1586 # Create the MadLoop5_resources directory if not already existing 1587 dirpath = os.path.join(dir_name, 'MadLoop5_resources') 1588 try: 1589 os.mkdir(dirpath) 1590 except os.error as error: 1591 logger.warning(error.strerror + " " + dirpath) 1592 1593 # Create the directory PN_xx_xxxxx in the specified path 1594 name = "V%s" % matrix_element.get('processes')[0].shell_string() 1595 dirpath = os.path.join(dir_name, name) 1596 1597 try: 1598 os.mkdir(dirpath) 1599 except os.error as error: 1600 logger.warning(error.strerror + " " + dirpath) 1601 1602 try: 1603 os.chdir(dirpath) 1604 except os.error: 1605 logger.error('Could not cd to directory %s' % dirpath) 1606 return 0 1607 1608 logger.info('Creating files in directory %s' % name) 1609 1610 # Extract number of external particles 1611 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 1612 1613 calls=self.write_loop_matrix_element_v4(None,matrix_element,fortran_model) 1614 # The born matrix element, if needed 1615 filename = 'born_matrix.f' 1616 calls = self.write_bornmatrix( 1617 writers.FortranWriter(filename), 1618 matrix_element, 1619 fortran_model) 1620 1621 filename = 'nexternal.inc' 1622 self.write_nexternal_file(writers.FortranWriter(filename), 1623 nexternal, ninitial) 1624 1625 filename = 'pmass.inc' 1626 self.write_pmass_file(writers.FortranWriter(filename), 1627 matrix_element) 1628 1629 filename = 'ngraphs.inc' 1630 self.write_ngraphs_file(writers.FortranWriter(filename), 1631 len(matrix_element.get_all_amplitudes())) 1632 1633 filename = "loop_matrix.ps" 1634 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 1635 matrix_element.get('base_amplitude').get('loop_diagrams')[:1000]), 1636 filename, 1637 model=matrix_element.get('processes')[0].get('model'), 1638 amplitude='') 1639 logger.info("Drawing loop Feynman diagrams for " + \ 1640 matrix_element.get('processes')[0].nice_string(print_weighted=False)) 1641 plot.draw() 1642 1643 filename = "born_matrix.ps" 1644 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 1645 get('born_diagrams'),filename,model=matrix_element.get('processes')[0].\ 1646 get('model'),amplitude='') 1647 logger.info("Generating born Feynman diagrams for " + \ 1648 matrix_element.get('processes')[0].nice_string(print_weighted=False)) 1649 plot.draw() 1650 1651 # We also need to write the overall maximum quantities for this group 1652 # of processes in 'global_specs.inc'. 
In aMCatNLO, there is always 1653 # only one process, so this is trivial 1654 self.write_global_specs(matrix_element, output_path=pjoin(dirpath,'global_specs.inc')) 1655 open('unique_id.inc','w').write( 1656 """ integer UNIQUE_ID 1657 parameter(UNIQUE_ID=1)""") 1658 1659 linkfiles = ['coupl.inc', 'mp_coupl.inc', 'mp_coupl_same_name.inc', 1660 'cts_mprec.h', 'cts_mpc.h', 'MadLoopParamReader.f', 1661 'MadLoopCommons.f','MadLoopParams.inc'] 1662 1663 # We should move to MadLoop5_resources directory from the SubProcesses 1664 ln(pjoin(os.path.pardir,os.path.pardir,'MadLoopParams.dat'), 1665 pjoin('..','MadLoop5_resources')) 1666 1667 for file in linkfiles: 1668 ln('../../%s' % file) 1669 1670 os.system("ln -s ../../makefile_loop makefile") 1671 1672 linkfiles = ['mpmodule.mod'] 1673 1674 for file in linkfiles: 1675 ln('../../../lib/%s' % file) 1676 1677 linkfiles = ['coef_specs.inc'] 1678 1679 for file in linkfiles: 1680 ln('../../../Source/DHELAS/%s' % file) 1681 1682 # Return to original PWD 1683 os.chdir(cwd) 1684 1685 if not calls: 1686 calls = 0 1687 return calls
1688
1689 - def get_qed_qcd_orders_from_weighted(self, nexternal, weighted):
1690          """computes the QED/QCD orders from the number of external particles 1691          and the weighted order""" 1692  # n vertices = nexternal - 2 = QED + QCD 1693  # weighted = 2*QED + QCD 1694          QED = weighted - nexternal + 2 1695          QCD = weighted - 2 * QED 1696          return QED, QCD
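#===============================================================================
# Illustrative worked example (not part of the module): inverting
#   nexternal - 2 = QED + QCD   and   WEIGHTED = 2*QED + QCD
# as done in get_qed_qcd_orders_from_weighted above.
#===============================================================================
def qed_qcd_from_weighted(nexternal, weighted):
    QED = weighted - nexternal + 2
    QCD = weighted - 2 * QED
    return QED, QCD

# u u~ > t t~ g (pure QCD): 5 external legs, WEIGHTED = 3  ->  (QED, QCD) = (0, 3)
# e+ e- > t t~  (pure QED): 4 external legs, WEIGHTED = 4  ->  (QED, QCD) = (2, 0)
print(qed_qcd_from_weighted(5, 3))
print(qed_qcd_from_weighted(4, 4))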
1697 1698 1699 1700 #=============================================================================== 1701 # write_lh_order 1702 #=============================================================================== 1703 #test written
1704 - def write_lh_order(self, filename, process_list, OLP='MadLoop'):
1705 """Creates the OLE_order.lh file. This function should be edited according 1706 to the OLP which is used. For now it is generic.""" 1707 1708 1709 if len(process_list)==0: 1710 raise fks_common.FKSProcessError('No matrix elements provided to '+\ 1711 'the function write_lh_order.') 1712 return 1713 1714 # We assume the orders to be common to all Subprocesses 1715 1716 orders = process_list[0].get('orders') 1717 if 'QED' in orders.keys() and 'QCD' in orders.keys(): 1718 QED=orders['QED'] 1719 QCD=orders['QCD'] 1720 elif 'QED' in orders.keys(): 1721 QED=orders['QED'] 1722 QCD=0 1723 elif 'QCD' in orders.keys(): 1724 QED=0 1725 QCD=orders['QCD'] 1726 else: 1727 QED, QCD = self.get_qed_qcd_orders_from_weighted(\ 1728 len(process_list[0].get('legs')), 1729 orders['WEIGHTED']) 1730 1731 replace_dict = {} 1732 replace_dict['mesq'] = 'CHaveraged' 1733 replace_dict['corr'] = ' '.join(process_list[0].\ 1734 get('perturbation_couplings')) 1735 replace_dict['irreg'] = 'CDR' 1736 replace_dict['aspow'] = QCD 1737 replace_dict['aepow'] = QED 1738 replace_dict['modelfile'] = './param_card.dat' 1739 replace_dict['params'] = 'alpha_s' 1740 proc_lines=[] 1741 for proc in process_list: 1742 proc_lines.append('%s -> %s' % \ 1743 (' '.join(str(l['id']) for l in proc['legs'] if not l['state']), 1744 ' '.join(str(l['id']) for l in proc['legs'] if l['state']))) 1745 replace_dict['pdgs'] = '\n'.join(proc_lines) 1746 replace_dict['symfin'] = 'Yes' 1747 content = \ 1748 "#OLE_order written by MadGraph5_aMC@NLO\n\ 1749 \n\ 1750 MatrixElementSquareType %(mesq)s\n\ 1751 CorrectionType %(corr)s\n\ 1752 IRregularisation %(irreg)s\n\ 1753 AlphasPower %(aspow)d\n\ 1754 AlphaPower %(aepow)d\n\ 1755 NJetSymmetrizeFinal %(symfin)s\n\ 1756 ModelFile %(modelfile)s\n\ 1757 Parameters %(params)s\n\ 1758 \n\ 1759 # process\n\ 1760 %(pdgs)s\n\ 1761 " % replace_dict 1762 1763 file = open(filename, 'w') 1764 file.write(content) 1765 file.close 1766 return
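#===============================================================================
# Illustrative sketch (not part of the module): the OLE_order.lh text that the
# template in write_lh_order above produces, filled with hypothetical values
# (AlphasPower=2, AlphaPower=0 and a single g g -> t t~ process line).
#===============================================================================
replace_dict = {'mesq': 'CHaveraged', 'corr': 'QCD', 'irreg': 'CDR',
                'aspow': 2, 'aepow': 0, 'modelfile': './param_card.dat',
                'params': 'alpha_s', 'pdgs': '21 21 -> 6 -6', 'symfin': 'Yes'}
content = ("#OLE_order written by MadGraph5_aMC@NLO\n"
           "\n"
           "MatrixElementSquareType %(mesq)s\n"
           "CorrectionType          %(corr)s\n"
           "IRregularisation        %(irreg)s\n"
           "AlphasPower             %(aspow)d\n"
           "AlphaPower              %(aepow)d\n"
           "NJetSymmetrizeFinal     %(symfin)s\n"
           "ModelFile               %(modelfile)s\n"
           "Parameters              %(params)s\n"
           "\n"
           "# process\n"
           "%(pdgs)s\n") % replace_dict
print(content)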
1767 1768 1769 #=============================================================================== 1770 # write_born_fks 1771 #=============================================================================== 1772 # test written
1773 - def write_born_fks(self, writer, fksborn, fortran_model):
1774 """Export a matrix element to a born.f file in MadFKS format""" 1775 1776 matrix_element = fksborn.born_matrix_element 1777 1778 if not matrix_element.get('processes') or \ 1779 not matrix_element.get('diagrams'): 1780 return 0 1781 1782 if not isinstance(writer, writers.FortranWriter): 1783 raise writers.FortranWriter.FortranWriterError(\ 1784 "writer not FortranWriter") 1785 # Set lowercase/uppercase Fortran code 1786 writers.FortranWriter.downcase = False 1787 1788 replace_dict = {} 1789 1790 # Extract version number and date from VERSION file 1791 info_lines = self.get_mg5_info_lines() 1792 replace_dict['info_lines'] = info_lines 1793 1794 # Extract process info lines 1795 process_lines = self.get_process_info_lines(matrix_element) 1796 replace_dict['process_lines'] = process_lines 1797 1798 1799 # Extract ncomb 1800 ncomb = matrix_element.get_helicity_combinations() 1801 replace_dict['ncomb'] = ncomb 1802 1803 # Extract helicity lines 1804 helicity_lines = self.get_helicity_lines(matrix_element) 1805 replace_dict['helicity_lines'] = helicity_lines 1806 1807 # Extract IC line 1808 ic_line = self.get_ic_line(matrix_element) 1809 replace_dict['ic_line'] = ic_line 1810 1811 # Extract overall denominator 1812 # Averaging initial state color, spin, and identical FS particles 1813 #den_factor_line = get_den_factor_line(matrix_element) 1814 1815 # Extract ngraphs 1816 ngraphs = matrix_element.get_number_of_amplitudes() 1817 replace_dict['ngraphs'] = ngraphs 1818 1819 # Extract nwavefuncs 1820 nwavefuncs = matrix_element.get_number_of_wavefunctions() 1821 replace_dict['nwavefuncs'] = nwavefuncs 1822 1823 # Extract ncolor 1824 ncolor = max(1, len(matrix_element.get('color_basis'))) 1825 replace_dict['ncolor'] = ncolor 1826 1827 # Extract color data lines 1828 color_data_lines = self.get_color_data_lines(matrix_element) 1829 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 1830 1831 # Extract helas calls 1832 helas_calls = fortran_model.get_matrix_element_calls(\ 1833 matrix_element) 1834 replace_dict['helas_calls'] = "\n".join(helas_calls) 1835 1836 # Extract amp2 lines 1837 amp2_lines = self.get_amp2_lines(matrix_element) 1838 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 1839 1840 # Extract JAMP lines 1841 jamp_lines = self.get_JAMP_lines(matrix_element) 1842 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 1843 1844 # Set the size of Wavefunction 1845 if not self.model or any([p.get('spin') in [4,5] for p in self.model.get('particles') if p]): 1846 replace_dict['wavefunctionsize'] = 20 1847 else: 1848 replace_dict['wavefunctionsize'] = 8 1849 1850 # Extract glu_ij_lines 1851 ij_lines = self.get_ij_lines(fksborn) 1852 replace_dict['ij_lines'] = '\n'.join(ij_lines) 1853 1854 # Extract den_factor_lines 1855 den_factor_lines = self.get_den_factor_lines(fksborn) 1856 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines) 1857 1858 # Extract the number of FKS process 1859 replace_dict['nconfs'] = max(len(fksborn.get_fks_info_list()),1) 1860 1861 file = open(os.path.join(_file_path, \ 1862 'iolibs/template_files/born_fks.inc')).read() 1863 file = file % replace_dict 1864 1865 # Write the file 1866 writer.writelines(file) 1867 1868 return len(filter(lambda call: call.find('#') != 0, helas_calls)), ncolor
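#===============================================================================
# Illustrative sketch (not part of the module): the template-substitution
# pattern used by write_born_fks and the other write_* methods above.  The
# template text and keys below are hypothetical stand-ins for the real
# iolibs/template_files/born_fks.inc template.
#===============================================================================
template = """C %(info_lines)s
      INTEGER NCOMB
      PARAMETER (NCOMB=%(ncomb)d)
      INTEGER NCOLOR
      PARAMETER (NCOLOR=%(ncolor)d)
"""
replace_dict = {'info_lines': 'generated by a hypothetical run',
                'ncomb': 16, 'ncolor': 2}
print(template % replace_dict)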
1869 1870
1871 - def write_born_hel(self, writer, fksborn, fortran_model):
1872 """Export a matrix element to a born_hel.f file in MadFKS format""" 1873 1874 matrix_element = fksborn.born_matrix_element 1875 1876 if not matrix_element.get('processes') or \ 1877 not matrix_element.get('diagrams'): 1878 return 0 1879 1880 if not isinstance(writer, writers.FortranWriter): 1881 raise writers.FortranWriter.FortranWriterError(\ 1882 "writer not FortranWriter") 1883 # Set lowercase/uppercase Fortran code 1884 writers.FortranWriter.downcase = False 1885 1886 replace_dict = {} 1887 1888 # Extract version number and date from VERSION file 1889 info_lines = self.get_mg5_info_lines() 1890 replace_dict['info_lines'] = info_lines 1891 1892 # Extract process info lines 1893 process_lines = self.get_process_info_lines(matrix_element) 1894 replace_dict['process_lines'] = process_lines 1895 1896 1897 # Extract ncomb 1898 ncomb = matrix_element.get_helicity_combinations() 1899 replace_dict['ncomb'] = ncomb 1900 1901 # Extract helicity lines 1902 helicity_lines = self.get_helicity_lines(matrix_element) 1903 replace_dict['helicity_lines'] = helicity_lines 1904 1905 # Extract IC line 1906 ic_line = self.get_ic_line(matrix_element) 1907 replace_dict['ic_line'] = ic_line 1908 1909 # Extract overall denominator 1910 # Averaging initial state color, spin, and identical FS particles 1911 #den_factor_line = get_den_factor_line(matrix_element) 1912 1913 # Extract ngraphs 1914 ngraphs = matrix_element.get_number_of_amplitudes() 1915 replace_dict['ngraphs'] = ngraphs 1916 1917 # Extract nwavefuncs 1918 nwavefuncs = matrix_element.get_number_of_wavefunctions() 1919 replace_dict['nwavefuncs'] = nwavefuncs 1920 1921 # Extract ncolor 1922 ncolor = max(1, len(matrix_element.get('color_basis'))) 1923 replace_dict['ncolor'] = ncolor 1924 1925 # Extract color data lines 1926 color_data_lines = self.get_color_data_lines(matrix_element) 1927 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 1928 1929 # Extract amp2 lines 1930 amp2_lines = self.get_amp2_lines(matrix_element) 1931 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 1932 1933 # Extract JAMP lines 1934 jamp_lines = self.get_JAMP_lines(matrix_element) 1935 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 1936 1937 # Extract den_factor_lines 1938 den_factor_lines = self.get_den_factor_lines(fksborn) 1939 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines) 1940 1941 # Extract the number of FKS process 1942 replace_dict['nconfs'] = len(fksborn.get_fks_info_list()) 1943 1944 file = open(os.path.join(_file_path, \ 1945 'iolibs/template_files/born_fks_hel.inc')).read() 1946 file = file % replace_dict 1947 1948 # Write the file 1949 writer.writelines(file) 1950 1951 return
1952 1953 1954 #=============================================================================== 1955 # write_born_sf_fks 1956 #=============================================================================== 1957 #test written
1958 - def write_sborn_sf(self, writer, color_links, fortran_model):
1959 """Creates the sborn_sf.f file, containing the calls to the different 1960 color linked borns""" 1961 1962 replace_dict = {} 1963 nborns = len(color_links) 1964 ifkss = [] 1965 iborns = [] 1966 mms = [] 1967 nns = [] 1968 iflines = "\n" 1969 1970 #header for the sborn_sf.f file 1971 file = """subroutine sborn_sf(p_born,m,n,wgt) 1972 implicit none 1973 include "nexternal.inc" 1974 double precision p_born(0:3,nexternal-1),wgt 1975 double complex wgt1(2) 1976 integer m,n \n""" 1977 1978 if nborns > 0: 1979 1980 for i, c_link in enumerate(color_links): 1981 iborn = i+1 1982 1983 iff = {True : 'if', False : 'elseif'}[i==0] 1984 1985 m, n = c_link['link'] 1986 1987 if m != n: 1988 iflines += \ 1989 "c b_sf_%(iborn)3.3d links partons %(m)d and %(n)d \n\ 1990 %(iff)s ((m.eq.%(m)d .and. n.eq.%(n)d).or.(m.eq.%(n)d .and. n.eq.%(m)d)) then \n\ 1991 call sb_sf_%(iborn)3.3d(p_born,wgt)\n\n" \ 1992 %{'m':m, 'n': n, 'iff': iff, 'iborn': iborn} 1993 else: 1994 iflines += \ 1995 "c b_sf_%(iborn)3.3d links partons %(m)d and %(n)d \n\ 1996 %(iff)s (m.eq.%(m)d .and. n.eq.%(n)d) then \n\ 1997 call sb_sf_%(iborn)3.3d(p_born,wgt)\n\n" \ 1998 %{'m':m, 'n': n, 'iff': iff, 'iborn': iborn} 1999 2000 2001 file += iflines + \ 2002 """else 2003 wgt = 0d0 2004 endif 2005 2006 return 2007 end""" 2008 elif nborns == 0: 2009 #write a dummy file 2010 file+=""" 2011 c This is a dummy function because 2012 c this subdir has no soft singularities 2013 wgt = 0d0 2014 2015 return 2016 end""" 2017 # Write the end of the file 2018 2019 writer.writelines(file)
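#===============================================================================
# Illustrative sketch (not part of the module): the if/elseif dispatch that
# write_sborn_sf above emits, for two hypothetical color links (1,2) and (1,1).
#===============================================================================
color_links = [{'link': (1, 2)}, {'link': (1, 1)}]        # hypothetical links
iflines = "\n"
for i, c_link in enumerate(color_links):
    iborn = i + 1
    iff = 'if' if i == 0 else 'elseif'
    m, n = c_link['link']
    if m != n:
        iflines += (
            "c b_sf_%(iborn)3.3d links partons %(m)d and %(n)d \n"
            "%(iff)s ((m.eq.%(m)d .and. n.eq.%(n)d).or.(m.eq.%(n)d .and. n.eq.%(m)d)) then \n"
            "call sb_sf_%(iborn)3.3d(p_born,wgt)\n\n"
            % {'m': m, 'n': n, 'iff': iff, 'iborn': iborn})
    else:
        iflines += (
            "c b_sf_%(iborn)3.3d links partons %(m)d and %(n)d \n"
            "%(iff)s (m.eq.%(m)d .and. n.eq.%(n)d) then \n"
            "call sb_sf_%(iborn)3.3d(p_born,wgt)\n\n"
            % {'m': m, 'n': n, 'iff': iff, 'iborn': iborn})
print(iflines)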
2020 2021 2022 #=============================================================================== 2023 # write_b_sf_fks 2024 #=============================================================================== 2025 #test written
2026 - def write_b_sf_fks(self, writer, fksborn, i, fortran_model):
2027 """Create the b_sf_xxx.f file for the soft linked born in MadFKS format""" 2028 2029 matrix_element = copy.copy(fksborn.born_matrix_element) 2030 2031 if not matrix_element.get('processes') or \ 2032 not matrix_element.get('diagrams'): 2033 return 0 2034 2035 if not isinstance(writer, writers.FortranWriter): 2036 raise writers.FortranWriter.FortranWriterError(\ 2037 "writer not FortranWriter") 2038 # Set lowercase/uppercase Fortran code 2039 writers.FortranWriter.downcase = False 2040 2041 iborn = i + 1 2042 link = fksborn.color_links[i] 2043 2044 replace_dict = {} 2045 2046 replace_dict['iborn'] = iborn 2047 2048 # Extract version number and date from VERSION file 2049 info_lines = self.get_mg5_info_lines() 2050 replace_dict['info_lines'] = info_lines 2051 2052 # Extract process info lines 2053 process_lines = self.get_process_info_lines(matrix_element) 2054 replace_dict['process_lines'] = process_lines + \ 2055 "\nc spectators: %d %d \n" % tuple(link['link']) 2056 2057 # Extract ncomb 2058 ncomb = matrix_element.get_helicity_combinations() 2059 replace_dict['ncomb'] = ncomb 2060 2061 # Extract helicity lines 2062 helicity_lines = self.get_helicity_lines(matrix_element) 2063 replace_dict['helicity_lines'] = helicity_lines 2064 2065 # Extract IC line 2066 ic_line = self.get_ic_line(matrix_element) 2067 replace_dict['ic_line'] = ic_line 2068 2069 # Extract den_factor_lines 2070 den_factor_lines = self.get_den_factor_lines(fksborn) 2071 replace_dict['den_factor_lines'] = '\n'.join(den_factor_lines) 2072 2073 # Extract ngraphs 2074 ngraphs = matrix_element.get_number_of_amplitudes() 2075 replace_dict['ngraphs'] = ngraphs 2076 2077 # Extract nwavefuncs 2078 nwavefuncs = matrix_element.get_number_of_wavefunctions() 2079 replace_dict['nwavefuncs'] = nwavefuncs 2080 2081 # Extract ncolor 2082 ncolor1 = max(1, len(link['orig_basis'])) 2083 replace_dict['ncolor1'] = ncolor1 2084 ncolor2 = max(1, len(link['link_basis'])) 2085 replace_dict['ncolor2'] = ncolor2 2086 2087 # Extract color data lines 2088 color_data_lines = self.get_color_data_lines_from_color_matrix(\ 2089 link['link_matrix']) 2090 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 2091 2092 # Extract amp2 lines 2093 amp2_lines = self.get_amp2_lines(matrix_element) 2094 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 2095 2096 # Extract JAMP lines 2097 jamp_lines = self.get_JAMP_lines(matrix_element) 2098 new_jamp_lines = [] 2099 for line in jamp_lines: 2100 line = string.replace(line, 'JAMP', 'JAMP1') 2101 new_jamp_lines.append(line) 2102 replace_dict['jamp1_lines'] = '\n'.join(new_jamp_lines) 2103 2104 matrix_element.set('color_basis', link['link_basis'] ) 2105 jamp_lines = self.get_JAMP_lines(matrix_element) 2106 new_jamp_lines = [] 2107 for line in jamp_lines: 2108 line = string.replace(line, 'JAMP', 'JAMP2') 2109 new_jamp_lines.append(line) 2110 replace_dict['jamp2_lines'] = '\n'.join(new_jamp_lines) 2111 2112 2113 # Extract the number of FKS process 2114 replace_dict['nconfs'] = len(fksborn.get_fks_info_list()) 2115 2116 file = open(os.path.join(_file_path, \ 2117 'iolibs/template_files/b_sf_xxx_fks.inc')).read() 2118 file = file % replace_dict 2119 2120 # Write the file 2121 writer.writelines(file) 2122 2123 return 0 , ncolor1
2124 2125 2126 #=============================================================================== 2127 # write_born_nhel_file 2128 #=============================================================================== 2129 #test written
2130 - def write_born_nhel_file(self, writer, matrix_element, nflows, fortran_model, ncolor):
2131 """Write the born_nhel.inc file for MG4.""" 2132 2133 ncomb = matrix_element.get_helicity_combinations() 2134 file = " integer max_bhel, max_bcol \n" 2135 file = file + "parameter (max_bhel=%d)\nparameter(max_bcol=%d)" % \ 2136 (ncomb, nflows) 2137 2138 # Write the file 2139 writer.writelines(file) 2140 2141 return True
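#===============================================================================
# Illustrative sketch (not part of the module): the born_nhel.inc content
# written above, for hypothetical values ncomb=16 and nflows=2.
#===============================================================================
ncomb, nflows = 16, 2
file_content = " integer max_bhel, max_bcol \n"
file_content += "parameter (max_bhel=%d)\nparameter(max_bcol=%d)" % (ncomb, nflows)
print(file_content)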
2142 2143 #=============================================================================== 2144 # write_fks_info_file 2145 #===============================================================================
2146 - def write_nfksconfigs_file(self, writer, fksborn, fortran_model):
2147          """Writes the content of nFKSconfigs.inc, which just gives the 2148          total number of FKS directories as a parameter. 2149          nFKSconfigs is always >=1 (a fake configuration is used for LOonly)""" 2150          replace_dict = {} 2151          replace_dict['nconfs'] = max(len(fksborn.get_fks_info_list()), 1) 2152          content = \ 2153  """      INTEGER FKS_CONFIGS 2154        PARAMETER (FKS_CONFIGS=%(nconfs)d) 2155         2156  """ % replace_dict 2157   2158          writer.writelines(content)
2159 2160 2161 #=============================================================================== 2162 # write_fks_info_file 2163 #===============================================================================
2164 - def write_fks_info_file(self, writer, fksborn, fortran_model): #test_written
2165 """Writes the content of fks_info.inc, which lists the informations on the 2166 possible splittings of the born ME. 2167 nconfs is always >=1 (use a fake configuration for LOonly). 2168 The fake configuration use an 'antigluon' (id -21, color=8) as i_fks and 2169 the last colored particle as j_fks.""" 2170 2171 replace_dict = {} 2172 fks_info_list = fksborn.get_fks_info_list() 2173 replace_dict['nconfs'] = max(len(fks_info_list), 1) 2174 2175 # this is for processes with 'real' or 'all' as NLO mode 2176 if len(fks_info_list) > 0: 2177 fks_i_values = ', '.join(['%d' % info['fks_info']['i'] \ 2178 for info in fks_info_list]) 2179 fks_j_values = ', '.join(['%d' % info['fks_info']['j'] \ 2180 for info in fks_info_list]) 2181 2182 col_lines = [] 2183 pdg_lines = [] 2184 charge_lines = [] 2185 fks_j_from_i_lines = [] 2186 for i, info in enumerate(fks_info_list): 2187 col_lines.append( \ 2188 'DATA (PARTICLE_TYPE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /' \ 2189 % (i + 1, ', '.join('%d' % col for col in fksborn.real_processes[info['n_me']-1].colors) )) 2190 pdg_lines.append( \ 2191 'DATA (PDG_TYPE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /' \ 2192 % (i + 1, ', '.join('%d' % pdg for pdg in info['pdgs']))) 2193 charge_lines.append(\ 2194 'DATA (PARTICLE_CHARGE_D(%d, IPOS), IPOS=1, NEXTERNAL) / %s /'\ 2195 % (i + 1, ', '.join('%19.15fd0' % charg\ 2196 for charg in fksborn.real_processes[info['n_me']-1].charges) )) 2197 fks_j_from_i_lines.extend(self.get_fks_j_from_i_lines(fksborn.real_processes[info['n_me']-1],\ 2198 i + 1)) 2199 else: 2200 # this is for 'LOonly', generate a fake FKS configuration with 2201 # - i_fks = nexternal, pdg type = -21 and color =8 2202 # - j_fks = the last colored particle 2203 bornproc = fksborn.born_matrix_element.get('processes')[0] 2204 pdgs = [l.get('id') for l in bornproc.get('legs')] + [-21] 2205 colors = [l.get('color') for l in bornproc.get('legs')] + [8] 2206 charges = [0.] 
* len(colors) 2207 2208 fks_i = len(colors) 2209 # use the first colored particle if it exists, or 2210 # just the first 2211 fks_j=1 2212 for cpos, col in enumerate(colors[:-1]): 2213 if col != 1: 2214 fks_j = cpos+1 2215 2216 fks_i_values = str(fks_i) 2217 fks_j_values = str(fks_j) 2218 col_lines = ['DATA (PARTICLE_TYPE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \ 2219 % ', '.join([str(col) for col in colors])] 2220 pdg_lines = ['DATA (PDG_TYPE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \ 2221 % ', '.join([str(pdg) for pdg in pdgs])] 2222 charge_lines = ['DATA (PARTICLE_CHARGE_D(1, IPOS), IPOS=1, NEXTERNAL) / %s /' \ 2223 % ', '.join('%19.15fd0' % charg for charg in charges)] 2224 fks_j_from_i_lines = ['DATA (FKS_J_FROM_I_D(1, %d, JPOS), JPOS = 0, 1) / 1, %d /' \ 2225 % (fks_i, fks_j)] 2226 2227 2228 replace_dict['fks_i_line'] = "data fks_i_D / %s /" % fks_i_values 2229 replace_dict['fks_j_line'] = "data fks_j_D / %s /" % fks_j_values 2230 replace_dict['col_lines'] = '\n'.join(col_lines) 2231 replace_dict['pdg_lines'] = '\n'.join(pdg_lines) 2232 replace_dict['charge_lines'] = '\n'.join(charge_lines) 2233 replace_dict['fks_j_from_i_lines'] = '\n'.join(fks_j_from_i_lines) 2234 2235 content = \ 2236 """ INTEGER IPOS, JPOS 2237 INTEGER FKS_I_D(%(nconfs)d), FKS_J_D(%(nconfs)d) 2238 INTEGER FKS_J_FROM_I_D(%(nconfs)d, NEXTERNAL, 0:NEXTERNAL) 2239 INTEGER PARTICLE_TYPE_D(%(nconfs)d, NEXTERNAL), PDG_TYPE_D(%(nconfs)d, NEXTERNAL) 2240 REAL*8 PARTICLE_CHARGE_D(%(nconfs)d, NEXTERNAL) 2241 2242 %(fks_i_line)s 2243 %(fks_j_line)s 2244 2245 %(fks_j_from_i_lines)s 2246 2247 C 2248 C Particle type: 2249 C octet = 8, triplet = 3, singlet = 1 2250 %(col_lines)s 2251 2252 C 2253 C Particle type according to PDG: 2254 C 2255 %(pdg_lines)s 2256 2257 C 2258 C Particle charge: 2259 C charge is set 0. with QCD corrections, which is irrelevant 2260 %(charge_lines)s 2261 """ % replace_dict 2262 if not isinstance(writer, writers.FortranWriter): 2263 raise writers.FortranWriter.FortranWriterError(\ 2264 "writer not FortranWriter") 2265 # Set lowercase/uppercase Fortran code 2266 writers.FortranWriter.downcase = False 2267 2268 writer.writelines(content) 2269 2270 return True
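#===============================================================================
# Illustrative sketch (not part of the module): how the LOonly branch above
# picks the fake FKS pair, using hypothetical Born legs u u~ > e+ e-
# (colors 3, 3, 1, 1) plus the appended 'antigluon' (color 8).
#===============================================================================
colors = [3, 3, 1, 1] + [8]      # hypothetical Born colors + fake i_fks
fks_i = len(colors)              # the appended antigluon is always i_fks
fks_j = 1
for cpos, col in enumerate(colors[:-1]):
    if col != 1:
        fks_j = cpos + 1         # keep the last colored Born particle
print((fks_i, fks_j))            # (5, 2): the u~ is the last colored leg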
2271 2272 2273 #=============================================================================== 2274 # write_matrix_element_fks 2275 #=============================================================================== 2276 #test written
2277 - def write_matrix_element_fks(self, writer, matrix_element, n, fortran_model):
2278 """Export a matrix element to a matrix.f file in MG4 madevent format""" 2279 2280 if not matrix_element.get('processes') or \ 2281 not matrix_element.get('diagrams'): 2282 return 0,0 2283 2284 if not isinstance(writer, writers.FortranWriter): 2285 raise writers.FortranWriter.FortranWriterError(\ 2286 "writer not FortranWriter") 2287 # Set lowercase/uppercase Fortran code 2288 writers.FortranWriter.downcase = False 2289 2290 replace_dict = {} 2291 replace_dict['N_me'] = n 2292 2293 # Extract version number and date from VERSION file 2294 info_lines = self.get_mg5_info_lines() 2295 replace_dict['info_lines'] = info_lines 2296 2297 # Extract process info lines 2298 process_lines = self.get_process_info_lines(matrix_element) 2299 replace_dict['process_lines'] = process_lines 2300 2301 # Extract ncomb 2302 ncomb = matrix_element.get_helicity_combinations() 2303 replace_dict['ncomb'] = ncomb 2304 2305 # Extract helicity lines 2306 helicity_lines = self.get_helicity_lines(matrix_element) 2307 replace_dict['helicity_lines'] = helicity_lines 2308 2309 # Extract IC line 2310 ic_line = self.get_ic_line(matrix_element) 2311 replace_dict['ic_line'] = ic_line 2312 2313 # Extract overall denominator 2314 # Averaging initial state color, spin, and identical FS particles 2315 den_factor_line = self.get_den_factor_line(matrix_element) 2316 replace_dict['den_factor_line'] = den_factor_line 2317 2318 # Extract ngraphs 2319 ngraphs = matrix_element.get_number_of_amplitudes() 2320 replace_dict['ngraphs'] = ngraphs 2321 2322 # Extract ncolor 2323 ncolor = max(1, len(matrix_element.get('color_basis'))) 2324 replace_dict['ncolor'] = ncolor 2325 2326 # Extract color data lines 2327 color_data_lines = self.get_color_data_lines(matrix_element) 2328 replace_dict['color_data_lines'] = "\n".join(color_data_lines) 2329 2330 # Extract helas calls 2331 helas_calls = fortran_model.get_matrix_element_calls(\ 2332 matrix_element) 2333 replace_dict['helas_calls'] = "\n".join(helas_calls) 2334 2335 # Extract nwavefuncs (important to place after get_matrix_element_calls 2336 # so that 'me_id' is set) 2337 nwavefuncs = matrix_element.get_number_of_wavefunctions() 2338 replace_dict['nwavefuncs'] = nwavefuncs 2339 2340 # Extract amp2 lines 2341 amp2_lines = self.get_amp2_lines(matrix_element) 2342 replace_dict['amp2_lines'] = '\n'.join(amp2_lines) 2343 2344 # Set the size of Wavefunction 2345 if not self.model or any([p.get('spin') in [4,5] for p in self.model.get('particles') if p]): 2346 replace_dict['wavefunctionsize'] = 20 2347 else: 2348 replace_dict['wavefunctionsize'] = 8 2349 2350 # Extract JAMP lines 2351 jamp_lines = self.get_JAMP_lines(matrix_element) 2352 2353 replace_dict['jamp_lines'] = '\n'.join(jamp_lines) 2354 2355 realfile = open(os.path.join(_file_path, \ 2356 'iolibs/template_files/realmatrix_fks.inc')).read() 2357 2358 realfile = realfile % replace_dict 2359 2360 # Write the file 2361 writer.writelines(realfile) 2362 2363 return len(filter(lambda call: call.find('#') != 0, helas_calls)), ncolor
2364 2365 2366 #=============================================================================== 2367 # write_pdf_file 2368 #===============================================================================
2369 - def write_pdf_file(self, writer, matrix_element, n, fortran_model):
2370 #test written 2371 """Write the auto_dsig.f file for MadFKS, which contains 2372 pdf call information""" 2373 2374 if not matrix_element.get('processes') or \ 2375 not matrix_element.get('diagrams'): 2376 return 0 2377 2378 nexternal, ninitial = matrix_element.get_nexternal_ninitial() 2379 2380 if ninitial < 1 or ninitial > 2: 2381 raise writers.FortranWriter.FortranWriterError, \ 2382 """Need ninitial = 1 or 2 to write auto_dsig file""" 2383 2384 replace_dict = {} 2385 2386 replace_dict['N_me'] = n 2387 2388 # Extract version number and date from VERSION file 2389 info_lines = self.get_mg5_info_lines() 2390 replace_dict['info_lines'] = info_lines 2391 2392 # Extract process info lines 2393 process_lines = self.get_process_info_lines(matrix_element) 2394 replace_dict['process_lines'] = process_lines 2395 2396 pdf_vars, pdf_data, pdf_lines = \ 2397 self.get_pdf_lines_mir(matrix_element, ninitial, False, False) 2398 replace_dict['pdf_vars'] = pdf_vars 2399 replace_dict['pdf_data'] = pdf_data 2400 replace_dict['pdf_lines'] = pdf_lines 2401 2402 pdf_vars_mirr, pdf_data_mirr, pdf_lines_mirr = \ 2403 self.get_pdf_lines_mir(matrix_element, ninitial, False, True) 2404 replace_dict['pdf_lines_mirr'] = pdf_lines_mirr 2405 2406 file = open(os.path.join(_file_path, \ 2407 'iolibs/template_files/parton_lum_n_fks.inc')).read() 2408 file = file % replace_dict 2409 2410 # Write the file 2411 writer.writelines(file)
2412 2413 2414 2415 #=============================================================================== 2416 # write_coloramps_file 2417 #=============================================================================== 2418 #test written
2419 - def write_coloramps_file(self, writer, mapconfigs, matrix_element, fortran_model):
2420 """Write the coloramps.inc file for MadEvent""" 2421 2422 lines = [] 2423 lines.append( "logical icolamp(%d,%d,1)" % \ 2424 (max(len(matrix_element.get('color_basis').keys()), 1), 2425 len(mapconfigs))) 2426 2427 lines += self.get_icolamp_lines(mapconfigs, matrix_element, 1) 2428 2429 # Write the file 2430 writer.writelines(lines) 2431 2432 return True
2433 2434 2435 #=============================================================================== 2436 # write_leshouche_file 2437 #=============================================================================== 2438 #test written
2439 - def write_leshouche_file(self, writer, matrix_element, fortran_model):
2440 """Write the leshouche.inc file for MG4""" 2441 2442 # Extract number of external particles 2443 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2444 2445 lines = [] 2446 for iproc, proc in enumerate(matrix_element.get('processes')): 2447 legs = proc.get_legs_with_decays() 2448 lines.append("DATA (IDUP(i,%d),i=1,%d)/%s/" % \ 2449 (iproc + 1, nexternal, 2450 ",".join([str(l.get('id')) for l in legs]))) 2451 for i in [1, 2]: 2452 lines.append("DATA (MOTHUP(%d,i,%3r),i=1,%2r)/%s/" % \ 2453 (i, iproc + 1, nexternal, 2454 ",".join([ "%3r" % 0 ] * ninitial + \ 2455 [ "%3r" % i ] * (nexternal - ninitial)))) 2456 2457 # Here goes the color connections corresponding to the JAMPs 2458 # Only one output, for the first subproc! 2459 if iproc == 0: 2460 # If no color basis, just output trivial color flow 2461 if not matrix_element.get('color_basis'): 2462 for i in [1, 2]: 2463 lines.append("DATA (ICOLUP(%d,i, 1),i=1,%2r)/%s/" % \ 2464 (i, nexternal, 2465 ",".join([ "%3r" % 0 ] * nexternal))) 2466 color_flow_list = [] 2467 2468 else: 2469 # First build a color representation dictionnary 2470 repr_dict = {} 2471 for l in legs: 2472 repr_dict[l.get('number')] = \ 2473 proc.get('model').get_particle(l.get('id')).get_color()\ 2474 * (-1)**(1+l.get('state')) 2475 # Get the list of color flows 2476 color_flow_list = \ 2477 matrix_element.get('color_basis').color_flow_decomposition(repr_dict, 2478 ninitial) 2479 # And output them properly 2480 for cf_i, color_flow_dict in enumerate(color_flow_list): 2481 for i in [0, 1]: 2482 lines.append("DATA (ICOLUP(%d,i,%3r),i=1,%2r)/%s/" % \ 2483 (i + 1, cf_i + 1, nexternal, 2484 ",".join(["%3r" % color_flow_dict[l.get('number')][i] \ 2485 for l in legs]))) 2486 2487 # Write the file 2488 writer.writelines(lines) 2489 2490 return len(color_flow_list)
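#===============================================================================
# Illustrative sketch (not part of the module): the IDUP/MOTHUP lines written
# above for a hypothetical 2 -> 2 subprocess u u~ > e+ e- (first process only,
# trivial color flow omitted).
#===============================================================================
nexternal, ninitial, iproc = 4, 2, 0
ids = [2, -2, -11, 11]                              # hypothetical PDG codes
lines = ["DATA (IDUP(i,%d),i=1,%d)/%s/" %
         (iproc + 1, nexternal, ",".join(str(i) for i in ids))]
for i in [1, 2]:
    lines.append("DATA (MOTHUP(%d,i,%3r),i=1,%2r)/%s/" %
                 (i, iproc + 1, nexternal,
                  ",".join(["%3r" % 0] * ninitial +
                           ["%3r" % i] * (nexternal - ninitial))))
print('\n'.join(lines))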
2491 2492 2493 #=============================================================================== 2494 # write_configs_file 2495 #=============================================================================== 2496 #test_written
2497 - def write_configs_file(self, writer, matrix_element, fortran_model):
2498 """Write the configs.inc file for MadEvent""" 2499 2500 # Extract number of external particles 2501 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2502 lines = [] 2503 2504 iconfig = 0 2505 2506 s_and_t_channels = [] 2507 mapconfigs = [] 2508 2509 model = matrix_element.get('processes')[0].get('model') 2510 # new_pdg = model.get_first_non_pdg() 2511 2512 base_diagrams = matrix_element.get('base_amplitude').get('diagrams') 2513 model = matrix_element.get('base_amplitude').get('process').get('model') 2514 minvert = min([max([len(vert.get('legs')) for vert in \ 2515 diag.get('vertices')]) for diag in base_diagrams]) 2516 2517 for idiag, diag in enumerate(base_diagrams): 2518 if any([len(vert.get('legs')) > minvert for vert in 2519 diag.get('vertices')]): 2520 # Only 3-vertices allowed in configs.inc 2521 continue 2522 iconfig = iconfig + 1 2523 helas_diag = matrix_element.get('diagrams')[idiag] 2524 mapconfigs.append(helas_diag.get('number')) 2525 lines.append("# Diagram %d, Amplitude %d" % \ 2526 (helas_diag.get('number'),helas_diag.get('amplitudes')[0]['number'])) 2527 # Correspondance between the config and the amplitudes 2528 lines.append("data mapconfig(%4d)/%4d/" % (iconfig, 2529 helas_diag.get('amplitudes')[0]['number'])) 2530 2531 # Need to reorganize the topology so that we start with all 2532 # final state external particles and work our way inwards 2533 schannels, tchannels = helas_diag.get('amplitudes')[0].\ 2534 get_s_and_t_channels(ninitial, model, 990) 2535 2536 s_and_t_channels.append([schannels, tchannels]) 2537 2538 # Write out propagators for s-channel and t-channel vertices 2539 allchannels = schannels 2540 if len(tchannels) > 1: 2541 # Write out tchannels only if there are any non-trivial ones 2542 allchannels = schannels + tchannels 2543 2544 for vert in allchannels: 2545 daughters = [leg.get('number') for leg in vert.get('legs')[:-1]] 2546 last_leg = vert.get('legs')[-1] 2547 lines.append("data (iforest(i,%3d,%4d),i=1,%d)/%s/" % \ 2548 (last_leg.get('number'), iconfig, len(daughters), 2549 ",".join(["%3d" % d for d in daughters]))) 2550 if vert in schannels: 2551 lines.append("data sprop(%4d,%4d)/%8d/" % \ 2552 (last_leg.get('number'), iconfig, 2553 last_leg.get('id'))) 2554 elif vert in tchannels[:-1]: 2555 lines.append("data tprid(%4d,%4d)/%8d/" % \ 2556 (last_leg.get('number'), iconfig, 2557 abs(last_leg.get('id')))) 2558 2559 # Write out number of configs 2560 lines.append("# Number of configs") 2561 lines.append("data mapconfig(0)/%4d/" % iconfig) 2562 2563 # Write the file 2564 writer.writelines(lines) 2565 2566 return iconfig, mapconfigs, s_and_t_channels
2567 2568 2569 #=============================================================================== 2570 # write_decayBW_file 2571 #=============================================================================== 2572 #test written
2573 - def write_decayBW_file(self, writer, s_and_t_channels):
2574 """Write the decayBW.inc file for MadEvent""" 2575 2576 lines = [] 2577 2578 booldict = {False: ".false.", True: ".false."} 2579 ####Changed by MZ 2011-11-23!!!! 2580 2581 for iconf, config in enumerate(s_and_t_channels): 2582 schannels = config[0] 2583 for vertex in schannels: 2584 # For the resulting leg, pick out whether it comes from 2585 # decay or not, as given by the from_group flag 2586 leg = vertex.get('legs')[-1] 2587 lines.append("data gForceBW(%d,%d)/%s/" % \ 2588 (leg.get('number'), iconf + 1, 2589 booldict[leg.get('from_group')])) 2590 2591 # Write the file 2592 writer.writelines(lines) 2593 2594 return True
2595 2596 2597 #=============================================================================== 2598 # write_dname_file 2599 #===============================================================================
2600 - def write_dname_file(self, writer, matrix_element, fortran_model):
2601 """Write the dname.mg file for MG4""" 2602 2603 line = "DIRNAME=P%s" % \ 2604 matrix_element.get('processes')[0].shell_string() 2605 2606 # Write the file 2607 writer.write(line + "\n") 2608 2609 return True
2610 2611 2612 #=============================================================================== 2613 # write_iproc_file 2614 #===============================================================================
2615 - def write_iproc_file(self, writer, me_number):
2616 """Write the iproc.dat file for MG4""" 2617 2618 line = "%d" % (me_number + 1) 2619 2620 # Write the file 2621 for line_to_write in writer.write_line(line): 2622 writer.write(line_to_write) 2623 return True
2624 2625 2626 #=============================================================================== 2627 # Helper functions 2628 #=============================================================================== 2629 2630 2631 #=============================================================================== 2632 # get_fks_j_from_i_lines 2633 #=============================================================================== 2634
2635 - def get_fks_j_from_i_lines(self, me, i = 0): #test written
2636          """generate the lines for fks.inc describing the initialization of the 2637          fks_j_from_i array""" 2638          lines = [] 2639          if not me.isfinite: 2640              for ii, js in me.fks_j_from_i.items(): 2641                  if js: 2642                      lines.append('DATA (FKS_J_FROM_I_D(%d, %d, JPOS), JPOS = 0, %d)  / %d, %s /' \ 2643                                   % (i, ii, len(js), len(js), ', '.join(["%d" % j for j in js]))) 2644          else: 2645              lines.append('DATA (FKS_J_FROM_I_D(%d, JPOS), JPOS = 0, %d)  / %d, %s /' \ 2646                               % (2, 1, 1, '1')) 2647          lines.append('') 2648   2649          return lines 2650   2651   2652  #=============================================================================== 2653  # get_leshouche_lines 2654  #===============================================================================
2655 - def get_leshouche_lines(self, matrix_element, ime):
2656 #test written 2657 """Write the leshouche.inc file for MG4""" 2658 2659 # Extract number of external particles 2660 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2661 2662 lines = [] 2663 for iproc, proc in enumerate(matrix_element.get('processes')): 2664 legs = proc.get_legs_with_decays() 2665 lines.append("I %4d %4d %s" % \ 2666 (ime, iproc + 1, 2667 " ".join([str(l.get('id')) for l in legs]))) 2668 for i in [1, 2]: 2669 lines.append("M %4d %4d %4d %s" % \ 2670 (ime, i, iproc + 1, 2671 " ".join([ "%3d" % 0 ] * ninitial + \ 2672 [ "%3d" % i ] * (nexternal - ninitial)))) 2673 2674 # Here goes the color connections corresponding to the JAMPs 2675 # Only one output, for the first subproc! 2676 if iproc == 0: 2677 # If no color basis, just output trivial color flow 2678 if not matrix_element.get('color_basis'): 2679 for i in [1, 2]: 2680 lines.append("C %4d %4d 1 %s" % \ 2681 (ime, i, 2682 " ".join([ "%3d" % 0 ] * nexternal))) 2683 color_flow_list = [] 2684 nflow = 1 2685 2686 else: 2687 # First build a color representation dictionnary 2688 repr_dict = {} 2689 for l in legs: 2690 repr_dict[l.get('number')] = \ 2691 proc.get('model').get_particle(l.get('id')).get_color()\ 2692 * (-1)**(1+l.get('state')) 2693 # Get the list of color flows 2694 color_flow_list = \ 2695 matrix_element.get('color_basis').color_flow_decomposition(repr_dict, 2696 ninitial) 2697 # And output them properly 2698 for cf_i, color_flow_dict in enumerate(color_flow_list): 2699 for i in [0, 1]: 2700 lines.append("C %4d %4d %4d %s" % \ 2701 (ime, i + 1, cf_i + 1, 2702 " ".join(["%3d" % color_flow_dict[l.get('number')][i] \ 2703 for l in legs]))) 2704 2705 nflow = len(color_flow_list) 2706 2707 nproc = len(matrix_element.get('processes')) 2708 2709 return lines, nproc, nflow
2710 2711 2712 #=============================================================================== 2713 # get_den_factor_lines 2714 #===============================================================================
2715 - def get_den_factor_lines(self, fks_born):
2716          """returns the lines with the information on the denominator, taking care 2717          of the identical particle factors in the various real emissions""" 2718   2719          lines = [] 2720          info_list = fks_born.get_fks_info_list() 2721          if info_list: 2722              # if the reals have been generated, fill with the corresponding average factor 2723              lines.append('INTEGER IDEN_VALUES(%d)' % len(info_list)) 2724              lines.append('DATA IDEN_VALUES /' + \ 2725                           ', '.join(['%d' % ( 2726                          fks_born.born_matrix_element.get_denominator_factor() ) \ 2727                           for info in info_list]) + '/') 2728          else: 2729              # otherwise use the born 2730              lines.append('INTEGER IDEN_VALUES(1)') 2731              lines.append('DATA IDEN_VALUES / %d /' \ 2732                      % fks_born.born_matrix_element.get_denominator_factor()) 2733   2734          return lines
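#===============================================================================
# Illustrative sketch (not part of the module): the lines returned by
# get_den_factor_lines above for a hypothetical case with 3 FKS
# configurations and a Born averaging factor of 36.
#===============================================================================
n_confs, iden = 3, 36            # hypothetical values
lines = ['INTEGER IDEN_VALUES(%d)' % n_confs,
         'DATA IDEN_VALUES /' + ', '.join(['%d' % iden] * n_confs) + '/']
print('\n'.join(lines))
# INTEGER IDEN_VALUES(3)
# DATA IDEN_VALUES /36, 36, 36/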
2735 2736 2737 #=============================================================================== 2738 # get_ij_lines 2739 #===============================================================================
2740 - def get_ij_lines(self, fks_born):
2741 """returns the lines with the information on the particle number of the born 2742 that splits""" 2743 info_list = fks_born.get_fks_info_list() 2744 lines = [] 2745 if info_list: 2746 # if the reals have been generated, fill with the corresponding value of ij if 2747 # ij is massless, or with 0 if ij is massive (no collinear singularity) 2748 ij_list = [info['fks_info']['ij']if \ 2749 fks_born.born_matrix_element['processes'][0]['legs'][info['fks_info']['ij']-1]['massless'] \ 2750 else 0 for info in info_list] 2751 lines.append('INTEGER IJ_VALUES(%d)' % len(info_list)) 2752 lines.append('DATA IJ_VALUES /' + ', '.join(['%d' % ij for ij in ij_list]) + '/') 2753 else: 2754 #otherwise just put the first leg 2755 lines.append('INTEGER IJ_VALUES(1)') 2756 lines.append('DATA IJ_VALUES / 1 /') 2757 2758 return lines
2759 2760
2761 - def get_pdf_lines_mir(self, matrix_element, ninitial, subproc_group = False,\ 2762 mirror = False): #test written
2763 """Generate the PDF lines for the auto_dsig.f file""" 2764 2765 processes = matrix_element.get('processes') 2766 model = processes[0].get('model') 2767 2768 pdf_definition_lines = "" 2769 pdf_data_lines = "" 2770 pdf_lines = "" 2771 2772 if ninitial == 1: 2773 pdf_lines = "PD(0) = 0d0\nIPROC = 0\n" 2774 for i, proc in enumerate(processes): 2775 process_line = proc.base_string() 2776 pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line 2777 pdf_lines = pdf_lines + "\nPD(IPROC) = 1d0\n" 2778 pdf_lines = pdf_lines + "\nPD(0)=PD(0)+PD(IPROC)\n" 2779 else: 2780 # Pick out all initial state particles for the two beams 2781 initial_states = [sorted(list(set([p.get_initial_pdg(1) for \ 2782 p in processes]))), 2783 sorted(list(set([p.get_initial_pdg(2) for \ 2784 p in processes])))] 2785 2786 # Prepare all variable names 2787 pdf_codes = dict([(p, model.get_particle(p).get_name()) for p in \ 2788 sum(initial_states,[])]) 2789 for key,val in pdf_codes.items(): 2790 pdf_codes[key] = val.replace('~','x').replace('+','p').replace('-','m') 2791 2792 # Set conversion from PDG code to number used in PDF calls 2793 pdgtopdf = {21: 0, 22: 7} 2794 # Fill in missing entries of pdgtopdf 2795 for pdg in sum(initial_states,[]): 2796 if not pdg in pdgtopdf and not pdg in pdgtopdf.values(): 2797 pdgtopdf[pdg] = pdg 2798 elif pdg not in pdgtopdf and pdg in pdgtopdf.values(): 2799 # If any particle has pdg code 7, we need to use something else 2800 pdgtopdf[pdg] = 6000000 + pdg 2801 2802 # Get PDF variable declarations for all initial states 2803 for i in [0,1]: 2804 pdf_definition_lines += "DOUBLE PRECISION " + \ 2805 ",".join(["%s%d" % (pdf_codes[pdg],i+1) \ 2806 for pdg in \ 2807 initial_states[i]]) + \ 2808 "\n" 2809 2810 # Get PDF data lines for all initial states 2811 for i in [0,1]: 2812 pdf_data_lines += "DATA " + \ 2813 ",".join(["%s%d" % (pdf_codes[pdg],i+1) \ 2814 for pdg in initial_states[i]]) + \ 2815 "/%d*1D0/" % len(initial_states[i]) + \ 2816 "\n" 2817 2818 # Get PDF values for the different initial states 2819 for i, init_states in enumerate(initial_states): 2820 if not mirror: 2821 ibeam = i + 1 2822 else: 2823 ibeam = 2 - i 2824 if subproc_group: 2825 pdf_lines = pdf_lines + \ 2826 "IF (ABS(LPP(IB(%d))).GE.1) THEN\nLP=SIGN(1,LPP(IB(%d)))\n" \ 2827 % (ibeam, ibeam) 2828 else: 2829 pdf_lines = pdf_lines + \ 2830 "IF (ABS(LPP(%d)) .GE. 
1) THEN\nLP=SIGN(1,LPP(%d))\n" \ 2831 % (ibeam, ibeam) 2832 2833 for initial_state in init_states: 2834 if initial_state in pdf_codes.keys(): 2835 if subproc_group: 2836 if abs(pdgtopdf[initial_state]) <= 7: 2837 pdf_lines = pdf_lines + \ 2838 ("%s%d=PDG2PDF(ABS(LPP(IB(%d))),%d*LP," + \ 2839 "XBK(IB(%d)),DSQRT(Q2FACT(%d)))\n") % \ 2840 (pdf_codes[initial_state], 2841 i + 1, ibeam, pdgtopdf[initial_state], 2842 ibeam, ibeam) 2843 else: 2844 # setting other partons flavours outside quark, gluon, photon to be 0d0 2845 pdf_lines = pdf_lines + \ 2846 ("c settings other partons flavours outside quark, gluon, photon to 0d0\n" + \ 2847 "%s%d=0d0\n") % \ 2848 (pdf_codes[initial_state],i + 1) 2849 else: 2850 if abs(pdgtopdf[initial_state]) <= 7: 2851 pdf_lines = pdf_lines + \ 2852 ("%s%d=PDG2PDF(ABS(LPP(%d)),%d*LP," + \ 2853 "XBK(%d),DSQRT(Q2FACT(%d)))\n") % \ 2854 (pdf_codes[initial_state], 2855 i + 1, ibeam, pdgtopdf[initial_state], 2856 ibeam, ibeam) 2857 else: 2858 # setting other partons flavours outside quark, gluon, photon to be 0d0 2859 pdf_lines = pdf_lines + \ 2860 ("c settings other partons flavours outside quark, gluon, photon to 0d0\n" + \ 2861 "%s%d=0d0\n") % \ 2862 (pdf_codes[initial_state],i + 1) 2863 2864 pdf_lines = pdf_lines + "ENDIF\n" 2865 2866 # Add up PDFs for the different initial state particles 2867 pdf_lines = pdf_lines + "PD(0) = 0d0\nIPROC = 0\n" 2868 for proc in processes: 2869 process_line = proc.base_string() 2870 pdf_lines = pdf_lines + "IPROC=IPROC+1 ! " + process_line 2871 pdf_lines = pdf_lines + "\nPD(IPROC) = " 2872 for ibeam in [1, 2]: 2873 initial_state = proc.get_initial_pdg(ibeam) 2874 if initial_state in pdf_codes.keys(): 2875 pdf_lines = pdf_lines + "%s%d*" % \ 2876 (pdf_codes[initial_state], ibeam) 2877 else: 2878 pdf_lines = pdf_lines + "1d0*" 2879 # Remove last "*" from pdf_lines 2880 pdf_lines = pdf_lines[:-1] + "\n" 2881 2882 # Remove last line break from pdf_lines 2883 return pdf_definition_lines[:-1], pdf_data_lines[:-1], pdf_lines[:-1] 2884 2885 2886 #test written
2887 - def get_color_data_lines_from_color_matrix(self, color_matrix, n=6):
2888 """Return the color matrix definition lines for the given color_matrix. Split 2889 rows in chunks of size n.""" 2890 2891 if not color_matrix: 2892 return ["DATA Denom(1)/1/", "DATA (CF(i,1),i=1,1) /1/"] 2893 else: 2894 ret_list = [] 2895 my_cs = color.ColorString() 2896 for index, denominator in \ 2897 enumerate(color_matrix.get_line_denominators()): 2898 # First write the common denominator for this color matrix line 2899 ret_list.append("DATA Denom(%i)/%i/" % (index + 1, denominator)) 2900 # Then write the numerators for the matrix elements 2901 num_list = color_matrix.get_line_numerators(index, denominator) 2902 for k in xrange(0, len(num_list), n): 2903 ret_list.append("DATA (CF(i,%3r),i=%3r,%3r) /%s/" % \ 2904 (index + 1, k + 1, min(k + n, len(num_list)), 2905 ','.join(["%5r" % i for i in num_list[k:k + n]]))) 2906 2907 return ret_list
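#===============================================================================
# Illustrative sketch (not part of the module): how the chunking above splits
# one color-matrix row into DATA statements of at most n entries, using a
# hypothetical row of 8 numerators with common denominator 9.
#===============================================================================
n = 6
index, denominator = 0, 9
num_list = [64, -8, -8, 1, 10, 10, 1, -8]          # hypothetical numerators
ret_list = ["DATA Denom(%i)/%i/" % (index + 1, denominator)]
for k in range(0, len(num_list), n):
    ret_list.append("DATA (CF(i,%3r),i=%3r,%3r) /%s/" %
                    (index + 1, k + 1, min(k + n, len(num_list)),
                     ','.join(["%5r" % i for i in num_list[k:k + n]])))
print('\n'.join(ret_list))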
2908 2909 #=========================================================================== 2910 # write_maxamps_file 2911 #===========================================================================
2912 - def write_maxamps_file(self, writer, maxamps, maxflows, 2913 maxproc,maxsproc):
2914 """Write the maxamps.inc file for MG4.""" 2915 2916 file = " integer maxamps, maxflow, maxproc, maxsproc\n" 2917 file = file + "parameter (maxamps=%d, maxflow=%d)\n" % \ 2918 (maxamps, maxflows) 2919 file = file + "parameter (maxproc=%d, maxsproc=%d)" % \ 2920 (maxproc, maxsproc) 2921 2922 # Write the file 2923 writer.writelines(file) 2924 2925 return True
2926 2927 #=============================================================================== 2928 # write_ncombs_file 2929 #===============================================================================
2930 - def write_ncombs_file(self, writer, matrix_element, fortran_model):
2931  #  #test written 2932          """Write the ncombs.inc file for MadEvent.""" 2933   2934          # Extract number of external particles 2935          (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 2936   2937          # n_max_cl (used for clustering) is 2^(nexternal+1) 2938          file = "       integer n_max_cl\n" 2939          file = file + "parameter (n_max_cl=%d)" % (2 ** (nexternal+1)) 2940   2941          # Write the file 2942          writer.writelines(file) 2943   2944          return True
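#===============================================================================
# Illustrative worked example (not part of the module): the n_max_cl value
# written above for a hypothetical process with nexternal = 5.
#===============================================================================
nexternal = 5
print("parameter (n_max_cl=%d)" % (2 ** (nexternal + 1)))   # n_max_cl = 64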
2945 2946 #=========================================================================== 2947 # write_config_subproc_map_file 2948 #===========================================================================
2949 - def write_config_subproc_map_file(self, writer, s_and_t_channels):
2950 """Write a dummy config_subproc.inc file for MadEvent""" 2951 2952 lines = [] 2953 2954 for iconfig in range(len(s_and_t_channels)): 2955 lines.append("DATA CONFSUB(1,%d)/1/" % \ 2956 (iconfig + 1)) 2957 2958 # Write the file 2959 writer.writelines(lines) 2960 2961 return True
2962 2963 #=========================================================================== 2964 # write_colors_file 2965 #===========================================================================
2966 - def write_colors_file(self, writer, matrix_element):
2967 """Write the get_color.f file for MadEvent, which returns color 2968 for all particles used in the matrix element.""" 2969 2970 try: 2971 matrix_elements=matrix_element.real_processes[0].matrix_element 2972 except IndexError: 2973 matrix_elements=[matrix_element.born_matrix_element] 2974 2975 if isinstance(matrix_elements, helas_objects.HelasMatrixElement): 2976 matrix_elements = [matrix_elements] 2977 2978 model = matrix_elements[0].get('processes')[0].get('model') 2979 2980 # We need the both particle and antiparticle wf_ids, since the identity 2981 # depends on the direction of the wf. 2982 # loop on the real emissions 2983 wf_ids = set(sum([sum([sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \ 2984 for wf in d.get('wavefunctions')],[]) \ 2985 for d in me.get('diagrams')],[]) \ 2986 for me in [real_proc.matrix_element]],[])\ 2987 for real_proc in matrix_element.real_processes],[])) 2988 # and also on the born 2989 wf_ids = wf_ids.union(set(sum([sum([[wf.get_pdg_code(),wf.get_anti_pdg_code()] \ 2990 for wf in d.get('wavefunctions')],[]) \ 2991 for d in matrix_element.born_matrix_element.get('diagrams')],[]))) 2992 2993 # loop on the real emissions 2994 leg_ids = set(sum([sum([sum([[l.get('id') for l in \ 2995 p.get_legs_with_decays()] for p in \ 2996 me.get('processes')], []) for me in \ 2997 [real_proc.matrix_element]], []) for real_proc in \ 2998 matrix_element.real_processes],[])) 2999 # and also on the born 3000 leg_ids = leg_ids.union(set(sum([[l.get('id') for l in \ 3001 p.get_legs_with_decays()] for p in \ 3002 matrix_element.born_matrix_element.get('processes')], []))) 3003 particle_ids = sorted(list(wf_ids.union(leg_ids))) 3004 3005 lines = """function get_color(ipdg) 3006 implicit none 3007 integer get_color, ipdg 3008 3009 if(ipdg.eq.%d)then 3010 get_color=%d 3011 return 3012 """ % (particle_ids[0], model.get_particle(particle_ids[0]).get_color()) 3013 3014 for part_id in particle_ids[1:]: 3015 lines += """else if(ipdg.eq.%d)then 3016 get_color=%d 3017 return 3018 """ % (part_id, model.get_particle(part_id).get_color()) 3019 # Dummy particle for multiparticle vertices with pdg given by 3020 # first code not in the model 3021 lines += """else if(ipdg.eq.%d)then 3022 c This is dummy particle used in multiparticle vertices 3023 get_color=2 3024 return 3025 """ % model.get_first_non_pdg() 3026 lines += """else 3027 write(*,*)'Error: No color given for pdg ',ipdg 3028 get_color=0 3029 return 3030 endif 3031 end 3032 """ 3033 3034 # Write the file 3035 writer.writelines(lines) 3036 3037 return True
3038 3039 #=============================================================================== 3040 # write_props_file 3041 #=============================================================================== 3042 #test_written
3043 - def write_props_file(self, writer, matrix_element, fortran_model, s_and_t_channels):
3044 """Write the props.inc file for MadEvent. Needs input from 3045 write_configs_file. With respect to the parent routine, it has some 3046 more specific formats that allow the props.inc file to be read by the 3047 link program""" 3048 3049 lines = [] 3050 3051 particle_dict = matrix_element.get('processes')[0].get('model').\ 3052 get('particle_dict') 3053 3054 for iconf, configs in enumerate(s_and_t_channels): 3055 for vertex in configs[0] + configs[1][:-1]: 3056 leg = vertex.get('legs')[-1] 3057 if leg.get('id') not in particle_dict: 3058 # Fake propagator used in multiparticle vertices 3059 mass = 'zero' 3060 width = 'zero' 3061 pow_part = 0 3062 else: 3063 particle = particle_dict[leg.get('id')] 3064 # Get mass 3065 if particle.get('mass').lower() == 'zero': 3066 mass = particle.get('mass') 3067 else: 3068 mass = "abs(%s)" % particle.get('mass') 3069 # Get width 3070 if particle.get('width').lower() == 'zero': 3071 width = particle.get('width') 3072 else: 3073 width = "abs(%s)" % particle.get('width') 3074 3075 pow_part = 1 + int(particle.is_boson()) 3076 3077 lines.append("pmass(%3d,%4d) = %s" % \ 3078 (leg.get('number'), iconf + 1, mass)) 3079 lines.append("pwidth(%3d,%4d) = %s" % \ 3080 (leg.get('number'), iconf + 1, width)) 3081 lines.append("pow(%3d,%4d) = %d" % \ 3082 (leg.get('number'), iconf + 1, pow_part)) 3083 3084 # Write the file 3085 writer.writelines(lines) 3086 3087 return True
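#===============================================================================
# Illustrative sketch (not part of the module): the props.inc lines produced
# above for a single hypothetical s-channel Z propagator (leg number 3 in
# configuration 1), with pow = 1 + int(is_boson) = 2.
#===============================================================================
leg_number, iconf = 3, 0
mass, width, pow_part = "abs(MZ)", "abs(WZ)", 2
lines = ["pmass(%3d,%4d)  = %s" % (leg_number, iconf + 1, mass),
         "pwidth(%3d,%4d) = %s" % (leg_number, iconf + 1, width),
         "pow(%3d,%4d) = %d" % (leg_number, iconf + 1, pow_part)]
print('\n'.join(lines))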
3088 3089 3090 #=========================================================================== 3091 # write_subproc 3092 #===========================================================================
3093 - def write_subproc(self, writer, subprocdir):
3094 """Append this subprocess to the subproc.mg file for MG4""" 3095 3096 # Write line to file 3097 writer.write(subprocdir + "\n") 3098 3099 return True
3100 3101 3102 3103 3104 3105 #================================================================================= 3106 # Class for using the optimized Loop process 3107 #=================================================================================
3108 -class ProcessOptimizedExporterFortranFKS(loop_exporters.LoopProcessOptimizedExporterFortranSA,\ 3109 ProcessExporterFortranFKS):
3110 """Class to take care of exporting a set of matrix elements to 3111 Fortran (v4) format.""" 3112 3113
3114 - def finalize(self, *args, **opts):
3116 #export_v4.ProcessExporterFortranSA.finalize(self, *args, **opts) 3117 3118 #=============================================================================== 3119 # copy the Template in a new directory. 3120 #===============================================================================
3121 - def copy_fkstemplate(self):
3122 """create the directory run_name as a copy of the MadEvent 3123 Template, and clean the directory 3124 For now it is just the same as copy_v4template, but it will be modified 3125 """ 3126 mgme_dir = self.mgme_dir 3127 dir_path = self.dir_path 3128 clean =self.opt['clean'] 3129 3130 #First copy the full template tree if dir_path doesn't exit 3131 if not os.path.isdir(dir_path): 3132 if not mgme_dir: 3133 raise MadGraph5Error, \ 3134 "No valid MG_ME path given for MG4 run directory creation." 3135 logger.info('initialize a new directory: %s' % \ 3136 os.path.basename(dir_path)) 3137 shutil.copytree(os.path.join(mgme_dir, 'Template', 'NLO'), dir_path, True) 3138 # distutils.dir_util.copy_tree since dir_path already exists 3139 dir_util.copy_tree(pjoin(self.mgme_dir, 'Template', 'Common'), 3140 dir_path) 3141 # Copy plot_card 3142 for card in ['plot_card']: 3143 if os.path.isfile(pjoin(self.dir_path, 'Cards',card + '.dat')): 3144 try: 3145 shutil.copy(pjoin(self.dir_path, 'Cards', card + '.dat'), 3146 pjoin(self.dir_path, 'Cards', card + '_default.dat')) 3147 except IOError: 3148 logger.warning("Failed to copy " + card + ".dat to default") 3149 3150 elif not os.path.isfile(os.path.join(dir_path, 'TemplateVersion.txt')): 3151 if not mgme_dir: 3152 raise MadGraph5Error, \ 3153 "No valid MG_ME path given for MG4 run directory creation." 3154 try: 3155 shutil.copy(os.path.join(mgme_dir, 'MGMEVersion.txt'), dir_path) 3156 except IOError: 3157 MG5_version = misc.get_pkg_info() 3158 open(os.path.join(dir_path, 'MGMEVersion.txt'), 'w').write( \ 3159 "5." + MG5_version['version']) 3160 3161 #Ensure that the Template is clean 3162 if clean: 3163 logger.info('remove old information in %s' % os.path.basename(dir_path)) 3164 if os.environ.has_key('MADGRAPH_BASE'): 3165 subprocess.call([os.path.join('bin', 'internal', 'clean_template'), 3166 '--web'], cwd=dir_path) 3167 else: 3168 try: 3169 subprocess.call([os.path.join('bin', 'internal', 'clean_template')], \ 3170 cwd=dir_path) 3171 except Exception, why: 3172 raise MadGraph5Error('Failed to clean correctly %s: \n %s' \ 3173 % (os.path.basename(dir_path),why)) 3174 #Write version info 3175 MG_version = misc.get_pkg_info() 3176 open(os.path.join(dir_path, 'SubProcesses', 'MGVersion.txt'), 'w').write( 3177 MG_version['version']) 3178 3179 # We must link the CutTools to the Library folder of the active Template 3180 self.link_CutTools(dir_path) 3181 # We must link the TIR to the Library folder of the active Template 3182 link_tir_libs=[] 3183 tir_libs=[] 3184 tir_include=[] 3185 for tir in self.all_tir: 3186 tir_dir="%s_dir"%tir 3187 libpath=getattr(self,tir_dir) 3188 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'), 3189 libpath,"lib%s.a"%tir,tir_name=tir) 3190 setattr(self,tir_dir,libpath) 3191 if libpath != "": 3192 if tir in ['pjfry','ninja','golem', 'samurai','collier']: 3193 # We should link dynamically when possible, so we use the original 3194 # location of these libraries. 3195 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir)) 3196 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir)) 3197 # For Ninja, we must also link against OneLoop. 3198 if tir in ['ninja']: 3199 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext)) 3200 for ext in ['a','dylib','so']): 3201 raise MadGraph5Error( 3202 "The OneLOop library 'libavh_olo.(a|dylib|so)' could no be found in path '%s'. 
Please place a symlink to it there."%libpath) 3203 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo')) 3204 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo')) 3205 # We must add the corresponding includes for these TIR 3206 if tir in ['golem','samurai','ninja','collier']: 3207 trg_path = pjoin(os.path.dirname(libpath),'include') 3208 if os.path.isdir(trg_path): 3209 to_include = misc.find_includes_path(trg_path, 3210 self.include_names[tir]) 3211 else: 3212 to_include = None 3213 # Special possible location for collier 3214 if to_include is None and tir=='collier': 3215 to_include = misc.find_includes_path( 3216 pjoin(libpath,'modules'),self.include_names[tir]) 3217 if to_include is None: 3218 logger.error( 3219 'Could not find the include directory for %s, looking in %s.\n' % (tir ,str(trg_path))+ 3220 'Generation carries on but you will need to edit the include path by hand in the makefiles.') 3221 to_include = '<Not_found_define_it_yourself>' 3222 tir_include.append('-I %s'%to_include) 3223 else: 3224 link_tir_libs.append('-l%s'%tir) 3225 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir) 3226 3227 os.remove(os.path.join(self.dir_path,'SubProcesses','makefile_loop.inc')) 3228 cwd = os.getcwd() 3229 dirpath = os.path.join(self.dir_path, 'SubProcesses') 3230 try: 3231 os.chdir(dirpath) 3232 except os.error: 3233 logger.error('Could not cd to directory %s' % dirpath) 3234 return 0 3235 filename = 'makefile_loop' 3236 calls = self.write_makefile_TIR(writers.MakefileWriter(filename), 3237 link_tir_libs,tir_libs,tir_include=tir_include) 3238 os.remove(os.path.join(self.dir_path,'Source','make_opts.inc')) 3239 dirpath = os.path.join(self.dir_path, 'Source') 3240 try: 3241 os.chdir(dirpath) 3242 except os.error: 3243 logger.error('Could not cd to directory %s' % dirpath) 3244 return 0 3245 filename = 'make_opts' 3246 calls = self.write_make_opts(writers.MakefileWriter(filename), 3247 link_tir_libs,tir_libs) 3248 # Return to original PWD 3249 os.chdir(cwd) 3250 3251 cwd = os.getcwd() 3252 dirpath = os.path.join(self.dir_path, 'SubProcesses') 3253 try: 3254 os.chdir(dirpath) 3255 except os.error: 3256 logger.error('Could not cd to directory %s' % dirpath) 3257 return 0 3258 3259 # We add here the user-friendly MadLoop option setter. 
3260 cpfiles= ["SubProcesses/MadLoopParamReader.f", 3261 "Cards/MadLoopParams.dat", 3262 "SubProcesses/MadLoopParams.inc"] 3263 3264 for file in cpfiles: 3265 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file), 3266 os.path.join(self.dir_path, file)) 3267 3268 shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'), 3269 pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat')) 3270 3271 3272 3273 if os.path.exists(pjoin(self.dir_path, 'Cards', 'MadLoopParams.dat')): 3274 self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.dir_path, 3275 'Cards', 'MadLoopParams.dat')) 3276 # write the output file 3277 self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses", 3278 "MadLoopParams.dat")) 3279 3280 # We need minimal editing of MadLoopCommons.f 3281 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone', 3282 "SubProcesses","MadLoopCommons.inc")).read() 3283 writer = writers.FortranWriter(os.path.join(self.dir_path, 3284 "SubProcesses","MadLoopCommons.f")) 3285 writer.writelines(MadLoopCommon%{ 3286 'print_banner_commands':self.MadLoop_banner}, 3287 context={'collier_available':self.tir_available_dict['collier']}) 3288 writer.close() 3289 3290 # link the files from the MODEL 3291 model_path = self.dir_path + '/Source/MODEL/' 3292 # Note that for the [real=] mode, these files are not present 3293 if os.path.isfile(os.path.join(model_path,'mp_coupl.inc')): 3294 ln(model_path + '/mp_coupl.inc', self.dir_path + '/SubProcesses') 3295 if os.path.isfile(os.path.join(model_path,'mp_coupl_same_name.inc')): 3296 ln(model_path + '/mp_coupl_same_name.inc', \ 3297 self.dir_path + '/SubProcesses') 3298 3299 # Write the cts_mpc.h and cts_mprec.h files imported from CutTools 3300 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\ 3301 writers.FortranWriter('cts_mpc.h'),) 3302 3303 self.copy_python_files() 3304 3305 3306 # We need to create the correct open_data for the pdf 3307 self.write_pdf_opendata() 3308 3309 3310 # Return to original PWD 3311 os.chdir(cwd)
3312
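
The TIR handling in copy_fkstemplate above boils down to two flag patterns: libraries that can be linked dynamically (pjfry, ninja, golem, samurai, collier) are referenced at their original install location, while anything else is expected as a static library in the process' own $(LIBDIR). Below is a minimal sketch of those two patterns; the helper tir_link_flags is hypothetical, not part of this module, and the branching is simplified relative to the code above.

def tir_link_flags(tir, libpath):
    # Hypothetical helper, illustration only: restates the two flag
    # patterns built in copy_fkstemplate, with the selection logic
    # simplified relative to the module.
    dynamically_linkable = ['pjfry', 'ninja', 'golem', 'samurai', 'collier']
    if libpath and tir in dynamically_linkable:
        # dynamic case: point the linker at the original install location
        link_flag = '-L%s/ -l%s' % (libpath, tir)
        lib_dependency = '%s/lib%s.$(libext)' % (libpath, tir)
    else:
        # static case: the library is expected in the process $(LIBDIR)
        link_flag = '-l%s' % tir
        lib_dependency = '$(LIBDIR)lib%s.$(libext)' % tir
    return link_flag, lib_dependency

# e.g. tir_link_flags('ninja', '/usr/local/lib') gives
#      ('-L/usr/local/lib/ -lninja', '/usr/local/lib/libninja.$(libext)')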
3313 - def generate_virt_directory(self, loop_matrix_element, fortran_model, dir_name):
3314 """writes the V**** directory inside the P**** directories specified in 3315 dir_name""" 3316 3317 cwd = os.getcwd() 3318 3319 matrix_element = loop_matrix_element 3320 3321 # Create the MadLoop5_resources directory if not already existing 3322 dirpath = os.path.join(dir_name, 'MadLoop5_resources') 3323 try: 3324 os.mkdir(dirpath) 3325 except os.error as error: 3326 logger.warning(error.strerror + " " + dirpath) 3327 3328 # Create the directory PN_xx_xxxxx in the specified path 3329 name = "V%s" % matrix_element.get('processes')[0].shell_string() 3330 dirpath = os.path.join(dir_name, name) 3331 3332 try: 3333 os.mkdir(dirpath) 3334 except os.error as error: 3335 logger.warning(error.strerror + " " + dirpath) 3336 3337 try: 3338 os.chdir(dirpath) 3339 except os.error: 3340 logger.error('Could not cd to directory %s' % dirpath) 3341 return 0 3342 3343 logger.info('Creating files in directory %s' % name) 3344 3345 # Extract number of external particles 3346 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial() 3347 3348 calls=self.write_loop_matrix_element_v4(None,matrix_element,fortran_model) 3349 3350 # We need a link to coefs.inc from DHELAS 3351 ln(pjoin(self.dir_path, 'Source', 'DHELAS', 'coef_specs.inc'), 3352 abspath=False, cwd=None) 3353 3354 # The born matrix element, if needed 3355 filename = 'born_matrix.f' 3356 calls = self.write_bornmatrix( 3357 writers.FortranWriter(filename), 3358 matrix_element, 3359 fortran_model) 3360 3361 filename = 'nexternal.inc' 3362 self.write_nexternal_file(writers.FortranWriter(filename), 3363 nexternal, ninitial) 3364 3365 filename = 'pmass.inc' 3366 self.write_pmass_file(writers.FortranWriter(filename), 3367 matrix_element) 3368 3369 filename = 'ngraphs.inc' 3370 self.write_ngraphs_file(writers.FortranWriter(filename), 3371 len(matrix_element.get_all_amplitudes())) 3372 3373 filename = "loop_matrix.ps" 3374 writers.FortranWriter(filename).writelines("""C Post-helas generation loop-drawing is not ready yet.""") 3375 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList( 3376 matrix_element.get('base_amplitude').get('loop_diagrams')[:1000]), 3377 filename, 3378 model=matrix_element.get('processes')[0].get('model'), 3379 amplitude='') 3380 logger.info("Drawing loop Feynman diagrams for " + \ 3381 matrix_element.get('processes')[0].nice_string(\ 3382 print_weighted=False)) 3383 plot.draw() 3384 3385 filename = "born_matrix.ps" 3386 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\ 3387 get('born_diagrams'), 3388 filename, 3389 model=matrix_element.get('processes')[0].\ 3390 get('model'), 3391 amplitude='') 3392 logger.info("Generating born Feynman diagrams for " + \ 3393 matrix_element.get('processes')[0].nice_string(\ 3394 print_weighted=False)) 3395 plot.draw() 3396 3397 # We also need to write the overall maximum quantities for this group 3398 # of processes in 'global_specs.inc'. 
In aMCatNLO, there is always 3399 # only one process, so this is trivial 3400 self.write_global_specs(matrix_element, output_path=pjoin(dirpath,'global_specs.inc')) 3401 3402 open('unique_id.inc','w').write( 3403 """ integer UNIQUE_ID 3404 parameter(UNIQUE_ID=1)""") 3405 3406 linkfiles = ['coupl.inc', 'mp_coupl.inc', 'mp_coupl_same_name.inc', 3407 'cts_mprec.h', 'cts_mpc.h', 'MadLoopParamReader.f', 3408 'MadLoopParams.inc','MadLoopCommons.f'] 3409 3410 for file in linkfiles: 3411 ln('../../%s' % file) 3412 3413 os.system("ln -s ../../makefile_loop makefile") 3414 3415 # We should move to MadLoop5_resources directory from the SubProcesses 3416 ln(pjoin(os.path.pardir,os.path.pardir,'MadLoopParams.dat'), 3417 pjoin('..','MadLoop5_resources')) 3418 3419 linkfiles = ['mpmodule.mod'] 3420 3421 for file in linkfiles: 3422 ln('../../../lib/%s' % file) 3423 3424 linkfiles = ['coef_specs.inc'] 3425 3426 for file in linkfiles: 3427 ln('../../../Source/DHELAS/%s' % file) 3428 3429 # Return to original PWD 3430 os.chdir(cwd) 3431 3432 if not calls: 3433 calls = 0 3434 return calls
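
The relative symlinks created in generate_virt_directory only resolve correctly because each V* directory is created inside a P* subprocess directory under SubProcesses. As a reminder of that layout, here is a purely illustrative sketch (the helper expected_link_targets is hypothetical, not part of the module) of where the '../..'-style links point once created from within the V* directory:

import os

def expected_link_targets(proc_dir):
    # Hypothetical helper, illustration only: from
    # <proc_dir>/SubProcesses/P*/V* the relative links created above
    # resolve to the following absolute locations.
    subproc = os.path.join(proc_dir, 'SubProcesses')
    return {
        '../../coupl.inc': os.path.join(subproc, 'coupl.inc'),
        '../../makefile_loop': os.path.join(subproc, 'makefile_loop'),
        '../../../lib/mpmodule.mod': os.path.join(proc_dir, 'lib', 'mpmodule.mod'),
        '../../../Source/DHELAS/coef_specs.inc':
            os.path.join(proc_dir, 'Source', 'DHELAS', 'coef_specs.inc'),
    }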
3435 3436 3437 #=============================================================================== 3438 # write_coef_specs 3439 #===============================================================================
3440 - def write_coef_specs_file(self, max_loop_vertex_ranks):
3441 """ writes the coef_specs.inc in the DHELAS folder. Should not be called in the 3442 non-optimized mode""" 3443 filename = os.path.join(self.dir_path, 'Source', 'DHELAS', 'coef_specs.inc') 3444 3445 replace_dict = {} 3446 replace_dict['max_lwf_size'] = 4 3447 replace_dict['vertex_max_coefs'] = max(\ 3448 [q_polynomial.get_number_of_coefs_for_rank(n) 3449 for n in max_loop_vertex_ranks]) 3450 IncWriter=writers.FortranWriter(filename,'w') 3451 IncWriter.writelines("""INTEGER MAXLWFSIZE 3452 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d) 3453 INTEGER VERTEXMAXCOEFS 3454 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\ 3455 % replace_dict) 3456 IncWriter.close()
3457
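
For orientation, the VERTEXMAXCOEFS parameter written above grows with the highest loop-vertex rank in max_loop_vertex_ranks, while MAXLWFSIZE is fixed to 4. The sketch below assumes that q_polynomial.get_number_of_coefs_for_rank(r) counts the monomials of degree up to r in the four loop-momentum components, i.e. the cumulative sum of binomial(i+3, 3); the helper n_coefs_up_to_rank is illustrative only and should be checked against madgraph.various.q_polynomial in your version.

def n_coefs_up_to_rank(r):
    # Assumed counting (check against q_polynomial.get_number_of_coefs_for_rank):
    # number of monomials of degree <= r in 4 variables,
    # sum_{i=0..r} C(i+3, 3) = C(r+4, 4).
    return sum((i + 1) * (i + 2) * (i + 3) // 6 for i in range(r + 1))

# Under that assumption, max_loop_vertex_ranks = [1, 2] would lead to a
# coef_specs.inc of the form written above:
#   INTEGER MAXLWFSIZE
#   PARAMETER (MAXLWFSIZE=4)
#   INTEGER VERTEXMAXCOEFS
#   PARAMETER (VERTEXMAXCOEFS=15)
print(max(n_coefs_up_to_rank(r) for r in [1, 2]))   # prints 15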