1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 """Methods and classes to export matrix elements to v4 format."""
16
17 import copy
18 import fractions
19 import glob
20 import logging
21 import os
22 import stat
23 import sys
24 import re
25 import shutil
26 import subprocess
27 import itertools
28 import time
29 import datetime
30
31
32 import aloha
33
34 import madgraph.core.base_objects as base_objects
35 import madgraph.core.color_algebra as color
36 import madgraph.core.helas_objects as helas_objects
37 import madgraph.loop.loop_helas_objects as loop_helas_objects
38 import madgraph.iolibs.drawing_eps as draw
39 import madgraph.iolibs.files as files
40 import madgraph.iolibs.group_subprocs as group_subprocs
41 import madgraph.various.misc as misc
42 import madgraph.various.q_polynomial as q_polynomial
43 import madgraph.iolibs.file_writers as writers
44 import madgraph.iolibs.gen_infohtml as gen_infohtml
45 import madgraph.iolibs.template_files as template_files
46 import madgraph.iolibs.ufo_expression_parsers as parsers
47 import madgraph.iolibs.export_v4 as export_v4
48 import madgraph.various.diagram_symmetry as diagram_symmetry
49 import madgraph.various.process_checks as process_checks
50 import madgraph.various.progressbar as pbar
51 import madgraph.various.q_polynomial as q_polynomial
52 import madgraph.core.color_amp as color_amp
53 import madgraph.iolibs.helas_call_writers as helas_call_writers
54 import models.check_param_card as check_param_card
55 from madgraph.loop.loop_base_objects import LoopDiagram
56 from madgraph.loop.MadLoopBannerStyles import MadLoopBannerStyles
57
58 import madgraph.various.banner as banner_mod
59
60 pjoin = os.path.join
61
62 import aloha.create_aloha as create_aloha
63 import models.write_param_card as param_writer
64 from madgraph import MadGraph5Error, MG5DIR, InvalidCmd
65 from madgraph.iolibs.files import cp, ln, mv
66 pjoin = os.path.join
67 _file_path = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0] + '/'
68 logger = logging.getLogger('madgraph.loop_exporter')
69
70
71
72
74 """ Class to define general helper functions to the different
75 loop fortran exporters (ME, SA, MEGroup, etc..) which will inherit both
76 from this class AND from the corresponding ProcessExporterFortran(ME,SA,...).
77 It plays the same role as ProcessExporterFrotran and simply defines here
78 loop-specific helpers functions necessary for all loop exporters.
79 Notice that we do not have LoopExporterFortran inheriting from
80 ProcessExporterFortran but give access to arguments like dir_path and
81 clean using options. This avoids method resolution object ambiguity"""
82
83 default_opt = dict(export_v4.ProcessExporterFortran.default_opt)
84 default_opt.update({'clean': False, 'complex_mass':False,
85 'export_format':'madloop', 'mp':True,
86 'loop_dir':'', 'cuttools_dir':'',
87 'fortran_compiler':'gfortran',
88 'SubProc_prefix': 'P',
89 'output_dependencies': 'external',
90 'compute_color_flows': False,
91 'mode':''})
92
93 include_names = {'ninja' : 'mninja.mod',
94 'golem' : 'generic_function_1p.mod',
95 'samurai':'msamurai.mod',
96 'collier': 'collier.mod'}
97
98 def __init__(self, dir_path = "", opt=None):
99 """Initialize the LoopExporterFortran with directory information on where
100 to find all the loop-related source files, like CutTools."""
101
102
103 self.opt = dict(self.default_opt)
104 if opt:
105 self.opt.update(opt)
106
107 self.SubProc_prefix = self.opt['SubProc_prefix']
108 self.loop_dir = self.opt['loop_dir']
109 self.cuttools_dir = self.opt['cuttools_dir']
110 self.fortran_compiler = self.opt['fortran_compiler']
111 self.dependencies = self.opt['output_dependencies']
112 self.compute_color_flows = self.opt['compute_color_flows']
113
114 super(LoopExporterFortran,self).__init__(dir_path, self.opt)
115
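# Illustrative usage sketch (the directory and option values below are
# hypothetical, not taken from any generated output): a concrete subclass
# such as LoopProcessExporterFortranSA is typically instantiated by the
# command interface roughly as
#   exporter = LoopProcessExporterFortranSA('/tmp/MyLoopProc',
#       opt={'cuttools_dir': pjoin(MG5DIR, 'vendor', 'CutTools')})
# after which self.opt holds default_opt updated with the values passed in.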
116
190
191 def get_aloha_model(self, model):
192 """ Caches the aloha model created here as an attribute of the loop
193 exporter so that it can later be used in the LoopHelasMatrixElement
194 in the function compute_all_analytic_information for recycling aloha
195 computations across different LoopHelasMatrixElements steered by the
196 same loop exporter.
197 """
198 if not hasattr(self, 'aloha_model'):
199 self.aloha_model = create_aloha.AbstractALOHAModel(os.path.basename(model.get('modelpath')))
200 return self.aloha_model
201
202
203
204
205 def write_mp_files(self, writer_mprec, writer_mpc):
206 """Write the cts_mprec.h and cts_mpc.h"""
207
208 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mprec.h')).read()
209 writer_mprec.writelines(file)
210
211 file = open(os.path.join(self.cuttools_dir, 'src/cts/cts_mpc.h')).read()
212 file = file.replace('&','')
213 writer_mpc.writelines(file)
214
215 return True
216
217
218
219
222 class LoopProcessExporterFortranSA(LoopExporterFortran, export_v4.ProcessExporterFortranSA):
223 """Class to take care of exporting a set of loop matrix elements in the
224 Fortran format."""
225
226 template_dir=os.path.join(_file_path,'iolibs/template_files/loop')
227 madloop_makefile_name = 'makefile'
228
229 MadLoop_banner = MadLoopBannerStyles.get_MadLoop_Banner(
230 style='classic2', color='green',
231 top_frame_char = '=', bottom_frame_char = '=',
232 left_frame_char = '{',right_frame_char = '}',
233 print_frame=True, side_margin = 7, up_margin = 1)
234
238
245
248
250 """ Write the general check_sa.py in SubProcesses that calls all processes successively."""
251
252
253 file = open(os.path.join(self.template_dir,\
254 'check_sa_all.py.inc')).read()
255 open(output_path,'w').writelines(file)
256
257 os.chmod(output_path, os.stat(output_path).st_mode | stat.S_IEXEC)
258
260 """write a function to call the correct matrix element"""
261
262 template = """
263 %(python_information)s
264
265 SUBROUTINE INITIALISE(PATH)
266 C ROUTINE FOR F2PY to read the benchmark point.
267 IMPLICIT NONE
268 CHARACTER*512 PATH
269 CF2PY INTENT(IN) :: PATH
270 CALL SETPARA(PATH) !first call to setup the parameters
271 RETURN
272 END
273
274 SUBROUTINE SET_MADLOOP_PATH(PATH)
275 C Routine to set the path of the folder 'MadLoop5_resources' to MadLoop
276 CHARACTER(512) PATH
277 CF2PY intent(in)::path
278 CALL SETMADLOOPPATH(PATH)
279 END
280
281 subroutine smatrixhel(pdgs, npdg, p, ALPHAS, SCALES2, nhel, ANS, RETURNCODE)
282 IMPLICIT NONE
283
284 CF2PY double precision, intent(in), dimension(0:3,npdg) :: p
285 CF2PY integer, intent(in), dimension(npdg) :: pdgs
286 CF2PY integer, intent(in) :: npdg
287 CF2PY double precision, intent(out) :: ANS
288 CF2PY integer, intent(out) :: RETURNCODE
289 CF2PY double precision, intent(in) :: ALPHAS
290 CF2PY double precision, intent(in) :: SCALES2
291
292 integer pdgs(*)
293 integer npdg, nhel, RETURNCODE
294 double precision p(*)
295 double precision ANS, ALPHAS, PI,SCALES2
296
297 %(smatrixhel)s
298
299 return
300 end
301
302 subroutine get_pdg_order(OUT)
303 IMPLICIT NONE
304 CF2PY INTEGER, intent(out) :: OUT(%(nb_me)i,%(maxpart)i)
305
306 INTEGER OUT(%(nb_me)i,%(maxpart)i), PDGS(%(nb_me)i,%(maxpart)i)
307 DATA PDGS/ %(pdgs)s /
308 OUT=PDGS
309 RETURN
310 END
311
312 subroutine get_prefix(PREFIX)
313 IMPLICIT NONE
314 CF2PY CHARACTER*20, intent(out) :: PREFIX(%(nb_me)i)
315 character*20 PREFIX(%(nb_me)i),PREF(%(nb_me)i)
316 DATA PREF / '%(prefix)s'/
317 PREFIX = PREF
318 RETURN
319 END
320
321 """
322
323 allids = self.prefix_info.keys()
324 allprefix = [self.prefix_info[key][0] for key in allids]
325 min_nexternal = min([len(ids) for ids in allids])
326 max_nexternal = max([len(ids) for ids in allids])
327
328 info = []
329 for key, (prefix, tag) in self.prefix_info.items():
330 info.append('#PY %s : %s # %s' % (tag, key, prefix))
331
332
333 text = []
334 for n_ext in range(min_nexternal, max_nexternal+1):
335 current = [ids for ids in allids if len(ids)==n_ext]
336 if not current:
337 continue
338 if min_nexternal != max_nexternal:
339 if n_ext == min_nexternal:
340 text.append(' if (npdg.eq.%i)then' % n_ext)
341 else:
342 text.append(' else if (npdg.eq.%i)then' % n_ext)
343 for ii,pdgs in enumerate(current):
344 condition = '.and.'.join(['%i.eq.pdgs(%i)' %(pdg, i+1) for i, pdg in enumerate(pdgs)])
345 if ii==0:
346 text.append( ' if(%s) then ! %i' % (condition, i))
347 else:
348 text.append( ' else if(%s) then ! %i' % (condition,i))
349 text.append(' call %sget_me(p, ALPHAS, DSQRT(SCALES2), NHEL, ANS, RETURNCODE)' % self.prefix_info[pdgs][0])
350 text.append(' endif')
351
352 if min_nexternal != max_nexternal:
353 text.append('endif')
354
355 formatting = {'python_information':'\n'.join(info),
356 'smatrixhel': '\n'.join(text),
357 'maxpart': max_nexternal,
358 'nb_me': len(allids),
359 'pdgs': ','.join([str(pdg[i]) if i<len(pdg) else '0'
360 for i in range(max_nexternal) \
361 for pdg in allids]),
362 'prefix':'\',\''.join(allprefix)
363 }
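# Note on the 'pdgs' entry above: the joined string fills the Fortran array
# PDGS(nb_me, maxpart) of GET_PDG_ORDER in column-major order, padding
# shorter processes with 0. For instance (purely illustrative PDG lists),
# two processes (11,-11,13,-13) and (21,21,25) with maxpart=4 would yield
#   DATA PDGS/ 11,21,-11,21,13,25,-13,0 /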
364
365
366 text = template % formatting
367 fsock = writers.FortranWriter(pjoin(self.dir_path, 'SubProcesses', 'all_matrix.f'),'w')
368 fsock.writelines(text)
369 fsock.close()
370
371
372
374 """ Perform additional actions specific for this class when setting
375 up the template with the copy_template function."""
376
377
378 cpfiles= ["Cards/MadLoopParams.dat",
379 "SubProcesses/MadLoopParamReader.f",
380 "SubProcesses/MadLoopParams.inc"]
381 if copy_Source_makefile:
382 cpfiles.append("Source/makefile")
383
384 for file in cpfiles:
385 shutil.copy(os.path.join(self.loop_dir,'StandAlone/', file),
386 os.path.join(self.dir_path, file))
387
388
389 shutil.copy(pjoin(self.dir_path, 'Cards','MadLoopParams.dat'),
390 pjoin(self.dir_path, 'Cards','MadLoopParams_default.dat'))
391
392 self.MadLoopparam = banner_mod.MadLoopParam(pjoin(self.loop_dir,'StandAlone',
393 'Cards', 'MadLoopParams.dat'))
394
395 self.MadLoopparam.write(pjoin(self.dir_path,"SubProcesses",
396 "MadLoopParams.dat"))
397
398
399 shutil.copy(pjoin(self.loop_dir,'StandAlone','SubProcesses','makefile'),
400 pjoin(self.dir_path, 'SubProcesses',self.madloop_makefile_name))
401
402
403
404 link_tir_libs=[]
405 tir_libs=[]
406
407 filePath = pjoin(self.dir_path, 'SubProcesses',
408 'MadLoop_makefile_definitions')
409 calls = self.write_loop_makefile_definitions(
410 writers.MakefileWriter(filePath),link_tir_libs,tir_libs)
411
412
413
414
415 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
416 "SubProcesses","MadLoopCommons.inc")).read()
417 writer = writers.FortranWriter(os.path.join(self.dir_path,
418 "SubProcesses","MadLoopCommons.f"))
419 writer.writelines(MadLoopCommon%{
420 'print_banner_commands':self.MadLoop_banner}, context={
421 'collier_available':False})
422 writer.close()
423
424
425 if not os.path.exists(pjoin(self.dir_path,'SubProcesses',
426 'MadLoop5_resources')):
427 cp(pjoin(self.loop_dir,'StandAlone','SubProcesses',
428 'MadLoop5_resources'),pjoin(self.dir_path,'SubProcesses'))
429
430
431 ln(pjoin(self.dir_path,'SubProcesses','MadLoopParams.dat'),
432 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
433 ln(pjoin(self.dir_path,'Cards','param_card.dat'),
434 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
435 ln(pjoin(self.dir_path,'Cards','ident_card.dat'),
436 pjoin(self.dir_path,'SubProcesses','MadLoop5_resources'))
437
438
439
440 if os.path.isfile(pjoin(self.dir_path,'SubProcesses','check_sa.f')):
441 os.remove(pjoin(self.dir_path,'SubProcesses','check_sa.f'))
442
443 cwd = os.getcwd()
444 dirpath = os.path.join(self.dir_path, 'SubProcesses')
445 try:
446 os.chdir(dirpath)
447 except os.error:
448 logger.error('Could not cd to directory %s' % dirpath)
449 return 0
450
451
452 self.write_mp_files(writers.FortranWriter('cts_mprec.h'),\
453 writers.FortranWriter('cts_mpc.h'))
454
455
456 os.chdir(cwd)
457
458
459 super(LoopProcessExporterFortranSA, self).link_CutTools(self.dir_path)
460
461
462
463 def write_loop_makefile_definitions(self, writer, link_tir_libs,
464 tir_libs, tir_include=[]):
465 """ Create the file makefile which links to the TIR libraries."""
466
467 file = open(os.path.join(self.loop_dir,'StandAlone',
468 'SubProcesses','MadLoop_makefile_definitions.inc')).read()
469 replace_dict={}
470 replace_dict['link_tir_libs']=' '.join(link_tir_libs)
471 replace_dict['tir_libs']=' '.join(tir_libs)
472 replace_dict['dotf']='%.f'
473 replace_dict['prefix']= self.SubProc_prefix
474 replace_dict['doto']='%.o'
475 replace_dict['tir_include']=' '.join(tir_include)
476 file=file%replace_dict
477 if writer:
478 writer.writelines(file)
479 else:
480 return file
481
482 def convert_model(self, model, wanted_lorentz = [],
483 wanted_couplings = []):
490
491 def get_ME_identifier(self, matrix_element,
492 group_number = None, group_elem_number = None):
493 """ A function returning a string uniquely identifying the matrix
494 element given in argument, so that it can be used as a prefix for all
495 MadLoop5 subroutines and common blocks related to it. This makes it
496 possible to compile several processes into one library, as requested by
497 the BLHA (Binoth Les Houches Accord) guidelines.
498 The arguments group_number and group_elem_number are only used for the
499 LoopInduced output with MadEvent."""
500
501
502
503
504 if (not group_number is None) and group_elem_number is None:
505 return 'ML5_%d_%s_'%(matrix_element.get('processes')[0].get('id'),
506 group_number)
507 elif group_number is None or group_elem_number is None:
508 return 'ML5_%d_'%matrix_element.get('processes')[0].get('id')
509 else:
510 return 'ML5_%d_%s_%s_'%(matrix_element.get('processes')[0].get('id'),
511 group_number, group_elem_number)
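# For example (illustrative values), a matrix element whose first process has
# id 1 gets the prefix 'ML5_1_' when no group information is given,
# 'ML5_1_2_' when only group_number=2 is given, and 'ML5_1_2_3_' when
# group_elem_number=3 is given as well.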
512
513 def get_SubProc_folder_name(self, process,
514 group_number = None, group_elem_number = None):
515 """Returns the name of the SubProcess directory, which can contain
516 the process group and group element number for the case of loop-induced
517 integration with MadEvent."""
518
519
520
521
522 if not group_number is None and group_elem_number is None:
523 return "%s%d_%s_%s"%(self.SubProc_prefix, process.get('id'),
524 group_number,process.shell_string(print_id=False))
525 elif group_number is None or group_elem_number is None:
526 return "%s%s" %(self.SubProc_prefix,process.shell_string())
527 else:
528 return "%s%d_%s_%s_%s"%(self.SubProc_prefix, process.get('id'),
529 group_number, group_elem_number,process.shell_string(print_id=False))
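# For example, for a hypothetical process e+ e- > mu+ mu- with id 1 and the
# default SubProc_prefix 'P', this returns 'P1_epem_mupmum' without group
# information, 'P1_2_epem_mupmum' with group_number=2 only, and
# 'P1_2_3_epem_mupmum' with group_elem_number=3 as well.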
530
531
532
533
534 def compiler_choice(self, compiler):
535 """ Different daughter classes might want different compilers.
536 Here, the gfortran compiler is used throughout the compilation
537 (mandatory for CutTools written in f90) """
538 if isinstance(compiler, str):
539 fortran_compiler = compiler
540 compiler = export_v4.default_compiler
541 compiler['fortran'] = fortran_compiler
542
543 if not compiler['fortran'] is None and not \
544 any([name in compiler['fortran'] for name in \
545 ['gfortran','ifort']]):
546 logger.info('For loop processes, the compiler must be fortran90 '+\
547 'compatible, like gfortran.')
548 compiler['fortran'] = 'gfortran'
549 self.set_compiler(compiler,True)
550 else:
551 self.set_compiler(compiler)
552
553 self.set_cpp_compiler(compiler['cpp'])
554
556
557
558
559
560
561
562 def turn_to_mp_calls(self, helas_calls_list):
563 MP=re.compile(r"(?P<toSub>^.*CALL\s+)",re.IGNORECASE | re.MULTILINE)
564
565 def replaceWith(match_obj):
566 return match_obj.group('toSub')+'MP_'
567
568 DCMPLX=re.compile(r"DCMPLX\((?P<toSub>([^\)]*))\)",\
569 re.IGNORECASE | re.MULTILINE)
570
571 for i, helas_call in enumerate(helas_calls_list):
572 new_helas_call=MP.sub(replaceWith,helas_call)
573 helas_calls_list[i]=DCMPLX.sub(r"CMPLX(\g<toSub>,KIND=16)",\
574 new_helas_call)
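# For example, a double precision call such as (routine and coupling names
# are illustrative)
#   CALL FFV1_0(W(1,1),W(1,2),W(1,3),DCMPLX(GC_11),AMP(1))
# is rewritten into its quadruple precision counterpart
#   CALL MP_FFV1_0(W(1,1),W(1,2),W(1,3),CMPLX(GC_11,KIND=16),AMP(1))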
575
577 """ In the loop output, we don't need the files from the Source folder """
578 pass
579
580 def make_model_symbolic_link(self):
581 """ Add the linking of the additional model files for multiple precision
582 """
583 super(LoopProcessExporterFortranSA, self).make_model_symbolic_link()
584 model_path = self.dir_path + '/Source/MODEL/'
585 ln(model_path + '/mp_coupl.inc', self.dir_path + '/SubProcesses')
586 ln(model_path + '/mp_coupl_same_name.inc', self.dir_path + '/SubProcesses')
587
588 def make(self):
589 """ Compiles the additional dependences for loop (such as CutTools)."""
590 super(LoopProcessExporterFortranSA, self).make()
591
592
593 libdir = os.path.join(self.dir_path,'lib')
594 sourcedir = os.path.join(self.dir_path,'Source')
595 if self.dependencies=='internal':
596 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
597 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
598 if os.path.exists(pjoin(sourcedir,'CutTools')):
599 logger.info('Compiling CutTools (can take a couple of minutes) ...')
600 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
601 logger.info(' ...done.')
602 else:
603 raise MadGraph5Error('Could not compile CutTools because its'+\
604 ' source directory could not be found in the SOURCE folder.')
605 if not os.path.exists(os.path.realpath(pjoin(libdir, 'libcts.a'))) or \
606 not os.path.exists(os.path.realpath(pjoin(libdir, 'mpmodule.mod'))):
607 raise MadGraph5Error('CutTools compilation failed.')
608
609
610
611 compiler_log_path = pjoin(os.path.dirname((os.path.realpath(pjoin(
612 libdir, 'libcts.a')))),'compiler_version.log')
613 if os.path.exists(compiler_log_path):
614 compiler_version_used = open(compiler_log_path,'r').read()
615 if not str(misc.get_gfortran_version(misc.detect_current_compiler(\
616 pjoin(sourcedir,'make_opts')))) in compiler_version_used:
617 if os.path.exists(pjoin(sourcedir,'CutTools')):
618 logger.info('CutTools was compiled with a different fortran'+\
619 ' compiler. Re-compiling it now...')
620 misc.compile(['cleanCT'], cwd = sourcedir)
621 misc.compile(['CutTools','-j1'], cwd = sourcedir, nb_core=1)
622 logger.info(' ...done.')
623 else:
624 raise MadGraph5Error("CutTools installation in %s"\
625 %os.path.realpath(pjoin(libdir, 'libcts.a'))+\
626 " seems to have been compiled with a different compiler than"+\
627 " the one specified in MG5_aMC. Please recompile CutTools.")
628
629 def cat_coeff(self, ff_number, frac, is_imaginary, Nc_power, Nc_value=3):
630 """Concatenate the coefficient information to reduce it to
631 (fraction, is_imaginary) """
632
633 total_coeff = ff_number * frac * fractions.Fraction(Nc_value) ** Nc_power
634
635 return (total_coeff, is_imaginary)
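# Illustrative example: cat_coeff(1, fractions.Fraction(1, 3), False, 2)
# returns (Fraction(3, 1), False), since 1 * 1/3 * 3**2 = 3.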
636
637 def get_amp_to_jamp_map(self, col_amps, n_amps):
638 """ Returns a list with element 'i' being a list of tuples corresponding
639 to all appearances of amplitude number 'i' in the jamp number 'j'
640 with coefficient 'coeff_j'. Each tuple describing an appearance has the
641 form (j, coeff_j), where coeff_j is of the form (Fraction, is_imag)."""
642
643 if(isinstance(col_amps,list)):
644 if(col_amps and isinstance(col_amps[0],list)):
645 color_amplitudes=col_amps
646 else:
647 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map"
648 else:
649 raise MadGraph5Error, "Incorrect col_amps argument passed to get_amp_to_jamp_map"
650
651
652 res_list = [[] for i in range(n_amps)]
653 for i, coeff_list in enumerate(color_amplitudes):
654 for (coefficient, amp_number) in coeff_list:
655 res_list[amp_number-1].append((i,self.cat_coeff(\
656 coefficient[0],coefficient[1],coefficient[2],coefficient[3])))
657
658 return res_list
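# Illustrative example: with n_amps=2 and
#   col_amps = [[((1, Fraction(1, 3), False, 0), 2)],
#               [((-1, Fraction(1, 1), True, 1), 1)]]
# (jamp 0 receives amplitude 2 and jamp 1 receives amplitude 1), the result is
#   [[(1, (Fraction(-3, 1), True))], [(0, (Fraction(1, 3), False))]]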
659
660 def get_color_matrix(self, matrix_element):
661 """Return the color matrix definition lines. This color matrix is of size
662 NLOOPAMPSxNBORNAMPS and allows each loop amplitude to be squared
663 individually against each Born amplitude."""
664
665 logger.info('Computing diagram color coefficients')
666
667
668
669
670
671 ampl_to_jampl=self.get_amp_to_jamp_map(\
672 matrix_element.get_loop_color_amplitudes(),
673 matrix_element.get_number_of_loop_amplitudes())
674 if matrix_element.get('processes')[0].get('has_born'):
675 ampb_to_jampb=self.get_amp_to_jamp_map(\
676 matrix_element.get_born_color_amplitudes(),
677 matrix_element.get_number_of_born_amplitudes())
678 else:
679 ampb_to_jampb=ampl_to_jampl
680
681 if matrix_element.get('color_matrix'):
682 ColorMatrixDenom = \
683 matrix_element.get('color_matrix').get_line_denominators()
684 ColorMatrixNum = [ matrix_element.get('color_matrix').\
685 get_line_numerators(index, denominator) for
686 (index, denominator) in enumerate(ColorMatrixDenom) ]
687 else:
688 ColorMatrixDenom= [1]
689 ColorMatrixNum = [[1]]
690
691
692 ColorMatrixNumOutput=[]
693 ColorMatrixDenomOutput=[]
694
695
696
697 start = time.time()
698 progress_bar = None
699 time_info = False
700 for i, jampl_list in enumerate(ampl_to_jampl):
701
702
703
704 if i==5:
705 elapsed_time = time.time()-start
706 t = len(ampl_to_jampl)*(elapsed_time/5.0)
707 if t > 10.0:
708 time_info = True
709 logger.info('The color factors computation will take '+\
710 ' about %s to run. '%str(datetime.timedelta(seconds=int(t)))+\
711 'Started on %s.'%datetime.datetime.now().strftime(\
712 "%d-%m-%Y %H:%M"))
713 if logger.getEffectiveLevel()<logging.WARNING:
714 widgets = ['Color computation:', pbar.Percentage(), ' ',
715 pbar.Bar(),' ', pbar.ETA(), ' ']
716 progress_bar = pbar.ProgressBar(widgets=widgets,
717 maxval=len(ampl_to_jampl), fd=sys.stdout)
718
719 if not progress_bar is None:
720 progress_bar.update(i+1)
721
722 sys.stdout.flush()
723
724 line_num=[]
725 line_denom=[]
726
727
728
729
730
731
732
733
734 if len(jampl_list)==0:
735 line_num=[0]*len(ampb_to_jampb)
736 line_denom=[1]*len(ampb_to_jampb)
737 ColorMatrixNumOutput.append(line_num)
738 ColorMatrixDenomOutput.append(line_denom)
739 continue
740
741 for jampb_list in ampb_to_jampb:
742 real_num=0
743 imag_num=0
744 common_denom=color_amp.ColorMatrix.lcmm(*[abs(ColorMatrixDenom[jampl]*
745 ampl_coeff[0].denominator*ampb_coeff[0].denominator) for
746 ((jampl, ampl_coeff),(jampb,ampb_coeff)) in
747 itertools.product(jampl_list,jampb_list)])
748 for ((jampl, ampl_coeff),(jampb, ampb_coeff)) in \
749 itertools.product(jampl_list,jampb_list):
750
751
752 buff_num=ampl_coeff[0].numerator*\
753 ampb_coeff[0].numerator*ColorMatrixNum[jampl][jampb]*\
754 abs(common_denom)/(ampl_coeff[0].denominator*\
755 ampb_coeff[0].denominator*ColorMatrixDenom[jampl])
756
757
758
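# The loop coefficient multiplies the complex conjugate of the Born one, so
# two imaginary coefficients combine to i*conj(i)=+1 (purely real), a real
# loop coefficient with an imaginary Born one gives conj(i)=-i, and an
# imaginary loop coefficient with a real Born one gives +i. A purely
# imaginary entry is flagged further down by a negative denominator.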
759 if ampl_coeff[1] and ampb_coeff[1]:
760 real_num=real_num+buff_num
761 elif not ampl_coeff[1] and not ampb_coeff[1]:
762 real_num=real_num+buff_num
763 elif not ampl_coeff[1] and ampb_coeff[1]:
764 imag_num=imag_num-buff_num
765 else:
766 imag_num=imag_num+buff_num
767 assert not (real_num!=0 and imag_num!=0), "MadGraph5_aMC@NLO found a "+\
768 "color matrix element which has both a real and imaginary part."
769 if imag_num!=0:
770 res=fractions.Fraction(imag_num,common_denom)
771 line_num.append(res.numerator)
772
773
774 line_denom.append(res.denominator*-1)
775 else:
776 res=fractions.Fraction(real_num,common_denom)
777 line_num.append(res.numerator)
778
779 line_denom.append(res.denominator)
780
781 ColorMatrixNumOutput.append(line_num)
782 ColorMatrixDenomOutput.append(line_denom)
783
784 if time_info:
785 logger.info('Finished on %s.'%datetime.datetime.now().strftime(\
786 "%d-%m-%Y %H:%M"))
787 if progress_bar!=None:
788 progress_bar.finish()
789
790 return (ColorMatrixNumOutput,ColorMatrixDenomOutput)
791
792 def get_context(self,matrix_element):
793 """ Returns the contextual variables which need to be set when
794 pre-processing the template files."""
795
796
797
798
799
800
801 try:
802 n_squared_split_orders = matrix_element.rep_dict['nSquaredSO']
803 except (KeyError, AttributeError):
804 n_squared_split_orders = 1
805
806 LoopInduced = not matrix_element.get('processes')[0].get('has_born')
807
808
809 ComputeColorFlows = self.compute_color_flows or LoopInduced
810
811
812 AmplitudeReduction = LoopInduced or ComputeColorFlows
813
814
815 TIRCaching = AmplitudeReduction or n_squared_split_orders>1
816 MadEventOutput = False
817
818 return {'LoopInduced': LoopInduced,
819 'ComputeColorFlows': ComputeColorFlows,
820 'AmplitudeReduction': AmplitudeReduction,
821 'TIRCaching': TIRCaching,
822 'MadEventOutput': MadEventOutput}
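# For instance, for a loop-induced process (no born) this typically evaluates
# to {'LoopInduced': True, 'ComputeColorFlows': True,
#     'AmplitudeReduction': True, 'TIRCaching': True, 'MadEventOutput': False}.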
823
824
825
826
827 def generate_loop_subprocess(self, matrix_element, fortran_model,
828 group_number = None, proc_id = None, config_map=None, unique_id=None):
829 """Generate the Pxxxxx directory for a loop subprocess in MG4 standalone,
830 including the necessary loop_matrix.f, born_matrix.f and include files.
831 Notice that this is too different from generate_subprocess_directory
832 for there to be any point in reusing that mother function.
833 The 'group_number' and 'proc_id' options are only used for the LoopInduced
834 MadEvent output and only to specify the ME_identifier and the P*
835 SubProcess directory name."""
836
837 cwd = os.getcwd()
838 proc_dir_name = self.get_SubProc_folder_name(
839 matrix_element.get('processes')[0],group_number,proc_id)
840 dirpath = os.path.join(self.dir_path, 'SubProcesses', proc_dir_name)
841
842 try:
843 os.mkdir(dirpath)
844 except os.error as error:
845 logger.warning(error.strerror + " " + dirpath)
846
847 try:
848 os.chdir(dirpath)
849 except os.error:
850 logger.error('Could not cd to directory %s' % dirpath)
851 return 0
852
853 logger.info('Creating files in directory %s' % dirpath)
854
855 if unique_id is None:
856 raise MadGraph5Error, 'A unique id must be provided to the function '+\
857 'generate_loop_subprocess of LoopProcessExporterFortranSA.'
858
859 open('unique_id.inc','w').write(
860 """ integer UNIQUE_ID
861 parameter(UNIQUE_ID=%d)"""%unique_id)
862
863
864 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
865
866 calls=self.write_loop_matrix_element_v4(None,matrix_element,
867 fortran_model, group_number = group_number,
868 proc_id = proc_id, config_map = config_map)
869
870
871
872
873
874 if matrix_element.get('processes')[0].get('has_born'):
875 filename = 'born_matrix.f'
876 calls = self.write_bornmatrix(
877 writers.FortranWriter(filename),
878 matrix_element,
879 fortran_model)
880
881 filename = 'pmass.inc'
882 self.write_pmass_file(writers.FortranWriter(filename),
883 matrix_element)
884
885 filename = 'ngraphs.inc'
886 self.write_ngraphs_file(writers.FortranWriter(filename),
887 len(matrix_element.get_all_amplitudes()))
888
889
890
891 loop_diags = [loop_diag for loop_diag in\
892 matrix_element.get('base_amplitude').get('loop_diagrams')\
893 if isinstance(loop_diag,LoopDiagram) and loop_diag.get('type') > 0]
894 if len(loop_diags)>5000:
895 logger.info("There are more than 5000 loop diagrams."+\
896 "Only the first 5000 are drawn.")
897 filename = "loop_matrix.ps"
898 plot = draw.MultiEpsDiagramDrawer(base_objects.DiagramList(
899 loop_diags[:5000]),filename,
900 model=matrix_element.get('processes')[0].get('model'),amplitude='')
901 logger.info("Drawing loop Feynman diagrams for " + \
902 matrix_element.get('processes')[0].nice_string())
903 plot.draw()
904
905 if matrix_element.get('processes')[0].get('has_born'):
906 filename = "born_matrix.ps"
907 plot = draw.MultiEpsDiagramDrawer(matrix_element.get('base_amplitude').\
908 get('born_diagrams'),
909 filename,
910 model=matrix_element.get('processes')[0].\
911 get('model'),
912 amplitude='')
913 logger.info("Generating born Feynman diagrams for " + \
914 matrix_element.get('processes')[0].nice_string(\
915 print_weighted=False))
916 plot.draw()
917
918 self.link_files_from_Subprocesses(self.get_SubProc_folder_name(
919 matrix_element.get('processes')[0],group_number,proc_id))
920
921
922 os.chdir(cwd)
923
924 if not calls:
925 calls = 0
926 return calls
927
929 """ To link required files from the Subprocesses directory to the
930 different P* ones"""
931
932 linkfiles = ['coupl.inc',
933 'cts_mprec.h', 'cts_mpc.h', 'mp_coupl.inc',
934 'mp_coupl_same_name.inc',
935 'MadLoopParamReader.f','MadLoopCommons.f',
936 'MadLoopParams.inc','global_specs.inc']
937
938 for file in linkfiles:
939 ln('../%s' % file)
940
941 ln('../%s'%self.madloop_makefile_name, name='makefile')
942
943
944 ln('../../lib/mpmodule.mod')
945
946
947 ln('../MadLoop5_resources')
948
949 def generate_general_replace_dict(self, matrix_element,
950 group_number = None, proc_id = None):
951 """Generates the entries for the general replacement dictionary used
952 for the different output codes for this exporter. The arguments
953 group_number and proc_id are just for the LoopInduced output with MadEvent."""
954
955 dict={}
956
957
958
959
960 dict['proc_prefix'] = self.get_ME_identifier(matrix_element,
961 group_number = group_number, group_elem_number = proc_id)
962
963 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']:
964 for proc in matrix_element.get('processes'):
965 ids = [l.get('id') for l in proc.get('legs_with_decays')]
966 self.prefix_info[tuple(ids)] = [dict['proc_prefix'], proc.get_tag()]
967
968
969
970 dict['proc_id'] = ''
971
972 info_lines = self.get_mg5_info_lines()
973 dict['info_lines'] = info_lines
974
975 process_lines = self.get_process_info_lines(matrix_element)
976 dict['process_lines'] = process_lines
977
978 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
979 dict['nexternal'] = nexternal
980 dict['nincoming'] = ninitial
981
982 ncomb = matrix_element.get_helicity_combinations()
983 dict['ncomb'] = ncomb
984
985 nloopamps = matrix_element.get_number_of_loop_amplitudes()
986 dict['nloopamps'] = nloopamps
987
988 nloopdiags = len(matrix_element.get('diagrams'))
989 dict['nloopdiags'] = nloopdiags
990
991 nctamps = matrix_element.get_number_of_CT_amplitudes()
992 dict['nctamps'] = nctamps
993
994 nwavefuncs = matrix_element.get_number_of_external_wavefunctions()
995 dict['nwavefuncs'] = nwavefuncs
996
997 dict['real_dp_format']='real*8'
998 dict['real_mp_format']='real*16'
999
1000 dict['complex_dp_format']='complex*16'
1001 dict['complex_mp_format']='complex*32'
1002
1003 dict['mass_dp_format'] = dict['complex_dp_format']
1004 dict['mass_mp_format'] = dict['complex_mp_format']
1005
1006
1007 dict['nmultichannels'] = 0
1008 dict['nmultichannel_configs'] = 0
1009 dict['config_map_definition'] = ''
1010 dict['config_index_map_definition'] = ''
1011
1012
1013
1014
1015
1016 if matrix_element.get('processes')[0].get('has_born'):
1017 dict['color_matrix_size'] = 'nbornamps'
1018 dict['get_nsqso_born']=\
1019 "include 'nsqso_born.inc'"
1020 else:
1021 dict['get_nsqso_born']="""INTEGER NSQSO_BORN
1022 PARAMETER (NSQSO_BORN=0)
1023 """
1024 dict['color_matrix_size'] = 'nloopamps'
1025
1026
1027
1028
1029 if matrix_element.get('processes')[0].get('has_born'):
1030
1031 nbornamps = matrix_element.get_number_of_born_amplitudes()
1032 dict['nbornamps'] = nbornamps
1033 dict['ncomb_helas_objs'] = ',ncomb'
1034 dict['nbornamps_decl'] = \
1035 """INTEGER NBORNAMPS
1036 PARAMETER (NBORNAMPS=%d)"""%nbornamps
1037 dict['nBornAmps'] = nbornamps
1038
1039 else:
1040 dict['ncomb_helas_objs'] = ''
1041 dict['dp_born_amps_decl'] = ''
1042 dict['dp_born_amps_decl_in_mp'] = ''
1043 dict['copy_mp_to_dp_born_amps'] = ''
1044 dict['mp_born_amps_decl'] = ''
1045 dict['nbornamps_decl'] = ''
1046 dict['nbornamps'] = 0
1047 dict['nBornAmps'] = 0
1048
1049 return dict
1050
1051 def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model,
1052 group_number = None, proc_id = None, config_map = None):
1053 """ Writes loop_matrix.f, CT_interface.f, loop_num.f and
1054 mp_born_amps_and_wfs.
1055 The arguments group_number and proc_id are just for the LoopInduced
1056 output with MadEvent and only used in get_ME_identifier.
1057 """
1058
1059
1060
1061 if config_map:
1062 raise MadGraph5Error, 'The default loop output cannot be used with '+\
1063 'MadEvent and cannot compute the AMP2 for multi-channeling.'
1064
1065 if not isinstance(fortran_model,\
1066 helas_call_writers.FortranUFOHelasCallWriter):
1067 raise MadGraph5Error, 'The loop fortran output can only'+\
1068 ' work with a UFO Fortran model'
1069
1070 LoopFortranModel = helas_call_writers.FortranUFOHelasCallWriter(
1071 argument=fortran_model.get('model'),
1072 hel_sum=matrix_element.get('processes')[0].get('has_born'))
1073
1074
1075
1076
1077
1078 matrix_element.compute_all_analytic_information(
1079 self.get_aloha_model(matrix_element.get('processes')[0].get('model')))
1080
1081
1082
1083 matrix_element.rep_dict = self.generate_general_replace_dict(
1084 matrix_element, group_number = group_number, proc_id = proc_id)
1085
1086
1087 matrix_element.rep_dict['maxlcouplings']= \
1088 matrix_element.find_max_loop_coupling()
1089
1090
1091 if matrix_element.get('processes')[0].get('has_born'):
1092 matrix_element.rep_dict['dp_born_amps_decl_in_mp'] = \
1093 matrix_element.rep_dict['complex_dp_format']+" DPAMP(NBORNAMPS,NCOMB)"+\
1094 "\n common/%sAMPS/DPAMP"%matrix_element.rep_dict['proc_prefix']
1095 matrix_element.rep_dict['dp_born_amps_decl'] = \
1096 matrix_element.rep_dict['complex_dp_format']+" AMP(NBORNAMPS,NCOMB)"+\
1097 "\n common/%sAMPS/AMP"%matrix_element.rep_dict['proc_prefix']
1098 matrix_element.rep_dict['mp_born_amps_decl'] = \
1099 matrix_element.rep_dict['complex_mp_format']+" AMP(NBORNAMPS,NCOMB)"+\
1100 "\n common/%sMP_AMPS/AMP"%matrix_element.rep_dict['proc_prefix']
1101 matrix_element.rep_dict['copy_mp_to_dp_born_amps'] = \
1102 '\n'.join(['DO I=1,NBORNAMPS','DPAMP(I,H)=AMP(I,H)','ENDDO'])
1103
1104 if writer:
1105 raise MadGraph5Error, 'Matrix output mode no longer supported.'
1106
1107 filename = 'loop_matrix.f'
1108 calls = self.write_loopmatrix(writers.FortranWriter(filename),
1109 matrix_element,
1110 LoopFortranModel)
1111
1112
1113 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w')
1114 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix'])
1115 proc_prefix_writer.close()
1116
1117 filename = 'check_sa.f'
1118 self.write_check_sa(writers.FortranWriter(filename),matrix_element)
1119
1120 filename = 'CT_interface.f'
1121 self.write_CT_interface(writers.FortranWriter(filename),\
1122 matrix_element)
1123
1124
1125
1126 filename = 'improve_ps.f'
1127 calls = self.write_improve_ps(writers.FortranWriter(filename),
1128 matrix_element)
1129
1130 filename = 'loop_num.f'
1131 self.write_loop_num(writers.FortranWriter(filename),\
1132 matrix_element,LoopFortranModel)
1133
1134 filename = 'mp_born_amps_and_wfs.f'
1135 self.write_born_amps_and_wfs(writers.FortranWriter(filename),\
1136 matrix_element,LoopFortranModel)
1137
1138
1139 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
1140 filename = 'nexternal.inc'
1141 self.write_nexternal_file(writers.FortranWriter(filename),
1142 nexternal, ninitial)
1143
1144 filename = 'process_info.inc'
1145 self.write_process_info_file(writers.FortranWriter(filename),
1146 matrix_element)
1147 return calls
1148
1149 def write_process_info_file(self, writer, matrix_element):
1150 """A small structural function to write the include file specifying some
1151 process characteristics."""
1152
1153 model = matrix_element.get('processes')[0].get('model')
1154 process_info = {}
1155
1156
1157
1158
1159
1160 process_info['max_spin_connected_to_loop']=\
1161 matrix_element.get_max_spin_connected_to_loop()
1162
1163 process_info['max_spin_external_particle']= max(
1164 model.get_particle(l.get('id')).get('spin') for l in
1165 matrix_element.get('processes')[0].get('legs'))
1166
1167 proc_include = \
1168 """
1169 INTEGER MAX_SPIN_CONNECTED_TO_LOOP
1170 PARAMETER(MAX_SPIN_CONNECTED_TO_LOOP=%(max_spin_connected_to_loop)d)
1171 INTEGER MAX_SPIN_EXTERNAL_PARTICLE
1172 PARAMETER(MAX_SPIN_EXTERNAL_PARTICLE=%(max_spin_external_particle)d)
1173 """%process_info
1174
1175 writer.writelines(proc_include)
1176
1178 """ To overload the default name for this function such that the correct
1179 function is used when called from the command interface """
1180
1181 self.unique_id +=1
1182 return self.generate_loop_subprocess(matrix_element,fortran_model,
1183 unique_id=self.unique_id)
1184
1185 def write_check_sa(self, writer, matrix_element):
1186 """Writes out the steering code check_sa. In the optimized output mode,
1187 all the necessary entries in the replace_dictionary have already been
1188 set in write_loopmatrix because it is only there that one has access to
1189 the information about split orders."""
1190 replace_dict = copy.copy(matrix_element.rep_dict)
1191 for key in ['print_so_born_results','print_so_loop_results',
1192 'write_so_born_results','write_so_loop_results','set_coupling_target']:
1193 if key not in replace_dict.keys():
1194 replace_dict[key]=''
1195
1196 if matrix_element.get('processes')[0].get('has_born'):
1197 file = open(os.path.join(self.template_dir,'check_sa.inc')).read()
1198 else:
1199 file = open(os.path.join(self.template_dir,\
1200 'check_sa_loop_induced.inc')).read()
1201 file=file%replace_dict
1202 writer.writelines(file)
1203
1204
1205 if not os.path.isfile(pjoin(self.template_dir,'check_py.f.inc')):
1206 return
1207
1208 file = open(os.path.join(self.template_dir,\
1209 'check_py.f.inc')).read()
1210
1211 if 'prefix' in self.cmd_options and self.cmd_options['prefix'] in ['int','proc']:
1212 replace_dict['prefix_routine'] = replace_dict['proc_prefix']
1213 else:
1214 replace_dict['prefix_routine'] = ''
1215 file=file%replace_dict
1216 new_path = writer.name.replace('check_sa.f', 'f2py_wrapper.f')
1217 new_writer = writer.__class__(new_path, 'w')
1218 new_writer.writelines(file)
1219
1220 file = open(os.path.join(self.template_dir,\
1221 'check_sa.py.inc')).read()
1222
1223
1224 curr_proc = matrix_element.get('processes')[0]
1225 random_PSpoint_python_formatted = \
1226 """# Specify your chosen PS point below. If you leave it filled with None, then the script will attempt to read it from the file PS.input.
1227 p= [[None,]*4]*%d"""%len(curr_proc.get('legs'))
1228
1229 process_definition_string = curr_proc.nice_string().replace('Process:','')
1230 file=file.format(random_PSpoint_python_formatted,process_definition_string,
1231 replace_dict['proc_prefix'].lower())
1232 new_path = writer.name.replace('check_sa.f', 'check_sa.py')
1233 new_writer = open(new_path, 'w')
1234 new_writer.writelines(file)
1235
1236 os.chmod(new_path, os.stat(new_path).st_mode | stat.S_IEXEC)
1237
1238 def write_improve_ps(self, writer, matrix_element):
1239 """ Write out the improve_ps subroutines which modify the PS point
1240 given in input and slightly deform it to achieve exact on-shellness of
1241 all external particles as well as perfect energy-momentum conservation."""
1242 replace_dict = copy.copy(matrix_element.rep_dict)
1243
1244 (nexternal,ninitial)=matrix_element.get_nexternal_ninitial()
1245 replace_dict['ninitial']=ninitial
1246 mass_list=matrix_element.get_external_masses()[:-2]
1247 mp_variable_prefix = check_param_card.ParamCard.mp_prefix
1248
1249
1250 replace_dict['real_format']=replace_dict['real_mp_format']
1251 replace_dict['mp_prefix']='MP_'
1252 replace_dict['exp_letter']='e'
1253 replace_dict['mp_specifier']='_16'
1254 replace_dict['coupl_inc_name']='mp_coupl.inc'
1255 replace_dict['masses_def']='\n'.join(['MASSES(%(i)d)=%(prefix)s%(m)s'\
1256 %{'i':i+1,'m':m, 'prefix':mp_variable_prefix} for \
1257 i, m in enumerate(mass_list)])
1258 file_mp = open(os.path.join(self.template_dir,'improve_ps.inc')).read()
1259 file_mp=file_mp%replace_dict
1260
1261 writer.writelines(file_mp)
1262
1263 def write_loop_num(self, writer, matrix_element, fortran_model):
1264 """ Create the file containing the core subroutine called by CutTools
1265 which contains the Helas calls building the loop"""
1266
1267 if not matrix_element.get('processes') or \
1268 not matrix_element.get('diagrams'):
1269 return 0
1270
1271
1272 writers.FortranWriter.downcase = False
1273
1274 file = open(os.path.join(self.template_dir,'loop_num.inc')).read()
1275
1276 replace_dict = copy.copy(matrix_element.rep_dict)
1277
1278 loop_helas_calls=fortran_model.get_loop_amplitude_helas_calls(matrix_element)
1279 replace_dict['maxlcouplings']=matrix_element.find_max_loop_coupling()
1280 replace_dict['loop_helas_calls'] = "\n".join(loop_helas_calls)
1281
1282
1283
1284 dp_squaring_lines=['DO I=1,NBORNAMPS',
1285 'CFTOT=DCMPLX(CF_N(AMPLNUM,I)/DBLE(ABS(CF_D(AMPLNUM,I))),0.0d0)',
1286 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1',
1287 'RES=RES+CFTOT*BUFF*DCONJG(AMP(I,H))','ENDDO']
1288 mp_squaring_lines=['DO I=1,NBORNAMPS',
1289 'CFTOT=CMPLX(CF_N(AMPLNUM,I)/(1.0E0_16*ABS(CF_D(AMPLNUM,I))),0.0E0_16,KIND=16)',
1290 'IF(CF_D(AMPLNUM,I).LT.0) CFTOT=CFTOT*IMAG1',
1291 'QPRES=QPRES+CFTOT*BUFF*CONJG(AMP(I,H))','ENDDO']
1292 if matrix_element.get('processes')[0].get('has_born'):
1293 replace_dict['dp_squaring']='\n'.join(dp_squaring_lines)
1294 replace_dict['mp_squaring']='\n'.join(mp_squaring_lines)
1295 else:
1296 replace_dict['dp_squaring']='RES=BUFF'
1297 replace_dict['mp_squaring']='QPRES=BUFF'
1298
1299
1300 self.turn_to_mp_calls(loop_helas_calls)
1301 replace_dict['mp_loop_helas_calls'] = "\n".join(loop_helas_calls)
1302
1303 file=file%replace_dict
1304
1305 if writer:
1306 writer.writelines(file)
1307 else:
1308 return file
1309
1310 def write_CT_interface(self, writer, matrix_element, optimized_output=False):
1311 """ Create the file CT_interface.f which contains the subroutine defining
1312 the loop HELAS-like calls along with the general interfacing subroutine.
1313 It is used to interface against any OPP tool, including Samurai and Ninja."""
1314
1315 files=[]
1316
1317
1318 replace_dict=copy.copy(matrix_element.rep_dict)
1319
1320
1321
1322 if matrix_element.get('processes')[0].get('has_born'):
1323 replace_dict['finalize_CT']='\n'.join([\
1324 'RES(%d)=NORMALIZATION*2.0d0*DBLE(RES(%d))'%(i,i) for i in range(1,4)])
1325 else:
1326 replace_dict['finalize_CT']='\n'.join([\
1327 'RES(%d)=NORMALIZATION*RES(%d)'%(i,i) for i in range(1,4)])
1328
1329 file = open(os.path.join(self.template_dir,'CT_interface.inc')).read()
1330
1331 file = file % replace_dict
1332 files.append(file)
1333
1334
1335
1336 HelasLoopAmpsCallKeys=matrix_element.get_used_helas_loop_amps()
1337
1338 for callkey in HelasLoopAmpsCallKeys:
1339 replace_dict=copy.copy(matrix_element.rep_dict)
1340
1341
1342 if matrix_element.get('processes')[0].get('has_born'):
1343 replace_dict['validh_or_nothing']=',validh'
1344 else:
1345 replace_dict['validh_or_nothing']=''
1346
1347
1348 if len(callkey)>2:
1349 replace_dict['ncplsargs']=callkey[2]
1350 cplsargs="".join(["C%d,MP_C%d, "%(i,i) for i in range(1,callkey[2]+1)])
1351 replace_dict['cplsargs']=cplsargs
1352 cplsdecl="".join(["C%d, "%i for i in range(1,callkey[2]+1)])[:-2]
1353 replace_dict['cplsdecl']=cplsdecl
1354 mp_cplsdecl="".join(["MP_C%d, "%i for i in range(1,callkey[2]+1)])[:-2]
1355 replace_dict['mp_cplsdecl']=mp_cplsdecl
1356 cplset="\n".join(["\n".join(["LC(%d)=C%d"%(i,i),\
1357 "MP_LC(%d)=MP_C%d"%(i,i)])\
1358 for i in range(1,callkey[2]+1)])
1359 replace_dict['cplset']=cplset
1360
1361 replace_dict['nloopline']=callkey[0]
1362 wfsargs="".join(["W%d, "%i for i in range(1,callkey[1]+1)])
1363 replace_dict['wfsargs']=wfsargs
1364
1365 if not optimized_output:
1366 margs="".join(["M%d,MP_M%d, "%(i,i) for i in range(1,callkey[0]+1)])
1367 else:
1368 margs="".join(["M%d, "%i for i in range(1,callkey[0]+1)])
1369 replace_dict['margs']=margs
1370 wfsargsdecl="".join([("W%d, "%i) for i in range(1,callkey[1]+1)])[:-2]
1371 replace_dict['wfsargsdecl']=wfsargsdecl
1372 margsdecl="".join(["M%d, "%i for i in range(1,callkey[0]+1)])[:-2]
1373 replace_dict['margsdecl']=margsdecl
1374 mp_margsdecl="".join(["MP_M%d, "%i for i in range(1,callkey[0]+1)])[:-2]
1375 replace_dict['mp_margsdecl']=mp_margsdecl
1376 weset="\n".join([("WE("+str(i)+")=W"+str(i)) for \
1377 i in range(1,callkey[1]+1)])
1378 replace_dict['weset']=weset
1379 weset="\n".join([("WE(%d)=W%d"%(i,i)) for i in range(1,callkey[1]+1)])
1380 replace_dict['weset']=weset
1381 msetlines=["M2L(1)=M%d**2"%(callkey[0]),]
1382 mset="\n".join(msetlines+["M2L(%d)=M%d**2"%(i,i-1) for \
1383 i in range(2,callkey[0]+1)])
1384 replace_dict['mset']=mset
1385 mset2lines=["ML(1)=M%d"%(callkey[0]),"ML(2)=M%d"%(callkey[0]),
1386 "MP_ML(1)=MP_M%d"%(callkey[0]),"MP_ML(2)=MP_M%d"%(callkey[0])]
1387 mset2="\n".join(mset2lines+["\n".join(["ML(%d)=M%d"%(i,i-2),
1388 "MP_ML(%d)=MP_M%d"%(i,i-2)]) for \
1389 i in range(3,callkey[0]+3)])
1390 replace_dict['mset2']=mset2
1391 replace_dict['nwfsargs'] = callkey[1]
1392 if callkey[0]==callkey[1]:
1393 replace_dict['nwfsargs_header'] = ""
1394 replace_dict['pairingargs']=""
1395 replace_dict['pairingdecl']=""
1396 pairingset="""DO I=1,NLOOPLINE
1397 PAIRING(I)=1
1398 ENDDO
1399 """
1400 replace_dict['pairingset']=pairingset
1401 else:
1402 replace_dict['nwfsargs_header'] = '_%d'%callkey[1]
1403 pairingargs="".join([("P"+str(i)+", ") for i in \
1404 range(1,callkey[0]+1)])
1405 replace_dict['pairingargs']=pairingargs
1406 pairingdecl="integer "+"".join([("P"+str(i)+", ") for i in \
1407 range(1,callkey[0]+1)])[:-2]
1408 replace_dict['pairingdecl']=pairingdecl
1409 pairingset="\n".join([("PAIRING("+str(i)+")=P"+str(i)) for \
1410 i in range(1,callkey[0]+1)])
1411 replace_dict['pairingset']=pairingset
1412
1413 file = open(os.path.join(self.template_dir,\
1414 'helas_loop_amplitude.inc')).read()
1415 file = file % replace_dict
1416 files.append(file)
1417
1418 file="\n".join(files)
1419
1420 if writer:
1421 writer.writelines(file,context=self.get_context(matrix_element))
1422 else:
1423 return file
1424
1425
1426
1427 def split_HELASCALLS(self, writer, replace_dict, template_name, masterfile, \
1428 helas_calls, entry_name, bunch_name,n_helas=2000,
1429 required_so_broadcaster = 'LOOP_REQ_SO_DONE',
1430 continue_label = 1000, momenta_array_name='P',
1431 context={}):
1432 """ Finish the code generation with splitting.
1433 Split the helas calls in the argument helas_calls into bunches of
1434 size n_helas and place them in dedicated subroutines with names
1435 <bunch_name>_i. Also set up the corresponding calls to these subroutines
1436 in the replace_dict dictionary under the entry entry_name.
1437 The context specified will be forwarded to the FileWriter."""
1438 helascalls_replace_dict=copy.copy(replace_dict)
1439 helascalls_replace_dict['bunch_name']=bunch_name
1440 helascalls_files=[]
1441 for i, k in enumerate(range(0, len(helas_calls), n_helas)):
1442 helascalls_replace_dict['bunch_number']=i+1
1443 helascalls_replace_dict['helas_calls']=\
1444 '\n'.join(helas_calls[k:k + n_helas])
1445 helascalls_replace_dict['required_so_broadcaster']=\
1446 required_so_broadcaster
1447 helascalls_replace_dict['continue_label']=continue_label
1448 new_helascalls_file = open(os.path.join(self.template_dir,\
1449 template_name)).read()
1450 new_helascalls_file = new_helascalls_file % helascalls_replace_dict
1451 helascalls_files.append(new_helascalls_file)
1452
1453 helascalls_calls = [ "CALL %s%s_%d(%s,NHEL,H,IC)"%\
1454 (replace_dict['proc_prefix'] ,bunch_name,a+1,momenta_array_name) \
1455 for a in range(len(helascalls_files))]
1456 replace_dict[entry_name]='\n'.join(helascalls_calls)
1457 if writer:
1458 for i, helascalls_file in enumerate(helascalls_files):
1459 filename = '%s_%d.f'%(bunch_name,i+1)
1460 writers.FortranWriter(filename).writelines(helascalls_file,
1461 context=context)
1462 else:
1463 masterfile='\n'.join([masterfile,]+helascalls_files)
1464
1465 return masterfile
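# For instance (illustrative numbers), 4500 helas calls with the default
# n_helas=2000 and bunch_name='helas_calls_ampb' end up split over three
# files helas_calls_ampb_1.f, _2.f and _3.f, and replace_dict[entry_name]
# then contains the three corresponding
#   CALL <proc_prefix>helas_calls_ampb_<i>(P,NHEL,H,IC)
# statements.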
1466
1467 def write_loopmatrix(self, writer, matrix_element, fortran_model,
1468 noSplit=False):
1469 """Create the loop_matrix.f file."""
1470
1471 if not matrix_element.get('processes') or \
1472 not matrix_element.get('diagrams'):
1473 return 0
1474
1475
1476
1477 writers.FortranWriter.downcase = False
1478
1479 replace_dict = copy.copy(matrix_element.rep_dict)
1480
1481
1482
1483 den_factor_line = self.get_den_factor_line(matrix_element)
1484 replace_dict['den_factor_line'] = den_factor_line
1485
1486
1487 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
1488 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
1489 matrix_element.get_beams_hel_avg_factor()
1490
1491
1492
1493
1494 if not matrix_element.get('processes')[0].get('has_born'):
1495 replace_dict['compute_born']=\
1496 """C There is of course no born for loop induced processes
1497 ANS(0)=0.0d0
1498 """
1499 replace_dict['set_reference']='\n'.join([
1500 'C For loop-induced, the reference for comparison is set later'+\
1501 ' from the total contribution of the previous PS point considered.',
1502 'C But you can edit here the value to be used for the first PS point.',
1503 'if (NPSPOINTS.eq.0) then','ref=1.0d-50','else',
1504 'ref=nextRef/DBLE(NPSPOINTS)','endif'])
1505 replace_dict['loop_induced_setup'] = '\n'.join([
1506 'HELPICKED_BU=HELPICKED','HELPICKED=H','MP_DONE=.FALSE.',
1507 'IF(SKIPLOOPEVAL) THEN','GOTO 1227','ENDIF'])
1508 replace_dict['loop_induced_finalize'] = \
1509 ("""DO I=NCTAMPS+1,NLOOPAMPS
1510 IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN
1511 WRITE(*,*) '##W03 WARNING Contribution ',I
1512 WRITE(*,*) ' is unstable for helicity ',H
1513 ENDIF
1514 C IF(.NOT.%(proc_prefix)sISZERO(ABS(AMPL(2,I))+ABS(AMPL(3,I)),REF,-1,H)) THEN
1515 C WRITE(*,*) '##W04 WARNING Contribution ',I,' for helicity ',H,' has a contribution to the poles.'
1516 C WRITE(*,*) 'Finite contribution = ',AMPL(1,I)
1517 C WRITE(*,*) 'single pole contribution = ',AMPL(2,I)
1518 C WRITE(*,*) 'double pole contribution = ',AMPL(3,I)
1519 C ENDIF
1520 ENDDO
1521 1227 CONTINUE
1522 HELPICKED=HELPICKED_BU""")%replace_dict
1523 replace_dict['loop_helas_calls']=""
1524 replace_dict['nctamps_or_nloopamps']='nloopamps'
1525 replace_dict['nbornamps_or_nloopamps']='nloopamps'
1526 replace_dict['squaring']=\
1527 """ANS(1)=ANS(1)+DBLE(CFTOT*AMPL(1,I)*DCONJG(AMPL(1,J)))
1528 IF (J.EQ.1) THEN
1529 ANS(2)=ANS(2)+DBLE(CFTOT*AMPL(2,I))+DIMAG(CFTOT*AMPL(2,I))
1530 ANS(3)=ANS(3)+DBLE(CFTOT*AMPL(3,I))+DIMAG(CFTOT*AMPL(3,I))
1531 ENDIF"""
1532 else:
1533 replace_dict['compute_born']=\
1534 """C Compute the born, for a specific helicity if asked so.
1535 call %(proc_prefix)ssmatrixhel(P_USER,USERHEL,ANS(0))
1536 """%matrix_element.rep_dict
1537 replace_dict['set_reference']=\
1538 """C We chose to use the born evaluation for the reference
1539 call %(proc_prefix)ssmatrix(p,ref)"""%matrix_element.rep_dict
1540 replace_dict['loop_induced_helas_calls'] = ""
1541 replace_dict['loop_induced_finalize'] = ""
1542 replace_dict['loop_induced_setup'] = ""
1543 replace_dict['nctamps_or_nloopamps']='nctamps'
1544 replace_dict['nbornamps_or_nloopamps']='nbornamps'
1545 replace_dict['squaring']='\n'.join(['DO K=1,3',
1546 'ANS(K)=ANS(K)+2.0d0*DBLE(CFTOT*AMPL(K,I)*DCONJG(AMP(J,H)))',
1547 'ENDDO'])
1548
1549
1550
1551
1552 writers.FortranWriter('nsquaredSO.inc').writelines(
1553 """INTEGER NSQUAREDSO
1554 PARAMETER (NSQUAREDSO=0)""")
1555
1556
1557
1558 actualize_ans=[]
1559 if matrix_element.get('processes')[0].get('has_born'):
1560 actualize_ans.append("DO I=NCTAMPS+1,NLOOPAMPS")
1561 actualize_ans.extend("ANS(%d)=ANS(%d)+AMPL(%d,I)"%(i,i,i) for i \
1562 in range(1,4))
1563 actualize_ans.append(\
1564 "IF((CTMODERUN.NE.-1).AND..NOT.CHECKPHASE.AND.(.NOT.S(I))) THEN")
1565 actualize_ans.append(\
1566 "WRITE(*,*) '##W03 WARNING Contribution ',I,' is unstable.'")
1567 actualize_ans.extend(["ENDIF","ENDDO"])
1568 replace_dict['actualize_ans']='\n'.join(actualize_ans)
1569 else:
1570 replace_dict['actualize_ans']=\
1571 ("""C We add five powers to the reference value to loosen a bit the vanishing pole check.
1572 C IF(.NOT.(CHECKPHASE.OR.(.NOT.HELDOUBLECHECKED)).AND..NOT.%(proc_prefix)sISZERO(ABS(ANS(2))+ABS(ANS(3)),ABS(ANS(1))*(10.0d0**5),-1,H)) THEN
1573 C WRITE(*,*) '##W05 WARNING Found a PS point with a contribution to the single pole.'
1574 C WRITE(*,*) 'Finite contribution = ',ANS(1)
1575 C WRITE(*,*) 'single pole contribution = ',ANS(2)
1576 C WRITE(*,*) 'double pole contribution = ',ANS(3)
1577 C ENDIF""")%replace_dict
1578
1579
1580 (CMNum,CMDenom) = self.get_color_matrix(matrix_element)
1581 CMWriter=open(pjoin('..','MadLoop5_resources',
1582 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w')
1583 for ColorLine in CMNum:
1584 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
1585 CMWriter.close()
1586 CMWriter=open(pjoin('..','MadLoop5_resources',
1587 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w')
1588 for ColorLine in CMDenom:
1589 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
1590 CMWriter.close()
1591
1592
1593 HelConfigs=matrix_element.get_helicity_matrix()
1594 HelConfigWriter=open(pjoin('..','MadLoop5_resources',
1595 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w')
1596 for HelConfig in HelConfigs:
1597 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n')
1598 HelConfigWriter.close()
1599
1600
1601 loop_amp_helas_calls = fortran_model.get_loop_amp_helas_calls(\
1602 matrix_element)
1603
1604 loop_amp_helas_calls = [lc % matrix_element.rep_dict
1605 for lc in loop_amp_helas_calls]
1606
1607 born_ct_helas_calls, UVCT_helas_calls = \
1608 fortran_model.get_born_ct_helas_calls(matrix_element)
1609
1610
1611 born_ct_helas_calls = born_ct_helas_calls + UVCT_helas_calls
1612 file = open(os.path.join(self.template_dir,\
1614 'loop_matrix_standalone.inc')).read()
1615
1616 if matrix_element.get('processes')[0].get('has_born'):
1617 toBeRepaced='loop_helas_calls'
1618 else:
1619 toBeRepaced='loop_induced_helas_calls'
1620
1621
1622 if (not noSplit and (len(matrix_element.get_all_amplitudes())>1000)):
1623 file=self.split_HELASCALLS(writer,replace_dict,\
1624 'helas_calls_split.inc',file,born_ct_helas_calls,\
1625 'born_ct_helas_calls','helas_calls_ampb')
1626 file=self.split_HELASCALLS(writer,replace_dict,\
1627 'helas_calls_split.inc',file,loop_amp_helas_calls,\
1628 toBeRepaced,'helas_calls_ampl')
1629 else:
1630 replace_dict['born_ct_helas_calls']='\n'.join(born_ct_helas_calls)
1631 replace_dict[toBeRepaced]='\n'.join(loop_amp_helas_calls)
1632
1633 file = file % replace_dict
1634
1635 loop_calls_finder = re.compile(r'^\s*CALL\S*LOOP\S*')
1636 n_loop_calls = len(filter(lambda call:
1637 not loop_calls_finder.match(call) is None, loop_amp_helas_calls))
1638 if writer:
1639
1640 writer.writelines(file)
1641 return n_loop_calls
1642 else:
1643
1644 return n_loop_calls, file
1645
1646 def write_bornmatrix(self, writer, matrix_element, fortran_model):
1647 """Create the born_matrix.f file for the born process as for a standard
1648 tree-level computation."""
1649
1650 if not matrix_element.get('processes') or \
1651 not matrix_element.get('diagrams'):
1652 return 0
1653
1654 if not isinstance(writer, writers.FortranWriter):
1655 raise writers.FortranWriter.FortranWriterError(\
1656 "writer not FortranWriter")
1657
1658
1659
1660
1661
1662
1663
1664 bornME = helas_objects.HelasMatrixElement()
1665 for prop in bornME.keys():
1666 bornME.set(prop,copy.deepcopy(matrix_element.get(prop)))
1667 bornME.set('base_amplitude',None,force=True)
1668 bornME.set('diagrams',copy.deepcopy(\
1669 matrix_element.get_born_diagrams()))
1670 bornME.set('color_basis',copy.deepcopy(\
1671 matrix_element.get('born_color_basis')))
1672 bornME.set('color_matrix',copy.deepcopy(\
1673 color_amp.ColorMatrix(bornME.get('color_basis'))))
1674
1675
1676 bornME.optimization = True
1677 return super(LoopProcessExporterFortranSA,self).write_matrix_element_v4(
1678 writer, bornME, fortran_model,
1679 proc_prefix=matrix_element.rep_dict['proc_prefix'])
1680
1681 def write_born_amps_and_wfs(self, writer, matrix_element, fortran_model,
1682 noSplit=False):
1683 """ Writes out the code for the subroutine MP_BORN_AMPS_AND_WFS which
1684 computes just the external wavefunction and born amplitudes in
1685 multiple precision. """
1686
1687 if not matrix_element.get('processes') or \
1688 not matrix_element.get('diagrams'):
1689 return 0
1690
1691 replace_dict = copy.copy(matrix_element.rep_dict)
1692
1693
1694 if matrix_element.get('processes')[0].get('has_born'):
1695 replace_dict['h_w_suffix']=',H'
1696 else:
1697 replace_dict['h_w_suffix']=''
1698
1699
1700 born_amps_and_wfs_calls , uvct_amp_calls = \
1701 fortran_model.get_born_ct_helas_calls(matrix_element, include_CT=True)
1702
1703
1704 born_amps_and_wfs_calls = born_amps_and_wfs_calls + uvct_amp_calls
1705
1706
1707
1708 self.turn_to_mp_calls(born_amps_and_wfs_calls)
1709
1710 file = open(os.path.join(self.template_dir,\
1711 'mp_born_amps_and_wfs.inc')).read()
1712
1713 if (not noSplit and (len(matrix_element.get_all_amplitudes())>2000)):
1714 file=self.split_HELASCALLS(writer,replace_dict,\
1715 'mp_helas_calls_split.inc',file,\
1716 born_amps_and_wfs_calls,'born_amps_and_wfs_calls',\
1717 'mp_helas_calls')
1718 else:
1719 replace_dict['born_amps_and_wfs_calls']=\
1720 '\n'.join(born_amps_and_wfs_calls)
1721
1722 file = file % replace_dict
1723 if writer:
1724
1725 writer.writelines(file)
1726 else:
1727
1728 return file
1729
1730
1731
1732
1733
1734 class LoopProcessOptimizedExporterFortranSA(LoopProcessExporterFortranSA):
1735 """Class to take care of exporting a set of loop matrix elements in the
1736 Fortran format which exploits the Pozzorini method of representing
1737 the loop numerators as polynomials to render their evaluation faster."""
1738
1739 template_dir=os.path.join(_file_path,'iolibs/template_files/loop_optimized')
1740
1741
1742 forbid_loop_grouping = False
1743
1744
1745
1746
1747
1748
1749
1750 all_tir=['pjfry','iregi','ninja','golem','samurai','collier']
1751
1752 def __init__(self, dir_path = "", opt=None):
1753 """Initialize the LoopProcessOptimizedExporterFortranSA with directory
1754 information on where to find all the loop-related source files,
1755 like CutTools and TIR"""
1756
1757 super(LoopProcessOptimizedExporterFortranSA,self).__init__(dir_path, opt)
1758
1759
1760 self.tir_available_dict={'pjfry':True,'iregi':True,'golem':True,
1761 'samurai':True,'ninja':True,'collier':True}
1762
1763 for tir in self.all_tir:
1764 tir_dir="%s_dir"%tir
1765 if tir_dir in self.opt and not self.opt[tir_dir] is None:
1766
1767 tir_path = self.opt[tir_dir].strip()
1768 if tir_path.startswith('.'):
1769 tir_path = os.path.abspath(pjoin(MG5DIR,tir_path))
1770 setattr(self,tir_dir,tir_path)
1771 else:
1772 setattr(self,tir_dir,'')
1773
1781
1782 def get_context(self,matrix_element, **opts):
1783 """ Additional contextual information which needs to be created for
1784 the optimized output."""
1785
1786 context = LoopProcessExporterFortranSA.get_context(self, matrix_element,
1787 **opts)
1788
1789
1790 try:
1791 context['ninja_supports_quad_prec'] = \
1792 misc.get_ninja_quad_prec_support(getattr(self,'ninja_dir'))
1793 except AttributeError:
1794 context['ninja_supports_quad_prec'] = False
1795
1796 for tir in self.all_tir:
1797 context['%s_available'%tir]=self.tir_available_dict[tir]
1798
1799 if tir not in ['golem','pjfry','iregi','samurai','ninja','collier']:
1800                 raise MadGraph5Error, "%s is not a currently interfaced TIR."%tir
1801
1802 return context
1803
1805 """ Perform additional actions specific for this class when setting
1806 up the template with the copy_template function."""
1807
1808
1809 link_tir_libs=[]
1810 tir_libs=[]
1811 tir_include=[]
1812
1813 for tir in self.all_tir:
1814 tir_dir="%s_dir"%tir
1815 libpath=getattr(self,tir_dir)
1816 libname="lib%s.a"%tir
1817 tir_name=tir
1818 libpath = self.link_TIR(os.path.join(self.dir_path, 'lib'),
1819 libpath,libname,tir_name=tir_name)
1820 if libpath != "":
1821 if tir in ['ninja','pjfry','golem','samurai','collier']:
1822
1823 link_tir_libs.append('-L%s/ -l%s'%(libpath,tir))
1824 tir_libs.append('%s/lib%s.$(libext)'%(libpath,tir))
1825
1826 if tir in ['ninja']:
1827 if not any(os.path.isfile(pjoin(libpath,'libavh_olo.%s'%ext))
1828 for ext in ['a','dylib','so']):
1829 raise MadGraph5Error(
1830                         "The OneLOop library 'libavh_olo.(a|dylib|so)' could not be found in path '%s'. Please place a symlink to it there."%libpath)
1831 link_tir_libs.append('-L%s/ -l%s'%(libpath,'avh_olo'))
1832 tir_libs.append('%s/lib%s.$(libext)'%(libpath,'avh_olo'))
1833 if tir in ['ninja','golem', 'samurai','collier']:
1834 trgt_path = pjoin(os.path.dirname(libpath),'include')
1835 if os.path.isdir(trgt_path):
1836 to_include = misc.find_includes_path(trgt_path,
1837 self.include_names[tir])
1838 else:
1839 to_include = None
1840
1841 if to_include is None and tir=='collier':
1842 to_include = misc.find_includes_path(
1843 pjoin(libpath,'modules'),self.include_names[tir])
1844 if to_include is None:
1845 logger.error(
1846 'Could not find the include directory for %s, looking in %s.\n' % (tir, str(trgt_path))+
1847 'Generation carries on but you will need to edit the include path by hand in the makefiles.')
1848 to_include = '<Not_found_define_it_yourself>'
1849 tir_include.append('-I %s'%str(to_include))
1850
1851
1852
1853
1854 name_map = {'golem':'golem95','samurai':'samurai',
1855 'ninja':'ninja','collier':'collier'}
1856 ln(to_include, starting_dir=pjoin(self.dir_path,'lib'),
1857 name='%s_include'%name_map[tir],abspath=True)
1858 ln(libpath, starting_dir=pjoin(self.dir_path,'lib'),
1859 name='%s_lib'%name_map[tir],abspath=True)
1860 else :
1861 link_tir_libs.append('-l%s'%tir)
1862 tir_libs.append('$(LIBDIR)lib%s.$(libext)'%tir)
1863
1864 MadLoop_makefile_definitions = pjoin(self.dir_path,'SubProcesses',
1865 'MadLoop_makefile_definitions')
1866 if os.path.isfile(MadLoop_makefile_definitions):
1867 os.remove(MadLoop_makefile_definitions)
1868
1869 calls = self.write_loop_makefile_definitions(
1870 writers.MakefileWriter(MadLoop_makefile_definitions),
1871 link_tir_libs,tir_libs, tir_include=tir_include)
1872
1873
1874
1875 MadLoopCommon = open(os.path.join(self.loop_dir,'StandAlone',
1876 "SubProcesses","MadLoopCommons.inc")).read()
1877 writer = writers.FortranWriter(os.path.join(self.dir_path,
1878 "SubProcesses","MadLoopCommons.f"))
1879 writer.writelines(MadLoopCommon%{
1880 'print_banner_commands':self.MadLoop_banner}, context={
1881 'collier_available':self.tir_available_dict['collier']})
1882 writer.close()
1883
1885 """ Does the same as the mother routine except that it also links
1886 coef_specs.inc in the HELAS folder."""
1887
1888 LoopProcessExporterFortranSA.link_files_from_Subprocesses(self,proc_name)
1889
1890
1891
1892 ln(os.path.join(self.dir_path,'Source','DHELAS','coef_specs.inc'),
1893 os.path.join(self.dir_path, 'SubProcesses', proc_name),
1894 abspath=False, cwd=None)
1895
1896
1897 - def link_TIR(self, targetPath,libpath,libname,tir_name='TIR'):
1898 """Link the TIR source directory inside the target path given
1899 in argument"""
1900
1901 if tir_name in ['pjfry','golem','samurai','ninja','collier']:
1902
1903 if (not isinstance(libpath,str)) or (not os.path.exists(libpath)) \
1904 or (not os.path.isfile(pjoin(libpath,libname))):
1905 if isinstance(libpath,str) and libpath != '' and \
1906 (not os.path.isfile(pjoin(libpath,libname))):
1907
1908 logger.warning("The %s reduction library could not be found"%tir_name\
1909 +" with PATH:%s specified in mg5_configuration.txt."%libpath\
1910 +" It will not be available.")
1911 self.tir_available_dict[tir_name]=False
1912 return ""
1913
1914 if tir_name in ['ninja','samurai'] and self.tir_available_dict[tir_name]:
1915
1916
1917 if os.path.isfile(pjoin(libpath,os.pardir,'AUTHORS')):
1918 try:
1919 version = open(pjoin(libpath,os.pardir,'VERSION'),'r').read()
1920 except IOError:
1921 version = None
1922 if version is None :
1923 logger.warning(
1924 "Your version of '%s' in \n %s\nseems too old %sto be compatible with MG5_aMC."
1925 %(tir_name, libpath ,'' if not version else '(v%s) '%version)+
1926 ("\nConsider updating it by hand or using the 'install' function of MG5_aMC." if tir_name!='samurai'
1927 else "\nAsk the authors for the latest version compatible with MG5_aMC."))
1928 else:
1929
1930 if (not isinstance(libpath,str)) or (not os.path.exists(libpath)):
1931
1932 logger.warning("The %s reduction library could not be found"%tir_name\
1933 +" with PATH:%s specified in mg5_configuration.txt."%libpath\
1934 +" It will not be available.")
1935 self.tir_available_dict[tir_name]=False
1936 return ""
1937
1938 if self.dependencies=='internal':
1939 if tir_name in ['pjfry','golem','samurai','ninja','collier']:
1940 self.tir_available_dict[tir_name]=False
1941 logger.info("When using the 'output_dependencies=internal' "+\
1942 " MG5_aMC option, the (optional) reduction library %s cannot be employed because"%tir_name+\
1943 " it is not distributed with the MG5_aMC code so that it cannot be copied locally.")
1944 return ""
1945 elif tir_name == "iregi":
1946
1947 new_iregi_path = pjoin(targetPath,os.path.pardir,'Source','IREGI')
1948 shutil.copytree(pjoin(libpath,os.path.pardir), new_iregi_path,
1949 symlinks=True)
1950
1951 current = misc.detect_current_compiler(
1952 pjoin(new_iregi_path,'src','makefile_ML5_lib'))
1953 new = 'gfortran' if self.fortran_compiler is None else \
1954 self.fortran_compiler
1955 if current != new:
1956 misc.mod_compilator(pjoin(new_iregi_path,'src'), new,current)
1957 misc.mod_compilator(pjoin(new_iregi_path,'src','oneloop'),
1958 new, current)
1959
1960
1961 ln(pjoin(targetPath,os.path.pardir,'Source','IREGI','src',
1962 libname),targetPath)
1963 else:
1964 logger.info("Tensor integral reduction library "+\
1965 "%s not implemented yet."%tir_name)
1966 return libpath
1967
1968 elif self.dependencies=='external':
1969 if not os.path.exists(pjoin(libpath,libname)) and tir_name=='iregi':
1970
1971 if 'heptools_install_dir' in self.opt and os.path.exists(pjoin(self.opt['heptools_install_dir'], 'IREGI')):
1972 misc.sprint('Going to use pre-compiled version of IREGI')
1973
1974 ln(os.path.join(self.opt['heptools_install_dir'],'IREGI','src','libiregi.a'),
1975 os.path.join(targetPath),abspath=True)
1976 return os.path.join(targetPath, 'libiregi.a')
1977
1978
1979
1980 logger.info('Compiling IREGI. This has to be done only once and'+\
1981 ' can take a couple of minutes.','$MG:color:BLACK')
1982
1983 current = misc.detect_current_compiler(os.path.join(\
1984 libpath,'makefile_ML5_lib'))
1985 new = 'gfortran' if self.fortran_compiler is None else \
1986 self.fortran_compiler
1987 if current != new:
1988 misc.mod_compilator(libpath, new,current)
1989 misc.mod_compilator(pjoin(libpath,'oneloop'), new, current)
1990
1991 misc.compile(cwd=libpath, job_specs = False)
1992
1993 if not os.path.exists(pjoin(libpath,libname)):
1994                 logger.warning("IREGI could not be compiled. Check "+\
1995 "the compilation errors at %s. The related "%libpath+\
1996 "functionalities are turned off.")
1997 self.tir_available_dict[tir_name]=False
1998 return ""
1999
2000 if not tir_name in ['pjfry','golem','samurai','ninja','collier']:
2001 ln(os.path.join(libpath,libname),targetPath,abspath=True)
2002
2003 elif self.dependencies=='environment_paths':
2004
2005
2006 newlibpath = misc.which_lib(libname)
2007 if not newlibpath is None:
2008 logger.info('MG5_aMC is using %s installation found at %s.'%\
2009 (tir_name,newlibpath))
2010
2011 if not tir_name in ['pjfry','golem','samurai','ninja','collier']:
2012 ln(newlibpath,targetPath,abspath=True)
2013 self.tir_available_dict[tir_name]=True
2014 return os.path.dirname(newlibpath)
2015 else:
2016 logger.warning("Could not find the location of the file"+\
2017                           " %s in your environment paths. The related "%libname+\
2018 "functionalities are turned off.")
2019 self.tir_available_dict[tir_name]=False
2020 return ""
2021
2022 self.tir_available_dict[tir_name]=True
2023 return libpath
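# Illustrative, standalone sketch (not part of this module): in the
# 'environment_paths' mode above the library is looked up in the loader search
# paths via misc.which_lib.  A simplified, hypothetical stand-in that only scans
# a few environment variables:
import os
def example_which_lib(libname, env_vars=('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'LIBRARY_PATH')):
    for var in env_vars:
        for directory in os.environ.get(var, '').split(os.pathsep):
            candidate = os.path.join(directory, libname)
            if directory and os.path.isfile(candidate):
                return candidate
    return None
# example_which_lib('libninja.a') -> '/some/path/libninja.a', or None if not found.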
2024
2026 """ Decides whether we must group loops or not for this matrix element"""
2027
2028
2029
2030 if self.forbid_loop_grouping:
2031 self.group_loops = False
2032 else:
2033 self.group_loops = (not self.get_context(matrix_element)['ComputeColorFlows'])\
2034 and matrix_element.get('processes')[0].get('has_born')
2035
2036 return self.group_loops
2037
2038 - def finalize(self, matrix_element, cmdhistory, MG5options, outputflag):
2044
2045
2046 - def write_loop_matrix_element_v4(self, writer, matrix_element, fortran_model,
2047 group_number = None, proc_id = None, config_map = None):
2048         """ Writes loop_matrix.f, CT_interface.f, TIR_interface.f, GOLEM_interface.f
2049         and loop_num.f only, but with the optimized FortranModel.
2050 The arguments group_number and proc_id are just for the LoopInduced
2051 output with MadEvent and only used in get_ME_identifier."""
2052
2053
2054
2055 if writer:
2056 raise MadGraph5Error, 'Matrix output mode no longer supported.'
2057
2058 if not isinstance(fortran_model,\
2059 helas_call_writers.FortranUFOHelasCallWriter):
2060 raise MadGraph5Error, 'The optimized loop fortran output can only'+\
2061 ' work with a UFO Fortran model'
2062 OptimizedFortranModel=\
2063 helas_call_writers.FortranUFOHelasCallWriterOptimized(\
2064 fortran_model.get('model'),False)
2065
2066
2067 if not matrix_element.get('processes')[0].get('has_born') and \
2068 not self.compute_color_flows:
2069 logger.debug("Color flows will be employed despite the option"+\
2070 " 'loop_color_flows' being set to False because it is necessary"+\
2071 " for optimizations.")
2072
2073
2074
2075
2076
2077 matrix_element.compute_all_analytic_information(
2078 self.get_aloha_model(matrix_element.get('processes')[0].get('model')))
2079
2080 self.set_group_loops(matrix_element)
2081
2082
2083
2084 matrix_element.rep_dict = LoopProcessExporterFortranSA.\
2085 generate_general_replace_dict(self, matrix_element,
2086 group_number = group_number, proc_id = proc_id)
2087
2088
2089 self.set_optimized_output_specific_replace_dict_entries(matrix_element)
2090
2091
2092 proc_prefix_writer = writers.FortranWriter('proc_prefix.txt','w')
2093 proc_prefix_writer.write(matrix_element.rep_dict['proc_prefix'])
2094 proc_prefix_writer.close()
2095
2096 filename = 'loop_matrix.f'
2097 calls = self.write_loopmatrix(writers.FortranWriter(filename),
2098 matrix_element,
2099 OptimizedFortranModel)
2100
2101 filename = 'check_sa.f'
2102 self.write_check_sa(writers.FortranWriter(filename),matrix_element)
2103
2104 filename = 'polynomial.f'
2105 calls = self.write_polynomial_subroutines(
2106 writers.FortranWriter(filename),
2107 matrix_element)
2108
2109 filename = 'improve_ps.f'
2110 calls = self.write_improve_ps(writers.FortranWriter(filename),
2111 matrix_element)
2112
2113 filename = 'CT_interface.f'
2114 self.write_CT_interface(writers.FortranWriter(filename),\
2115 matrix_element)
2116
2117 filename = 'TIR_interface.f'
2118 self.write_TIR_interface(writers.FortranWriter(filename),
2119 matrix_element)
2120
2121 if 'golem' in self.tir_available_dict and self.tir_available_dict['golem']:
2122 filename = 'GOLEM_interface.f'
2123 self.write_GOLEM_interface(writers.FortranWriter(filename),
2124 matrix_element)
2125
2126 if 'collier' in self.tir_available_dict and self.tir_available_dict['collier']:
2127 filename = 'COLLIER_interface.f'
2128 self.write_COLLIER_interface(writers.FortranWriter(filename),
2129 matrix_element)
2130
2131 filename = 'loop_num.f'
2132 self.write_loop_num(writers.FortranWriter(filename),\
2133 matrix_element,OptimizedFortranModel)
2134
2135 filename = 'mp_compute_loop_coefs.f'
2136 self.write_mp_compute_loop_coefs(writers.FortranWriter(filename),\
2137 matrix_element,OptimizedFortranModel)
2138
2139 if self.get_context(matrix_element)['ComputeColorFlows']:
2140 filename = 'compute_color_flows.f'
2141 self.write_compute_color_flows(writers.FortranWriter(filename),
2142 matrix_element, config_map = config_map)
2143
2144
2145 (nexternal, ninitial) = matrix_element.get_nexternal_ninitial()
2146 filename = 'nexternal.inc'
2147 self.write_nexternal_file(writers.FortranWriter(filename),
2148 nexternal, ninitial)
2149
2150
2151 filename = 'process_info.inc'
2152 self.write_process_info_file(writers.FortranWriter(filename),
2153 matrix_element)
2154
2155 if self.get_context(matrix_element)['TIRCaching']:
2156 filename = 'tir_cache_size.inc'
2157 self.write_tir_cache_size_include(writers.FortranWriter(filename))
2158
2159 return calls
2160
2162 """ Specify the entries of the replacement dictionary which are specific
2163 to the optimized output and only relevant to it (the more general entries
2164         are set in the mother class LoopProcessExporterFortranSA)."""
2165
2166 max_loop_rank=matrix_element.get_max_loop_rank()
2167 matrix_element.rep_dict['maxrank']=max_loop_rank
2168 matrix_element.rep_dict['loop_max_coefs']=\
2169 q_polynomial.get_number_of_coefs_for_rank(max_loop_rank)
2170 max_loop_vertex_rank=matrix_element.get_max_loop_vertex_rank()
2171 matrix_element.rep_dict['vertex_max_coefs']=\
2172 q_polynomial.get_number_of_coefs_for_rank(max_loop_vertex_rank)
2173
2174 matrix_element.rep_dict['nloopwavefuncs']=\
2175 matrix_element.get_number_of_loop_wavefunctions()
2176 max_spin=matrix_element.get_max_loop_particle_spin()
2177
2178 matrix_element.rep_dict['max_lwf_size']= 4 if max_spin <=3 else 16
2179 matrix_element.rep_dict['nloops']=len(\
2180 [1 for ldiag in matrix_element.get_loop_diagrams() for \
2181 lamp in ldiag.get_loop_amplitudes()])
2182
2183 if self.set_group_loops(matrix_element):
2184 matrix_element.rep_dict['nloop_groups']=\
2185 len(matrix_element.get('loop_groups'))
2186 else:
2187 matrix_element.rep_dict['nloop_groups']=\
2188 matrix_element.rep_dict['nloops']
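# Illustrative, standalone sketch (not part of this module): assuming the standard
# counting of monomials of degree <= r in the four components of the loop momentum,
# the number of loop-numerator coefficients for a maximum rank r is binomial(r+4, 4),
# which is what 'loop_max_coefs' and 'vertex_max_coefs' above are expected to scale like.
def example_coefs_for_rank(rank):
    """Return C(rank+4, 4), the number of monomials of degree <= rank in 4 variables."""
    from math import factorial
    return factorial(rank + 4) // (factorial(rank) * factorial(4))
# example_coefs_for_rank(0) -> 1, (1) -> 5, (2) -> 15, (3) -> 35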
2189
2191 """ Create the file containing the core subroutine called by CutTools
2192 which contains the Helas calls building the loop"""
2193
2194 replace_dict=copy.copy(matrix_element.rep_dict)
2195
2196 file = open(os.path.join(self.template_dir,'loop_num.inc')).read()
2197 file = file % replace_dict
2198 writer.writelines(file,context=self.get_context(matrix_element))
2199
2204
2206         """ Create the file TIR_interface.f. It does NOT contain the subroutine
2207         defining the loop HELAS-like calls, but only the general interfacing
2208         subroutine. """
2209
2210
2211 replace_dict=copy.copy(matrix_element.rep_dict)
2212
2213 file = open(os.path.join(self.template_dir,'TIR_interface.inc')).read()
2214
2215
2216
2217 loop_groups = matrix_element.get('loop_groups')
2218 has_HEFT_vertex = [False]*len(loop_groups)
2219 for i, (denom_structure, loop_amp_list) in enumerate(loop_groups):
2220 for lamp in loop_amp_list:
2221 final_lwf = lamp.get_final_loop_wavefunction()
2222 while not final_lwf is None:
2223
2224
2225 scalars = len([1 for wf in final_lwf.get('mothers') if
2226 wf.get('spin')==1])
2227 vectors = len([1 for wf in final_lwf.get('mothers') if
2228 wf.get('spin')==3 and wf.get('mass')=='ZERO'])
2229 if scalars>=1 and vectors>=1 and \
2230 scalars+vectors == len(final_lwf.get('mothers')):
2231 has_HEFT_vertex[i] = True
2232 break
2233 final_lwf = final_lwf.get_loop_mother()
2234 else:
2235 continue
2236 break
2237
2238 has_HEFT_list = []
2239 chunk_size = 9
2240 for k in xrange(0, len(has_HEFT_vertex), chunk_size):
2241 has_HEFT_list.append("DATA (HAS_AN_HEFT_VERTEX(I),I=%6r,%6r) /%s/" % \
2242 (k + 1, min(k + chunk_size, len(has_HEFT_vertex)),
2243 ','.join(['.TRUE.' if l else '.FALSE.' for l in
2244 has_HEFT_vertex[k:k + chunk_size]])))
2245 replace_dict['has_HEFT_list'] = '\n'.join(has_HEFT_list)
2246
2247 file = file % replace_dict
2248
2249 FPR = q_polynomial.FortranPolynomialRoutines(
2250 replace_dict['maxrank'],coef_format=replace_dict['complex_dp_format'],\
2251 sub_prefix=replace_dict['proc_prefix'])
2252 if self.tir_available_dict['pjfry']:
2253 file += '\n\n'+FPR.write_pjfry_mapping()
2254 if self.tir_available_dict['iregi']:
2255 file += '\n\n'+FPR.write_iregi_mapping()
2256
2257 if writer:
2258 writer.writelines(file,context=self.get_context(matrix_element))
2259 else:
2260 return file
2261
2263 """ Create the file COLLIER_interface.f"""
2264
2265
2266 replace_dict=copy.copy(matrix_element.rep_dict)
2267
2268 file = open(os.path.join(self.template_dir,'COLLIER_interface.inc')).read()
2269
2270 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\
2271 coef_format=replace_dict['complex_dp_format'],\
2272 sub_prefix=replace_dict['proc_prefix'])
2273 map_definition = []
2274 collier_map = FPR.get_COLLIER_mapping()
2275
2276 chunk_size = 10
2277 for map_name, indices_list in \
2278 [('COEFMAP_ZERO',[c[0] for c in collier_map]),
2279 ('COEFMAP_ONE',[c[1] for c in collier_map]),
2280 ('COEFMAP_TWO',[c[2] for c in collier_map]),
2281 ('COEFMAP_THREE',[c[3] for c in collier_map])]:
2282 for k in xrange(0, len(indices_list), chunk_size):
2283 map_definition.append("DATA (%s(I),I=%3r,%3r) /%s/" % \
2284 (map_name,k, min(k + chunk_size, len(indices_list))-1,
2285 ','.join('%2r'%ind for ind in indices_list[k:k + chunk_size])))
2286
2287 replace_dict['collier_coefmap'] = '\n'.join(map_definition)
2288
2289 file = file % replace_dict
2290
2291 if writer:
2292 writer.writelines(file,context=self.get_context(matrix_element))
2293 else:
2294 return file
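# Illustrative, standalone sketch (not part of this module): the Fortran DATA
# statements assembled above come from chunking a flat list of integers into
# fixed-size groups, one DATA statement per group.  A minimal version of that
# pattern (zero-based index range, as for the COLLIER coefficient maps):
def example_data_lines(array_name, values, chunk_size=10):
    lines = []
    for k in range(0, len(values), chunk_size):
        chunk = values[k:k + chunk_size]
        lines.append("DATA (%s(I),I=%3r,%3r) /%s/" % (array_name, k,
                     k + len(chunk) - 1, ','.join('%2r' % v for v in chunk)))
    return lines
# example_data_lines('COEFMAP_ZERO', [0, 1, 0, 2])
#   -> ['DATA (COEFMAP_ZERO(I),I=  0,  3) / 0, 1, 0, 2/']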
2295
2297         """ Create the file GOLEM_interface.f. It does NOT contain the subroutine
2298         defining the loop HELAS-like calls, but only the general interfacing
2299         subroutine. """
2300
2301
2302 replace_dict=copy.copy(matrix_element.rep_dict)
2303
2304
2305
2306 if not self.get_context(matrix_element)['AmplitudeReduction']:
2307 replace_dict['loop_induced_sqsoindex']=',SQSOINDEX'
2308 else:
2309 replace_dict['loop_induced_sqsoindex']=''
2310
2311 file = open(os.path.join(self.template_dir,'GOLEM_interface.inc')).read()
2312
2313 file = file % replace_dict
2314
2315 FPR = q_polynomial.FortranPolynomialRoutines(replace_dict['maxrank'],\
2316 coef_format=replace_dict['complex_dp_format'],\
2317 sub_prefix=replace_dict['proc_prefix'])
2318
2319 file += '\n\n'+FPR.write_golem95_mapping()
2320
2321 if writer:
2322 writer.writelines(file,context=self.get_context(matrix_element))
2323 else:
2324 return file
2325
2327 """ Subroutine to create all the subroutines relevant for handling
2328 the polynomials representing the loop numerator """
2329
2330
2331 IncWriter=writers.FortranWriter('loop_max_coefs.inc','w')
2332 IncWriter.writelines("""INTEGER LOOPMAXCOEFS
2333 PARAMETER (LOOPMAXCOEFS=%(loop_max_coefs)d)"""
2334 %matrix_element.rep_dict)
2335
2336
2337
2338
2339
2340 coef_specs_path = pjoin(self.dir_path, 'Source','DHELAS','coef_specs.inc')
2341 if not os.path.isfile(coef_specs_path):
2342 IncWriter=writers.FortranWriter(coef_specs_path,'w')
2343 IncWriter.writelines("""INTEGER MAXLWFSIZE
2344 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
2345 INTEGER VERTEXMAXCOEFS
2346 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
2347 %matrix_element.rep_dict)
2348 IncWriter.close()
2349
2350
2351 subroutines=[]
2352
2353
2354 replace_dict = copy.copy(matrix_element.rep_dict)
2355
2356 dp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read()
2357 mp_routine = open(os.path.join(self.template_dir,'polynomial.inc')).read()
2358
2359
2360 replace_dict['complex_format'] = replace_dict['complex_dp_format']
2361 replace_dict['real_format'] = replace_dict['real_dp_format']
2362 replace_dict['mp_prefix'] = ''
2363 replace_dict['kind'] = 8
2364 replace_dict['zero_def'] = '0.0d0'
2365 replace_dict['one_def'] = '1.0d0'
2366 dp_routine = dp_routine % replace_dict
2367
2368 replace_dict['complex_format'] = replace_dict['complex_mp_format']
2369 replace_dict['real_format'] = replace_dict['real_mp_format']
2370 replace_dict['mp_prefix'] = 'MP_'
2371 replace_dict['kind'] = 16
2372 replace_dict['zero_def'] = '0.0e0_16'
2373 replace_dict['one_def'] = '1.0e0_16'
2374 mp_routine = mp_routine % replace_dict
2375 subroutines.append(dp_routine)
2376 subroutines.append(mp_routine)
2377
2378
2379 poly_writer=q_polynomial.FortranPolynomialRoutines(
2380 matrix_element.get_max_loop_rank(),
2381 updater_max_rank = matrix_element.get_max_loop_vertex_rank(),
2382 sub_prefix=replace_dict['proc_prefix'],
2383 proc_prefix=replace_dict['proc_prefix'],
2384 mp_prefix='')
2385
2386 writer.writelines(poly_writer.write_polynomial_constant_module()+'\n')
2387
2388 mp_poly_writer=q_polynomial.FortranPolynomialRoutines(
2389 matrix_element.get_max_loop_rank(),
2390 updater_max_rank = matrix_element.get_max_loop_vertex_rank(),
2391 coef_format='complex*32', sub_prefix='MP_'+replace_dict['proc_prefix'],
2392 proc_prefix=replace_dict['proc_prefix'], mp_prefix='MP_')
2393
2394 subroutines.append(poly_writer.write_polynomial_evaluator())
2395 subroutines.append(mp_poly_writer.write_polynomial_evaluator())
2396
2397 subroutines.append(poly_writer.write_add_coefs())
2398 subroutines.append(mp_poly_writer.write_add_coefs())
2399
2400 subroutines.append(poly_writer.write_wl_merger())
2401 subroutines.append(mp_poly_writer.write_wl_merger())
2402 for wl_update in matrix_element.get_used_wl_updates():
2403
2404
2405
2406
2407 if wl_update[0]==wl_update[1]==1 or wl_update[0]==0 or wl_update[1]==0:
2408
2409
2410
2411 subroutines.append(poly_writer.write_expanded_wl_updater(\
2412 wl_update[0],wl_update[1]))
2413 subroutines.append(mp_poly_writer.write_expanded_wl_updater(\
2414 wl_update[0],wl_update[1]))
2415 elif wl_update[0] >= wl_update[1]:
2416
2417
2418
2419
2420 subroutines.append(poly_writer.write_compact_wl_updater(\
2421 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
2422 subroutines.append(mp_poly_writer.write_compact_wl_updater(\
2423 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=True))
2424 else:
2425
2426
2427
2428
2429
2430
2431 subroutines.append(poly_writer.write_compact_wl_updater(\
2432 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
2433 subroutines.append(mp_poly_writer.write_compact_wl_updater(\
2434 wl_update[0],wl_update[1],loop_over_vertex_coefs_first=False))
2435
2436 writer.writelines('\n\n'.join(subroutines),
2437 context=self.get_context(matrix_element))
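# Illustrative, standalone sketch (not part of this module): the dispatch above
# between the 'expanded' and the two 'compact' wavefunction-loop updaters only
# depends on the pair of ranks in wl_update.  A hypothetical helper summarising it:
def example_updater_kind(rank_a, rank_b):
    """Mirror the branch taken above for a (rank_a, rank_b) update."""
    if rank_a == rank_b == 1 or rank_a == 0 or rank_b == 0:
        return 'expanded'
    elif rank_a >= rank_b:
        return 'compact (loop_over_vertex_coefs_first=True)'
    else:
        return 'compact (loop_over_vertex_coefs_first=False)'
# example_updater_kind(1, 1) -> 'expanded'; example_updater_kind(3, 2) -> compact/True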
2438
2440 """Create the write_mp_compute_loop_coefs.f file."""
2441
2442 if not matrix_element.get('processes') or \
2443 not matrix_element.get('diagrams'):
2444 return 0
2445
2446
2447
2448 writers.FortranWriter.downcase = False
2449
2450 replace_dict = copy.copy(matrix_element.rep_dict)
2451
2452
2453 squared_orders = matrix_element.get_squared_order_contribs()
2454 split_orders = matrix_element.get('processes')[0].get('split_orders')
2455
2456 born_ct_helas_calls , uvct_helas_calls = \
2457 fortran_model.get_born_ct_helas_calls(matrix_element,
2458 squared_orders=squared_orders, split_orders=split_orders)
2459 self.turn_to_mp_calls(born_ct_helas_calls)
2460 self.turn_to_mp_calls(uvct_helas_calls)
2461 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\
2462 matrix_element,group_loops=self.group_loops,
2463 squared_orders=squared_orders,split_orders=split_orders)
2464
2465 coef_construction = [c % matrix_element.rep_dict for c
2466 in coef_construction]
2467 self.turn_to_mp_calls(coef_construction)
2468 self.turn_to_mp_calls(coef_merging)
2469
2470 file = open(os.path.join(self.template_dir,\
2471 'mp_compute_loop_coefs.inc')).read()
2472
2473
2474
2475 context = self.get_context(matrix_element)
2476 file=self.split_HELASCALLS(writer,replace_dict,\
2477 'mp_helas_calls_split.inc',file,born_ct_helas_calls,\
2478 'mp_born_ct_helas_calls','mp_helas_calls_ampb',
2479 required_so_broadcaster = 'MP_CT_REQ_SO_DONE',
2480 continue_label = 2000,
2481 momenta_array_name = 'MP_P',
2482 context=context)
2483 file=self.split_HELASCALLS(writer,replace_dict,\
2484 'mp_helas_calls_split.inc',file,uvct_helas_calls,\
2485 'mp_uvct_helas_calls','mp_helas_calls_uvct',
2486 required_so_broadcaster = 'MP_UVCT_REQ_SO_DONE',
2487 continue_label = 3000,
2488 momenta_array_name = 'MP_P',
2489 context=context)
2490 file=self.split_HELASCALLS(writer,replace_dict,\
2491 'mp_helas_calls_split.inc',file,coef_construction,\
2492 'mp_coef_construction','mp_coef_construction',
2493 required_so_broadcaster = 'MP_LOOP_REQ_SO_DONE',
2494 continue_label = 4000,
2495 momenta_array_name = 'MP_P',
2496 context=context)
2497
2498 replace_dict['mp_coef_merging']='\n'.join(coef_merging)
2499
2500 file = file % replace_dict
2501
2502
2503 writer.writelines(file,context=context)
2504
2506 """Writes out the files (Loop|Born)ColorFlowMatrix.dat corresponding
2507 to the color coefficients for JAMP(L|B)*JAMP(L|B)."""
2508
2509 res = []
2510 for line in range(len(col_matrix._col_basis1)):
2511 numerators = []
2512 denominators = []
2513 for row in range(len(col_matrix._col_basis2)):
2514 coeff = col_matrix.col_matrix_fixed_Nc[(line,row)]
2515 numerators.append('%6r'%coeff[0].numerator)
2516 denominators.append('%6r'%(
2517 coeff[0].denominator*(-1 if coeff[1] else 1)))
2518 res.append(' '.join(numerators))
2519 res.append(' '.join(denominators))
2520
2521 res.append('EOF')
2522
2523 writer.writelines('\n'.join(res))
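# Illustrative, standalone sketch (not part of this module): each row of the color
# matrix is written as a pair of lines, numerators first, then denominators with the
# sign flag of the entry folded into the denominator.  With a hypothetical 1x2 row:
import fractions
example_row = [(fractions.Fraction(7, 3), False), (fractions.Fraction(1, 6), True)]
example_numerators = ' '.join('%6r' % coeff.numerator for coeff, flag in example_row)
example_denominators = ' '.join('%6r' % (coeff.denominator * (-1 if flag else 1))
                                for coeff, flag in example_row)
# example_numerators   -> '     7      1'
# example_denominators -> '     3     -6'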
2524
2527         """ Writes the file '(Loop|Born)ColorFlowCoefs.dat' using the coefficients
2528 list of the color_amplitudes in the argument of this function."""
2529
2530 my_cs = color.ColorString()
2531
2532 res = []
2533
2534 for jamp_number, coeff_list in enumerate(color_amplitudes):
2535 my_cs.from_immutable(sorted(color_basis.keys())[jamp_number])
2536
2537 ordered_cs = color.ColorFactor([my_cs]).full_simplify()[0]
2538 res.append('%d # Coefficient for flow number %d with expr. %s'\
2539 %(len(coeff_list), jamp_number+1, repr(ordered_cs)))
2540
2541 line_element = []
2542
2543 for (coefficient, amp_number) in coeff_list:
2544 coef = self.cat_coeff(\
2545 coefficient[0],coefficient[1],coefficient[2],coefficient[3])
2546 line_element.append((coef[0].numerator,
2547 coef[0].denominator*(-1 if coef[1] else 1),amp_number))
2548
2549 line_element.sort(key=lambda el:el[2])
2550
2551 for i in range(3):
2552 res.append(' '.join('%6r'%elem[i] for elem in line_element))
2553
2554 res.append('EOF')
2555 writer.writelines('\n'.join(res))
2556
2558 """Writes the file compute_color_flows.f which uses the AMPL results
2559 from a common block to project them onto the color flow space so as
2560 to compute the JAMP quantities. For loop induced processes, this file
2561 will also contain a subroutine computing AMPL**2 for madevent
2562 multichanneling."""
2563
2564 loop_col_amps = matrix_element.get_loop_color_amplitudes()
2565 matrix_element.rep_dict['nLoopFlows'] = len(loop_col_amps)
2566
2567 dat_writer = open(pjoin('..','MadLoop5_resources',
2568 '%(proc_prefix)sLoopColorFlowCoefs.dat'
2569 %matrix_element.rep_dict),'w')
2570 self.write_color_flow_coefs_data_file(dat_writer,
2571 loop_col_amps, matrix_element.get('loop_color_basis'))
2572 dat_writer.close()
2573
2574 dat_writer = open(pjoin('..','MadLoop5_resources',
2575 '%(proc_prefix)sLoopColorFlowMatrix.dat'
2576 %matrix_element.rep_dict),'w')
2577 self.write_color_matrix_data_file(dat_writer,
2578 matrix_element.get('color_matrix'))
2579 dat_writer.close()
2580
2581 if matrix_element.get('processes')[0].get('has_born'):
2582 born_col_amps = matrix_element.get_born_color_amplitudes()
2583 matrix_element.rep_dict['nBornFlows'] = len(born_col_amps)
2584 dat_writer = open(pjoin('..','MadLoop5_resources',
2585 '%(proc_prefix)sBornColorFlowCoefs.dat'
2586 %matrix_element.rep_dict),'w')
2587 self.write_color_flow_coefs_data_file(dat_writer,
2588 born_col_amps, matrix_element.get('born_color_basis'))
2589 dat_writer.close()
2590
2591 dat_writer = open(pjoin('..','MadLoop5_resources',
2592 '%(proc_prefix)sBornColorFlowMatrix.dat'
2593 %matrix_element.rep_dict),'w')
2594 self.write_color_matrix_data_file(dat_writer,
2595 color_amp.ColorMatrix(matrix_element.get('born_color_basis')))
2596 dat_writer.close()
2597 else:
2598 matrix_element.rep_dict['nBornFlows'] = 0
2599
2600 replace_dict = copy.copy(matrix_element.rep_dict)
2601
2602
2603
2604 if self.get_context(matrix_element)['MadEventOutput']:
2605 self.get_amp2_lines(matrix_element, replace_dict, config_map)
2606 else:
2607 replace_dict['config_map_definition'] = ''
2608 replace_dict['config_index_map_definition'] = ''
2609 replace_dict['nmultichannels'] = 0
2610 replace_dict['nmultichannel_configs'] = 0
2611
2612
2613
2614 matrix_element.rep_dict['nmultichannels'] = \
2615 replace_dict['nmultichannels']
2616 matrix_element.rep_dict['nmultichannel_configs'] = \
2617 replace_dict['nmultichannel_configs']
2618
2619
2620 file = open(os.path.join(self.template_dir,\
2621 'compute_color_flows.inc')).read()%replace_dict
2622
2623 writer.writelines(file,context=self.get_context(matrix_element))
2624
2626         """ From the list of matrix elements, or a single matrix element, derive
2627         the global quantities to write in global_specs.inc"""
2628
2629 if isinstance(matrix_element_list, (group_subprocs.SubProcessGroupList,
2630 loop_helas_objects.LoopHelasProcess)):
2631 matrix_element_list = matrix_element_list.get_matrix_elements()
2632
2633 if isinstance(matrix_element_list, list):
2634 me_list = matrix_element_list
2635 else:
2636 me_list = [matrix_element_list]
2637
2638 if output_path is None:
2639 out_path = pjoin(self.dir_path,'SubProcesses','global_specs.inc')
2640 else:
2641 out_path = output_path
2642
2643 open(out_path,'w').write(
2644 """ integer MAXNEXTERNAL
2645 parameter(MAXNEXTERNAL=%d)
2646 integer OVERALLMAXRANK
2647 parameter(OVERALLMAXRANK=%d)
2648 integer NPROCS
2649 parameter(NPROCS=%d)"""%(
2650 max(me.get_nexternal_ninitial()[0] for me in me_list),
2651 max(me.get_max_loop_rank() for me in me_list),
2652 len(me_list)))
2653
2654
2655 - def fix_coef_specs(self, overall_max_lwf_spin, overall_max_loop_vert_rank):
2656 """ If processes with different maximum loop wavefunction size or
2657 different maximum loop vertex rank have to be output together, then
2658         the file 'coef_specs.inc' in the HELAS Source folder must contain the overall
2659         maximum of these quantities. It is not safe though, and the user has
2660         been appropriately warned at the output stage. """
2661
2662
2663 coef_specs_path=os.path.join(self.dir_path,'Source','DHELAS',\
2664 'coef_specs.inc')
2665 os.remove(coef_specs_path)
2666
2667 spin_to_wf_size = {1:4,2:4,3:4,4:16,5:16}
2668 overall_max_lwf_size = spin_to_wf_size[overall_max_lwf_spin]
2669 overall_max_loop_vert_coefs = q_polynomial.get_number_of_coefs_for_rank(
2670 overall_max_loop_vert_rank)
2671
2672 IncWriter=writers.FortranWriter(coef_specs_path,'w')
2673 IncWriter.writelines("""INTEGER MAXLWFSIZE
2674 PARAMETER (MAXLWFSIZE=%(max_lwf_size)d)
2675 INTEGER VERTEXMAXCOEFS
2676 PARAMETER (VERTEXMAXCOEFS=%(vertex_max_coefs)d)"""\
2677 %{'max_lwf_size':overall_max_lwf_size,
2678 'vertex_max_coefs':overall_max_loop_vert_coefs})
2679 IncWriter.close()
2680
2683 """ Sets up the replacement dictionary for the writeout of the steering
2684 file check_sa.f"""
2685 if len(squared_orders)<1:
2686 matrix_element.rep_dict['print_so_loop_results']=\
2687 "write(*,*) 'No split orders defined.'"
2688 elif len(squared_orders)==1:
2689 matrix_element.rep_dict['set_coupling_target']=''
2690 matrix_element.rep_dict['print_so_loop_results']=\
2691 "write(*,*) 'All loop contributions are of split orders (%s)'"%(
2692 ' '.join(['%s=%d'%(split_orders[i],squared_orders[0][i]) \
2693 for i in range(len(split_orders))]))
2694 else:
2695 matrix_element.rep_dict['set_coupling_target']='\n'.join([
2696 '# Here we leave the default target squared split order to -1, meaning that we'+
2697 ' aim at computing all individual contributions. You can choose otherwise.',
2698 'call %(proc_prefix)sSET_COUPLINGORDERS_TARGET(-1)'%matrix_element.rep_dict])
2699 matrix_element.rep_dict['print_so_loop_results'] = '\n'.join([
2700 '\n'.join(["write(*,*) '%dL) Loop ME for orders (%s) :'"%((j+1),(' '.join(
2701 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]))),
2702 "IF (PREC_FOUND(%d).NE.-1.0d0) THEN"%(j+1),
2703 "write(*,*) ' > accuracy = ',PREC_FOUND(%d)"%(j+1),
2704 "ELSE",
2705 "write(*,*) ' > accuracy = NA'",
2706 "ENDIF",
2707 "write(*,*) ' > finite = ',MATELEM(1,%d)"%(j+1),
2708 "write(*,*) ' > 1eps = ',MATELEM(2,%d)"%(j+1),
2709 "write(*,*) ' > 2eps = ',MATELEM(3,%d)"%(j+1)
2710 ]) for j, so in enumerate(squared_orders)])
2711 matrix_element.rep_dict['write_so_loop_results'] = '\n'.join(
2712 ["write (69,*) 'Split_Orders_Names %s'"%(' '.join(split_orders))]+
2713 ['\n'.join([
2714 "write (69,*) 'Loop_SO_Results %s'"%(' '.join(
2715 ['%d'%so_value for so_value in so])),
2716 "write (69,*) 'SO_Loop ACC ',PREC_FOUND(%d)"%(j+1),
2717 "write (69,*) 'SO_Loop FIN ',MATELEM(1,%d)"%(j+1),
2718 "write (69,*) 'SO_Loop 1EPS ',MATELEM(2,%d)"%(j+1),
2719 "write (69,*) 'SO_Loop 2EPS ',MATELEM(3,%d)"%(j+1),
2720 ]) for j, so in enumerate(squared_orders)])
2721
2722
2723 squared_born_so_orders = []
2724 for i, amp_order in enumerate(amps_orders['born_amp_orders']):
2725 for j in range(0,i+1):
2726 key = tuple([ord1 + ord2 for ord1,ord2 in \
2727 zip(amp_order[0],amps_orders['born_amp_orders'][j][0])])
2728 if not key in squared_born_so_orders:
2729 squared_born_so_orders.append(key)
2730 if len(squared_born_so_orders)<1:
2731 matrix_element.rep_dict['print_so_born_results'] = ''
2732 elif len(squared_born_so_orders)==1:
2733 matrix_element.rep_dict['print_so_born_results'] = \
2734 "write(*,*) 'All Born contributions are of split orders (%s)'"%(
2735 ' '.join(['%s=%d'%(split_orders[i],squared_born_so_orders[0][i])
2736 for i in range(len(split_orders))]))
2737 else:
2738 matrix_element.rep_dict['print_so_born_results'] = '\n'.join([
2739 "write(*,*) '%dB) Born ME for orders (%s) = ',MATELEM(0,%d)"%(j+1,' '.join(
2740 ['%s=%d'%(split_orders[i],so[i]) for i in range(len(split_orders))]),j+1)
2741 for j, so in enumerate(squared_born_so_orders)])
2742 matrix_element.rep_dict['write_so_born_results'] = '\n'.join(
2743 ['\n'.join([
2744 "write (69,*) 'Born_SO_Results %s'"%(' '.join(
2745 ['%d'%so_value for so_value in so])),
2746 "write (69,*) 'SO_Born BORN ',MATELEM(0,%d)"%(j+1),
2747 ]) for j, so in enumerate(squared_born_so_orders)])
2748
2749
2750 matrix_element.rep_dict['print_so_born_results'] += \
2751 '\nwrite (*,*) "---------------------------------"'
2752 matrix_element.rep_dict['print_so_loop_results'] += \
2753 '\nwrite (*,*) "---------------------------------"'
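# Illustrative, standalone sketch (not part of this module): the squared Born split
# orders listed above are built by summing, order by order, the coupling orders of
# every pair of Born amplitude groups.  With hypothetical orders for
# split_orders = ['QCD', 'QED']:
example_born_amp_orders = [((2, 0), (1, 2)), ((0, 2), (3,))]   # (orders, amplitude numbers)
example_squared = []
for i, (orders_i, amps_i) in enumerate(example_born_amp_orders):
    for j in range(i + 1):
        key = tuple(o1 + o2 for o1, o2 in zip(orders_i, example_born_amp_orders[j][0]))
        if key not in example_squared:
            example_squared.append(key)
# example_squared -> [(4, 0), (2, 2), (0, 4)]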
2754
2756 """Write the file 'tir_cache_size.inc' which sets the size of the TIR
2757         cache that the user wishes to employ and the default value for it.
2758         This can have an impact on MadLoop speed when using stability checks
2759         but also impacts in a non-negligible way MadLoop's memory footprint.
2760         It is therefore important that the user can choose its size."""
2761
2762
2763
2764
2765
2766         tir_cache_size = "parameter(TIR_CACHE_SIZE=1)"
2767         writer.writelines(tir_cache_size)
2768
2769 - def write_loopmatrix(self, writer, matrix_element, fortran_model, \
2770 write_auxiliary_files=True,):
2771 """Create the loop_matrix.f file."""
2772
2773 if not matrix_element.get('processes') or \
2774 not matrix_element.get('diagrams'):
2775 return 0
2776
2777
2778 writers.FortranWriter.downcase = False
2779
2780
2781
2782
2783
2784
2785
2786 squared_orders, amps_orders = matrix_element.get_split_orders_mapping()
2787
2788
2789
2790 sqso_contribs = [sqso[0] for sqso in squared_orders]
2791 split_orders = matrix_element.get('processes')[0].get('split_orders')
2792
2793
2794
2795 self.setup_check_sa_replacement_dictionary(matrix_element,
2796 split_orders,sqso_contribs,amps_orders)
2797
2798
2799
2800 overall_so_basis = list(set(
2801 [born_so[0] for born_so in amps_orders['born_amp_orders']]+
2802 [born_so[0] for born_so in amps_orders['loop_amp_orders']]))
2803
2804 order_hierarchy = matrix_element.get('processes')[0]\
2805 .get('model').get('order_hierarchy')
2806 if set(order_hierarchy.keys()).union(set(split_orders))==\
2807 set(order_hierarchy.keys()):
2808 overall_so_basis.sort(key= lambda so:
2809 sum([order_hierarchy[split_orders[i]]*order_power for \
2810 i, order_power in enumerate(so)]))
2811
2812
2813
2814 matrix_element.rep_dict['split_order_str_list'] = str(split_orders)
2815 matrix_element.rep_dict['nSO'] = len(split_orders)
2816 matrix_element.rep_dict['nSquaredSO'] = len(sqso_contribs)
2817 matrix_element.rep_dict['nAmpSO'] = len(overall_so_basis)
2818
2819 writers.FortranWriter('nsquaredSO.inc').writelines(
2820 """INTEGER NSQUAREDSO
2821 PARAMETER (NSQUAREDSO=%d)"""%matrix_element.rep_dict['nSquaredSO'])
2822
2823 replace_dict = copy.copy(matrix_element.rep_dict)
2824
2825
2826 replace_dict['ampsplitorders'] = '\n'.join(self.get_split_orders_lines(\
2827 overall_so_basis,'AMPSPLITORDERS'))
2828 replace_dict['SquaredSO'] = '\n'.join(self.get_split_orders_lines(\
2829 sqso_contribs,'SQPLITORDERS'))
2830
2831
2832 replace_dict['chosen_so_configs'] = self.set_chosen_SO_index(
2833 matrix_element.get('processes')[0],sqso_contribs)
2834
2835
2836
2837 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['loop_amp_orders'])
2838 for SO in amps_orders['loop_amp_orders']:
2839 for amp_number in SO[1]:
2840 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1
2841
2842 replace_dict['loopAmpSO'] = '\n'.join(self.format_integer_list(
2843 ampSO_list,'LOOPAMPORDERS'))
2844 ampSO_list=[-1]*sum(len(el[1]) for el in amps_orders['born_amp_orders'])
2845 for SO in amps_orders['born_amp_orders']:
2846 for amp_number in SO[1]:
2847 ampSO_list[amp_number-1]=overall_so_basis.index(SO[0])+1
2848 replace_dict['BornAmpSO'] = '\n'.join(self.format_integer_list(
2849 ampSO_list,'BORNAMPORDERS'))
2850
2851
2852
2853 looplibs_av=['.TRUE.']
2854
2855
2856 for tir_lib in ['pjfry','iregi','golem','samurai','ninja','collier']:
2857 looplibs_av.append('.TRUE.' if tir_lib in self.all_tir and \
2858 self.tir_available_dict[tir_lib] else '.FALSE.')
2859 replace_dict['data_looplibs_av']=','.join(looplibs_av)
2860
2861
2862
2863
2864
2865
2866
2867
2868
2869 replace_dict['hel_offset'] = 10000
2870
2871
2872
2873 den_factor_line = self.get_den_factor_line(matrix_element)
2874 replace_dict['den_factor_line'] = den_factor_line
2875
2876
2877
2878 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
2879 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
2880 matrix_element.get_beams_hel_avg_factor()
2881
2882 if write_auxiliary_files:
2883
2884 (CMNum,CMDenom) = self.get_color_matrix(matrix_element)
2885 CMWriter=open(pjoin('..','MadLoop5_resources',
2886 '%(proc_prefix)sColorNumFactors.dat'%matrix_element.rep_dict),'w')
2887 for ColorLine in CMNum:
2888 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
2889 CMWriter.close()
2890 CMWriter=open(pjoin('..','MadLoop5_resources',
2891 '%(proc_prefix)sColorDenomFactors.dat'%matrix_element.rep_dict),'w')
2892 for ColorLine in CMDenom:
2893 CMWriter.write(' '.join(['%d'%C for C in ColorLine])+'\n')
2894 CMWriter.close()
2895
2896
2897 HelConfigs=matrix_element.get_helicity_matrix()
2898 HelConfigWriter=open(pjoin('..','MadLoop5_resources',
2899 '%(proc_prefix)sHelConfigs.dat'%matrix_element.rep_dict),'w')
2900 for HelConfig in HelConfigs:
2901 HelConfigWriter.write(' '.join(['%d'%H for H in HelConfig])+'\n')
2902 HelConfigWriter.close()
2903
2904
2905 born_ct_helas_calls, uvct_helas_calls = \
2906 fortran_model.get_born_ct_helas_calls(matrix_element,
2907 squared_orders=squared_orders,split_orders=split_orders)
2908 coef_construction, coef_merging = fortran_model.get_coef_construction_calls(\
2909 matrix_element,group_loops=self.group_loops,
2910 squared_orders=squared_orders,split_orders=split_orders)
2911
2912 loop_CT_calls = fortran_model.get_loop_CT_calls(matrix_element,\
2913 group_loops=self.group_loops,
2914 squared_orders=squared_orders, split_orders=split_orders)
2915
2916 coef_construction = [c % matrix_element.rep_dict for c
2917 in coef_construction]
2918 loop_CT_calls = [lc % matrix_element.rep_dict for lc in loop_CT_calls]
2919
2920 file = open(os.path.join(self.template_dir,\
2921 'loop_matrix_standalone.inc')).read()
2922
2923
2924
2925 context = self.get_context(matrix_element)
2926 file=self.split_HELASCALLS(writer,replace_dict,\
2927 'helas_calls_split.inc',file,born_ct_helas_calls,\
2928 'born_ct_helas_calls','helas_calls_ampb',
2929 required_so_broadcaster = 'CT_REQ_SO_DONE',
2930 continue_label = 2000, context = context)
2931 file=self.split_HELASCALLS(writer,replace_dict,\
2932 'helas_calls_split.inc',file,uvct_helas_calls,\
2933 'uvct_helas_calls','helas_calls_uvct',
2934 required_so_broadcaster = 'UVCT_REQ_SO_DONE',
2935 continue_label = 3000, context=context)
2936 file=self.split_HELASCALLS(writer,replace_dict,\
2937 'helas_calls_split.inc',file,coef_construction,\
2938 'coef_construction','coef_construction',
2939 required_so_broadcaster = 'LOOP_REQ_SO_DONE',
2940 continue_label = 4000, context=context)
2941 file=self.split_HELASCALLS(writer,replace_dict,\
2942 'helas_calls_split.inc',file,loop_CT_calls,\
2943 'loop_CT_calls','loop_CT_calls',
2944 required_so_broadcaster = 'CTCALL_REQ_SO_DONE',
2945 continue_label = 5000, context=context)
2946
2947
2948
2949 matrix_element.rep_dict['loop_CT_calls']=replace_dict['loop_CT_calls']
2950 matrix_element.rep_dict['born_ct_helas_calls']=replace_dict['born_ct_helas_calls']
2951 matrix_element.rep_dict['uvct_helas_calls']=replace_dict['uvct_helas_calls']
2952 matrix_element.rep_dict['coef_construction']=replace_dict['coef_construction']
2953
2954 replace_dict['coef_merging']='\n'.join(coef_merging)
2955
2956 file = file % replace_dict
2957 number_of_calls = len(filter(lambda call: call.find('CALL LOOP') != 0, \
2958 loop_CT_calls))
2959 if writer:
2960
2961 writer.writelines(file,context=context)
2962 return number_of_calls
2963 else:
2964
2965 return number_of_calls, file
2966
2967
2968
2969
2972 """Class to take care of exporting a set of loop matrix elements in the
2973 Fortran format."""
2974
2975 default_opt = {'clean': False, 'complex_mass':False,
2976 'export_format':'madloop_matchbox', 'mp':True,
2977 'loop_dir':'', 'cuttools_dir':'',
2978 'fortran_compiler':'gfortran',
2979 'output_dependencies':'external',
2980 'sa_symmetry':True}
2981
2982
2983
2989
2990
2995
2996 - def get_ME_identifier(self, matrix_element, group_number = None, group_elem_number = None):
2997         """ To avoid mixing notations between Born and virtual contributions, we also call it MG5 here. """
2998 return 'MG5_%d_'%matrix_element.get('processes')[0].get('id')
2999
3000
3001
3002
3003
3005 """ A class to specify all the functions common to LoopInducedExporterMEGroup
3006 and LoopInducedExporterMENoGroup (but not relevant for the original
3007     MadEvent exporters)."""
3008
3009 madloop_makefile_name = 'makefile_MadLoop'
3010
3011
3013 """ Initialize the process, setting the proc characteristics."""
3014 super(LoopInducedExporterME, self).__init__(*args, **opts)
3015 self.proc_characteristic['loop_induced'] = True
3016
3017 - def get_context(self,*args,**opts):
3018 """ Make sure that the contextual variable MadEventOutput is set to
3019 True for this exporter"""
3020
3021 context = super(LoopInducedExporterME,self).get_context(*args,**opts)
3022 context['MadEventOutput'] = True
3023 return context
3024
3025
3027         """ Returns the list of libraries to be compiled when compiling the
3028 SOURCE directory. It is different for loop_induced processes and
3029 also depends on the value of the 'output_dependencies' option"""
3030
3031 libraries_list = super(LoopInducedExporterME,self).\
3032 get_source_libraries_list()
3033
3034 if self.dependencies=='internal':
3035 libraries_list.append('$(LIBDIR)libcts.$(libext)')
3036 libraries_list.append('$(LIBDIR)libiregi.$(libext)')
3037
3038 return libraries_list
3039
3046
3057
3058
3059
3060
3061
3062 - def finalize(self, matrix_elements, history, mg5options, flaglist):
3063 """Function to finalize v4 directory, for inheritance.
3064 """
3065
3066 self.proc_characteristic['loop_induced'] = True
3067
3068
3069
3070
3071
3072
3073
3074
3075
3076 self.write_global_specs(matrix_elements)
3077
3079 """Write the file 'tir_cache_size.inc' which sets the size of the TIR
3080         cache that the user wishes to employ and the default value for it.
3081         This can have an impact on MadLoop speed when using stability checks
3082         but also impacts in a non-negligible way MadLoop's memory footprint.
3083         It is therefore important that the user can choose its size."""
3084
3085
3086
3087
3088
3089         tir_cache_size = "parameter(TIR_CACHE_SIZE=2)"
3090         writer.writelines(tir_cache_size)
3091
3092 - def write_matrix_element_v4(self, writer, matrix_element, fortran_model,
3093 proc_id = None, config_map = [], subproc_number = None):
3094         """ Write the wrapper to call the ML5 subroutine in the library."""
3095
3096
3097 if not matrix_element.get('processes') or \
3098 not matrix_element.get('diagrams'):
3099 return 0
3100
3101 if not isinstance(writer, writers.FortranWriter):
3102 raise writers.FortranWriter.FortranWriterError(\
3103 "writer not FortranWriter")
3104
3105 replace_dict = copy.copy(matrix_element.rep_dict)
3106
3107
3108 info_lines = self.get_mg5_info_lines()
3109 replace_dict['info_lines'] = info_lines
3110
3111
3112 process_lines = self.get_process_info_lines(matrix_element)
3113 replace_dict['process_lines'] = process_lines
3114
3115
3116
3117
3118
3119 if proc_id is None:
3120 replace_dict['proc_id'] = ''
3121 else:
3122 replace_dict['proc_id'] = proc_id
3123
3124
3125 replace_dict['hel_avg_factor'] = matrix_element.get_hel_avg_factor()
3126 replace_dict['beamone_helavgfactor'], replace_dict['beamtwo_helavgfactor'] =\
3127 matrix_element.get_beams_hel_avg_factor()
3128
3129
3130 helicity_lines = self.get_helicity_lines(matrix_element)
3131 replace_dict['helicity_lines'] = helicity_lines
3132
3133
3134
3135 ndiags = len(matrix_element.get('diagrams'))
3136 replace_dict['ndiags'] = ndiags
3137
3138
3139 replace_dict['define_iconfigs_lines'] = \
3140 """INTEGER MAPCONFIG(0:LMAXCONFIGS), ICONFIG
3141 COMMON/TO_MCONFIGS/MAPCONFIG, ICONFIG"""
3142
3143 if proc_id:
3144
3145
3146 replace_dict['define_iconfigs_lines'] += \
3147 """\nINTEGER SUBDIAG(MAXSPROC),IB(2)
3148 COMMON/TO_SUB_DIAG/SUBDIAG,IB"""
3149
3150 replace_dict['configID_in_matrix'] = "SUBDIAG(%s)"%proc_id
3151 else:
3152
3153
3154 replace_dict['configID_in_matrix'] = "MAPCONFIG(ICONFIG)"
3155
3156
3157 replace_dict['ml_prefix'] = \
3158 self.get_ME_identifier(matrix_element, subproc_number, proc_id)
3159
3160
3161 ncolor = max(1, len(matrix_element.get('color_basis')))
3162 replace_dict['ncolor'] = ncolor
3163
3164 n_tot_diags = len(matrix_element.get_loop_diagrams())
3165 replace_dict['n_tot_diags'] = n_tot_diags
3166
3167 file = open(pjoin(_file_path, \
3168 'iolibs/template_files/%s' % self.matrix_file)).read()
3169 file = file % replace_dict
3170
3171
3172 writer.writelines(file)
3173
3174 return 0, ncolor
3175
3177 """Make sure the function is implemented in the daughters"""
3178
3179         raise NotImplementedError, 'The function get_amp2_lines must be implemented in '+\
3180                                    'the daughters of LoopInducedExporterME'
3181
3182
3183
3184
3187 """Class to take care of exporting a set of grouped loop induced matrix
3188 elements"""
3189
3190 matrix_file = "matrix_loop_induced_madevent_group.inc"
3191
3197
3204
3216
3230
3233 """Generate the Pn directory for a subprocess group in MadEvent,
3234 including the necessary matrix_N.f files, configs.inc and various
3235 other helper files"""
3236
3237
3238 calls = 0
3239 matrix_elements = subproc_group.get('matrix_elements')
3240 for ime, matrix_element in enumerate(matrix_elements):
3241 self.unique_id +=1
3242 calls += self.generate_loop_subprocess(matrix_element,fortran_model,
3243 group_number = group_number, proc_id = str(ime+1),
3244
3245 config_map = subproc_group.get('diagram_maps')[ime],
3246 unique_id=self.unique_id)
3247
3248
3249 export_v4.ProcessExporterFortranMEGroup.generate_subprocess_directory(
3250 self, subproc_group,fortran_model,group_number)
3251
3252 return calls
3253
3255 """Return the various replacement dictionary inputs necessary for the
3256 multichanneling amp2 definition for the loop-induced MadEvent output.
3257 """
3258
3259 if not config_map:
3260 raise MadGraph5Error, 'A multi-channeling configuration map is '+\
3261 ' necessary for the MadEvent Loop-induced output with grouping.'
3262
3263 nexternal, ninitial = matrix_element.get_nexternal_ninitial()
3264
3265 ret_lines = []
3266
3267
3268
3269 if isinstance(matrix_element, loop_helas_objects.LoopHelasMatrixElement):
3270 diagrams = matrix_element.get_loop_diagrams()
3271 else:
3272 diagrams = matrix_element.get('diagrams')
3273
3274
3275
3276
3277
3278 config_index_map = {}
3279
3280
3281 loop_amp_ID_to_config = {}
3282
3283
3284 config_to_diag_dict = {}
3285 for idiag, diag in enumerate(diagrams):
3286 try:
3287 config_to_diag_dict[config_map[idiag]].append(idiag)
3288 except KeyError:
3289 config_to_diag_dict[config_map[idiag]] = [idiag]
3290
3291 for config in sorted(config_to_diag_dict.keys()):
3292 config_index_map[config] = (config_to_diag_dict[config][0] + 1)
3293
3294
3295
3296 CT_amp_numbers = [a.get('number') for a in \
3297 sum([diagrams[idiag].get_ct_amplitudes() for \
3298 idiag in config_to_diag_dict[config]], [])]
3299
3300 for CT_amp_number in CT_amp_numbers:
3301 loop_amp_ID_to_config[CT_amp_number] = config
3302
3303
3304 loop_amp_numbers = [a.get('amplitudes')[0].get('number')
3305 for a in sum([diagrams[idiag].get_loop_amplitudes() for \
3306 idiag in config_to_diag_dict[config]], [])]
3307
3308 for loop_amp_number in loop_amp_numbers:
3309 loop_amp_ID_to_config[loop_amp_number] = config
3310
3311
3312
3313
3314
3315
3316 n_configs = max(config_index_map.keys())
3317 replace_dict['nmultichannel_configs'] = n_configs
3318
3319
3320
3321 conf_list = [(config_index_map[i] if i in config_index_map else 0) \
3322 for i in range(1,n_configs+1)]
3323
3324
3325 replace_dict['nmultichannels'] = len([_ for _ in conf_list if _!=0])
3326
3327
3328 res_list = []
3329 chunk_size = 6
3330 for k in xrange(0, len(conf_list), chunk_size):
3331 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \
3332 (k + 1, min(k + chunk_size, len(conf_list)),
3333 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]])))
3334
3335 replace_dict['config_index_map_definition'] = '\n'.join(res_list)
3336
3337 res_list = []
3338 n_loop_amps = max(loop_amp_ID_to_config.keys())
3339 amp_list = [loop_amp_ID_to_config[i] for i in \
3340 sorted(loop_amp_ID_to_config.keys()) if i!=0]
3341 chunk_size = 6
3342 for k in xrange(0, len(amp_list), chunk_size):
3343 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \
3344 (k + 1, min(k + chunk_size, len(amp_list)),
3345 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]])))
3346
3347 replace_dict['config_map_definition'] = '\n'.join(res_list)
3348
3349 return
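# Illustrative, standalone sketch (not part of this module): the multichannel
# bookkeeping above inverts a per-diagram config map so that every non-zero channel
# points at its first contributing diagram (1-based).  Simplified, with a
# hypothetical map where 0 flags diagrams not used for multichanneling:
example_config_map = [1, 1, 2, 0, 2]
example_config_to_diags = {}
for idiag, config in enumerate(example_config_map):
    example_config_to_diags.setdefault(config, []).append(idiag)
example_config_index_map = dict((config, diags[0] + 1) for config, diags
                                in example_config_to_diags.items() if config != 0)
# example_config_index_map -> {1: 1, 2: 3}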
3350
3351
3352
3353
3356 """Class to take care of exporting a set of individual loop induced matrix
3357 elements"""
3358
3359 matrix_file = "matrix_loop_induced_madevent.inc"
3360
3366
3373
3385
3398
3400 """Generate the Pn directory for a subprocess group in MadEvent,
3401 including the necessary matrix_N.f files, configs.inc and various
3402 other helper files"""
3403
3404 self.unique_id += 1
3405
3406 calls = self.generate_loop_subprocess(matrix_element,fortran_model,
3407 group_number = me_number,
3408 unique_id=self.unique_id)
3409
3410
3411
3412 calls += export_v4.ProcessExporterFortranME.generate_subprocess_directory(
3413 self, matrix_element, fortran_model, me_number)
3414 return calls
3415
3417 """Return the amp2(i) = sum(amp for diag(i))^2 lines"""
3418
3419 if config_map:
3420 raise MadGraph5Error, 'A configuration map should not be specified'+\
3421 ' for the Loop induced exporter without grouping.'
3422
3423 nexternal, ninitial = matrix_element.get_nexternal_ninitial()
3424
3425 vert_list = [max(diag.get_vertex_leg_numbers()) for diag in \
3426 matrix_element.get('diagrams') if diag.get_vertex_leg_numbers()!=[]]
3427 minvert = min(vert_list) if vert_list!=[] else 0
3428
3429
3430
3431
3432
3433 config_index_map = {}
3434
3435
3436 loop_amp_ID_to_config = {}
3437
3438 n_configs = 0
3439 for idiag, diag in enumerate(matrix_element.get('diagrams')):
3440
3441 use_for_multichanneling = True
3442 if diag.get_vertex_leg_numbers()!=[] and max(diag.get_vertex_leg_numbers()) > minvert:
3443 use_for_multichanneling = False
3444 curr_config = 0
3445 else:
3446 n_configs += 1
3447 curr_config = n_configs
3448
3449 if not use_for_multichanneling:
3450 if 0 not in config_index_map:
3451 config_index_map[0] = idiag + 1
3452 else:
3453 config_index_map[curr_config] = idiag + 1
3454
3455 CT_amps = [ a.get('number') for a in diag.get_ct_amplitudes()]
3456 for CT_amp in CT_amps:
3457 loop_amp_ID_to_config[CT_amp] = curr_config
3458
3459 Loop_amps = [a.get('amplitudes')[0].get('number')
3460 for a in diag.get_loop_amplitudes()]
3461 for Loop_amp in Loop_amps:
3462 loop_amp_ID_to_config[Loop_amp] = curr_config
3463
3464
3465 n_configs = len([k for k in config_index_map.keys() if k!=0])
3466 replace_dict['nmultichannel_configs'] = n_configs
3467
3468
3469
3470 replace_dict['nmultichannels'] = n_configs
3471
3472 res_list = []
3473 conf_list = [config_index_map[i] for i in sorted(config_index_map.keys())
3474 if i!=0]
3475 chunk_size = 6
3476 for k in xrange(0, len(conf_list), chunk_size):
3477 res_list.append("DATA (config_index_map(i),i=%6r,%6r) /%s/" % \
3478 (k + 1, min(k + chunk_size, len(conf_list)),
3479 ','.join(["%6r" % i for i in conf_list[k:k + chunk_size]])))
3480
3481 replace_dict['config_index_map_definition'] = '\n'.join(res_list)
3482
3483 res_list = []
3484 n_loop_amps = max(loop_amp_ID_to_config.keys())
3485 amp_list = [loop_amp_ID_to_config[i] for i in \
3486 sorted(loop_amp_ID_to_config.keys()) if i!=0]
3487 chunk_size = 6
3488 for k in xrange(0, len(amp_list), chunk_size):
3489 res_list.append("DATA (CONFIG_MAP(i),i=%6r,%6r) /%s/" % \
3490 (k + 1, min(k + chunk_size, len(amp_list)),
3491 ','.join(["%6r" % i for i in amp_list[k:k + chunk_size]])))
3492
3493 replace_dict['config_map_definition'] = '\n'.join(res_list)
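# Illustrative, standalone sketch (not part of this module): as in the loop above,
# only diagrams whose largest vertex multiplicity equals the smallest such maximum
# over all diagrams are kept as multichannel configurations.  With hypothetical
# per-diagram vertex-leg-number lists:
example_vertex_legs = [[3, 3], [3, 4], [3, 3, 3]]
example_vert_list = [max(v) for v in example_vertex_legs if v != []]
example_minvert = min(example_vert_list) if example_vert_list != [] else 0
example_channels = [i + 1 for i, v in enumerate(example_vertex_legs)
                    if v != [] and max(v) <= example_minvert]
# example_minvert -> 3; example_channels -> [1, 3]  (diagram 2 has a 4-point vertex)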
3494