15 """Several different checks for processes (and hence models):
16 permutation tests, gauge invariance tests, Lorentz invariance
17 tests. Also a class for the evaluation of Python matrix elements,
18 MatrixElementEvaluator."""
19
20 from __future__ import division
21
22 import array
23 import copy
24 import fractions
25 import itertools
26 import logging
27 import math
28 import os
29 import sys
30 import re
31 import shutil
32 import random
33 import glob
35 import subprocess
36 import time
37 import datetime
38 import errno
39 import pickle
40
41
42
43 import aloha
44 import aloha.aloha_writers as aloha_writers
45 import aloha.create_aloha as create_aloha
46
47 import madgraph.iolibs.export_python as export_python
48 import madgraph.iolibs.helas_call_writers as helas_call_writers
49 import models.import_ufo as import_ufo
50 import madgraph.iolibs.save_load_object as save_load_object
51 import madgraph.iolibs.file_writers as writers
52
53 import madgraph.core.base_objects as base_objects
54 import madgraph.core.color_algebra as color
55 import madgraph.core.color_amp as color_amp
56 import madgraph.core.helas_objects as helas_objects
57 import madgraph.core.diagram_generation as diagram_generation
58
59 import madgraph.various.rambo as rambo
60 import madgraph.various.misc as misc
61 import madgraph.various.progressbar as pbar
62 import madgraph.various.banner as bannermod
64
65 import madgraph.loop.loop_diagram_generation as loop_diagram_generation
66 import madgraph.loop.loop_helas_objects as loop_helas_objects
67 import madgraph.loop.loop_base_objects as loop_base_objects
68 import models.check_param_card as check_param_card
69
70 from madgraph.interface.madevent_interface import MadLoopInitializer
71 from madgraph.interface.common_run_interface import AskforEditCard
72 from madgraph import MG5DIR, InvalidCmd, MadGraph5Error
73
74 from madgraph.iolibs.files import cp
75
76 import StringIO
77 import models.model_reader as model_reader
78 import aloha.template_files.wavefunctions as wavefunctions
79 from aloha.template_files.wavefunctions import \
80 ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
81
82 ADDED_GLOBAL = []
83
84 temp_dir_prefix = "TMP_CHECK"
85
86 pjoin = os.path.join
88 def clean_added_globals(to_clean):
89 for value in list(to_clean):
90 del globals()[value]
91 to_clean.remove(value)
92
96 class FakeInterface(object):
97 """ Just an 'option container' to mimic the interface which is passed to the
98 tests. We put in only what is currently used from the interface by the tests:
99 cmd.options['fortran_compiler']
100 cmd.options['complex_mass_scheme']
101 cmd._mgme_dir"""
102 def __init__(self, mgme_dir = "", complex_mass_scheme = False,
103 fortran_compiler = 'gfortran' ):
104 self._mgme_dir = mgme_dir
105 self.options = {}
106 self.options['complex_mass_scheme']=complex_mass_scheme
107 self.options['fortran_compiler']=fortran_compiler
108
109
110
111
112
113 logger = logging.getLogger('madgraph.various.process_checks')
117 def boost_momenta(p, boost_direction=1, beta=0.5):
118 """boost the set of momenta in the 'boost_direction' by the 'beta'
119 factor"""
120
121 boost_p = []
122 gamma = 1/ math.sqrt(1 - beta**2)
123 for imp in p:
124 boost_component = imp[boost_direction]
125 E, px, py, pz = imp
126 boost_imp = []
127
128 boost_imp.append(gamma * E - gamma * beta * boost_component)
129
130 if boost_direction == 1:
131 boost_imp.append(-gamma * beta * E + gamma * px)
132 else:
133 boost_imp.append(px)
134
135 if boost_direction == 2:
136 boost_imp.append(-gamma * beta * E + gamma * py)
137 else:
138 boost_imp.append(py)
139
140 if boost_direction == 3:
141 boost_imp.append(-gamma * beta * E + gamma * pz)
142 else:
143 boost_imp.append(pz)
144
145 boost_p.append(boost_imp)
146
147 return boost_p
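# A small illustration of boost_momenta (a sketch, not part of the checks; the
# momenta below are a hypothetical pair of massless back-to-back beams):
#
#     p = [[500., 0., 0., 500.], [500., 0., 0., -500.]]
#     boosted = boost_momenta(p, boost_direction=3, beta=0.5)
#
# With gamma = 1/sqrt(1-beta**2), the first momentum becomes
# [gamma*(E - beta*pz), 0, 0, gamma*(pz - beta*E)]; invariant masses and the
# total invariant mass of the set are unchanged by construction.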
148
152 class MatrixElementEvaluator(object):
153 """Class taking care of matrix element evaluation, storing
154 relevant quantities for speedup."""
155
156 def __init__(self, model, param_card = None,
157 auth_skipping = False, reuse = True, cmd = FakeInterface()):
158 """Initialize object with stored_quantities, helas_writer,
159 model, etc.
160 auth_skipping = True means that any identical matrix element will be
161 evaluated only once
162 reuse = True means that the matrix element corresponding to a
163 given process can be reused (turn off if you are using
164 different models for the same process)"""
165
166 self.cmd = cmd
167
168
169 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
170
171
172 self.full_model = model_reader.ModelReader(model)
173 try:
174 self.full_model.set_parameters_and_couplings(param_card)
175 except MadGraph5Error:
176 if isinstance(param_card, (str,file)):
177 raise
178 logger.warning('param_card present in the event file is not compatible.'+
179 ' We will use the default one.')
180 self.full_model.set_parameters_and_couplings()
181
182 self.auth_skipping = auth_skipping
183 self.reuse = reuse
184 self.cmass_scheme = cmd.options['complex_mass_scheme']
185 self.store_aloha = []
186 self.stored_quantities = {}
187
188
189
190
191 def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
192 gauge_check=False, auth_skipping=None, output='m2',
193 options=None):
194 """Calculate the matrix element and evaluate it for a phase space point
195 output is either m2, amp, jamp
196 """
197
198 if full_model:
199 self.full_model = full_model
200 process = matrix_element.get('processes')[0]
201 model = process.get('model')
202
203 if "matrix_elements" not in self.stored_quantities:
204 self.stored_quantities['matrix_elements'] = []
205 matrix_methods = {}
206
207 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
208
209 matrix = eval("Matrix_%s()" % process.shell_string())
210 me_value = matrix.smatrix(p, self.full_model)
211 if output == "m2":
212 return matrix.smatrix(p, self.full_model), matrix.amp2
213 else:
214 m2 = matrix.smatrix(p, self.full_model)
215 return {'m2': m2, output:getattr(matrix, output)}
216 if (auth_skipping or self.auth_skipping) and matrix_element in \
217 self.stored_quantities['matrix_elements']:
218
219 logger.info("Skipping %s, " % process.nice_string() + \
220 "identical matrix element already tested" \
221 )
222 return None
223
224 self.stored_quantities['matrix_elements'].append(matrix_element)
225
226
227
228
229 if "list_colorize" not in self.stored_quantities:
230 self.stored_quantities["list_colorize"] = []
231 if "list_color_basis" not in self.stored_quantities:
232 self.stored_quantities["list_color_basis"] = []
233 if "list_color_matrices" not in self.stored_quantities:
234 self.stored_quantities["list_color_matrices"] = []
235
236 col_basis = color_amp.ColorBasis()
237 new_amp = matrix_element.get_base_amplitude()
238 matrix_element.set('base_amplitude', new_amp)
239 colorize_obj = col_basis.create_color_dict_list(new_amp)
240
241 try:
242
243
244
245 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
246 except ValueError:
247
248
249 self.stored_quantities['list_colorize'].append(colorize_obj)
250 col_basis.build()
251 self.stored_quantities['list_color_basis'].append(col_basis)
252 col_matrix = color_amp.ColorMatrix(col_basis)
253 self.stored_quantities['list_color_matrices'].append(col_matrix)
254 col_index = -1
255
256
257 matrix_element.set('color_basis',
258 self.stored_quantities['list_color_basis'][col_index])
259 matrix_element.set('color_matrix',
260 self.stored_quantities['list_color_matrices'][col_index])
261
262
263 if "used_lorentz" not in self.stored_quantities:
264 self.stored_quantities["used_lorentz"] = []
265
266 me_used_lorentz = set(matrix_element.get_used_lorentz())
267 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
268 if lorentz not in self.store_aloha]
269
270 aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
271 aloha_model.add_Lorentz_object(model.get('lorentz'))
272 aloha_model.compute_subset(me_used_lorentz)
273
274
275 aloha_routines = []
276 for routine in aloha_model.values():
277 aloha_routines.append(routine.write(output_dir = None,
278 mode='mg5',
279 language = 'Python'))
280 for routine in aloha_model.external_routines:
281 aloha_routines.append(
282 open(aloha_model.locate_external(routine, 'Python')).read())
283
284
285 previous_globals = list(globals().keys())
286 for routine in aloha_routines:
287 exec(routine, globals())
288 for key in globals().keys():
289 if key not in previous_globals:
290 ADDED_GLOBAL.append(key)
291
292
293 self.store_aloha.extend(me_used_lorentz)
294
295 exporter = export_python.ProcessExporterPython(matrix_element,
296 self.helas_writer)
297 try:
298 matrix_methods = exporter.get_python_matrix_methods(\
299 gauge_check=gauge_check)
300
301 except helas_call_writers.HelasWriterError, error:
302 logger.info(error)
303 return None
304
305
306
307 if self.reuse:
308
309 exec(matrix_methods[process.shell_string()], globals())
310 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
311 else:
312
313 exec(matrix_methods[process.shell_string()])
314
315 if not p:
316 p, w_rambo = self.get_momenta(process, options)
317
318 exec("data = Matrix_%s()" % process.shell_string())
319 if output == "m2":
320 return data.smatrix(p, self.full_model), data.amp2
321 else:
322 m2 = data.smatrix(p,self.full_model)
323 return {'m2': m2, output:getattr(data, output)}
324
325 @staticmethod
326 def pass_isolation_cuts(pmoms, ptcut=50.0, drcut=0.5):
327 """ Check whether the specified kinematic point passes isolation cuts
328 """
329
330 def Pt(pmom):
331 """ Computes the pt of a 4-momentum"""
332 return math.sqrt(pmom[1]**2+pmom[2]**2)
333
334 def DeltaR(p1,p2):
335 """ Computes the DeltaR between two 4-momenta"""
336
337 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
338 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
339 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
340 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
341
342 phi1=math.atan2(p1[2],p1[1])
343 phi2=math.atan2(p2[2],p2[1])
344 dphi=abs(phi2-phi1)
345
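# Fold the azimuthal separation into [0, pi]: a dphi close to 2*pi corresponds
# to nearly coinciding azimuthal angles and must count as a small separation.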
346 dphi=abs(abs(dphi-math.pi)-math.pi)
347
348 return math.sqrt(dphi**2+(eta2-eta1)**2)
349
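# The cuts are applied to final-state momenta only: the first two entries of
# pmoms are assumed to be the incoming particles. Each final-state momentum
# must pass the pt cut and be separated by at least drcut from all later ones.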
350 for i, pmom in enumerate(pmoms[2:]):
351
352 if Pt(pmom)<ptcut:
353 return False
354
355 for pmom2 in pmoms[3+i:]:
356 if DeltaR(pmom,pmom2)<drcut:
357 return False
358 return True
359
360
361
362
363 def get_momenta(self, process, options=None, special_mass=None):
364 """Get a point in phase space for the external states in the given
365 process, with the CM energy given. The incoming particles are
366 assumed to be oriented along the z axis, with particle 1 along the
367 positive z axis.
368 For the CMS check, one must be able to choose the mass of the special
369 resonance particle with id = -1, and the special_mass option allows
370 one to specify it."""
371
372 if not options:
373 energy=1000
374 events=None
375 else:
376 energy = options['energy']
377 events = options['events']
378 to_skip = 0
379
380 if not (isinstance(process, base_objects.Process) and \
381 isinstance(energy, (float,int))):
382 raise rambo.RAMBOError, "Not correct type for arguments to get_momenta"
383
384
385 sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
386 l1.get('number') - l2.get('number'))
387
388
389 if events:
390 ids = [l.get('id') for l in sorted_legs]
391 import MadSpin.decay as madspin
392 if not hasattr(self, 'event_file'):
393 fsock = open(events)
394 self.event_file = madspin.Event(fsock)
395
396 skip = 0
397 while self.event_file.get_next_event() != 'no_event':
398 event = self.event_file.particle
399
400 event_ids = [p['pid'] for p in event.values()]
401 if event_ids == ids:
402 skip += 1
403 if skip > to_skip:
404 break
405 else:
406 raise MadGraph5Error, 'No compatible events for %s' % ids
407 p = []
408 for part in event.values():
409 m = part['momentum']
410 p.append([m.E, m.px, m.py, m.pz])
411 return p, 1
412
413 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
414 nfinal = len(sorted_legs) - nincoming
415
416
417 mass = []
418 for l in sorted_legs:
419 if l.get('id') != 0:
420 mass_string = self.full_model.get_particle(l.get('id')).get('mass')
421 mass.append(self.full_model.get('parameter_dict')[mass_string].real)
422 else:
423 if isinstance(special_mass, float):
424 mass.append(special_mass)
425 else:
426 raise Exception, "A 'special_mass' option must be specified"+\
427 " in get_momenta when a leg with id=-10 is present (for CMS check)"
428
429
430
431
432
433
434
435
436
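# Make sure the requested CM energy lies at least 20% above both the incoming
# and the outgoing mass thresholds, so that a physical phase-space point exists.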
437 energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
438
439
440
441
442
443
444
445 if nfinal == 1:
446 p = []
447 energy = mass[-1]
448 p.append([energy/2,0,0,energy/2])
449 p.append([energy/2,0,0,-energy/2])
450 p.append([mass[-1],0,0,0])
451 return p, 1.0
452
453 e2 = energy**2
454 m1 = mass[0]
455 p = []
456
457 masses = rambo.FortranList(nfinal)
458 for i in range(nfinal):
459 masses[i+1] = mass[nincoming + i]
460
461 if nincoming == 1:
462
463 p.append([abs(m1), 0., 0., 0.])
464 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
465
466 for i in range(1, nfinal+1):
467 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
468 p_rambo[(2,i)], p_rambo[(3,i)]]
469 p.append(momi)
470
471 return p, w_rambo
472
473 if nincoming != 2:
474 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
475
476 if nfinal == 1:
477 energy = masses[1]
478 if masses[1] == 0.0:
479 raise rambo.RAMBOError('The kinematic 2 > 1 with the final'+\
480 ' state particle massless is invalid')
481
482 e2 = energy**2
483 m2 = mass[1]
484
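# CM-frame momentum of the two incoming particles: this is
# sqrt(lambda(s, m1**2, m2**2))/(2*sqrt(s)) with s = e2 and the Kallen function
# lambda(x,y,z) = x**2 + y**2 + z**2 - 2*x*y - 2*x*z - 2*y*z.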
485 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
486 2*m1**2*m2**2 + m2**4) / (4*e2))
487 e1 = math.sqrt(mom**2+m1**2)
488 e2 = math.sqrt(mom**2+m2**2)
489
490 p.append([e1, 0., 0., mom])
491 p.append([e2, 0., 0., -mom])
492
493 if nfinal == 1:
494 p.append([energy, 0., 0., 0.])
495 return p, 1.
496
497 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
498
499
500 for i in range(1, nfinal+1):
501 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
502 p_rambo[(2,i)], p_rambo[(3,i)]]
503 p.append(momi)
504
505 return p, w_rambo
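# Typical standalone use of this evaluator (a sketch; 'loaded_model' and 'proc'
# stand for an already imported UFO model and a base_objects.Process built on
# it, neither is defined in this module):
#
#     evaluator = MatrixElementEvaluator(loaded_model)
#     p, w_rambo = evaluator.get_momenta(proc, {'energy': 1000., 'events': None})
#     amplitude = diagram_generation.Amplitude(proc)
#     me = helas_objects.HelasMatrixElement(amplitude)
#     m2, amp2 = evaluator.evaluate_matrix_element(me, p=p, output='m2')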
506
511 class LoopMatrixElementEvaluator(MatrixElementEvaluator):
512 """Class taking care of matrix element evaluation for loop processes."""
513
514 def __init__(self, cuttools_dir=None, output_path=None, tir_dir={},
515 cmd=FakeInterface(),*args,**kwargs):
516 """Allow for initializing the MG5 root where the temporary fortran
517 output for checks is placed."""
518
519 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
520
521 self.mg_root=self.cmd._mgme_dir
522
523 if output_path is None:
524 self.output_path = self.cmd._mgme_dir
525 else:
526 self.output_path = output_path
527
528 self.cuttools_dir=cuttools_dir
529 self.tir_dir=tir_dir
530 self.loop_optimized_output = cmd.options['loop_optimized_output']
531
532
533 self.proliferate=True
534
535
536
537
538 def evaluate_matrix_element(self, matrix_element, p=None, options=None,
539 gauge_check=False, auth_skipping=None, output='m2',
540 PS_name = None, MLOptions={}):
541 """Calculate the matrix element and evaluate it for a phase space point
542 Output can only be 'm2'. The 'jamp' and 'amp' returned values are just
543 empty lists at this point.
544 If PS_name is not None the written out PS.input will be saved in
545 the file PS.input_<PS_name> as well."""
546
547 process = matrix_element.get('processes')[0]
548 model = process.get('model')
549
550 if options and 'split_orders' in options.keys():
551 split_orders = options['split_orders']
552 else:
553 split_orders = -1
554
555 if "loop_matrix_elements" not in self.stored_quantities:
556 self.stored_quantities['loop_matrix_elements'] = []
557
558 if (auth_skipping or self.auth_skipping) and matrix_element in \
559 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
560
561 logger.info("Skipping %s, " % process.nice_string() + \
562 "identical matrix element already tested" )
563 return None
564
565
566 if not p:
567 p, w_rambo = self.get_momenta(process, options=options)
568
569 if matrix_element in [el[0] for el in \
570 self.stored_quantities['loop_matrix_elements']]:
571 export_dir=self.stored_quantities['loop_matrix_elements'][\
572 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
573 ].index(matrix_element)][1]
574 logger.debug("Reusing generated output %s"%str(export_dir))
575 else:
576 export_dir=pjoin(self.output_path,temp_dir_prefix)
577 if os.path.isdir(export_dir):
578 if not self.proliferate:
579 raise InvalidCmd("The directory %s already exist. Please remove it."%str(export_dir))
580 else:
581 id=1
582 while os.path.isdir(pjoin(self.output_path,\
583 '%s_%i'%(temp_dir_prefix,id))):
584 id+=1
585 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
586
587 if self.proliferate:
588 self.stored_quantities['loop_matrix_elements'].append(\
589 (matrix_element,export_dir))
590
591
592
593 import madgraph.loop.loop_exporters as loop_exporters
594 if self.loop_optimized_output:
595 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
596 else:
597 exporter_class=loop_exporters.LoopProcessExporterFortranSA
598
599 MLoptions = {'clean': True,
600 'complex_mass': self.cmass_scheme,
601 'export_format':'madloop',
602 'mp':True,
603 'SubProc_prefix':'P',
604 'compute_color_flows': not process.get('has_born'),
605 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
606 'cuttools_dir': self.cuttools_dir,
607 'fortran_compiler': self.cmd.options['fortran_compiler'],
608 'output_dependencies': self.cmd.options['output_dependencies']}
609
610 MLoptions.update(self.tir_dir)
611
612 FortranExporter = exporter_class(\
613 self.mg_root, export_dir, MLoptions)
614 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
615 FortranExporter.copy_v4template(modelname=model.get('name'))
616 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
617 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
618 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
619 for c in l]))
620 FortranExporter.convert_model_to_mg4(model,wanted_lorentz,wanted_couplings)
621 FortranExporter.finalize_v4_directory(None,"",False,False,'gfortran')
622
623 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
624 split_orders=split_orders)
625
626 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
627 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
628
629 if gauge_check:
630 file_path, orig_file_content, new_file_content = \
631 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
632 ['helas_calls_ampb_1.f','loop_matrix.f'])
633 file = open(file_path,'w')
634 file.write(new_file_content)
635 file.close()
636 if self.loop_optimized_output:
637 mp_file_path, mp_orig_file_content, mp_new_file_content = \
638 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
639 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
640 mp_file = open(mp_file_path,'w')
641 mp_file.write(mp_new_file_content)
642 mp_file.close()
643
644
645 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
646 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
647
648
649 if gauge_check:
650 file = open(file_path,'w')
651 file.write(orig_file_content)
652 file.close()
653 if self.loop_optimized_output:
654 mp_file = open(mp_file_path,'w')
655 mp_file.write(mp_orig_file_content)
656 mp_file.close()
657
658
659 if not self.proliferate:
660 shutil.rmtree(export_dir)
661
662 if output == "m2":
663
664
665 return finite_m2, []
666 else:
667 return {'m2': finite_m2, output:[]}
668
669 def fix_MadLoopParamCard(self, dir_name, mp=False, loop_filter=False,
670 DoubleCheckHelicityFilter=False, MLOptions={}):
671 """ Set parameters in MadLoopParams.dat suited for these checks. MP
672 stands for multiple precision and can either be a bool or an integer
673 to specify the mode."""
674
675
676 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
677 MLCard = bannermod.MadLoopParam(file)
678
679 if isinstance(mp,bool):
680 mode = 4 if mp else 1
681 else:
682 mode = mp
683
684 for key, value in MLOptions.items():
685 if key == "MLReductionLib":
686 if isinstance(value, int):
687 ml_reds = str(value)
688 if isinstance(value,list):
689 if len(value)==0:
690 ml_reds = '1'
691 else:
692 ml_reds="|".join([str(vl) for vl in value])
693 elif isinstance(value, str):
694 ml_reds = value
695 elif isinstance(value, int):
696 ml_reds = str(value)
697 else:
698 raise MadGraph5Error, 'The argument %s '%str(value)+\
699 ' in fix_MadLoopParamCard must be a string, integer'+\
700 ' or a list.'
701 MLCard.set("MLReductionLib",ml_reds)
702 elif key == 'ImprovePS':
703 MLCard.set('ImprovePSPoint',2 if value else -1)
704 elif key == 'ForceMP':
705 mode = 4
706 elif key in MLCard:
707 MLCard.set(key,value)
708 else:
709 raise Exception, 'The MadLoop option %s specified in function'%key+\
710 ' fix_MadLoopParamCard does not correspond to an option defined in'+\
711 ' MadLoop, nor is it specially handled in this function.'
712 if not mode is None:
713 MLCard.set('CTModeRun',mode)
714 MLCard.set('CTModeInit',mode)
715 MLCard.set('UseLoopFilter',loop_filter)
716 MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
717 MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
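# Example of an MLOptions dictionary understood above (a sketch; any key other
# than the ones treated explicitly must be a genuine MadLoopParams.dat entry):
#
#     MLOptions = {'MLReductionLib': [1, 2],  # written out as "1|2"
#                  'ImprovePS': True,         # sets ImprovePSPoint to 2
#                  'ForceMP': True}           # forces CTModeInit/Run to 4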
718
719 @classmethod
720 def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name = None,
721 verbose=True, format='tuple', skip_compilation=False):
722 """Compile and run ./check, then parse the output and return the result
723 for process with id = proc_id and PSpoint if specified.
724 If PS_name is not None the written out PS.input will be saved in
725 the file PS.input_<PS_name> as well"""
726 if verbose:
727 sys.stdout.write('.')
728 sys.stdout.flush()
729
730 shell_name = None
731 directories = glob.glob(pjoin(working_dir, 'SubProcesses',
732 'P%i_*' % proc_id))
733 if directories and os.path.isdir(directories[0]):
734 shell_name = os.path.basename(directories[0])
735
736
737 if not shell_name:
738 logging.info("Directory hasn't been created for process %s" %proc)
739 return ((0.0, 0.0, 0.0, 0.0, 0), [])
740
741 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
742
743 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
744 if not skip_compilation:
745
746 if os.path.isfile(pjoin(dir_name,'check')):
747 os.remove(pjoin(dir_name,'check'))
748 try:
749 os.remove(pjoin(dir_name,'check_sa.o'))
750 os.remove(pjoin(dir_name,'loop_matrix.o'))
751 except OSError:
752 pass
753
754 devnull = open(os.devnull, 'w')
755 retcode = subprocess.call(['make','check'],
756 cwd=dir_name, stdout=devnull, stderr=devnull)
757 devnull.close()
758
759 if retcode != 0:
760 logging.info("Error while executing make in %s" % shell_name)
761 return ((0.0, 0.0, 0.0, 0.0, 0), [])
762
763
764 if PSpoint:
765 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
766
767
768 if not PS_name is None:
769 misc.write_PS_input(pjoin(dir_name, \
770 'PS.input_%s'%PS_name),PSpoint)
771
772 try:
773 output = subprocess.Popen('./check',
774 cwd=dir_name,
775 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
776 output.read()
777 output.close()
778 if os.path.exists(pjoin(dir_name,'result.dat')):
779 return cls.parse_check_output(file(pjoin(dir_name,\
780 'result.dat')),format=format)
781 else:
782 logging.warning("Error while looking for file %s"%str(os.path\
783 .join(dir_name,'result.dat')))
784 return ((0.0, 0.0, 0.0, 0.0, 0), [])
785 except IOError:
786 logging.warning("Error while executing ./check in %s" % shell_name)
787 return ((0.0, 0.0, 0.0, 0.0, 0), [])
788
789 @classmethod
790 def parse_check_output(cls, output, format='tuple'):
791 """Parse the output string and return a pair whose first element is a tuple
792 holding the finite, born, single and double pole of the ME and the GeV
793 exponent, and whose second element is a list of 4-momenta for all particles
794 involved. Return the answer in two possible formats, 'tuple' or 'dict'."""
795
796 res_dict = {'res_p':[],
797 'born':0.0,
798 'finite':0.0,
799 '1eps':0.0,
800 '2eps':0.0,
801 'gev_pow':0,
802 'export_format':'Default',
803 'accuracy':0.0,
804 'return_code':0,
805 'Split_Orders_Names':[],
806 'Loop_SO_Results':[],
807 'Born_SO_Results':[],
808 'Born_kept':[],
809 'Loop_kept':[]
810 }
811 res_p = []
812
813
814
815 if isinstance(output,file) or isinstance(output,list):
816 text=output
817 elif isinstance(output,str):
818 text=output.split('\n')
819 else:
820 raise MadGraph5Error, 'Type for argument output not supported in'+\
821 ' parse_check_output.'
822 for line in text:
823 splitline=line.split()
824 if len(splitline)==0:
825 continue
826 elif splitline[0]=='PS':
827 res_p.append([float(s) for s in splitline[1:]])
828 elif splitline[0]=='ASO2PI':
829 res_dict['alphaS_over_2pi']=float(splitline[1])
830 elif splitline[0]=='BORN':
831 res_dict['born']=float(splitline[1])
832 elif splitline[0]=='FIN':
833 res_dict['finite']=float(splitline[1])
834 elif splitline[0]=='1EPS':
835 res_dict['1eps']=float(splitline[1])
836 elif splitline[0]=='2EPS':
837 res_dict['2eps']=float(splitline[1])
838 elif splitline[0]=='EXP':
839 res_dict['gev_pow']=int(splitline[1])
840 elif splitline[0]=='Export_Format':
841 res_dict['export_format']=splitline[1]
842 elif splitline[0]=='ACC':
843 res_dict['accuracy']=float(splitline[1])
844 elif splitline[0]=='RETCODE':
845 res_dict['return_code']=int(splitline[1])
846 elif splitline[0]=='Split_Orders_Names':
847 res_dict['Split_Orders_Names']=splitline[1:]
848 elif splitline[0] in ['Born_kept', 'Loop_kept']:
849 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
850 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
851
852
853
854
855 res_dict[splitline[0]].append(\
856 ([int(el) for el in splitline[1:]],{}))
857 elif splitline[0]=='SO_Loop':
858 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
859 float(splitline[2])
860 elif splitline[0]=='SO_Born':
861 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
862 float(splitline[2])
863
864 res_dict['res_p'] = res_p
865
866 if format=='tuple':
867 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
868 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
869 else:
870 return res_dict
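# Minimal sketch of the result.dat content digested above, together with the
# corresponding 'tuple' output (values are purely illustrative):
#
#     PS   500.0 0.0 0.0 500.0
#     BORN 1.0E-02
#     FIN  2.5E-03
#     1EPS 0.0
#     2EPS 0.0
#     EXP  -4
#
# parse_check_output on this text with format='tuple' returns
#     ((2.5e-03, 1.0e-02, 0.0, 0.0, -4), [[500.0, 0.0, 0.0, 500.0]])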
871
872 @staticmethod
873 def apply_log_tweak(proc_path, mode):
874 """ Changes the file model_functions.f in the SOURCE of the process output
875 so as to change how logarithms are analytically continued and see how
876 it impacts the CMS check."""
877 valid_modes = ['default','recompile']
878 if not (mode in valid_modes or (isinstance(mode, list) and
879 len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
880 raise MadGraph5Error("Mode '%s' not recognized"%mode+
881 " in function apply_log_tweak.")
882
883 model_path = pjoin(proc_path,'Source','MODEL')
884 directories = glob.glob(pjoin(proc_path,'SubProcesses','P0_*'))
885 if directories and os.path.isdir(directories[0]):
886 exe_path = directories[0]
887 else:
888 raise MadGraph5Error, 'Could not find a process executable '+\
889 'directory in %s'%proc_path
890 bu_path = pjoin(model_path, 'model_functions.f__backUp__')
891
892 if mode=='default':
893
894 if not os.path.isfile(bu_path):
895 raise MadGraph5Error, 'Back up file %s could not be found.'%bu_path
896 shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
897 return
898
899 if mode=='recompile':
900 try:
901 os.remove(pjoin(model_path,'model_functions.o'))
902 os.remove(pjoin(proc_path,'lib','libmodel.a'))
903 except:
904 pass
905 misc.compile(cwd=model_path)
906
907 try:
908 os.remove(pjoin(exe_path,'check'))
909 except:
910 pass
911 misc.compile(arg=['check'], cwd=exe_path)
912 return
913
914 if mode[0]==mode[1]:
915 return
916
917
918 mp_prefix = 'MP_'
919 target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
920
921
922 if not os.path.isfile(bu_path):
923 shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
924 model_functions = open(pjoin(model_path,'model_functions.f'),'r')
925
926 new_model_functions = []
927 has_replaced = False
928 just_replaced = False
929 find_one_replacement= False
930 mp_mode = None
931 suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
932 replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
933 for line in model_functions:
934
935 if just_replaced:
936 if not re.match(r'\s{6}', line):
937 continue
938 else:
939 just_replaced = False
940 if mp_mode is None:
941
942 new_model_functions.append(line)
943 if (target_line%mp_prefix).lower() in line.lower():
944 mp_mode = mp_prefix
945 elif (target_line%'').lower() in line.lower():
946 mp_mode = ''
947 else:
948
949 if not has_replaced and re.match(replace_regex%mp_mode,line,
950 re.IGNORECASE):
951
952 if mode[0]=='log':
953 if mp_mode=='':
954 new_line =\
955 """ if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
956 reg%s=log(arg) %s TWOPII
957 else
958 reg%s=log(arg)
959 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
960 else:
961 new_line =\
962 """ if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
963 mp_reg%s=log(arg) %s TWOPII
964 else
965 mp_reg%s=log(arg)
966 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
967 else:
968 new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
969 ('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
970 new_model_functions.append(new_line)
971 just_replaced = True
972 has_replaced = True
973 find_one_replacement = True
974 else:
975 new_model_functions.append(line)
976 if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
977 mp_mode = None
978 has_replaced = False
979
980 if not find_one_replacement:
981 logger.warning('No replacement was found/performed for token '+
982 "'%s->%s'."%(mode[0],mode[1]))
983 else:
984 open(pjoin(model_path,'model_functions.f'),'w').\
985 write(''.join(new_model_functions))
986 return
987
988 def setup_ward_check(self, working_dir, file_names, mp=False):
989 """ Modify loop_matrix.f so as to have one external massless gauge boson
990 polarization vector turned into its momentum. It is not a pretty or
991 flexible solution, but it works for this particular case."""
992
993 shell_name = None
994 directories = glob.glob(pjoin(working_dir,'P0_*'))
995 if directories and os.path.isdir(directories[0]):
996 shell_name = os.path.basename(directories[0])
997
998 dir_name = pjoin(working_dir, shell_name)
999
1000
1001 ind=0
1002 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
1003 file_names[ind])):
1004 ind += 1
1005 if ind==len(file_names):
1006 raise Exception, "No helas calls output file found."
1007
1008 helas_file_name=pjoin(dir_name,file_names[ind])
1009 file = open(pjoin(dir_name,helas_file_name), 'r')
1010
1011 helas_calls_out=""
1012 original_file=""
1013 gaugeVectorRegExp=re.compile(\
1014 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
1015 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
1016 foundGauge=False
1017
1018 for line in file:
1019 helas_calls_out+=line
1020 original_file+=line
1021 if line.find("INCLUDE 'coupl.inc'") != -1 or \
1022 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
1023 helas_calls_out+=" INTEGER WARDINT\n"
1024 if not foundGauge:
1025 res=gaugeVectorRegExp.search(line)
1026 if res!=None:
1027 foundGauge=True
1028 helas_calls_out+=" DO WARDINT=1,4\n"
1029 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
1030 if not mp:
1031 helas_calls_out+=\
1032 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
1033 else:
1034 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1035 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1036 helas_calls_out+=" ENDDO\n"
1037 file.close()
1038
1039 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
1040
1044 class LoopMatrixElementTimer(LoopMatrixElementEvaluator):
1045 """Class taking care of matrix element evaluation and running timing for
1046 loop processes."""
1047
1051
1052 @classmethod
1053 def get_MadLoop_Params(cls, MLCardPath):
1054 """ Return a dictionary of the parameters of the MadLoopParamCard.
1055 The key is the name of the parameter and the value is the corresponding
1056 string read from the card."""
1057
1058 return bannermod.MadLoopParam(MLCardPath)
1059
1060
1061 @classmethod
1062 def set_MadLoop_Params(cls, MLCardPath, params):
1063 """ Set the parameters in MadLoopParamCard to the values specified in
1064 the dictionary params.
1065 The key is the name of the parameter and the value is the corresponding
1066 string to write in the card."""
1067
1068 MLcard = bannermod.MadLoopParam(MLCardPath)
1069 for key,value in params.items():
1070 MLcard.set(key, value, ifnotdefault=False)
1071 MLcard.write(MLCardPath, commentdefault=True)
1072
1073 def skip_loop_evaluation_setup(self, dir_name, skip=True):
1074 """ Edit loop_matrix.f in order to skip the loop evaluation phase.
1075 Notice this only affects the double precision evaluation which is
1076 normally fine as we do not make the timing check on mp."""
1077
1078 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1079 loop_matrix = file.read()
1080 file.close()
1081
1082 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1083 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1084 if skip else '.FALSE.'), loop_matrix)
1085 file.write(loop_matrix)
1086 file.close()
1087
1088 def boot_time_setup(self, dir_name, bootandstop=True):
1089 """ Edit loop_matrix.f in order to set the flag which stops the
1090 execution after booting the program (i.e. reading the color data)."""
1091
1092 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1093 loop_matrix = file.read()
1094 file.close()
1095
1096 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1097 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1098 if bootandstop else '.FALSE.'), loop_matrix)
1099 file.write(loop_matrix)
1100 file.close()
1101
1102 def setup_process(self, matrix_element, export_dir, reusing = False,
1103 param_card = None, MLOptions={},clean=True):
1104 """ Output the matrix_element in argument and perform the initialization
1105 while providing some details about the output in the dictionary returned.
1106 Returns None if anything fails"""
1107
1108 infos={'Process_output': None,
1109 'HELAS_MODEL_compilation' : None,
1110 'dir_path' : None,
1111 'Initialization' : None,
1112 'Process_compilation' : None}
1113
1114 if not reusing and clean:
1115 if os.path.isdir(export_dir):
1116 clean_up(self.output_path)
1117 if os.path.isdir(export_dir):
1118 raise InvalidCmd(\
1119 "The directory %s already exist. Please remove it."\
1120 %str(export_dir))
1121 else:
1122 if not os.path.isdir(export_dir):
1123 raise InvalidCmd(\
1124 "Could not find the directory %s to reuse."%str(export_dir))
1125
1126
1127 if not reusing and clean:
1128 model = matrix_element['processes'][0].get('model')
1129
1130
1131 import madgraph.loop.loop_exporters as loop_exporters
1132 if self.loop_optimized_output:
1133 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1134 else:
1135 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1136
1137 MLoptions = {'clean': True,
1138 'complex_mass': self.cmass_scheme,
1139 'export_format':'madloop',
1140 'mp':True,
1141 'SubProc_prefix':'P',
1142 'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
1143 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1144 'cuttools_dir': self.cuttools_dir,
1145 'fortran_compiler':self.cmd.options['fortran_compiler'],
1146 'output_dependencies':self.cmd.options['output_dependencies']}
1147
1148 MLoptions.update(self.tir_dir)
1149
1150 start=time.time()
1151 FortranExporter = exporter_class(self.mg_root, export_dir, MLoptions)
1152 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1153 FortranExporter.copy_v4template(modelname=model.get('name'))
1154 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
1155 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1156 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1157 for c in l]))
1158 FortranExporter.convert_model_to_mg4(self.full_model,wanted_lorentz,wanted_couplings)
1159 infos['Process_output'] = time.time()-start
1160 start=time.time()
1161 FortranExporter.finalize_v4_directory(None,"",False,False,'gfortran')
1162 infos['HELAS_MODEL_compilation'] = time.time()-start
1163
1164
1165 if param_card != None:
1166 if isinstance(param_card, str):
1167 cp(pjoin(param_card),\
1168 pjoin(export_dir,'Cards','param_card.dat'))
1169 else:
1170 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1171
1172
1173
1174 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1175 read_ps = False, npoints = 4)
1176 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1177 mp = False, loop_filter = True,MLOptions=MLOptions)
1178
1179 shell_name = None
1180 directories = glob.glob(pjoin(export_dir, 'SubProcesses','P0_*'))
1181 if directories and os.path.isdir(directories[0]):
1182 shell_name = os.path.basename(directories[0])
1183 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1184 infos['dir_path']=dir_name
1185
1186 attempts = [3,15]
1187
1188 try:
1189 os.remove(pjoin(dir_name,'check'))
1190 os.remove(pjoin(dir_name,'check_sa.o'))
1191 except OSError:
1192 pass
1193
1194 nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
1195 pjoin(export_dir,'SubProcesses'),infos,\
1196 req_files = ['HelFilter.dat','LoopFilter.dat'],
1197 attempts = attempts)
1198 if attempts is None:
1199 logger.error("Could not compile the process %s,"%shell_name+\
1200 " try to generate it via the 'generate' command.")
1201 return None
1202 if nPS_necessary is None:
1203 logger.error("Could not initialize the process %s"%shell_name+\
1204 " with %s PS points."%max(attempts))
1205 return None
1206 elif nPS_necessary > min(attempts):
1207 logger.warning("Could not initialize the process %s"%shell_name+\
1208 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1209
1210 return infos
1211
1212 def time_matrix_element(self, matrix_element, reusing = False,
1213 param_card = None, keep_folder = False, options=None,
1214 MLOptions = {}):
1215 """ Output the matrix_element in argument and give detailed information
1216 about the timing for its output and running"""
1217
1218 if options and 'split_orders' in options.keys():
1219 split_orders = options['split_orders']
1220 else:
1221 split_orders = -1
1222
1223 assert ((not reusing and isinstance(matrix_element, \
1224 helas_objects.HelasMatrixElement)) or (reusing and
1225 isinstance(matrix_element, base_objects.Process)))
1226 if not reusing:
1227 proc_name = matrix_element['processes'][0].shell_string()[2:]
1228 else:
1229 proc_name = matrix_element.shell_string()[2:]
1230
1231 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1232 temp_dir_prefix+"_%s"%proc_name)
1233
1234 res_timings = self.setup_process(matrix_element,export_dir, \
1235 reusing, param_card,MLOptions = MLOptions)
1236
1237 if res_timings == None:
1238 return None
1239 dir_name=res_timings['dir_path']
1240
1241 def check_disk_usage(path):
1242 return subprocess.Popen("du -shc -L "+str(path), \
1243 stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
1244
1245
1246
1247
1248 res_timings['du_source']=check_disk_usage(pjoin(\
1249 export_dir,'Source','*','*.f'))
1250 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1251 res_timings['du_color']=check_disk_usage(pjoin(dir_name,'*.dat'))
1252 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1253
1254 if not res_timings['Initialization']==None:
1255 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1256 else:
1257
1258
1259 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1260 read_ps = False, npoints = 3, hel_config = -1,
1261 split_orders=split_orders)
1262 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1263 time_per_ps_estimate = run_time/3.0
1264
1265 self.boot_time_setup(dir_name,bootandstop=True)
1266 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1267 res_timings['Booting_time'] = run_time
1268 self.boot_time_setup(dir_name,bootandstop=False)
1269
1270
1271 contributing_hel=0
1272 n_contrib_hel=0
1273 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1274 proc_prefix = proc_prefix_file.read()
1275 proc_prefix_file.close()
1276 helicities = file(pjoin(dir_name,'MadLoop5_resources',
1277 '%sHelFilter.dat'%proc_prefix)).read().split()
1278 for i, hel in enumerate(helicities):
1279 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1280 if contributing_hel==0:
1281 contributing_hel=i+1
1282 n_contrib_hel += 1
1283
1284 if contributing_hel==0:
1285 logger.error("Could not find a contributing helicity "+\
1286 "configuration for process %s."%proc_name)
1287 return None
1288
1289 res_timings['n_contrib_hel']=n_contrib_hel
1290 res_timings['n_tot_hel']=len(helicities)
1291
1292
1293 target_pspoints_number = max(int(15.0/time_per_ps_estimate)+1,5)
1294
1295 logger.info("Checking timing for process %s "%proc_name+\
1296 "with %d PS points."%target_pspoints_number)
1297
1298 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1299 read_ps = False, npoints = target_pspoints_number*2, \
1300 hel_config = contributing_hel, split_orders=split_orders)
1301 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1302 if compile_time == None: return None
1303 res_timings['run_polarized_total']=\
1304 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1305
1306 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1307 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1308 split_orders=split_orders)
1309 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
1310 checkRam=True)
1311 if compile_time == None: return None
1312 res_timings['run_unpolarized_total']=\
1313 (run_time-res_timings['Booting_time'])/target_pspoints_number
1314 res_timings['ram_usage'] = ram_usage
1315
1316 if not self.loop_optimized_output:
1317 return res_timings
1318
1319
1320
1321
1322
1323 self.skip_loop_evaluation_setup(dir_name,skip=True)
1324
1325 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1326 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1327 split_orders=split_orders)
1328 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1329 if compile_time == None: return None
1330 res_timings['run_unpolarized_coefs']=\
1331 (run_time-res_timings['Booting_time'])/target_pspoints_number
1332
1333 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1334 read_ps = False, npoints = target_pspoints_number*2, \
1335 hel_config = contributing_hel, split_orders=split_orders)
1336 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1337 if compile_time == None: return None
1338 res_timings['run_polarized_coefs']=\
1339 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1340
1341
1342 self.skip_loop_evaluation_setup(dir_name,skip=False)
1343
1344 return res_timings
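# On success res_timings contains, besides the setup_process entries
# ('Process_output', 'HELAS_MODEL_compilation', 'Process_compilation',
# 'Initialization', 'dir_path'), the quantities measured above: 'du_source',
# 'du_process', 'du_color', 'du_exe', 'Booting_time', 'n_contrib_hel',
# 'n_tot_hel', 'ram_usage', 'run_polarized_total', 'run_unpolarized_total'
# and, for the optimized output only, 'run_polarized_coefs' and
# 'run_unpolarized_coefs'.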
1345
1346
1347
1348
1349
1350 def check_matrix_element_stability(self, matrix_element, options=None,
1351 infos_IN = None, param_card = None, keep_folder = False,
1352 MLOptions = {}):
1353 """ Output the matrix_element in argument, run it for nPoints and return
1354 a dictionary containing the stability information on each of these points.
1355 If infos are provided, then the matrix element output is skipped and
1356 reused from a previous run and the content of infos.
1357 """
1358
1359 if not options:
1360 reusing = False
1361 nPoints = 100
1362 split_orders = -1
1363 else:
1364 reusing = options['reuse']
1365 nPoints = options['npoints']
1366 split_orders = options['split_orders']
1367
1368 assert ((not reusing and isinstance(matrix_element, \
1369 helas_objects.HelasMatrixElement)) or (reusing and
1370 isinstance(matrix_element, base_objects.Process)))
1371
1372
1373 def format_PS_point(ps, rotation=0):
1374 """ Write out the specified PS point to the file dir_path/PS.input
1375 while rotating it if rotation!=0. We consider only a few simple rotations,
1376 but one could think of having rotations of arbitrary angle too.
1377 The first possibilities (1, 2 and 3) are rotations around and a boost
1378 along the z-axis, so that improve_ps can still work.
1379 rotation=0 => No rotation
1380 rotation=1 => Z-axis pi/2 rotation
1381 rotation=2 => Z-axis pi/4 rotation
1382 rotation=3 => Z-axis boost
1383 rotation=4 => (x'=z,y'=-x,z'=-y)
1384 rotation=5 => (x'=-z,y'=y,z'=x)"""
1385 if rotation==0:
1386 p_out=copy.copy(ps)
1387 elif rotation==1:
1388 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1389 elif rotation==2:
1390 sq2 = math.sqrt(2.0)
1391 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1392 elif rotation==3:
1393 p_out = boost_momenta(ps, 3)
1394
1395
1396 elif rotation==4:
1397 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1398 elif rotation==5:
1399 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1400 else:
1401 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1402
1403 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
1404
1405 def pick_PS_point(proc, options):
1406 """ Randomly generate a PS point and make sure it is eligible. Then
1407 return it. Users can edit the cuts here if they want."""
1408
1409 p, w_rambo = self.get_momenta(proc, options)
1410 if options['events']:
1411 return p
1412
1413 while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
1414 p, w_rambo = self.get_momenta(proc, options)
1415
1416
1417
1418
1419 if len(p)==3:
1420 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1421 return p
1422
1423
1424
1425
1426 accuracy_threshold=1.0e-1
1427
1428
1429
1430 num_rotations = 1
1431
1432 if "MLReductionLib" not in MLOptions:
1433 tools=[1]
1434 else:
1435 tools=MLOptions["MLReductionLib"]
1436 tools=list(set(tools))
1437
1438 tool_var={'pjfry':2,'golem':4}
1439 for tool in ['pjfry','golem']:
1440 tool_dir='%s_dir'%tool
1441 if not tool_dir in self.tir_dir:
1442 continue
1443 tool_libpath=self.tir_dir[tool_dir]
1444 tool_libname="lib%s.a"%tool
1445 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1446 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1447 if tool_var[tool] in tools:
1448 tools.remove(tool_var[tool])
1449 if not tools:
1450 return None
1451
1452 if not reusing:
1453 process = matrix_element['processes'][0]
1454 else:
1455 process = matrix_element
1456 proc_name = process.shell_string()[2:]
1457 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1458 temp_dir_prefix+"_%s"%proc_name)
1459
1460 tools_name={1:'CutTools',2:'PJFry++',3:'IREGI',4:'Golem95'}
1461 return_dict={}
1462 return_dict['Stability']={}
1463 infos_save={'Process_output': None,
1464 'HELAS_MODEL_compilation' : None,
1465 'dir_path' : None,
1466 'Initialization' : None,
1467 'Process_compilation' : None}
1468
1469 for tool in tools:
1470 tool_name=tools_name[tool]
1471
1472
1473
1474
1475
1476 DP_stability = []
1477 QP_stability = []
1478
1479 Unstable_PS_points = []
1480
1481 Exceptional_PS_points = []
1482
1483 MLoptions={}
1484 MLoptions["MLReductionLib"]=tool
1485 clean=(tool==tools[0])
1486 if infos_IN==None or (tool_name not in infos_IN):
1487 infos=infos_IN
1488 else:
1489 infos=infos_IN[tool_name]
1490
1491 if not infos:
1492 infos = self.setup_process(matrix_element,export_dir, \
1493 reusing, param_card,MLoptions,clean)
1494 if not infos:
1495 return None
1496
1497 if clean:
1498 infos_save['Process_output']=infos['Process_output']
1499 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1500 infos_save['dir_path']=infos['dir_path']
1501 infos_save['Process_compilation']=infos['Process_compilation']
1502 else:
1503 if not infos['Process_output']:
1504 infos['Process_output']=infos_save['Process_output']
1505 if not infos['HELAS_MODEL_compilation']:
1506 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1507 if not infos['dir_path']:
1508 infos['dir_path']=infos_save['dir_path']
1509 if not infos['Process_compilation']:
1510 infos['Process_compilation']=infos_save['Process_compilation']
1511
1512 dir_path=infos['dir_path']
1513
1514
1515 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1516 data_i = 0
1517
1518 if reusing:
1519
1520 data_i=0
1521 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1522 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1523 saved_run = save_load_object.load_from_file(pickle_path)
1524 if data_i>0:
1525 logger.info("Loading additional data stored in %s."%
1526 str(pickle_path))
1527 logger.info("Loaded data moved to %s."%str(pjoin(
1528 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1529 shutil.move(pickle_path,
1530 pjoin(dir_path,'LOADED_'+savefile%('%d'%data_i)))
1531 DP_stability.extend(saved_run['DP_stability'])
1532 QP_stability.extend(saved_run['QP_stability'])
1533 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1534 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1535 data_i += 1
1536
1537 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1538 'QP_stability':QP_stability,
1539 'Unstable_PS_points':Unstable_PS_points,
1540 'Exceptional_PS_points':Exceptional_PS_points}
1541
1542 if nPoints==0:
1543 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1544
1545 if data_i>1:
1546 save_load_object.save_to_file(pjoin(dir_path,
1547 savefile%'_0'),return_dict['Stability'][tool_name])
1548 continue
1549 else:
1550 logger.info("ERROR: Not reusing a directory and the number"+\
1551 " of points for the check is zero.")
1552 return None
1553
1554 logger.info("Checking stability of process %s "%proc_name+\
1555 "with %d PS points by %s."%(nPoints,tool_name))
1556 if infos['Initialization'] != None:
1557 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1558 sec_needed = int(time_per_ps_estimate*nPoints*4)
1559 else:
1560 sec_needed = 0
1561
1562 progress_bar = None
1563 time_info = False
1564 if sec_needed>5:
1565 time_info = True
1566 logger.info("This check should take about "+\
1567 "%s to run. Started on %s."%(\
1568 str(datetime.timedelta(seconds=sec_needed)),\
1569 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1570 if logger.getEffectiveLevel()<logging.WARNING and \
1571 (sec_needed>5 or (reusing and infos['Initialization'] == None)):
1572 widgets = ['Stability check:', pbar.Percentage(), ' ',
1573 pbar.Bar(),' ', pbar.ETA(), ' ']
1574 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1575 fd=sys.stdout)
1576 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1577 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1578
1579
1580
1581 try:
1582 os.remove(pjoin(dir_path,'check'))
1583 os.remove(pjoin(dir_path,'check_sa.o'))
1584 except OSError:
1585 pass
1586
1587 devnull = open(os.devnull, 'w')
1588 retcode = subprocess.call(['make','check'],
1589 cwd=dir_path, stdout=devnull, stderr=devnull)
1590 devnull.close()
1591 if retcode != 0:
1592 logging.info("Error while executing make in %s" % dir_path)
1593 return None
1594
1595
1596
1597
1598 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1599
1600
1601 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1602 checkerName = 'StabilityCheckDriver.f'
1603 else:
1604 checkerName = 'StabilityCheckDriver_loop_induced.f'
1605
1606 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1607 checkerName),'r') as checkerFile:
1608 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1609 checkerToWrite = checkerFile.read()%{'proc_prefix':
1610 proc_prefix.read()}
1611 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1612 checkerFile.write(checkerToWrite)
1613 checkerFile.close()
1614
1615
1616
1617
1618
1619 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1620 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1621 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1622 os.remove(pjoin(dir_path,'loop_matrix.o'))
1623 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1624 mode='fortran', job_specs = False)
1625
1626
1627
1628
1629 if len(process['legs'])==3:
1630 self.fix_MadLoopParamCard(dir_path, mp=False,
1631 loop_filter=False, DoubleCheckHelicityFilter=True)
1632
1633 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1634 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1635 cwd=dir_path)
1636 start_index = len(DP_stability)
1637 if progress_bar!=None:
1638 progress_bar.start()
1639
1640
1641 interrupted = False
1642
1643
1644 retry = 0
1645
1646 i=start_index
1647 if options and 'events' in options and options['events']:
1648
1649 import MadSpin.decay as madspin
1650 fsock = open(options['events'])
1651 self.event_file = madspin.Event(fsock)
1652 while i<(start_index+nPoints):
1653
1654 qp_dict={}
1655 dp_dict={}
1656 UPS = None
1657 EPS = None
1658
1659 if retry==0:
1660 p = pick_PS_point(process, options)
1661
1662 try:
1663 if progress_bar!=None:
1664 progress_bar.update(i+1-start_index)
1665
1666 PSPoint = format_PS_point(p,0)
1667 dp_res=[]
1668 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1669 split_orders=split_orders))
1670 dp_dict['CTModeA']=dp_res[-1]
1671 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1672 split_orders=split_orders))
1673 dp_dict['CTModeB']=dp_res[-1]
1674 for rotation in range(1,num_rotations+1):
1675 PSPoint = format_PS_point(p,rotation)
1676 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1677 split_orders=split_orders))
1678 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1679
1680 if any([not res for res in dp_res]):
1681 return None
1682 dp_accuracy =((max(dp_res)-min(dp_res))/
1683 abs(sum(dp_res)/len(dp_res)))
1684 dp_dict['Accuracy'] = dp_accuracy
1685 if dp_accuracy>accuracy_threshold:
1686 if tool==1:
1687
1688 UPS = [i,p]
1689 qp_res=[]
1690 PSPoint = format_PS_point(p,0)
1691 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1692 split_orders=split_orders))
1693 qp_dict['CTModeA']=qp_res[-1]
1694 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1695 split_orders=split_orders))
1696 qp_dict['CTModeB']=qp_res[-1]
1697 for rotation in range(1,num_rotations+1):
1698 PSPoint = format_PS_point(p,rotation)
1699 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1700 split_orders=split_orders))
1701 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1702
1703 if any([not res for res in qp_res]):
1704 return None
1705
1706 qp_accuracy = ((max(qp_res)-min(qp_res))/
1707 abs(sum(qp_res)/len(qp_res)))
1708 qp_dict['Accuracy']=qp_accuracy
1709 if qp_accuracy>accuracy_threshold:
1710 EPS = [i,p]
1711 else:
1712
1713
1714 UPS = [i,p]
1715
1716 except KeyboardInterrupt:
1717 interrupted = True
1718 break
1719 except IOError, e:
1720 if e.errno == errno.EINTR:
1721 if retry==100:
1722 logger.error("Failed a hundred times consecutively because"+
1723 " of system call interruptions.")
1724 raise
1725 else:
1726 logger.debug("Recovered from a system call interruption. "+\
1727 "PSpoint #%i, Attempt #%i."%(i,retry+1))
1728
1729 time.sleep(0.5)
1730
1731 retry = retry+1
1732
1733 try:
1734 StabChecker.kill()
1735 except Exception:
1736 pass
1737 StabChecker = subprocess.Popen(\
1738 [pjoin(dir_path,'StabilityCheckDriver')],
1739 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1740 stderr=subprocess.PIPE, cwd=dir_path)
1741 continue
1742 else:
1743 raise
1744
1745
1746
1747 retry = 0
1748
1749 i=i+1
1750
1751
1752 DP_stability.append(dp_dict)
1753 QP_stability.append(qp_dict)
1754 if not EPS is None:
1755 Exceptional_PS_points.append(EPS)
1756 if not UPS is None:
1757 Unstable_PS_points.append(UPS)
1758
1759 if progress_bar!=None:
1760 progress_bar.finish()
1761 if time_info:
1762 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1763 "%d-%m-%Y %H:%M"))
1764
1765
1766 if not interrupted:
1767 StabChecker.stdin.write('y\n')
1768 else:
1769 StabChecker.kill()
1770
1771
1772
1773
1774
1775
1776
1777 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1778 return_dict['Stability'][tool_name])
1779
1780 if interrupted:
1781 break
1782
1783 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1784 reusing else matrix_element
1785 return return_dict
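# The dictionary returned above has the structure
#     {'Process': <the checked Process>,
#      'Stability': {tool_name: {'DP_stability': [dp_dict, ...],
#                                'QP_stability': [qp_dict, ...],
#                                'Unstable_PS_points': [[i, p], ...],
#                                'Exceptional_PS_points': [[i, p], ...]}}}
# where each dp_dict/qp_dict maps 'CTModeA', 'CTModeB', 'Rotation<i>' and
# 'Accuracy' to the evaluations obtained for phase-space point number i.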
1786
1787 @classmethod
1788 def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1789 split_orders=-1):
1790 """ This version of get_me_value is simplified for the purpose of this
1791 class. No compilation is necessary. The CT mode can be specified."""
1792
1793
1794 StabChecker.stdin.write('\x1a')
1795 StabChecker.stdin.write('1\n')
1796 StabChecker.stdin.write('%d\n'%mode)
1797 StabChecker.stdin.write('%s\n'%PSpoint)
1798 StabChecker.stdin.write('%.16E\n'%mu_r)
1799 StabChecker.stdin.write('%d\n'%hel)
1800 StabChecker.stdin.write('%d\n'%split_orders)
1801
1802 try:
1803 while True:
1804 output = StabChecker.stdout.readline()
1805 if output==' ##TAG#RESULT_START#TAG##\n':
1806 break
1807 res = ""
1808 while True:
1809 output = StabChecker.stdout.readline()
1810 if output==' ##TAG#RESULT_STOP#TAG##\n':
1811 break
1812 else:
1813 res += output
1814 return cls.parse_check_output(res,format='tuple')[0][0]
1815 except IOError as e:
1816 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1817 raise e
1818
1821 """ Perform a python evaluation of the matrix element independently for
1822 all possible helicity configurations for a fixed number of points N and
1823 returns the average for each in the format [[hel_config, eval],...].
1824 This is used to determine what are the vanishing and dependent helicity
1825 configurations at generation time and accordingly setup the output.
1826 This is not yet implemented at LO."""
1827
1828
1829 assert isinstance(process,base_objects.Process)
1830 assert process.get('perturbation_couplings')==[]
1831
1832 N_eval=50
1833
1834 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
1835 auth_skipping = False, reuse = True)
1836
1837 amplitude = diagram_generation.Amplitude(process)
1838 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
1839
1840 cumulative_helEvals = []
1841
1842 for i in range(N_eval):
1843 p, w_rambo = evaluator.get_momenta(process)
1844 helEvals = evaluator.evaluate_matrix_element(\
1845 matrix_element, p = p, output = 'helEvals')['helEvals']
1846 if cumulative_helEvals==[]:
1847 cumulative_helEvals=copy.copy(helEvals)
1848 else:
1849 cumulative_helEvals = [[h[0],h[1]+helEvals[j][1]] for j, h in \
1850 enumerate(cumulative_helEvals)]
1851
1852
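# Average the accumulated per-helicity results over the N_eval sampled points.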
1853 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
1854
1855
1856
1857 clean_added_globals(ADDED_GLOBAL)
1858
1859 return cumulative_helEvals
1860
1863 """A wrapper function for running an iteration of a function over
1864 a multiprocess, without having to first create a process list
1865 (which makes a big difference for very large multiprocesses.
1866 stored_quantities is a dictionary for any quantities that we want
1867 to reuse between runs."""
1868
1869 model = multiprocess.get('model')
1870 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
1871 if not leg.get('state')]
1872 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
1873 if leg.get('state')]
1874
1875 id_anti_id_dict = {}
1876 for id in set(tuple(sum(isids+fsids, []))):
1877 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
1878 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
1879 sorted_ids = []
1880 results = []
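# Loop over every initial-state/final-state id combination of the multiprocess,
# skipping combinations already covered by a crossing of a previous one.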
1881 for is_prod in itertools.product(*isids):
1882 for fs_prod in itertools.product(*fsids):
1883
1884
1885 if check_already_checked(is_prod, fs_prod, sorted_ids,
1886 multiprocess, model, id_anti_id_dict):
1887 continue
1888
1889 process = multiprocess.get_process_with_legs(base_objects.LegList(\
1890 [base_objects.Leg({'id': id, 'state':False}) for \
1891 id in is_prod] + \
1892 [base_objects.Leg({'id': id, 'state':True}) for \
1893 id in fs_prod]))
1894
1895 if opt is not None:
1896 if isinstance(opt, dict):
1897 try:
1898 value = opt[process.base_string()]
1899 except Exception:
1900 continue
1901 result = function(process, stored_quantities, value, options=options)
1902 else:
1903 result = function(process, stored_quantities, opt, options=options)
1904 else:
1905 result = function(process, stored_quantities, options=options)
1906
1907 if result:
1908 results.append(result)
1909
1910 return results
1911
1912
1913
1914
1915
1916 -def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
1917 id_anti_id_dict = {}):
1918 """Check if process already checked, if so return True, otherwise add
1919 process and antiprocess to sorted_ids."""
1920
1921
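# Map the initial-state ids to their antiparticles so that a process and its
# conjugated crossing reduce to the same sorted key.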
1922 if id_anti_id_dict:
1923 is_ids = [id_anti_id_dict[id] for id in \
1924 is_ids]
1925 else:
1926 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
1927 is_ids]
1928
1929 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
1930 [process.get('id')])
1931
1932 if ids in sorted_ids:
1933
1934 return True
1935
1936
1937 sorted_ids.append(ids)
1938
1939
1940 return False
1941
1947 """ Generate a loop matrix element from the process definition, and returns
1948 it along with the timing information dictionary.
1949 If reuse is True, it reuses the already output directory if found.
1950 There is the possibility of specifying the proc_name."""
1951
1952 assert isinstance(process_definition,
1953 (base_objects.ProcessDefinition,base_objects.Process))
1954 assert process_definition.get('perturbation_couplings')!=[]
1955
1956 if isinstance(process_definition,base_objects.ProcessDefinition):
1957 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
1958 raise InvalidCmd("This check can only be performed on single "+
1959 " processes. (i.e. without multiparticle labels).")
1960
1961 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
1962 if not leg.get('state')]
1963 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
1964 if leg.get('state')]
1965
1966
1967 process = process_definition.get_process(isids,fsids)
1968 else:
1969 process = process_definition
1970
1971 if output_path is not None:
1972 root_path = output_path
1973 else:
1974 root_path = cmd._mgme_dir
1975
1976 timing = {'Diagrams_generation': None,
1977 'n_loops': None,
1978 'HelasDiagrams_generation': None,
1979 'n_loop_groups': None,
1980 'n_loop_wfs': None,
1981 'loop_wfs_ranks': None}
1982
1983 if proc_name:
1984 proc_dir = pjoin(root_path,proc_name)
1985 else:
1986 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
1987 '_'.join(process.shell_string().split('_')[1:])))
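# If the output directory already exists and reuse is requested, return without
# regenerating the loop matrix element.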
1988 if reuse and os.path.isdir(proc_dir):
1989 logger.info("Reusing directory %s"%str(proc_dir))
1990
1991 return timing, process
1992
1993 logger.info("Generating p%s"%process_definition.nice_string()[1:])
1994
1995 start=time.time()
1996 try:
1997 amplitude = loop_diagram_generation.LoopAmplitude(process,
1998 loop_filter=loop_filter)
1999 except InvalidCmd:
2000
2001
2002 return time.time()-start, None
2003 if not amplitude.get('diagrams'):
2004
2005 return time.time()-start, None
2006
2007
2008
2009 loop_optimized_output = cmd.options['loop_optimized_output']
2010 timing['Diagrams_generation']=time.time()-start
2011 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2012 start=time.time()
2013
2014 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2015 optimized_output = loop_optimized_output,gen_color=True)
2016
2017
2018
2019 matrix_element.compute_all_analytic_information()
2020 timing['HelasDiagrams_generation']=time.time()-start
2021
2022 if loop_optimized_output:
2023 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2024 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2025 ldiag.get('loop_wavefunctions')]
2026 timing['n_loop_wfs']=len(lwfs)
2027 timing['loop_wfs_ranks']=[]
2028 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2029 for l in lwfs])+1):
2030 timing['loop_wfs_ranks'].append(\
2031 len([1 for l in lwfs if \
2032 l.get_analytic_info('wavefunction_rank')==rank]))
2033
2034 return timing, matrix_element
2035
2036
2037
2038
2039 -def check_profile(process_definition, param_card = None,cuttools="",tir={},
2040 options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2041 """For a single loop process, check both its timings and then its stability
2042 in one go without regenerating it."""
2043
2044 if 'reuse' not in options:
2045 keep_folder=False
2046 else:
2047 keep_folder = options['reuse']
2048
2049 model=process_definition.get('model')
2050
2051 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2052 keep_folder,output_path=output_path,cmd=cmd)
2053 reusing = isinstance(matrix_element, base_objects.Process)
2054 options['reuse'] = reusing
2055 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2056 model=model, output_path=output_path, cmd=cmd)
2057
2058 if not myProfiler.loop_optimized_output:
2059 MLoptions={}
2060 else:
2061 MLoptions=MLOptions
2062 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2063 param_card, keep_folder=keep_folder,options=options,
2064 MLOptions = MLoptions)
2065
2066 if timing2 is None:
2067 return None, None
2068
2069
2070 timing = dict(timing1.items()+timing2.items())
2071 stability = myProfiler.check_matrix_element_stability(matrix_element,
2072 options=options, infos_IN=timing,param_card=param_card,
2073 keep_folder = keep_folder,
2074 MLOptions = MLoptions)
2075 if stability is None:
2076 return None, None
2077 else:
2078 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2079 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2080 return timing, stability
2081
2082
2083
2084
2085 -def check_stability(process_definition, param_card = None,cuttools="",tir={},
2086 options=None,nPoints=100, output_path=None,
2087 cmd = FakeInterface(), MLOptions = {}):
2088 """For a single loop process, give a detailed summary of the generation and
2089 execution timing."""
2090
2091 if "reuse" in options:
2092 reuse=options['reuse']
2093 else:
2094 reuse=False
2095
2096 reuse=options['reuse']
2097 keep_folder = reuse
2098 model=process_definition.get('model')
2099
2100 timing, matrix_element = generate_loop_matrix_element(process_definition,
2101 reuse, output_path=output_path, cmd=cmd)
2102 reusing = isinstance(matrix_element, base_objects.Process)
2103 options['reuse'] = reusing
2104 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2105 output_path=output_path,model=model,cmd=cmd)
2106
2107 if not myStabilityChecker.loop_optimized_output:
2108 MLoptions = {}
2109 else:
2110 MLoptions = MLOptions
2111 if "MLReductionLib" not in MLOptions:
2112 MLoptions["MLReductionLib"] = []
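# Integer codes select the reduction tools passed to MadLoop: 1=CutTools,
# 2=PJFry, 3=IREGI, 4=Golem (each added only if its path is configured).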
2113 if cuttools:
2114 MLoptions["MLReductionLib"].extend([1])
2115 if "iregi_dir" in tir:
2116 MLoptions["MLReductionLib"].extend([3])
2117 if "pjfry_dir" in tir:
2118 MLoptions["MLReductionLib"].extend([2])
2119 if "golem_dir" in tir:
2120 MLoptions["MLReductionLib"].extend([4])
2121
2122 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2123 options=options,param_card=param_card,
2124 keep_folder=keep_folder,
2125 MLOptions=MLoptions)
2126
2127 if stability is None:
2128 return None
2129 else:
2130 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2131 return stability
2132
2133
2134
2135
2136 -def check_timing(process_definition, param_card= None, cuttools="",tir={},
2137 output_path=None, options={}, cmd = FakeInterface(),
2138 MLOptions = {}):
2139 """For a single loop process, give a detailed summary of the generation and
2140 execution timing."""
2141
2142 if 'reuse' not in options:
2143 keep_folder = False
2144 else:
2145 keep_folder = options['reuse']
2146 model=process_definition.get('model')
2147 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2148 keep_folder, output_path=output_path, cmd=cmd)
2149 reusing = isinstance(matrix_element, base_objects.Process)
2150 options['reuse'] = reusing
2151 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2152 output_path=output_path, cmd=cmd)
2153
2154 if not myTimer.loop_optimized_output:
2155 MLoptions = {}
2156 else:
2157 MLoptions = MLOptions
2158 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2159 keep_folder = keep_folder, options=options,
2160 MLOptions = MLoptions)
2161
2162 if timing2 is None:
2163 return None
2164 else:
2165
2166 res = dict(timing1.items()+timing2.items())
2167 res['loop_optimized_output']=myTimer.loop_optimized_output
2168 return res
2169
2170
2171
2172
2173 -def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2174 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2175 """Check processes by generating them with all possible orderings
2176 of particles (which means different diagram building and Helas
2177 calls), and comparing the resulting matrix element values."""
2178
2179 cmass_scheme = cmd.options['complex_mass_scheme']
2180 if isinstance(processes, base_objects.ProcessDefinition):
2181
2182
2183 multiprocess = processes
2184 model = multiprocess.get('model')
2185
2186
2187 if multiprocess.get('perturbation_couplings')==[]:
2188 evaluator = MatrixElementEvaluator(model,
2189 auth_skipping = True, reuse = False, cmd = cmd)
2190 else:
2191 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2192 model=model, auth_skipping = True,
2193 reuse = False, output_path=output_path, cmd = cmd)
2194
2195 results = run_multiprocs_no_crossings(check_process,
2196 multiprocess,
2197 evaluator,
2198 quick,
2199 options)
2200
2201 if "used_lorentz" not in evaluator.stored_quantities:
2202 evaluator.stored_quantities["used_lorentz"] = []
2203
2204 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2205
2206 clean_up(output_path)
2207
2208 return results, evaluator.stored_quantities["used_lorentz"]
2209
2210 elif isinstance(processes, base_objects.Process):
2211 processes = base_objects.ProcessList([processes])
2212 elif isinstance(processes, base_objects.ProcessList):
2213 pass
2214 else:
2215 raise InvalidCmd("processes is of non-supported format")
2216
2217 if not processes:
2218 raise InvalidCmd("No processes given")
2219
2220 model = processes[0].get('model')
2221
2222
2223 if processes[0].get('perturbation_couplings')==[]:
2224 evaluator = MatrixElementEvaluator(model, param_card,
2225 auth_skipping = True, reuse = False, cmd = cmd)
2226 else:
2227 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2228 model=model,param_card=param_card,
2229 auth_skipping = True, reuse = False,
2230 output_path=output_path, cmd = cmd)
2231
2232
2233
2234 sorted_ids = []
2235 comparison_results = []
2236
2237
2238 for process in processes:
2239
2240
2241 if check_already_checked([l.get('id') for l in process.get('legs') if \
2242 not l.get('state')],
2243 [l.get('id') for l in process.get('legs') if \
2244 l.get('state')],
2245 sorted_ids, process, model):
2246 continue
2247
2248 res = check_process(process, evaluator, quick, options)
2249 if res:
2250 comparison_results.append(res)
2251
2252 if "used_lorentz" not in evaluator.stored_quantities:
2253 evaluator.stored_quantities["used_lorentz"] = []
2254
2255 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2256
2257 clean_up(output_path)
2258
2259 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2260
2262 """Check the helas calls for a process by generating the process
2263 using all different permutations of the process legs (or, if
2264 quick, use a subset of permutations), and check that the matrix
2265 element is invariant under this."""
2266
2267 model = process.get('model')
2268
2269
2270 for i, leg in enumerate(process.get('legs')):
2271 leg.set('number', i+1)
2272
2273 logger.info("Checking crossings of %s" % \
2274 process.nice_string().replace('Process:', 'process'))
2275
2276 process_matrix_elements = []
2277
2278
2279
2280 if quick:
2281 leg_positions = [[] for leg in process.get('legs')]
2282 quick = range(1,len(process.get('legs')) + 1)
2283
2284 values = []
2285
2286
2287 number_checked=0
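# Run over the leg permutations; in 'quick' mode only keep permutations that
# place at least one leg in a position not probed before.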
2288 for legs in itertools.permutations(process.get('legs')):
2289
2290 order = [l.get('number') for l in legs]
2291 if quick:
2292 found_leg = True
2293 for num in quick:
2294
2295
2296 leg_position = legs.index([l for l in legs if \
2297 l.get('number') == num][0])
2298
2299 if leg_position not in leg_positions[num-1]:
2300 found_leg = False
2301 leg_positions[num-1].append(leg_position)
2302
2303 if found_leg:
2304 continue
2305
2306
2307
2308 if quick and process.get('perturbation_couplings') and number_checked >3:
2309 continue
2310
2311 legs = base_objects.LegList(legs)
2312
2313 if order != range(1,len(legs) + 1):
2314 logger.info("Testing permutation: %s" % \
2315 order)
2316
2317 newproc = copy.copy(process)
2318 newproc.set('legs',legs)
2319
2320
2321 try:
2322 if newproc.get('perturbation_couplings')==[]:
2323 amplitude = diagram_generation.Amplitude(newproc)
2324 else:
2325
2326 loop_base_objects.cutting_method = 'optimal' if \
2327 number_checked%2 == 0 else 'default'
2328 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2329 except InvalidCmd:
2330 result=False
2331 else:
2332 result = amplitude.get('diagrams')
2333
2334 loop_base_objects.cutting_method = 'optimal'
2335
2336 if not result:
2337
2338 logging.info("No diagrams for %s" % \
2339 process.nice_string().replace('Process', 'process'))
2340 break
2341
2342 if order == range(1,len(legs) + 1):
2343
2344 p, w_rambo = evaluator.get_momenta(process, options)
2345
2346
2347 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2348 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2349 gen_color=False)
2350 else:
2351 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2352 optimized_output=evaluator.loop_optimized_output)
2353
2354
2355
2356
2357 if amplitude.get('process').get('has_born'):
2358
2359
2360 if matrix_element in process_matrix_elements:
2361
2362
2363 continue
2364
2365 process_matrix_elements.append(matrix_element)
2366
2367 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2368 options=options)
2369 if res is None:
2370 break
2371
2372 values.append(res[0])
2373 number_checked += 1
2374
2375
2376
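# Stop early once the relative spread between permutations exceeds 1%: the
# crossing check has already failed at that point.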
2377 if abs(max(values)) + abs(min(values)) > 0 and \
2378 2 * abs(max(values) - min(values)) / \
2379 (abs(max(values)) + abs(min(values))) > 0.01:
2380 break
2381
2382
2383 if not values:
2384 return None
2385
2386
2387
2388 diff = 0
2389 if abs(max(values)) + abs(min(values)) > 0:
2390 diff = 2* abs(max(values) - min(values)) / \
2391 (abs(max(values)) + abs(min(values)))
2392
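# Processes with loop corrections are allowed a looser tolerance than
# tree-level ones.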
2393
2394 if process.get('perturbation_couplings'):
2395 passed = diff < 1.e-5
2396 else:
2397 passed = diff < 1.e-8
2398
2399 return {"process": process,
2400 "momenta": p,
2401 "values": values,
2402 "difference": diff,
2403 "passed": passed}
2404
2406 """Clean-up the possible left-over outputs from 'evaluate_matrix element' of
2407 the LoopMatrixEvaluator (when its argument proliferate is set to true). """
2408
2409 if mg_root is None:
2410 return
2411
2412 directories = glob.glob(pjoin(mg_root, '%s*'%temp_dir_prefix))
2413 if directories != []:
2414 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2415 for dir in directories:
2416
2417 if os.path.isdir(pjoin(dir,'SubProcesses')):
2418 shutil.rmtree(dir)
2419
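# NOTE: the helper 'format_output' (used below as 'f = format_output') is not
# shown in this listing. A minimal sketch consistent with how it is called,
# assuming it falls back to the string 'NA' when the value is None:
#
#     def format_output(output, format):
#         return format % output if output is not None else 'NA'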
2428
2429 -def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2430 """Present the results from a timing and stability consecutive check"""
2431
2432 opt = timing['loop_optimized_output']
2433
2434 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2435 ' output:\n'
2436 text += output_timings(myprocdef,timing)
2437
2438 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2439 ' output:\n'
2440 text += output_stability(stability,output_path, reusing=reusing)
2441
2442 mode = 'optimized' if opt else 'default'
2443 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2444 %(mode,stability['Process'].shell_string()))
2445 logFile = open(logFilePath, 'w')
2446 logFile.write(text)
2447 logFile.close()
2448 logger.info('Log of this profile check was output to file %s'\
2449 %str(logFilePath))
2450 return text
2451
2453 """Present the result of a stability check in a nice format.
2454 The full info is printed out in 'Stability_result_<proc_shell_string>.dat'
2455 under the MadGraph5_aMC@NLO root folder (output_path)"""
2456
2457 def accuracy(eval_list):
2458 """ Compute the accuracy from different evaluations."""
2459 return (2.0*(max(eval_list)-min(eval_list))/
2460 abs(max(eval_list)+min(eval_list)))
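# e.g. accuracy([1.0, 1.002]) = 2*0.002/2.002 ~ 2.0e-3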
2461
2462 def best_estimate(eval_list):
2463 """ Returns the best estimate from different evaluations."""
2464 return (max(eval_list)+min(eval_list))/2.0
2465
2466 def loop_direction_test_power(eval_list):
2467 """ Computes the loop direction test power P is computed as follow:
2468 P = accuracy(loop_dir_test) / accuracy(all_test)
2469 So that P is large if the loop direction test is effective.
2470 The tuple returned is (log(median(P)),log(min(P)),frac)
2471 where frac is the fraction of events with powers smaller than -3
2472 which means events for which the reading direction test shows an
2473 accuracy three digits higher than it really is according to the other
2474 tests."""
2475 powers=[]
2476 for eval in eval_list:
2477 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2478
2479 other_evals = [eval[key] for key in eval.keys() if key not in \
2480 ['CTModeB','Accuracy']]
2481 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2482 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2483
2484 n_fail=0
2485 for p in powers:
2486 if (math.log(p)/math.log(10))<-3:
2487 n_fail+=1
2488
2489 if len(powers)==0:
2490 return (None,None,None)
2491
2492 return (math.log(median(powers))/math.log(10),
2493 math.log(min(powers))/math.log(10),
2494 n_fail/len(powers))
2495
2496 def test_consistency(dp_eval_list, qp_eval_list):
2497 """ Computes the consistency test C from the DP and QP evaluations.
2498 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2499 So a consistent test would have C as close to one as possible.
2500 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2501 consistencies = []
2502 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2503 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2504 if key!='Accuracy']
2505 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2506 if key!='Accuracy']
2507 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2508 accuracy(dp_evals)!=0.0:
2509 consistencies.append(accuracy(dp_evals)/(abs(\
2510 best_estimate(qp_evals)-best_estimate(dp_evals))))
2511
2512 if len(consistencies)==0:
2513 return (None,None,None)
2514
2515 return (math.log(median(consistencies))/math.log(10),
2516 math.log(min(consistencies))/math.log(10),
2517 math.log(max(consistencies))/math.log(10))
2518
2519 def median(orig_list):
2520 """ Find the median of a sorted float list. """
2521 list=copy.copy(orig_list)
2522 list.sort()
2523 if len(list)%2==0:
2524 return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
2525 else:
2526 return list[int((len(list)-1)/2)]
2527
2528
2529 f = format_output
2530
2531 opt = stability['loop_optimized_output']
2532
2533 mode = 'optimized' if opt else 'default'
2534 process = stability['Process']
2535 res_str = "Stability checking for %s (%s mode)\n"\
2536 %(process.nice_string()[9:],mode)
2537
2538 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2539 %(mode,process.shell_string())), 'w')
2540
2541 logFile.write('Stability check results\n\n')
2542 logFile.write(res_str)
2543 data_plot_dict={}
2544 accuracy_dict={}
2545 nPSmax=0
2546 max_acc=0.0
2547 min_acc=1.0
2548 if stability['Stability']:
2549 toolnames= stability['Stability'].keys()
2550 toolnamestr=" | ".join(tn+
2551 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2552 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2553 for key,stab in stability['Stability'].items()]
2554 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2555 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2556 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
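# Accuracies are quoted as numerical spreads, so the best ('Max') accuracy below
# corresponds to the smallest value and the worst ('Min') to the largest.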
2557 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2558 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2559 len_PS=["%i"%len(evals)+\
2560 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2561 len_PS_str=" | ".join(len_PS)
2562 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2563 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2564 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2565 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
2566 pmedminlist=[]
2567 pfraclist=[]
2568 for key,stab in stability['Stability'].items():
2569 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2570 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2571 pfrac_str = f(pfrac,'%.2e')
2572 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2573 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2574 pmedminlist_str=" | ".join(pmedminlist)
2575 pfraclist_str=" | ".join(pfraclist)
2576 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2577 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2578 len_UPS=["%i"%len(upup)+\
2579 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2580 len_UPS_str=" | ".join(len_UPS)
2581 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2582 res_str_i += \
2583 """
2584 = Legend for the statistics of the stability tests. (all logs below are log_10)
2585 The loop direction test power P is computed as follows:
2586 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2587 So that log(P) is positive if the loop direction test is effective.
2588 The tuple printed out is (log(median(P)),log(min(P)))
2589 The consistency test C is computed when QP evaluations are available:
2590 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2591 So a consistent test would have log(C) as close to zero as possible.
2592 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2593 res_str+=res_str_i
2594 for key in stability['Stability'].keys():
2595 toolname=key
2596 stab=stability['Stability'][key]
2597 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2598
2599 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2600 stab['QP_stability']]
2601 nPS = len(DP_stability)
2602 if nPS>nPSmax:nPSmax=nPS
2603 UPS = stab['Unstable_PS_points']
2604 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2605 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2606 EPS = stab['Exceptional_PS_points']
2607 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2608 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2609 res_str_i = ""
2610
2611 if len(UPS)>0:
2612 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2613 %(len(UPS),nPS,toolname)
2614 prefix = 'DP' if toolname=='CutTools' else ''
2615 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2616 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2617 res_str_i += "|= %s Max accuracy............... %s\n"\
2618 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2619 res_str_i += "|= %s Min accuracy............... %s\n"\
2620 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2621 (pmed,pmin,pfrac)=loop_direction_test_power(\
2622 [stab['DP_stability'][U[0]] for U in UPS])
2623 if toolname=='CutTools':
2624 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2625 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2626 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2627 %f(pfrac,'%.2e')
2628 res_str_i += "|= QP Median accuracy............ %s\n"\
2629 %f(median(UPS_stability_QP),'%.2e')
2630 res_str_i += "|= QP Max accuracy............... %s\n"\
2631 %f(min(UPS_stability_QP),'%.2e')
2632 res_str_i += "|= QP Min accuracy............... %s\n"\
2633 %f(max(UPS_stability_QP),'%.2e')
2634 (pmed,pmin,pfrac)=loop_direction_test_power(\
2635 [stab['QP_stability'][U[0]] for U in UPS])
2636 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2637 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2638 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2639 (pmed,pmin,pmax)=test_consistency(\
2640 [stab['DP_stability'][U[0]] for U in UPS],
2641 [stab['QP_stability'][U[0]] for U in UPS])
2642 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2643 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2644 if len(EPS)==0:
2645 res_str_i += "= Number of Exceptional PS points : 0\n"
2646 if len(EPS)>0:
2647 res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
2648 %(len(EPS),nPS,toolname)
2649 res_str_i += "|= DP Median accuracy............ %s\n"\
2650 %f(median(EPS_stability_DP),'%.2e')
2651 res_str_i += "|= DP Max accuracy............... %s\n"\
2652 %f(min(EPS_stability_DP),'%.2e')
2653 res_str_i += "|= DP Min accuracy............... %s\n"\
2654 %f(max(EPS_stability_DP),'%.2e')
2655 pmed,pmin,pfrac=loop_direction_test_power(\
2656 [stab['DP_stability'][E[0]] for E in EPS])
2657 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2658 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2659 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2660 %f(pfrac,'%.2e')
2661 res_str_i += "|= QP Median accuracy............ %s\n"\
2662 %f(median(EPS_stability_QP),'%.2e')
2663 res_str_i += "|= QP Max accuracy............... %s\n"\
2664 %f(min(EPS_stability_QP),'%.2e')
2665 res_str_i += "|= QP Min accuracy............... %s\n"\
2666 %f(max(EPS_stability_QP),'%.2e')
2667 pmed,pmin,pfrac=loop_direction_test_power(\
2668 [stab['QP_stability'][E[0]] for E in EPS])
2669 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2670 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2671 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2672
2673 logFile.write(res_str_i)
2674
2675 if len(EPS)>0:
2676 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2677 %(len(EPS),toolname))
2678 for i, eps in enumerate(EPS):
2679 logFile.write('\nEPS #%i\n'%(i+1))
2680 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2681 for p in eps[1]]))
2682 logFile.write('\n DP accuracy : %.3e\n'%DP_stability[eps[0]])
2683 logFile.write(' QP accuracy : %.3e\n'%QP_stability[eps[0]])
2684 if len(UPS)>0:
2685 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2686 %(len(UPS),toolname))
2687 for i, ups in enumerate(UPS):
2688 logFile.write('\nUPS #%i\n'%(i+1))
2689 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2690 for p in ups[1]]))
2691 logFile.write('\n DP accuracy : %.3e\n'%DP_stability[ups[0]])
2692 logFile.write(' QP accuracy : %.3e\n'%QP_stability[ups[0]])
2693
2694 logFile.write('\nData entries for the stability plot.\n')
2695 logFile.write('First row is a maximal accuracy delta, second is the '+\
2696 'fraction of events with DP accuracy worse than delta.\n\n')
2697
2698 if max(DP_stability)>0.0:
2699 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2700 if min_digit_acc>=0:
2701 min_digit_acc = min_digit_acc+1
2702 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
2703 else:
2704 res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
2705 ' will therefore be output.'
2706 logFile.write('Perfect accuracy over all the trial PS points.')
2707 res_str +=res_str_i
2708 continue
2709
2710 accuracy_dict[toolname]=accuracies
2711 if max(accuracies) > max_acc: max_acc=max(accuracies)
2712 if min(accuracies) < min_acc: min_acc=min(accuracies)
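# For each accuracy threshold, record the fraction of PS points whose DP
# accuracy is worse than that threshold; these pairs feed the stability plot.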
2713 data_plot=[]
2714 for acc in accuracies:
2715 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2716 /float(len(DP_stability)))
2717 data_plot_dict[toolname]=data_plot
2718
2719 logFile.writelines('%.3e %.3e\n'%(accuracies[i], data_plot[i]) for i in \
2720 range(len(accuracies)))
2721 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2722 %(nPS,toolname))
2723 logFile.write('First row is DP, second is QP (if available).\n\n')
2724 logFile.writelines('%.3e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2725 else '%.3e\n'%QP_stability[i]) for i in range(nPS))
2726 res_str+=res_str_i
2727 logFile.close()
2728 res_str += "\n= Stability details of the run are output to the file"+\
2729 " stability_%s_%s.log\n"%(mode,process.shell_string())
2730
2731
2732
2733
2734 if any(isinstance(handler,logging.FileHandler) for handler in \
2735 logging.getLogger('madgraph').handlers):
2736 return res_str
2737
2738 try:
2739 import matplotlib.pyplot as plt
2740 colorlist=['b','r','g','y']
2741 for i,key in enumerate(data_plot_dict.keys()):
2742 color=colorlist[i]
2743 data_plot=data_plot_dict[key]
2744 accuracies=accuracy_dict[key]
2745 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2746 label=key)
2747 plt.axis([min_acc,max_acc,\
2748 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2749 plt.yscale('log')
2750 plt.xscale('log')
2751 plt.title('Stability plot for %s (%s mode, %d points)'%\
2752 (process.nice_string()[9:],mode,nPSmax))
2753 plt.ylabel('Fraction of events')
2754 plt.xlabel('Maximal precision')
2755 plt.legend()
2756 if not reusing:
2757 logger.info('Some stability statistics will be displayed once you '+\
2758 'close the plot window')
2759 plt.show()
2760 else:
2761 fig_output_file = str(pjoin(output_path,
2762 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2763 logger.info('Stability plot output to file %s. '%fig_output_file)
2764 plt.savefig(fig_output_file)
2765 return res_str
2766 except Exception as e:
2767 if isinstance(e, ImportError):
2768 res_str += "\n= Install matplotlib to get a "+\
2769 "graphical display of the results of this check."
2770 else:
2771 res_str += "\n= Could not produce the stability plot because of "+\
2772 "the following error: %s"%str(e)
2773 return res_str
2774
2776 """Present the result of a timings check in a nice format """
2777
2778
2779 f = format_output
2780 loop_optimized_output = timings['loop_optimized_output']
2781
2782 res_str = "%s \n"%process.nice_string()
2783 try:
2784 gen_total = timings['HELAS_MODEL_compilation']+\
2785 timings['HelasDiagrams_generation']+\
2786 timings['Process_output']+\
2787 timings['Diagrams_generation']+\
2788 timings['Process_compilation']+\
2789 timings['Initialization']
2790 except TypeError:
2791 gen_total = None
2792 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2793 res_str += "|= Diagrams generation....... %s\n"\
2794 %f(timings['Diagrams_generation'],'%.3gs')
2795 res_str += "|= Helas Diagrams generation. %s\n"\
2796 %f(timings['HelasDiagrams_generation'],'%.3gs')
2797 res_str += "|= Process output............ %s\n"\
2798 %f(timings['Process_output'],'%.3gs')
2799 res_str += "|= HELAS+model compilation... %s\n"\
2800 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2801 res_str += "|= Process compilation....... %s\n"\
2802 %f(timings['Process_compilation'],'%.3gs')
2803 res_str += "|= Initialization............ %s\n"\
2804 %f(timings['Initialization'],'%.3gs')
2805
2806 res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
2807 %(timings['run_unpolarized_total']*1000.0)
2808 if loop_optimized_output:
2809 coef_time=timings['run_unpolarized_coefs']*1000.0
2810 loop_time=(timings['run_unpolarized_total']-\
2811 timings['run_unpolarized_coefs'])*1000.0
2812 total=coef_time+loop_time
2813 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2814 %(coef_time,int(round(100.0*coef_time/total)))
2815 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2816 %(loop_time,int(round(100.0*loop_time/total)))
2817 res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
2818 %(timings['run_polarized_total']*1000.0)
2819 if loop_optimized_output:
2820 coef_time=timings['run_polarized_coefs']*1000.0
2821 loop_time=(timings['run_polarized_total']-\
2822 timings['run_polarized_coefs'])*1000.0
2823 total=coef_time+loop_time
2824 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2825 %(coef_time,int(round(100.0*coef_time/total)))
2826 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2827 %(loop_time,int(round(100.0*loop_time/total)))
2828 res_str += "\n= Miscellaneous ========================\n"
2829 res_str += "|= Number of hel. computed... %s/%s\n"\
2830 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2831 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2832 if loop_optimized_output:
2833 res_str += "|= Number of loop groups..... %s\n"\
2834 %f(timings['n_loop_groups'],'%d')
2835 res_str += "|= Number of loop wfs........ %s\n"\
2836 %f(timings['n_loop_wfs'],'%d')
2837 if timings['loop_wfs_ranks']!=None:
2838 for i, r in enumerate(timings['loop_wfs_ranks']):
2839 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2840 res_str += "|= Loading time (Color data). ~%.3gms\n"\
2841 %(timings['Booting_time']*1000.0)
2842 res_str += "|= Maximum RAM usage (rss)... %s\n"\
2843 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
2844 res_str += "\n= Output disk size =====================\n"
2845 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
2846 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
2847 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
2848 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
2849
2850 return res_str
2851
2853 """Present the results of a comparison in a nice list format
2854 mode short: return the number of fail process
2855 """
2856 proc_col_size = 17
2857 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
2858 if pert_coupl:
2859 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
2860 else:
2861 process_header = "Process"
2862
2863 if len(process_header) + 1 > proc_col_size:
2864 proc_col_size = len(process_header) + 1
2865
2866 for proc in comparison_results:
2867 if len(proc['process'].base_string()) + 1 > proc_col_size:
2868 proc_col_size = len(proc['process'].base_string()) + 1
2869
2870 col_size = 18
2871
2872 pass_proc = 0
2873 fail_proc = 0
2874 no_check_proc = 0
2875
2876 failed_proc_list = []
2877 no_check_proc_list = []
2878
2879 res_str = fixed_string_length(process_header, proc_col_size) + \
2880 fixed_string_length("Min element", col_size) + \
2881 fixed_string_length("Max element", col_size) + \
2882 fixed_string_length("Relative diff.", col_size) + \
2883 "Result"
2884
2885 for result in comparison_results:
2886 proc = result['process'].base_string()
2887 values = result['values']
2888
2889 if len(values) <= 1:
2890 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
2891 " * No permutations, process not checked *"
2892 no_check_proc += 1
2893 no_check_proc_list.append(result['process'].nice_string())
2894 continue
2895
2896 passed = result['passed']
2897
2898 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
2899 fixed_string_length("%1.10e" % min(values), col_size) + \
2900 fixed_string_length("%1.10e" % max(values), col_size) + \
2901 fixed_string_length("%1.10e" % result['difference'],
2902 col_size)
2903 if passed:
2904 pass_proc += 1
2905 res_str += "Passed"
2906 else:
2907 fail_proc += 1
2908 failed_proc_list.append(result['process'].nice_string())
2909 res_str += "Failed"
2910
2911 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
2912 (pass_proc, pass_proc + fail_proc,
2913 fail_proc, pass_proc + fail_proc)
2914
2915 if fail_proc != 0:
2916 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
2917 if no_check_proc != 0:
2918 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
2919
2920 return res_str
2921
2923 """Helper function to fix the length of a string by cutting it
2924 or adding extra space."""
2925
2926 if len(mystr) > length:
2927 return mystr[0:length]
2928 else:
2929 return mystr + " " * (length - len(mystr))
2930
2931
2932
2933
2934
2935 -def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
2936 options=None, output_path=None, cmd = FakeInterface()):
2937 """Check gauge invariance of the processes by using the BRS check.
2938 For one of the massless external bosons (e.g. gluon or photon),
2939 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
2940 """
2941 cmass_scheme = cmd.options['complex_mass_scheme']
2942 if isinstance(processes, base_objects.ProcessDefinition):
2943
2944
2945 multiprocess = processes
2946
2947 model = multiprocess.get('model')
2948
2949 if multiprocess.get('perturbation_couplings')==[]:
2950 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
2951 auth_skipping = True, reuse = False)
2952 else:
2953 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2954 cmd=cmd,model=model, param_card=param_card,
2955 auth_skipping = False, reuse = False,
2956 output_path=output_path)
2957
2958 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
2959
2960 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
2961 for particle in evaluator.full_model.get('particles'):
2962 if particle.get('width') != 'ZERO':
2963 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
2964 results = run_multiprocs_no_crossings(check_gauge_process,
2965 multiprocess,
2966 evaluator,
2967 options=options
2968 )
2969
2970 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2971
2972 clean_up(output_path)
2973
2974 return results
2975
2976 elif isinstance(processes, base_objects.Process):
2977 processes = base_objects.ProcessList([processes])
2978 elif isinstance(processes, base_objects.ProcessList):
2979 pass
2980 else:
2981 raise InvalidCmd("processes is of non-supported format")
2982
2983 assert processes, "No processes given"
2984
2985 model = processes[0].get('model')
2986
2987
2988 if processes[0].get('perturbation_couplings')==[]:
2989 evaluator = MatrixElementEvaluator(model, param_card,
2990 auth_skipping = True, reuse = False,
2991 cmd = cmd)
2992 else:
2993 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2994 model=model, param_card=param_card,
2995 auth_skipping = False, reuse = False,
2996 output_path=output_path, cmd = cmd)
2997 comparison_results = []
2998 comparison_explicit_flip = []
2999
3000
3001 for process in processes:
3002
3003
3004
3005
3006
3007
3008
3009
3010
3011 result = check_gauge_process(process, evaluator,options=options)
3012 if result:
3013 comparison_results.append(result)
3014
3015 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3016
3017 clean_up(output_path)
3018
3019 return comparison_results
3020
3023 """Check gauge invariance for the process, unless it is already done."""
3024
3025 model = process.get('model')
3026
3027
3028 found_gauge = False
3029 for i, leg in enumerate(process.get('legs')):
3030 part = model.get_particle(leg.get('id'))
3031 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3032 found_gauge = True
3033 break
3034 if not found_gauge:
3035 logger.info("No ward identity for %s" % \
3036 process.nice_string().replace('Process', 'process'))
3037
3038 return None
3039
3040 for i, leg in enumerate(process.get('legs')):
3041 leg.set('number', i+1)
3042
3043 logger.info("Checking ward identities for %s" % \
3044 process.nice_string().replace('Process', 'process'))
3045
3046 legs = process.get('legs')
3047
3048
3049 try:
3050 if process.get('perturbation_couplings')==[]:
3051 amplitude = diagram_generation.Amplitude(process)
3052 else:
3053 amplitude = loop_diagram_generation.LoopAmplitude(process)
3054 except InvalidCmd:
3055 logging.info("No diagrams for %s" % \
3056 process.nice_string().replace('Process', 'process'))
3057 return None
3058 if not amplitude.get('diagrams'):
3059
3060 logging.info("No diagrams for %s" % \
3061 process.nice_string().replace('Process', 'process'))
3062 return None
3063
3064 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3065 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3066 gen_color = False)
3067 else:
3068 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3069 optimized_output=evaluator.loop_optimized_output)
3070
3071
3072
3073
3074
3075
3076
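# Evaluate the matrix element once with the BRS substitution (gauge_check=True)
# and once normally, then compare the two results.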
3077 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3078 output='jamp', options=options)
3079
3080 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3081 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3082 gen_color = False)
3083
3084 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3085 output='jamp', options=options)
3086
3087 if mvalue and mvalue['m2']:
3088 return {'process':process,'value':mvalue,'brs':brsvalue}
3089
3091 """Present the results of a comparison in a nice list format"""
3092
3093 proc_col_size = 17
3094
3095 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3096
3097
3098 if pert_coupl:
3099 threshold=1e-5
3100 else:
3101 threshold=1e-10
3102
3103 if pert_coupl:
3104 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3105 else:
3106 process_header = "Process"
3107
3108 if len(process_header) + 1 > proc_col_size:
3109 proc_col_size = len(process_header) + 1
3110
3111 for one_comp in comparison_results:
3112 proc = one_comp['process'].base_string()
3113 mvalue = one_comp['value']
3114 brsvalue = one_comp['brs']
3115 if len(proc) + 1 > proc_col_size:
3116 proc_col_size = len(proc) + 1
3117
3118 col_size = 18
3119
3120 pass_proc = 0
3121 fail_proc = 0
3122
3123 failed_proc_list = []
3124 no_check_proc_list = []
3125
3126 res_str = fixed_string_length(process_header, proc_col_size) + \
3127 fixed_string_length("matrix", col_size) + \
3128 fixed_string_length("BRS", col_size) + \
3129 fixed_string_length("ratio", col_size) + \
3130 "Result"
3131
3132 for one_comp in comparison_results:
3133 proc = one_comp['process'].base_string()
3134 mvalue = one_comp['value']
3135 brsvalue = one_comp['brs']
3136 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3137 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3138 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3139 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3140 fixed_string_length("%1.10e" % ratio, col_size)
3141
3142 if ratio > threshold:
3143 fail_proc += 1
3144 proc_succeed = False
3145 failed_proc_list.append(proc)
3146 res_str += "Failed"
3147 else:
3148 pass_proc += 1
3149 proc_succeed = True
3150 res_str += "Passed"
3151
3152
3153
3154
3155
3156 if len(mvalue['jamp'])!=0:
3157 for k in range(len(mvalue['jamp'][0])):
3158 m_sum = 0
3159 brs_sum = 0
3160
3161 for j in range(len(mvalue['jamp'])):
3162
3163 m_sum += abs(mvalue['jamp'][j][k])**2
3164 brs_sum += abs(brsvalue['jamp'][j][k])**2
3165
3166
3167 if not m_sum:
3168 continue
3169 ratio = abs(brs_sum) / abs(m_sum)
3170
3171 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3172 fixed_string_length("%1.10e" % m_sum, col_size) + \
3173 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3174 fixed_string_length("%1.10e" % ratio, col_size)
3175
3176 if ratio > 1e-15:
3177 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3178 fail_proc += 1
3179 pass_proc -= 1
3180 failed_proc_list.append(proc)
3181 res_str += tmp_str + "Failed"
3182 elif not proc_succeed:
3183 res_str += tmp_str + "Passed"
3184
3185
3186 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3187 (pass_proc, pass_proc + fail_proc,
3188 fail_proc, pass_proc + fail_proc)
3189
3190 if fail_proc != 0:
3191 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3192
3193 if output=='text':
3194 return res_str
3195 else:
3196 return fail_proc
3197
3198
3199
3200 -def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3201 reuse = False, output_path=None, cmd = FakeInterface()):
3202 """ Check if the square matrix element (sum over helicity) is lorentz
3203 invariant by boosting the momenta with different value."""
3204
3205 cmass_scheme = cmd.options['complex_mass_scheme']
3206 if isinstance(processes, base_objects.ProcessDefinition):
3207
3208
3209 multiprocess = processes
3210 model = multiprocess.get('model')
3211
3212 if multiprocess.get('perturbation_couplings')==[]:
3213 evaluator = MatrixElementEvaluator(model,
3214 cmd= cmd, auth_skipping = False, reuse = True)
3215 else:
3216 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3217 model=model, auth_skipping = False, reuse = True,
3218 output_path=output_path, cmd = cmd)
3219
3220 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3221
3222 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3223 for particle in evaluator.full_model.get('particles'):
3224 if particle.get('width') != 'ZERO':
3225 evaluator.full_model.get('parameter_dict')[\
3226 particle.get('width')] = 0.
3227
3228 results = run_multiprocs_no_crossings(check_lorentz_process,
3229 multiprocess,
3230 evaluator,
3231 options=options)
3232
3233 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3234
3235 clean_up(output_path)
3236
3237 return results
3238
3239 elif isinstance(processes, base_objects.Process):
3240 processes = base_objects.ProcessList([processes])
3241 elif isinstance(processes, base_objects.ProcessList):
3242 pass
3243 else:
3244 raise InvalidCmd("processes is of non-supported format")
3245
3246 assert processes, "No processes given"
3247
3248 model = processes[0].get('model')
3249
3250
3251 if processes[0].get('perturbation_couplings')==[]:
3252 evaluator = MatrixElementEvaluator(model, param_card,
3253 auth_skipping = False, reuse = True,
3254 cmd=cmd)
3255 else:
3256 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3257 model=model,param_card=param_card,
3258 auth_skipping = False, reuse = True,
3259 output_path=output_path, cmd = cmd)
3260
3261 comparison_results = []
3262
3263
3264 for process in processes:
3265
3266
3267
3268
3269
3270
3271
3272
3273
3274 result = check_lorentz_process(process, evaluator,options=options)
3275 if result:
3276 comparison_results.append(result)
3277
3278 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3279
3280 clean_up(output_path)
3281
3282 return comparison_results
3283
3286 """Check gauge invariance for the process, unless it is already done."""
3287
3288 amp_results = []
3289 model = process.get('model')
3290
3291 for i, leg in enumerate(process.get('legs')):
3292 leg.set('number', i+1)
3293
3294 logger.info("Checking lorentz transformations for %s" % \
3295 process.nice_string().replace('Process:', 'process'))
3296
3297 legs = process.get('legs')
3298
3299
3300 try:
3301 if process.get('perturbation_couplings')==[]:
3302 amplitude = diagram_generation.Amplitude(process)
3303 else:
3304 amplitude = loop_diagram_generation.LoopAmplitude(process)
3305 except InvalidCmd:
3306 logging.info("No diagrams for %s" % \
3307 process.nice_string().replace('Process', 'process'))
3308 return None
3309
3310 if not amplitude.get('diagrams'):
3311
3312 logging.info("No diagrams for %s" % \
3313 process.nice_string().replace('Process', 'process'))
3314 return None
3315
3316
3317 p, w_rambo = evaluator.get_momenta(process, options)
3318
3319
3320 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3321 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3322 gen_color = True)
3323 else:
3324 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3325 optimized_output = evaluator.loop_optimized_output)
3326
3327 MLOptions = {'ImprovePS':True,'ForceMP':True}
3328 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3329 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3330 auth_skipping = True, options=options)
3331 else:
3332 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3333 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3334 options = options)
3335
3336 if data and data['m2']:
3337 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3338 results = [data]
3339 else:
3340 results = [('Original evaluation',data)]
3341 else:
3342 return {'process':process, 'results':'pass'}
3343
3344
3345
3346
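# Tree-level: boost the momenta along each of the three axes and re-evaluate.
# Loop-level: use a z-axis boost, x/y boosts when not running on fixed events,
# and two rotations about the z axis.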
3347 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3348 for boost in range(1,4):
3349 boost_p = boost_momenta(p, boost)
3350 results.append(evaluator.evaluate_matrix_element(matrix_element,
3351 p=boost_p,output='jamp'))
3352 else:
3353
3354 boost_p = boost_momenta(p, 3)
3355 results.append(('Z-axis boost',
3356 evaluator.evaluate_matrix_element(matrix_element, options=options,
3357 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3358
3359
3360
3361
3362 if not options['events']:
3363 boost_p = boost_momenta(p, 1)
3364 results.append(('X-axis boost',
3365 evaluator.evaluate_matrix_element(matrix_element, options=options,
3366 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3367 boost_p = boost_momenta(p, 2)
3368 results.append(('Y-axis boost',
3369 evaluator.evaluate_matrix_element(matrix_element,options=options,
3370 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3371
3372
3373 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3374 results.append(('Z-axis pi/2 rotation',
3375 evaluator.evaluate_matrix_element(matrix_element,options=options,
3376 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3377
3378 sq2 = math.sqrt(2.0)
3379 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3380 results.append(('Z-axis pi/4 rotation',
3381 evaluator.evaluate_matrix_element(matrix_element,options=options,
3382 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
3383
3384
3385 return {'process': process, 'results': results}
3386
3387
3388
3389
3390 -def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3391 options=None, tir={}, output_path=None,
3392 cuttools="", reuse=False, cmd = FakeInterface()):
3393 """Check gauge invariance of the processes by flipping
3394 the gauge of the model
3395 """
3396
3397 mg_root = cmd._mgme_dir
3398
3399 cmass_scheme = cmd.options['complex_mass_scheme']
3400
3401 if isinstance(processes_unit, base_objects.ProcessDefinition):
3402
3403
3404 multiprocess_unit = processes_unit
3405 model = multiprocess_unit.get('model')
3406
3407
3408
3409 loop_optimized_bu = cmd.options['loop_optimized_output']
3410 if processes_unit.get('squared_orders'):
3411 if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
3412 cmd.options['loop_optimized_output'] = True
3413 else:
3414 raise InvalidCmd("The gauge test cannot be performed for "+
3415 " a process with more than QCD corrections and which"+
3416 " specifies squared order constraints.")
3417 else:
3418 cmd.options['loop_optimized_output'] = False
3419
3420 aloha.unitary_gauge = True
3421 if processes_unit.get('perturbation_couplings')==[]:
3422 evaluator = MatrixElementEvaluator(model, param_card,
3423 cmd=cmd,auth_skipping = False, reuse = True)
3424 else:
3425 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3426 cmd=cmd, model=model,
3427 param_card=param_card,
3428 auth_skipping = False,
3429 output_path=output_path,
3430 reuse = False)
3431 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
3432 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3433 for particle in evaluator.full_model.get('particles'):
3434 if particle.get('width') != 'ZERO':
3435 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3436
3437 output_u = run_multiprocs_no_crossings(get_value,
3438 multiprocess_unit,
3439 evaluator,
3440 options=options)
3441
3442 clean_added_globals(ADDED_GLOBAL)
3443
3444 if processes_unit.get('perturbation_couplings')!=[]:
3445 clean_up(output_path)
3446
3447 momentum = {}
3448 for data in output_u:
3449 momentum[data['process']] = data['p']
3450
3451 multiprocess_feynm = processes_feynm
3452 model = multiprocess_feynm.get('model')
3453
3454
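# Second pass: re-evaluate the same processes in Feynman gauge, reusing the
# momenta recorded during the unitary-gauge run.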
3455 aloha.unitary_gauge = False
3456
3457
3458 cmd.options['loop_optimized_output'] = True
3459 if processes_feynm.get('perturbation_couplings')==[]:
3460 evaluator = MatrixElementEvaluator(model, param_card,
3461 cmd= cmd, auth_skipping = False, reuse = False)
3462 else:
3463 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3464 cmd= cmd, model=model,
3465 param_card=param_card,
3466 auth_skipping = False,
3467 output_path=output_path,
3468 reuse = False)
3469
3470 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3471
3472 for particle in evaluator.full_model.get('particles'):
3473 if particle.get('width') != 'ZERO':
3474 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3475
3476 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3477 evaluator, momentum,
3478 options=options)
3479 output = [processes_unit]
3480 for data in output_f:
3481 local_dico = {}
3482 local_dico['process'] = data['process']
3483 local_dico['value_feynm'] = data['value']
3484 local_dico['value_unit'] = [d['value'] for d in output_u
3485 if d['process'] == data['process']][0]
3486 output.append(local_dico)
3487
3488 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3489
3490 clean_up(output_path)
3491
3492
3493 cmd.options['loop_optimized_output'] = loop_optimized_bu
3494
3495 return output
3496
3497
3498
3499
3500 else:
3501         raise InvalidCmd("processes are given in an unsupported format")
3502
3508 """Check complex mass scheme consistency in the offshell region of s-channels
3509     detected for this process, by varying the expansion parameter consistently
3510 with the corresponding width and making sure that the difference between
3511 the complex mass-scheme and the narrow-width approximation is higher order.
3512 """
3513
3514 if not isinstance(process_line, str):
3515         raise InvalidCmd("Process definition must be given as a string for this check")
3516
3517
3518 cmd.do_set('complex_mass_scheme False', log=False)
3519
3520 multiprocess_nwa = cmd.extract_process(process_line)
3521
3522
3523 has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
3524 'decays.py'))
3525
3526
3527 missing_perturbations = cmd._curr_model.get_coupling_orders()-\
3528 set(multiprocess_nwa.get('perturbation_couplings'))
3529
3530 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3531 len(missing_perturbations)>0:
3532 logger.warning("------------------------------------------------------")
3533 logger.warning("The process considered does not specify the following "+
3534            "types of loops to be included: %s"%str(list(missing_perturbations)))
3535 logger.warning("Consequently, the CMS check will be unsuccessful if the"+
3536 " process involves any resonating particle whose LO decay is "+
3537 "mediated by one of these orders.")
3538 logger.warning("You can use the syntax '[virt=all]' to automatically"+
3539 " include all loops supported by the model.")
3540 logger.warning("------------------------------------------------------")
3541
3542 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3543 len(multiprocess_nwa.get('legs'))<=4:
3544 logger.warning("------------------------------------------------------")
3545         logger.warning("Processes with four or fewer external states are typically not"+\
3546 " sensitive to incorrect Complex Mass Scheme implementations.")
3547 logger.warning("You can test this sensitivity by making sure that the"+
3548 " same check on the leading-order counterpart of this process *fails*"+
3549 " when using the option '--diff_lambda_power=2'.")
3550 logger.warning("If it does not, then consider adding a massless "+
3551 "gauge vector to the external states.")
3552 logger.warning("------------------------------------------------------")
3553
3554 if options['recompute_width']=='auto':
3555 if multiprocess_nwa.get('perturbation_couplings')!=[]:
3556
3557 options['recompute_width'] = 'first_time'
3558 else:
3559 options['recompute_width'] = 'never'
3560
3561
3562 if options['recompute_width'] in ['first_time', 'always'] and \
3563 not has_FRdecay and not 'cached_widths' in options:
3564 logger.info('The LO widths will need to be recomputed but the '+
3565 'model considered does not appear to have a decay module.\nThe widths'+
3566 ' will need to be computed numerically and it will slow down the test.\n'+
3567 'Consider using a param_card already specifying correct LO widths and'+
3568 " adding the option --recompute_width=never when doing this check.")
3569
3570 if options['recompute_width']=='never' and \
3571 any(order in multiprocess_nwa.get('perturbation_couplings') for order in
3572 options['expansion_orders']):
3573         logger.warning('You chose not to recompute the widths while including'+
3574           ' loop corrections. The check will be successful only if the widths'+\
3575           ' specified in the default param_card are LO-accurate (remember that'+\
3576           ' alpha_s and awem1 default to 0.1 and 10.0'+\
3577           ' respectively).')
3578
3579
3580
3581
3582
3583 if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
3584 modelname = cmd._curr_model.get('modelpath+restriction')
3585 with misc.MuteLogger(['madgraph'], ['INFO']):
3586 model = import_ufo.import_model(modelname, decay=True,
3587 complex_mass_scheme=False)
3588 multiprocess_nwa.set('model', model)
3589
3590 run_options = copy.deepcopy(options)
3591
3592
3593 if options['seed'] > 0:
3594 random.seed(options['seed'])
3595
3596
3597 run_options['param_card'] = param_card
3598 if isinstance(cmd, FakeInterface):
3599 raise MadGraph5Error, "Check CMS cannot be run with a FakeInterface."
3600 run_options['cmd'] = cmd
3601 run_options['MLOptions'] = MLOptions
3602 if output_path:
3603 run_options['output_path'] = output_path
3604 else:
3605 run_options['output_path'] = cmd._mgme_dir
3606
3607
3608 run_options['has_FRdecay'] = has_FRdecay
3609
3610
3611 if 'cached_widths' not in run_options:
3612 run_options['cached_widths'] = {}
3613
3614
3615 run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
3616
3617 if options['tweak']['name']:
3618 logger.info("Now running the CMS check for tweak '%s'"\
3619 %options['tweak']['name'])
3620
3621 model = multiprocess_nwa.get('model')
3622
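         # The check requires the resonance masses to be external parameters;
         # if they are not, try switching to an electroweak scheme in which the
         # W-boson mass is external.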
3623 for particle in model.get('particles'):
3624 mass_param = model.get_parameter(particle.get('mass'))
3625 if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
3626 if model.get('name') not in ['sm','loop_sm']:
3627 logger.warning("The mass '%s' of particle '%s' is not an external"%\
3628 (model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
3629 " parameter as required by this check. \nMG5_aMC will try to"+\
3630 " modify the model to remedy the situation. No guarantee.")
3631 status = model.change_electroweak_mode(set(['mz','mw','alpha']))
3632 if not status:
3633 raise InvalidCmd('The EW scheme could apparently not be changed'+\
3634 ' so as to have the W-boson mass external. The check cannot'+\
3635 ' proceed.')
3636 break
3637
3638 veto_orders = [order for order in model.get('coupling_orders') if \
3639 order not in options['expansion_orders']]
3640 if len(veto_orders)>0:
3641         logger.warning('You did not define any parameter scaling rule for the'+\
3642                   " coupling orders %s. They will be "%','.join(veto_orders)+\
3643                   "forced to zero in the tests. Consider adding the scaling rule to"+\
3644                   " avoid this. (see option '--cms' in 'help check')")
3645         for order in veto_orders:
3646             multiprocess_nwa.get('orders')[order] = 0
3647 multiprocess_nwa.set('perturbation_couplings', [order for order in
3648 multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
3649
3650 if multiprocess_nwa.get('perturbation_couplings')==[]:
3651 evaluator = MatrixElementEvaluator(model, param_card,
3652 cmd=cmd,auth_skipping = False, reuse = True)
3653 else:
3654 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3655 cmd=cmd, model=model,
3656 param_card=param_card,
3657 auth_skipping = False,
3658 output_path=output_path,
3659 reuse = False)
3660
3661 cached_information = []
3662 output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3663 multiprocess_nwa,
3664 evaluator,
3665
3666
3667
3668
3669
3670 opt = cached_information,
3671 options=run_options)
3672
3673
3674 clean_added_globals(ADDED_GLOBAL)
3675
3676
3677 cmd.do_set('complex_mass_scheme True', log=False)
3678
3679
3680 multiprocess_cms = cmd.extract_process(process_line)
3681 model = multiprocess_cms.get('model')
3682
3683 if len(veto_orders)>0:
3684 for order in veto_orders:
3685             multiprocess_cms.get('orders')[order] = 0
3686 multiprocess_cms.set('perturbation_couplings', [order for order in
3687 multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
3688
3689 if multiprocess_cms.get('perturbation_couplings')==[]:
3690 evaluator = MatrixElementEvaluator(model, param_card,
3691 cmd=cmd,auth_skipping = False, reuse = True)
3692 else:
3693 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3694 cmd=cmd, model=model,
3695 param_card=param_card,
3696 auth_skipping = False,
3697 output_path=output_path,
3698 reuse = False)
3699
3700 output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3701 multiprocess_cms,
3702 evaluator,
3703
3704 opt = dict(cached_information),
3705 options=run_options)
3706
3707 if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
3708
3709 clean_up(output_path)
3710
3711
3712
3713
3714 result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
3715
3716 result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
3717 for i, proc_res in enumerate(output_nwa):
3718 result['ordered_processes'].append(proc_res[0])
3719 result[proc_res[0]] = {
3720 'NWA':proc_res[1]['resonances_result'],
3721 'CMS':output_cms[i][1]['resonances_result'],
3722 'born_order':proc_res[1]['born_order'],
3723 'loop_order':proc_res[1]['loop_order']}
3724
3725
3726
3727 options['cached_widths'] = run_options['cached_widths']
3728
3729
3730 result['recompute_width'] = options['recompute_width']
3731 result['has_FRdecay'] = has_FRdecay
3732 result['widths_computed'] = []
3733 cached_widths = sorted(options['cached_widths'].items(), key=lambda el: \
3734 abs(el[0][0]))
3735 for (pdg, lambda_value), width in cached_widths:
3736 if lambda_value != 1.0:
3737 continue
3738 result['widths_computed'].append((model.get_particle(pdg).get_name(),
3739 width))
3740
3741
3742 clean_added_globals(ADDED_GLOBAL)
3743
3744 return result
3745
3750     """Check CMS for the process in argument. The option 'opt' is important:
3751     when opt is a list, we are doing the NWA run and the list is filled with
3752     tuples of the form
3753                  ('proc_name',({'ParticlePDG':ParticlePDG,
3754                                 'FSMothersNumbers':set([]),
3755                                 'PS_point_used':[]},...))
3756     When opt is a dictionary, we are in CMS mode and the cached NWA
3757     information is reused."""
3758
3759
3760
3761 NLO = process.get('perturbation_couplings') != []
3762
3763 def glue_momenta(production, decay):
3764         """ Merge the kinematics of the production of the particle positioned
3765         last in the 'production' array with the 1>N 'decay' kinematics provided,
3766         where the decaying particle comes first."""
3767
3768 from MadSpin.decay import momentum
3769
3770 full = production[:-1]
3771
3772
3773
3774
3775
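             # Boost the decay products with the momentum of the resonance
             # (last entry of 'production') so that production and decay
             # kinematics end up expressed in the same frame.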
3776 for p in decay[1:]:
3777 bp = momentum(*p).boost(momentum(*production[-1]))
3778 full.append([bp.E,bp.px,bp.py,bp.pz])
3779
3780 return full
3781
3782 def find_resonances(diagrams):
3783 """ Find all the resonances in the matrix element in argument """
3784
3785 model = process['model']
3786 resonances_found = []
3787
3788 for ll, diag in enumerate(diagrams):
3789 for amp in diag.get('amplitudes'):
3790
3791
3792 s_channels, t_channels = amp.\
3793 get_s_and_t_channels(process.get_ninitial(), model, 0)
3794
3795
3796 replacement_dict = {}
3797 for s_channel in s_channels:
3798 new_resonance = {
3799 'ParticlePDG':s_channel.get('legs')[-1].get('id'),
3800 'FSMothersNumbers':[],
3801 'PS_point_used':[]}
3802 for leg in s_channel.get('legs')[:-1]:
3803 if leg.get('number')>0:
3804 new_resonance['FSMothersNumbers'].append(
3805 leg.get('number'))
3806 else:
3807 try:
3808 new_resonance['FSMothersNumbers'].extend(
3809 replacement_dict[leg.get('number')])
3810 except KeyError:
3811 raise Exception, 'The following diagram '+\
3812 'is malformed:'+diag.nice_string()
3813
3814 replacement_dict[s_channel.get('legs')[-1].get('number')] = \
3815 new_resonance['FSMothersNumbers']
3816 new_resonance['FSMothersNumbers'] = set(
3817 new_resonance['FSMothersNumbers'])
3818 if new_resonance not in resonances_found:
3819 resonances_found.append(new_resonance)
3820
3821
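             # Keep only genuine resonances: skip fake s-channels (PDG 0),
             # particles also present in the external states, and massless or
             # zero-width particles; also make sure that the requested
             # offshellness is kinematically reachable.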
3822 kept_resonances = []
3823 for resonance in resonances_found:
3824
3825 if resonance['ParticlePDG'] == 0:
3826 continue
3827
3828
3829 if abs(resonance['ParticlePDG']) in \
3830 [abs(l.get('id')) for l in process.get('legs')]:
3831 continue
3832
3833 mass_string = evaluator.full_model.get_particle(
3834 resonance['ParticlePDG']).get('mass')
3835 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
3836
3837 if mass==0.0:
3838 continue
3839
3840 width_string = evaluator.full_model.get_particle(
3841 resonance['ParticlePDG']).get('width')
3842 width = evaluator.full_model.get('parameter_dict')[width_string].real
3843
3844
3845 if width==0.0:
3846 continue
3847
3848 final_state_energy = sum(
3849 evaluator.full_model.get('parameter_dict')[
3850 evaluator.full_model.get_particle(l.get('id')).get('mass')].real
3851 for l in process.get('legs') if l.get('number') in
3852 resonance['FSMothersNumbers'])
3853
3854
3855 special_mass = (1.0 + options['offshellness'])*mass
3856
3857
3858 if special_mass<final_state_energy:
3859 raise InvalidCmd('The offshellness specified (%s) is such'\
3860 %options['offshellness']+' that the resulting kinematic is '+\
3861 'impossible for resonance %s %s.'%(evaluator.full_model.
3862 get_particle(resonance['ParticlePDG']).get_name(),
3863 str(list(resonance['FSMothersNumbers']))))
3864 continue
3865
3866
3867 kept_resonances.append(resonance)
3868
3869 for resonance in kept_resonances:
3870
3871 set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
3872
3873
3874
3875 return tuple(kept_resonances)
3876
3877 def set_PSpoint(resonance, force_other_res_offshell=[],
3878 allow_energy_increase=1.5, isolation_cuts=True):
3879         """ Starting from the specified resonance, construct a phase-space point
3880         for it, possibly also forcing the other resonances to be offshell. Optionally
3881         allow the energy to be progressively increased by the factor specified
3882         (negative value to forbid it) and optionally enforce default isolation cuts
3883         as well."""
3884
3885 def invmass(momenta):
3886 """ Computes the invariant mass of a list of momenta."""
3887 ptot = [sum(p[i] for p in momenta) for i in range(4)]
3888 return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
3889
3890 model = evaluator.full_model
3891 def getmass(pdg):
3892 """ Returns the mass of a particle given the current model and its
3893 pdg given in argument."""
3894 return model.get('parameter_dict')[
3895 model.get_particle(pdg).get('mass')].real
3896
3897 N_trials = 0
3898 max_trial = 1e4
3899 nstep_for_energy_increase = 1e3
3900 PS_point_found = None
3901 if options['offshellness'] > 0.0:
3902 offshellness = options['offshellness']
3903 else:
3904
3905
3906
3907
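                 # For a negative requested offshellness, start the search from a
                 # more offshell trial value: (1 + offshellness) = 0.25*(1 + requested),
                 # i.e. a target invariant mass of a quarter of the requested one,
                 # leaving room for the other constraints.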
3908 offshellness = (0.25*(options['offshellness']+1.0))-1.0
3909
3910
3911
3912
3913 if options['offshellness'] < 0.0:
3914 energy_increase = math.sqrt(allow_energy_increase)
3915 else:
3916 energy_increase = allow_energy_increase
3917
3918 other_res_offshell = [res for res in force_other_res_offshell if
3919 res!=resonance]
3920
3921
3922
3923 all_other_res_masses = [getmass(res['ParticlePDG'])
3924 for res in other_res_offshell]
3925 resonance_mass = getmass(resonance['ParticlePDG'])
3926
3927 str_res = '%s %s'%(model.get_particle(
3928 resonance['ParticlePDG']).get_name(),
3929 str(list(resonance['FSMothersNumbers'])))
3930 leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
3931
3932
3933
3934 daughter_masses = sum(getmass(leg_number_to_leg[\
3935 number].get('id')) for number in resonance['FSMothersNumbers'])
3936 min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
3937
3938
3939
3940 min_energy = max(sum(getmass(l.get('id')) for l in \
3941 process.get('legs') if l.get('state')==True),
3942 sum(getmass(l.get('id')) for l in \
3943 process.get('legs') if l.get('state')==False))
3944
3945
3946
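             # Other resonances nested inside this one (their final-state mothers
             # form a subset of this resonance's) constrain the reachable invariant
             # mass, so the offshellness may have to be pushed beyond the requested value.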
3947 daughter_offshellnesses = [(1.0+options['offshellness'])*mass
3948 for i, mass in enumerate(all_other_res_masses) if
3949 other_res_offshell[i]['FSMothersNumbers'].issubset(
3950 resonance['FSMothersNumbers'])]
3951
3952 if options['offshellness'] >= 0.0:
3953
3954 if len(daughter_offshellnesses)>0:
3955 max_mass = max(daughter_offshellnesses)
3956
3957 offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
3958 options['offshellness'])
3959
3960 max_mass = max([(1.0+options['offshellness'])*mass for mass in \
3961 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
3962
3963
3964
3965 target = max(min_energy*1.2,max_mass*2.0)
3966 if target > options['energy']:
3967 logger.warning("The user-defined energy %f seems "%options['energy']+
3968                   "insufficient to reach the minimum propagator invariant mass "+
3969 "%f required for the chosen offshellness %f."%(max_mass,
3970 options['offshellness']) + " Energy reset to %f."%target)
3971 options['energy'] = target
3972
3973 else:
3974 if len(daughter_offshellnesses) > 0:
3975 min_mass = min(daughter_offshellnesses)
3976
3977 offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
3978 options['offshellness'])
3979
3980
3981
3982 if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
3983                 msg = 'The resonance %s cannot accommodate'%str_res+\
3984 ' an offshellness of %f because the daughter'%options['offshellness']+\
3985 ' masses are %f.'%daughter_masses
3986 if options['offshellness']<min_offshellnes:
3987                     msg += ' Try again with an offshellness'+\
3988                       ' smaller in absolute value (at least %f).'%min_offshellnes
3989 else:
3990                     msg += ' Try again with a smaller offshellness (in absolute value).'
3991 raise InvalidCmd(msg)
3992
3993 min_mass = min([(1.0+options['offshellness'])*mass for mass in \
3994 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
3995
3996
3997 if 2.0*min_mass < options['energy']:
3998 new_energy = max(min_energy*1.2, 2.0*min_mass)
3999             logger.warning("The user-defined energy %f seems"%options['energy']+
4000               " too large and would overshoot the maximum propagator invariant mass "+
4001               "%f allowed for the chosen offshellness %f."%(min_mass,
4002                options['offshellness']) + " Energy reset to %f."%new_energy)
4003 options['energy'] = new_energy
4004
4005 if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
4006 logger.debug("The target energy is not compatible with the mass"+
4007 " of the external states for this process (%f). It is "%min_mass+
4008 "unlikely that a valid kinematic configuration will be found.")
4009
4010 if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
4011 options['offshellness']>0.0 and offshellness>options['offshellness']:
4012 logger.debug("Offshellness increased to %f"%offshellness+
4013 " so as to try to find a kinematical configuration with"+
4014 " offshellness at least equal to %f"%options['offshellness']+
4015 " for all resonances.")
4016
4017 start_energy = options['energy']
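             # Rejection sampling: generate phase-space points until one also meets
             # the offshellness requirement of every other resonance and the isolation
             # cuts; every nstep_for_energy_increase trials the energy (and the
             # offshellness) is pushed further if allowed.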
4018 while N_trials<max_trial:
4019 N_trials += 1
4020 if N_trials%nstep_for_energy_increase==0:
4021 if allow_energy_increase > 0.0:
4022 old_offshellness = offshellness
4023 if offshellness > 0.0:
4024 options['energy'] *= energy_increase
4025 offshellness *= energy_increase
4026 else:
4027 options['energy'] = max(options['energy']/energy_increase,
4028 min_energy*1.2)
4029 offshellness = max(min_offshellnes,
4030 ((offshellness+1.0)/energy_increase)-1.0)
4031 if old_offshellness!=offshellness:
4032 logger.debug('Trying to find a valid kinematic'+\
4033 " configuration for resonance '%s'"%str_res+\
4034 ' with increased offshellness %f'%offshellness)
4035
4036 candidate = get_PSpoint_for_resonance(resonance, offshellness)
4037 pass_offshell_test = True
4038 for i, res in enumerate(other_res_offshell):
4039
4040 if offshellness > 0.0:
4041 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
4042 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4043 pass_offshell_test = False
4044 break
4045 else:
4046 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
4047 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4048 pass_offshell_test = False
4049 break
4050 if not pass_offshell_test:
4051 continue
4052
4053 if isolation_cuts:
4054
4055 if not evaluator.pass_isolation_cuts(candidate,
4056 ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
4057 continue
4058 PS_point_found = candidate
4059 break
4060
4061
4062 options['energy'] = start_energy
4063
4064 if PS_point_found is None:
4065 err_msg = 'Could not find a valid PS point in %d'%max_trial+\
4066 ' trials. Try increasing the energy, modify the offshellness '+\
4067 'or relax some constraints.'
4068 if options['offshellness']<0.0:
4069             err_msg += ' Try with a positive offshellness instead (or a '+\
4070               'negative one of smaller absolute value).'
4071 raise InvalidCmd, err_msg
4072 else:
4073
4074
4075 resonance['offshellnesses'] = []
4076 all_other_res_masses = [resonance_mass] + all_other_res_masses
4077 other_res_offshell = [resonance] + other_res_offshell
4078 for i, res in enumerate(other_res_offshell):
4079 if i==0:
4080 res_str = 'self'
4081 else:
4082 res_str = '%s %s'%(model.get_particle(
4083 res['ParticlePDG']).get_name(),
4084 str(list(res['FSMothersNumbers'])))
4085 resonance['offshellnesses'].append((res_str,(
4086 (invmass([PS_point_found[j-1] for j in
4087 res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
4088
4089 resonance['PS_point_used'] = PS_point_found
4090
4091 def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
4092 """ Assigns a kinematic configuration to the resonance dictionary
4093 given in argument."""
4094
4095
4096 mass_string = evaluator.full_model.get_particle(
4097 resonance['ParticlePDG']).get('mass')
4098 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
4099
4100
4101 special_mass = (1.0 + offshellness)*mass
4102
4103
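             # Factorise the kinematics: build a 'production' process in which the
             # resonance is represented by one extra final-state leg of invariant
             # mass special_mass, and a 1 > N 'decay' process for its final-state
             # mothers; glue_momenta merges the two below.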
4104 prod_proc = base_objects.Process({'legs':base_objects.LegList(
4105 copy.copy(leg) for leg in process.get('legs') if
4106 leg.get('number') not in resonance['FSMothersNumbers'])})
4107
4108
4109
4110 prod_proc.get('legs').append(base_objects.Leg({
4111 'number':max(l.get('number') for l in process.get('legs'))+1,
4112 'state':True,
4113 'id':0}))
4114
4115 decay_proc = base_objects.Process({'legs':base_objects.LegList(
4116 copy.copy(leg) for leg in process.get('legs') if leg.get('number')
4117 in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
4118
4119
4120
4121
4122 decay_proc.get('legs').insert(0,base_objects.Leg({
4123 'number':-1,
4124 'state':False,
4125 'id':0}))
4126 prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
4127 special_mass=special_mass)[0]
4128 decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
4129 special_mass=special_mass)[0]
4130 momenta = glue_momenta(prod_kinematic,decay_kinematic)
4131
4132
4133
4134 ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
4135 for i in range(len(prod_proc.get('legs'))-1)]
4136
4137 ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
4138 momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
4139
4140
4141 return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
4142
4143
4144
4145 @misc.mute_logger()
4146 def get_width(PDG, lambdaCMS, param_card):
4147 """ Returns the width to use for particle with absolute PDG 'PDG' and
4148         for the lambdaCMS value 'lambdaCMS', using the cache if possible."""
4149
4150
4151
4152 if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
4153 return 0.0
4154
4155 particle = evaluator.full_model.get_particle(PDG)
4156
4157
4158
4159 if particle.get('ghost') or particle.get('goldstone'):
4160 return 0.0
4161
4162
4163 if particle.get('width')=='ZERO':
4164 return 0.0
4165
4166 if (PDG,lambdaCMS) in options['cached_widths']:
4167 return options['cached_widths'][(PDG,lambdaCMS)]
4168
4169 if options['recompute_width'] == 'never':
4170 width = evaluator.full_model.\
4171 get('parameter_dict')[particle.get('width')].real
4172 else:
4173
4174 if aloha.complex_mass:
4175 raise MadGraph5Error, "The width for particle with PDG %d and"%PDG+\
4176 " lambdaCMS=%f should have already been "%lambdaCMS+\
4177 "computed during the NWA run."
4178
4179
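                 # Recompute the LO width numerically by calling the interface's
                 # 'compute_widths' command on a temporary param_card.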
4180 if options['recompute_width'] in ['always','first_time']:
4181 particle_name = particle.get_name()
4182 with misc.TMP_directory(dir=options['output_path']) as path:
4183 param_card.write(pjoin(path,'tmp.dat'))
4184
4185
4186
4187 command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
4188 ' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
4189 ' --precision_channel=0.001'
4190
4191 param_card.write(pjoin(options['output_path'],'tmp.dat'))
4192
4193
4194
4195 orig_model = options['cmd']._curr_model
4196 orig_fortran_model = options['cmd']._curr_fortran_model
4197 options['cmd'].do_compute_widths(command, evaluator.full_model)
4198
4199 options['cmd']._curr_model = orig_model
4200 options['cmd']._curr_fortran_model = orig_fortran_model
4201
4202
4203 evaluator.full_model.set_parameters_and_couplings(
4204 param_card=param_card)
4205 try:
4206 tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
4207 except:
4208                         raise MadGraph5Error, 'Error occurred during width '+\
4209 'computation with command:\n compute_widths %s'%command
4210 width = tmp_param_card['decay'].get(PDG).value
4211
4212
4213
4214
4215
4216
4217
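             # LO widths scale linearly with lambdaCMS, so when they are not
             # recomputed for every lambda the cache can be filled in one go:
             # width(lam) = width(lambdaCMS)*(lam/lambdaCMS).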
4218 if options['recompute_width'] in ['never','first_time']:
4219
4220 for lam in options['lambdaCMS']:
4221 options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
4222 else:
4223 options['cached_widths'][(PDG,lambdaCMS)] = width
4224
4225 return options['cached_widths'][(PDG,lambdaCMS)]
4226
4227 def get_order(diagrams, diagsName):
4228         """Compute the common sum of coupling orders used for this CMS check
4229         in the diagrams specified. When an inconsistency occurs, use diagsName
4230         in the warning message thrown."""
4231
4232 orders = set([])
4233 for diag in diagrams:
4234 diag_orders = diag.calculate_orders()
4235 orders.add(sum((diag_orders[order] if order in diag_orders else 0)
4236 for order in options['expansion_orders']))
4237 if len(orders)>1:
4238             logger.warning(warning_msg%('%s '%diagsName,str(orders)))
4239 return min(list(orders))
4240 else:
4241 return list(orders)[0]
4242
4243 MLoptions = copy.copy(options['MLOptions'])
4244
4245 MLoptions['DoubleCheckHelicityFilter'] = False
4246
4247
4248 for tweak in options['tweak']['custom']:
4249 if tweak.startswith('seed'):
4250 try:
4251 new_seed = int(tweak[4:])
4252 except ValueError:
4253 raise MadGraph5Error, "Seed '%s' is not of the right format 'seed<int>'."%tweak
4254 random.seed(new_seed)
4255
4256 mode = 'CMS' if aloha.complex_mass else 'NWA'
4257 for i, leg in enumerate(process.get('legs')):
4258 leg.set('number', i+1)
4259
4260 logger.info("Running CMS check for process %s (now doing %s scheme)" % \
4261 ( process.nice_string().replace('Process:', 'process'), mode))
4262
4263 proc_dir = None
4264 resonances = None
4265 warning_msg = "All %sdiagrams do not share the same sum of orders "+\
4266 "%s; found %%s."%(','.join(options['expansion_orders']))+\
4267                  " This is potentially problematic for the CMS check."
4268 if NLO:
4269
4270
4271
4272 if options['name']=='auto':
4273 proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
4274 temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
4275 ('_' if process.get('perturbation_couplings') else '')+
4276 '_'.join(process.get('perturbation_couplings')),mode)
4277 else:
4278 proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
4279 temp_dir_prefix,options['name'], mode)
4280
4281 timing, matrix_element = generate_loop_matrix_element(process,
4282 options['reuse'], output_path=options['output_path'],
4283 cmd = options['cmd'], proc_name=proc_name,
4284 loop_filter=options['loop_filter'])
4285 if matrix_element is None:
4286
4287 return None
4288
4289 reusing = isinstance(matrix_element, base_objects.Process)
4290 proc_dir = pjoin(options['output_path'],proc_name)
4291
4292
4293 infos = evaluator.setup_process(matrix_element, proc_dir,
4294 reusing = reusing, param_card = options['param_card'],
4295 MLOptions=MLoptions)
4296
4297 evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
4298 mp = None, loop_filter = True,MLOptions=MLoptions)
4299
4300
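             # Keep backup copies of 'param_card.dat' and 'model_functions.f' so
             # that an aborted run can be detected and the original files restored.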
4301 tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
4302 if os.path.isfile(tmp_card_backup):
4303
4304 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4305 " Now reverting 'param_card.dat' to its original value.")
4306 shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
4307 else:
4308
4309 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
4310
4311 tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
4312 'model_functions.f__TemporaryBackup__')
4313 if os.path.isfile(tmp_modelfunc_backup):
4314
4315 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4316 " Now reverting 'model_functions.f' to its original value.")
4317 shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
4318 'model_functions.f'))
4319 evaluator.apply_log_tweak(proc_dir, 'recompile')
4320 else:
4321
4322 shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
4323 tmp_modelfunc_backup)
4324
4325
4326 MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
4327 read_ps = True, npoints = 1, hel_config = options['helicity'],
4328 split_orders=options['split_orders'])
4329
4330
4331
4332 for dir in glob.glob(pjoin(proc_dir,'SubProcesses','P*_*')):
4333 if not (re.search(r'.*P\d+_\w*$', dir) or not os.path.isdir(dir)):
4334 continue
4335 try:
4336 os.remove(pjoin(dir,'check'))
4337 os.remove(pjoin(dir,'check_sa.o'))
4338 except OSError:
4339 pass
4340
4341 with open(os.devnull, 'w') as devnull:
4342 retcode = subprocess.call(['make','check'],
4343 cwd=dir, stdout=devnull, stderr=devnull)
4344 if retcode != 0:
4345 raise MadGraph5Error, "Compilation error with "+\
4346 "'make check' in %s"%dir
4347
4348
4349 pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
4350 if reusing:
4351
4352
4353 if not os.path.isfile(pkl_path):
4354 raise InvalidCmd('The folder %s could'%proc_dir+\
4355 " not be reused because the resonance specification file "+
4356 "'resonance_specs.pkl' is missing.")
4357 else:
4358 proc_name, born_order, loop_order, resonances = \
4359 save_load_object.load_from_file(pkl_path)
4360
4361
4362 for res in resonances:
4363 set_PSpoint(res, force_other_res_offshell=resonances)
4364
4365
4366 if isinstance(opt, list):
4367 opt.append((proc_name, resonances))
4368 else:
4369 resonances = opt
4370 else:
4371 helas_born_diagrams = matrix_element.get_born_diagrams()
4372 if len(helas_born_diagrams)==0:
4373             logger.warning('The CMS check for loop-induced processes is '+\
4374 'not yet available (nor is it very interesting).')
4375 return None
4376 born_order = get_order(helas_born_diagrams,'Born')
4377 loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
4378
4379
4380 if isinstance(opt, list):
4381 opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
4382 resonances = opt[-1][1]
4383 else:
4384 resonances = opt
4385
4386
4387 save_load_object.save_to_file(pkl_path, (process.base_string(),
4388 born_order, loop_order,resonances))
4389
4390 else:
4391
4392 try:
4393 amplitude = diagram_generation.Amplitude(process)
4394 except InvalidCmd:
4395 logging.info("No diagrams for %s" % \
4396 process.nice_string().replace('Process', 'process'))
4397 return None
4398 if not amplitude.get('diagrams'):
4399
4400 logging.info("No diagrams for %s" % \
4401 process.nice_string().replace('Process', 'process'))
4402 return None
4403
4404 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4405 gen_color=True)
4406 diagrams = matrix_element.get('diagrams')
4407 born_order = get_order(diagrams,'Born')
4408
4409 loop_order = -1
4410
4411 if isinstance(opt, list):
4412 opt.append((process.base_string(),find_resonances(diagrams)))
4413 resonances = opt[-1][1]
4414 else:
4415 resonances= opt
4416
4417 if len(resonances)==0:
4418 logger.info("No resonance found for process %s."\
4419 %process.base_string())
4420 return None
4421
4422
4423 if not options['cached_param_card'][mode][0]:
4424 if NLO:
4425 param_card = check_param_card.ParamCard(
4426 pjoin(proc_dir,'Cards','param_card.dat'))
4427 else:
4428 param_card = check_param_card.ParamCard(
4429 StringIO.StringIO(evaluator.full_model.write_param_card()))
4430 options['cached_param_card'][mode][0] = param_card
4431 name2block, _ = param_card.analyze_param_card()
4432 options['cached_param_card'][mode][1] = name2block
4433
4434 else:
4435 param_card = options['cached_param_card'][mode][0]
4436 name2block = options['cached_param_card'][mode][1]
4437
4438
4439 if loop_order != -1 and (loop_order+born_order)%2 != 0:
4440 raise MadGraph5Error, 'The summed squared matrix element '+\
4441 " order '%d' is not even."%(loop_order+born_order)
4442 result = {'born_order':born_order,
4443 'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)/2),
4444 'resonances_result':[]}
4445
4446
4447 if NLO:
4448 try:
4449 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
4450 pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
4451 except:
4452 pass
4453
4454
4455 had_log_tweaks=False
4456 if NLO:
4457 for tweak in options['tweak']['custom']:
4458 if tweak.startswith('seed'):
4459 continue
4460 try:
4461 logstart, logend = tweak.split('->')
4462 except:
4463                 raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4464 if logstart in ['logp','logm', 'log'] and \
4465 logend in ['logp','logm', 'log']:
4466 if NLO:
4467 evaluator.apply_log_tweak(proc_dir, [logstart, logend])
4468 had_log_tweaks = True
4469 else:
4470                 raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4471 if had_log_tweaks:
4472 evaluator.apply_log_tweak(proc_dir, 'recompile')
4473
4474
4475 if options['resonances']=='all':
4476 resonances_to_run = resonances
4477 elif isinstance(options['resonances'],int):
4478 resonances_to_run = resonances[:options['resonances']]
4479 elif isinstance(options['resonances'],list):
4480 resonances_to_run = []
4481 for res in resonances:
4482 for res_selection in options['resonances']:
4483 if abs(res['ParticlePDG'])==res_selection[0] and \
4484 res['FSMothersNumbers']==set(res_selection[1]):
4485 resonances_to_run.append(res)
4486 break
4487 else:
4488         raise InvalidCmd("Resonance selection '%s' not recognized"%\
4489 str(options['resonances']))
4490
4491
4492
4493 if NLO and options['show_plot']:
4494 widgets = ['ME evaluations:', pbar.Percentage(), ' ',
4495 pbar.Bar(),' ', pbar.ETA(), ' ']
4496 progress_bar = pbar.ProgressBar(widgets=widgets,
4497 maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
4498 progress_bar.update(0)
4499
4500 sys.stdout.flush()
4501 else:
4502 progress_bar = None
4503
4504 for resNumber, res in enumerate(resonances_to_run):
4505
4506
4507 result['resonances_result'].append({'resonance':res,'born':[]})
4508 if NLO:
4509 result['resonances_result'][-1]['finite'] = []
4510
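             # Scan over the lambdaCMS values: for each one, rescale the relevant
             # parameters (and, in CMS mode, the widths) in a fresh param_card,
             # reload the model, and evaluate the matrix element at the frozen
             # phase-space point.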
4511 for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
4512
4513
4514 new_param_card = check_param_card.ParamCard(param_card)
4515
4516 for param, replacement in options['expansion_parameters'].items():
4517
4518
4519 orig_param = param.replace('__tmpprefix__','')
4520 if orig_param not in name2block:
4521
4522
4523
4524 continue
4525 for block, lhaid in name2block[orig_param]:
4526 orig_value = float(param_card[block].get(lhaid).value)
4527 new_value = eval(replacement,
4528 {param:orig_value,'lambdacms':lambdaCMS})
4529 new_param_card[block].get(lhaid).value=new_value
4530
4531
4532
4533
4534
4535
4536
4537
4538 evaluator.full_model.set_parameters_and_couplings(
4539 param_card=new_param_card)
4540
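                 # In CMS mode the widths must scale consistently with lambdaCMS
                 # (recomputed or rescaled via get_width); in NWA mode all widths
                 # are set to zero.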
4541 for decay in new_param_card['decay'].keys():
4542 if mode=='CMS':
4543 new_width = get_width(abs(decay[0]), lambdaCMS,
4544 new_param_card)
4545 else:
4546 new_width = 0.0
4547 new_param_card['decay'].get(decay).value= new_width
4548
4549
4550 evaluator.full_model.set_parameters_and_couplings(
4551 param_card=new_param_card)
4552 if NLO:
4553 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4554
4555
4556 if lambdaCMS==1.0 and mode=='CMS' and \
4557 options['recompute_width'] in ['always','first_time']:
4558 new_param_card.write(pjoin(proc_dir,
4559 'Cards','param_card.dat_recomputed_widths'))
4560
4561
4562
4563 if mode=='NWA' and (options['recompute_width']=='always' or (
4564 options['recompute_width']=='first_time' and lambdaCMS==1.0)):
4565
4566 tmp_param_card = check_param_card.ParamCard(new_param_card)
4567
4568
4569 for decay in new_param_card['decay'].keys():
4570 particle_name = evaluator.full_model.get_particle(\
4571 abs(decay[0])).get_name()
4572 new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
4573 tmp_param_card['decay'].get(decay).value = new_width
4574 if not options['has_FRdecay'] and new_width != 0.0 and \
4575 (abs(decay[0]),lambdaCMS) not in options['cached_widths']:
4576 logger.info('Numerically computed width of particle'+\
4577 ' %s for lambda=%.4g : %-9.6gGeV'%
4578 (particle_name,lambdaCMS,new_width))
4579
4580
4581
4582 if lambdaCMS==1.0 and NLO:
4583 tmp_param_card.write(pjoin(proc_dir,
4584 'Cards','param_card.dat_recomputed_widths'))
4585
4586
4587 for param, replacement in options['tweak']['params'].items():
4588
4589
4590 orig_param = param.replace('__tmpprefix__','')
4591
4592 if orig_param.lower() == 'allwidths':
4593
4594 for decay in new_param_card['decay'].keys():
4595 orig_value = float(new_param_card['decay'].get(decay).value)
4596 new_value = eval(replacement,
4597 {param:orig_value,'lambdacms':lambdaCMS})
4598 new_param_card['decay'].get(decay).value = new_value
4599 continue
4600 if orig_param not in name2block:
4601
4602
4603 continue
4604 for block, lhaid in name2block[orig_param]:
4605 orig_value = float(new_param_card[block].get(lhaid).value)
4606 new_value = eval(replacement,
4607 {param:orig_value,'lambdacms':lambdaCMS})
4608 new_param_card[block].get(lhaid).value=new_value
4609
4610 if options['tweak']['params']:
4611
4612 evaluator.full_model.set_parameters_and_couplings(
4613 param_card=new_param_card)
4614 if NLO:
4615 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4616
4617
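                 # Evaluate the matrix element at the phase-space point stored for
                 # this resonance; at NLO the finite part is converted to an absolute
                 # contribution by multiplying by born*alphaS_over_2pi.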
4618 if NLO:
4619 ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
4620 proc_dir, PSpoint=res['PS_point_used'], verbose=False,
4621 format='dict', skip_compilation=True)
4622
4623
4624
4625
4626 result['resonances_result'][-1]['born'].append(ME_res['born'])
4627 result['resonances_result'][-1]['finite'].append(
4628 ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
4629 else:
4630 ME_res = evaluator.evaluate_matrix_element(matrix_element,
4631 p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
4632 result['resonances_result'][-1]['born'].append(ME_res)
4633 if not progress_bar is None:
4634 progress_bar.update(resNumber*len(options['lambdaCMS'])+\
4635 (lambdaNumber+1))
4636
4637 sys.stdout.flush()
4638
4639
4640 log_reversed = False
4641 for tweak in options['tweak']['custom']:
4642 if tweak.startswith('log') and had_log_tweaks:
4643 if log_reversed:
4644 continue
4645 if NLO:
4646 evaluator.apply_log_tweak(proc_dir, 'default')
4647 evaluator.apply_log_tweak(proc_dir, 'recompile')
4648 log_reversed = True
4649
4650
4651 evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
4652 if NLO:
4653 try:
4654 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
4655 pjoin(proc_dir,'Cards','param_card.dat'))
4656 except:
4657 param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4658
4659
4660
4661 try:
4662 os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
4663 os.remove(pjoin(proc_dir,'Source','MODEL',
4664 'model_functions.f__TemporaryBackup__'))
4665 except:
4666 pass
4667
4668 return (process.nice_string().replace('Process:', '').strip(),result)
4669
4670 def get_value(process, evaluator, p=None, options=None):
4671 """Return the value/momentum for a phase space point"""
4672
4673 for i, leg in enumerate(process.get('legs')):
4674 leg.set('number', i+1)
4675
4676 logger.info("Checking %s in %s gauge" % \
4677 ( process.nice_string().replace('Process:', 'process'),
4678 'unitary' if aloha.unitary_gauge else 'feynman'))
4679
4680 legs = process.get('legs')
4681
4682
4683 try:
4684 if process.get('perturbation_couplings')==[]:
4685 amplitude = diagram_generation.Amplitude(process)
4686 else:
4687 amplitude = loop_diagram_generation.LoopAmplitude(process)
4688 except InvalidCmd:
4689 logging.info("No diagrams for %s" % \
4690 process.nice_string().replace('Process', 'process'))
4691 return None
4692
4693 if not amplitude.get('diagrams'):
4694
4695 logging.info("No diagrams for %s" % \
4696 process.nice_string().replace('Process', 'process'))
4697 return None
4698
4699 if not p:
4700
4701 p, w_rambo = evaluator.get_momenta(process, options)
4702
4703
4704 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
4705 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4706 gen_color = True)
4707 else:
4708 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
4709 gen_color = True, optimized_output = evaluator.loop_optimized_output)
4710
4711 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
4712 output='jamp',options=options)
4713
4714 if mvalue and mvalue['m2']:
4715 return {'process':process.base_string(),'value':mvalue,'p':p}
4716
4718 """Present the results of a comparison in a nice list format for loop
4719     processes. It details the results of each Lorentz transformation performed.
4720 """
4721
4722 process = comparison_results[0]['process']
4723 results = comparison_results[0]['results']
4724
4725
4726 threshold_rotations = 1e-6
4727
4728
4729
4730 threshold_boosts = 1e-3
4731 res_str = "%s" % process.base_string()
4732
4733 transfo_col_size = 17
4734 col_size = 18
4735 transfo_name_header = 'Transformation name'
4736
4737 if len(transfo_name_header) + 1 > transfo_col_size:
4738 transfo_col_size = len(transfo_name_header) + 1
4739
4740 for transfo_name, value in results:
4741 if len(transfo_name) + 1 > transfo_col_size:
4742 transfo_col_size = len(transfo_name) + 1
4743
4744 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
4745 fixed_string_length("Value", col_size) + \
4746 fixed_string_length("Relative diff.", col_size) + "Result"
4747
4748 ref_value = results[0]
4749 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
4750 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
4751
4752
4753 all_pass = True
4754 for res in results[1:]:
4755 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
4756 threshold_rotations
4757 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
4758 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
4759 this_pass = rel_diff <= threshold
4760 if not this_pass:
4761 all_pass = False
4762 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
4763 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
4764 fixed_string_length("%1.10e" % rel_diff, col_size) + \
4765 ("Passed" if this_pass else "Failed")
4766 if all_pass:
4767 res_str += '\n' + 'Summary: passed'
4768 else:
4769 res_str += '\n' + 'Summary: failed'
4770
4771 return res_str
4772
4774 """Present the results of a comparison in a nice list format
4775        If output='fail', return the number of failed processes (used by the tests).
4776 """
4777
4778
4779 if comparison_results[0]['process']['perturbation_couplings']!=[]:
4780 return output_lorentz_inv_loop(comparison_results, output)
4781
4782 proc_col_size = 17
4783
4784 threshold=1e-10
4785 process_header = "Process"
4786
4787 if len(process_header) + 1 > proc_col_size:
4788 proc_col_size = len(process_header) + 1
4789
4790 for proc, values in comparison_results:
4791 if len(proc) + 1 > proc_col_size:
4792 proc_col_size = len(proc) + 1
4793
4794 col_size = 18
4795
4796 pass_proc = 0
4797 fail_proc = 0
4798 no_check_proc = 0
4799
4800 failed_proc_list = []
4801 no_check_proc_list = []
4802
4803 res_str = fixed_string_length(process_header, proc_col_size) + \
4804 fixed_string_length("Min element", col_size) + \
4805 fixed_string_length("Max element", col_size) + \
4806 fixed_string_length("Relative diff.", col_size) + \
4807 "Result"
4808
4809 for one_comp in comparison_results:
4810 proc = one_comp['process'].base_string()
4811 data = one_comp['results']
4812
4813 if data == 'pass':
4814 no_check_proc += 1
4815 no_check_proc_list.append(proc)
4816 continue
4817
4818 values = [data[i]['m2'] for i in range(len(data))]
4819
4820 min_val = min(values)
4821 max_val = max(values)
4822 diff = (max_val - min_val) / abs(max_val)
4823
4824 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4825 fixed_string_length("%1.10e" % min_val, col_size) + \
4826 fixed_string_length("%1.10e" % max_val, col_size) + \
4827 fixed_string_length("%1.10e" % diff, col_size)
4828
4829 if diff < threshold:
4830 pass_proc += 1
4831 proc_succeed = True
4832 res_str += "Passed"
4833 else:
4834 fail_proc += 1
4835 proc_succeed = False
4836 failed_proc_list.append(proc)
4837 res_str += "Failed"
4838
4839
4840
4841
4842
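             # Also compare the individual colour flows: for each JAMP, |JAMP|^2 is
             # summed over the helicity configurations and the totals from the
             # different evaluations must agree.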
4843 if len(data[0]['jamp'])!=0:
4844 for k in range(len(data[0]['jamp'][0])):
4845 sum = [0] * len(data)
4846
4847 for j in range(len(data[0]['jamp'])):
4848
4849 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
4850 sum = [sum[i] + values[i] for i in range(len(values))]
4851
4852
4853 min_val = min(sum)
4854 max_val = max(sum)
4855 if not max_val:
4856 continue
4857 diff = (max_val - min_val) / max_val
4858
4859 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
4860 fixed_string_length("%1.10e" % min_val, col_size) + \
4861 fixed_string_length("%1.10e" % max_val, col_size) + \
4862 fixed_string_length("%1.10e" % diff, col_size)
4863
4864 if diff > 1e-10:
4865 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4866 fail_proc += 1
4867 pass_proc -= 1
4868 failed_proc_list.append(proc)
4869 res_str += tmp_str + "Failed"
4870 elif not proc_succeed:
4871 res_str += tmp_str + "Passed"
4872
4873
4874
4875 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
4876 (pass_proc, pass_proc + fail_proc,
4877 fail_proc, pass_proc + fail_proc)
4878
4879 if fail_proc != 0:
4880 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
4881 if no_check_proc:
4882 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
4883
4884 if output == 'text':
4885 return res_str
4886 else:
4887 return fail_proc
4888
4890 """Present the results of a comparison in a nice list format
4891     If output='fail', return the number of failed processes (used by the tests).
4892 """
4893
4894 proc_col_size = 17
4895
4896
4897
4898 pert_coupl = comparison_results[0]['perturbation_couplings']
4899 comparison_results = comparison_results[1:]
4900
4901 if pert_coupl:
4902 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
4903 else:
4904 process_header = "Process"
4905
4906 if len(process_header) + 1 > proc_col_size:
4907 proc_col_size = len(process_header) + 1
4908
4909 for data in comparison_results:
4910 proc = data['process']
4911 if len(proc) + 1 > proc_col_size:
4912 proc_col_size = len(proc) + 1
4913
4914 pass_proc = 0
4915 fail_proc = 0
4916 no_check_proc = 0
4917
4918 failed_proc_list = []
4919 no_check_proc_list = []
4920
4921 col_size = 18
4922
4923 res_str = fixed_string_length(process_header, proc_col_size) + \
4924 fixed_string_length("Unitary", col_size) + \
4925 fixed_string_length("Feynman", col_size) + \
4926 fixed_string_length("Relative diff.", col_size) + \
4927 "Result"
4928
4929 for one_comp in comparison_results:
4930 proc = one_comp['process']
4931 data = [one_comp['value_unit'], one_comp['value_feynm']]
4932
4933
4934 if data[0] == 'pass':
4935 no_check_proc += 1
4936 no_check_proc_list.append(proc)
4937 continue
4938
4939 values = [data[i]['m2'] for i in range(len(data))]
4940
4941 min_val = min(values)
4942 max_val = max(values)
4943 diff = (max_val - min_val) / max_val
4944
4945 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4946 fixed_string_length("%1.10e" % values[0], col_size) + \
4947 fixed_string_length("%1.10e" % values[1], col_size) + \
4948 fixed_string_length("%1.10e" % diff, col_size)
4949
4950 if diff < 1e-8:
4951 pass_proc += 1
4952 proc_succeed = True
4953 res_str += "Passed"
4954 else:
4955 fail_proc += 1
4956 proc_succeed = False
4957 failed_proc_list.append(proc)
4958 res_str += "Failed"
4959
4960
4961
4962
4963
4964 if len(data[0]['jamp'])>0:
4965 for k in range(len(data[0]['jamp'][0])):
4966 sum = [0, 0]
4967
4968 for j in range(len(data[0]['jamp'])):
4969
4970 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
4971 sum = [sum[i] + values[i] for i in range(len(values))]
4972
4973
4974 min_val = min(sum)
4975 max_val = max(sum)
4976 if not max_val:
4977 continue
4978 diff = (max_val - min_val) / max_val
4979
4980 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
4981 fixed_string_length("%1.10e" % sum[0], col_size) + \
4982 fixed_string_length("%1.10e" % sum[1], col_size) + \
4983 fixed_string_length("%1.10e" % diff, col_size)
4984
4985 if diff > 1e-10:
4986 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4987 fail_proc += 1
4988 pass_proc -= 1
4989 failed_proc_list.append(proc)
4990 res_str += tmp_str + "Failed"
4991 elif not proc_succeed:
4992 res_str += tmp_str + "Passed"
4993
4994
4995
4996 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
4997 (pass_proc, pass_proc + fail_proc,
4998 fail_proc, pass_proc + fail_proc)
4999
5000 if fail_proc != 0:
5001 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5002 if no_check_proc:
5003 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5004
5005
5006 if output == 'text':
5007 return res_str
5008 else:
5009 return fail_proc
5010
5011 def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
5012 """Creates a suitable filename for saving these results."""
5013
5014 if opts['name']=='auto' and opts['analyze']!='None':
5015
5016 return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
5017 [0],extension)
5018
5019 if opts['name']!='auto':
5020 basename = opts['name']
5021 else:
5022 prefix = 'cms_check_'
5023
5024 if len(cms_res['ordered_processes'])==1:
5025 proc = cms_res['ordered_processes'][0]
5026 replacements = {' ':'','+':'p','-':'m','~':'x', '>':'_','=':'eq'}
5027
5028 try:
5029 proc=proc[:proc.index('[')]
5030 except ValueError:
5031 pass
5032
5033 for key, value in replacements.items():
5034 proc = proc.replace(key,value)
5035
5036 basename =prefix+proc+'_%s_'%used_model.get('name')+\
5037 ( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
5038 cms_res['perturbation_orders']!=[] else '')
5039
5040 else:
5041 basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
5042
5043 suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
5044 if output_path:
5045 return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
5046 else:
5047 return '%s%s.%s'%(basename,suffix,extension)
5048
5050 """ Outputs nicely the outcome of the complex mass scheme check performed
5051     by varying the width in the offshell region of resonances found for each process.
5052     Output just specifies whether text should be returned or a list of failed
5053     processes. Use 'concise_text' for a concise report of the results."""
5054
5055 pert_orders=result['perturbation_orders']
5056
5057
5058
5059
5060
5061
5062
5063
5064
5065 diff_lambda_power = options['diff_lambda_power']
5066
5067
5068
5069
5070
5071
5072
5073 if 'has_FRdecay' in result:
5074 has_FRdecay = result['has_FRdecay']
5075 else:
5076 has_FRdecay = False
5077
5078 if not pert_orders:
5079 CMS_test_threshold = 1e-3
5080 else:
5081
5082
5083
5084
5085
5086
5087 if not has_FRdecay and ('recomputed_with' not in result or \
5088 result['recompute_width'] in ['always','first_time']):
5089 CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
5090 else:
5091
5092
5093 CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
5094
5095
5096
5097
5098 consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
5099
5100
5101 group_val = 3
5102
5103
5104
5105
5106 diff_zero_threshold = 1e-3
5107
5108
5109 lambda_range = options['lambda_plot_range']
5110
5111
5112
5113
5114
5115
5116
5117
5118
5119
5120 res_str = ''
5121
5122 concise_str = ''
5123 concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
5124 concise_repl_dict = {'Header':{'process':'Process',
5125                                    'asymptot':'Asymptote',
5126                                    'cms_check':'Deviation from asymptote',
5127 'status':'Result'}}
5128
5129
5130
5131
5132
5133 useLatexParticleName = 'built-in'
5134 name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
5135 'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
5136 'mu+':r'\mu^+',
5137 'mu-':r'\mu^-',
5138 'ta+':r'\tau^+',
5139 'ta-':r'\tau^-'}
5140 for p in ['e','m','t']:
5141 d = {'e':'e','m':r'\mu','t':r'\tau'}
5142 name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
5143 name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
5144
5145 for p in ['u','d','c','s','b','t']:
5146 name2tex[p]=p
5147 name2tex['%s~'%p]=r'\bar{%s}'%p
5148
5149 def format_particle_name(particle, latex=useLatexParticleName):
5150 p_name = particle
5151 if latex=='model':
5152 try:
5153 texname = model.get_particle(particle).get('texname')
5154 if texname and texname!='none':
5155 p_name = r'$\displaystyle %s$'%texname
5156 except:
5157 pass
5158 elif latex=='built-in':
5159 try:
5160 p_name = r'$\displaystyle %s$'%name2tex[particle]
5161 except:
5162 pass
5163 return p_name
5164
5165 def resonance_str(resonance, latex=useLatexParticleName):
5166 """ Provides a concise string to characterize the resonance """
5167 particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
5168 mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
5169 return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
5170 ','.join(mothersID))
5171
5172 def format_title(process, resonance):
5173 """ Format the plot title given the process and resonance """
5174
5175 process_string = []
5176 for particle in process.split():
5177 if particle=='$$':
5178 process_string.append(r'\$\$')
5179 continue
5180 if particle=='>':
5181 process_string.append(r'$\displaystyle \rightarrow$')
5182 continue
5183 process_string.append(format_particle_name(particle))
5184
5185 if resonance=='':
5186 return r'CMS check for %s' %(' '.join(process_string))
5187 else:
5188 return r'CMS check for %s ( resonance %s )'\
5189 %(' '.join(process_string),resonance)
5190
5191 def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
5192 proc=None, res=None):
5193 """ Guess the lambda scaling from a list of ME values and return it.
5194 Also compare with the expected result if specified and trigger a
5195 warning if not in agreement."""
5196
5197 bpowers = []
5198 for i, lambdaCMS in enumerate(lambda_values[1:]):
5199 bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
5200 lambda_values[0]/lambdaCMS)))
5201
5202
5203 bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
5204 key = lambda elem: elem[1], reverse=True)[0][0]
5205 if not expected:
5206 return bpower
5207 if bpower != expected:
5208 logger.warning('The apparent scaling of the squared amplitude'+
5209                ' seems inconsistent with the expected value '+
5210                '(expected %i vs detected %i). %i will be used.'%(expected,bpower,bpower)+
5211                ' This happened for process %s and resonance %s.'%(proc, res))
5212 return bpower
5213
5214 def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
5215 """ Checks if the values passed in argument are stable and return the
5216 stability check outcome warning if it is not precise enough. """
5217 values = sorted([
5218 abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
5219 i, val in enumerate(ME_values)])
5220 median = values[len(values)//2]
5221 max_diff = max(abs(values[0]-median),abs(values[-1]-median))
5222 stability = max_diff/median
5223 stab_threshold = 1e-2
5224 if stability >= stab_threshold:
5225 return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
5226 %(values_name, stability)
5227 else:
5228 return None
5229
5230 if options['analyze']=='None':
5231 if options['reuse']:
5232 save_path = CMS_save_path('pkl', result, model, options,
5233 output_path=output_path)
5234 buff = "\nThe results of this check have been stored on disk and its "+\
5235               "analysis can be rerun at any time with the MG5aMC command:\n "+\
5236 " check cms --analyze=%s\n"%save_path
5237 res_str += buff
5238 concise_str += buff
5239 save_load_object.save_to_file(save_path, result)
5240 elif len(result['ordered_processes'])>0:
5241         buff = "\nUse the following syntax if you want to store "+\
5242 "the raw results on disk.\n"+\
5243 " check cms -reuse <proc_def> <options>\n"
5244 res_str += buff
5245 concise_str += buff
5246
5247
5248
5249
5250
5251 checks = []
5252 for process in result['ordered_processes']:
5253 checks.extend([(process,resID) for resID in \
5254 range(len(result[process]['CMS']))])
5255
5256 if options['reuse']:
5257 logFile = open(CMS_save_path(
5258 'log', result, model, options, output_path=output_path),'w')
5259
5260 lambdaCMS_list=result['lambdaCMS']
5261
5262
5263 failed_procs = []
5264
5265
5266 bar = lambda char: char*47
5267
5268
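    # Report how the particle widths entering the check were obtained.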
5269 if 'widths_computed' in result:
5270 res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
5271 if result['recompute_width'] == 'never':
5272 res_str += '| Widths extracted from the param_card.dat'
5273 else:
5274 res_str += '| Widths computed %s'%('analytically' if has_FRdecay
5275 else 'numerically')
5276 if result['recompute_width'] == 'first_time':
5277 res_str += ' for \lambda = 1'
5278 elif result['recompute_width'] == 'always':
5279 res_str += ' for all \lambda values'
5280 res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
5281 for particle_name, width in result['widths_computed']:
5282 res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
5283 res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
5284
5285
5286
5287
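        # Number of smallest-lambda points used in the stability checks below.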
5288 nstab_points=group_val
5289
5290 differences_target = {}
5291 for process, resID in checks:
5292
5293
5294 concise_repl_dict[process] = {'process':process,
5295 'asymptot':'N/A',
5296 'cms_check':'N/A',
5297 'status':'N/A'}
5298 proc_res = result[process]
5299 cms_res = proc_res['CMS'][resID]
5300 nwa_res = proc_res['NWA'][resID]
5301 resonance = resonance_str(cms_res['resonance'], latex='none')
5302 cms_born=cms_res['born']
5303 nwa_born=nwa_res['born']
5304
5305 res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
5306
5307 proc_title = "%s (resonance %s)"%(process,resonance)
5308 centering = (bar(2)+8-len(proc_title))//2
5309 res_str += "%s%s\n"%(' '*centering,proc_title)
5310
5311 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5312
5313
5314 if diff_lambda_power!=1:
5315 res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
5316 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5317
5318 born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
5319 expected=proc_res['born_order'], proc=process, res=resonance)
5320 stab_cms_born = check_stability(cms_born[-nstab_points:],
5321 lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
5322 if stab_cms_born:
5323 res_str += stab_cms_born
5324 stab_nwa_born = check_stability(nwa_born[-nstab_points:],
5325 lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
5326 if stab_nwa_born:
5327 res_str += stab_nwa_born
5328
5329         res_str += "== Kinematic configuration in GeV (E,px,py,pz)\n"
5330 for i, p in enumerate(cms_res['resonance']['PS_point_used']):
5331 res_str += " | p%-2.d = "%(i+1)
5332 for pi in p:
5333 res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
5334 res_str += "\n"
5335
5336 res_str += "== Offshellnesses of all detected resonances\n"
5337 for res_name, offshellness in cms_res['resonance']['offshellnesses']:
5338 res_str += " | %-15s = %f\n"%(res_name, offshellness)
5339 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5340
5341 if not pert_orders:
5342 res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
5343 else:
5344 cms_finite=cms_res['finite']
5345 nwa_finite=nwa_res['finite']
5346 loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
5347 expected=proc_res['loop_order'], proc=process, res=resonance)
5348 res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
5349 %(born_power,loop_power)
5350 stab_cms_finite = check_stability(cms_finite[-nstab_points:],
5351 lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
5352 if stab_cms_finite:
5353 res_str += stab_cms_finite
5354 stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
5355 lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
5356 if stab_nwa_finite:
5357 res_str += stab_nwa_finite
5358
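        # Build the rescaled curves used for the check:
        #   LO : CMS = M^(0)_CMS/lambda^born_power and NWA = M^(0)_NWA/lambda^born_power
        #   NLO: CMS = (M^(1)_CMS + M^(0)_CMS - M^(0)_NWA)/(lambda*M^(0)_NWA) and
        #        NWA = M^(1)_NWA/(lambda*M^(0)_NWA)
        # together with their difference, further divided by lambda^diff_lambda_power.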
5359 CMSData = []
5360 NWAData = []
5361 DiffData = []
5362 for idata, lam in enumerate(lambdaCMS_list):
5363 if not pert_orders:
5364 new_cms=cms_born[idata]/(lam**born_power)
5365 new_nwa=nwa_born[idata]/(lam**born_power)
5366 else:
5367 new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
5368 new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
5369 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5370 CMSData.append(new_cms)
5371 NWAData.append(new_nwa)
5372 DiffData.append(new_diff)
5373
5374
5375
5376
5377
5378
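        # Estimate the asymptotic value of the difference as the median of the central
        # 60% of the rescaled difference values (20% of the points are trimmed on each side).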
5379 trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
5380 low_diff_median = sorted(DiffData[trim_range:-trim_range])\
5381 [(len(DiffData)-2*trim_range)//2]
5382
5383
5384
5385
5386
5387
5388 current_median = 0
5389
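        # Reference scale for all relative comparisons: the median of the rescaled NWA
        # values, unless it is much smaller than the asymptotic difference (as measured
        # by diff_zero_threshold), in which case the asymptotic difference itself is used.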
5390 scan_index = 0
5391 reference = abs(sorted(NWAData)[len(NWAData)//2])
5392 if low_diff_median!= 0.0:
5393 if abs(reference/low_diff_median)<diff_zero_threshold:
5394 reference = abs(low_diff_median)
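        # Slide a window of 'group_val' consecutive points, starting from the largest
        # lambda values, until its median agrees with the asymptotic difference within
        # 'consideration_threshold'; this locates the beginning of the stable region.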
5395 while True:
5396 scanner = DiffData[scan_index:group_val+scan_index]
5397 current_median = sorted(scanner)[len(scanner)//2]
5398
5399
5400 if abs(current_median-low_diff_median)/reference<\
5401 consideration_threshold:
5402                 break
5403 scan_index += 1
5404 if (group_val+scan_index)>=len(DiffData):
5405
5406
5407                 logger.warning('The median scanning failed during the CMS check '+
5408                     'for process %s. '%proc_title+\
5409                     'This means that the difference plot has no stable '+\
5410                     'intermediate region and MG5_aMC will arbitrarily consider the '+\
5411                     'left half of the values.')
5412 scan_index = -1
5413                 break
5414
5415 if scan_index == -1:
5416 cms_check_data_range = len(DiffData)//2
5417 else:
5418 cms_check_data_range = scan_index + group_val
5419
5420 res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
5421 %(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
5422 len(lambdaCMS_list)-scan_index)
5423
5424 CMScheck_values = DiffData[cms_check_data_range:]
5425
5426
5427
5428
5429 if scan_index >= 0:
5430
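            # A stable region was found. Now scan it starting from the smallest lambda
            # values, moving the window towards larger lambda until all its points agree
            # with the asymptotic difference; anything at smaller lambda than that window
            # is a candidate numerically unstable tail.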
5431 scan_index = len(CMScheck_values)
5432 used_group_val = max(3,group_val)
5433 unstability_found = True
5434 while True:
5435 scanner = CMScheck_values[scan_index-used_group_val:scan_index]
5436 maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
5437 if maxdiff/reference<consideration_threshold:
5438                     break
5439 if (scan_index-used_group_val)==0:
5440
5441
5442 unstability_found = False
5443                     break
5444
5445 scan_index -= 1
5446
5447
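            # Only warn about an instability if the candidate tail scatters on both sides
            # of the last stable value; a strictly one-sided deviation is not reported.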
5448 if unstability_found:
5449 unstab_check=CMScheck_values[scan_index:]
5450 relative_array = [val > CMScheck_values[scan_index-1] for
5451 val in unstab_check]
5452 upper = relative_array.count(True)
5453 lower = relative_array.count(False)
5454 if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
5455 logger.warning(
5456 """For process %s, a numerically unstable region was detected starting from lambda < %.1e.
5457 Look at the plot in this region (and possibly add more points using the option --lambdaCMS).
5458 If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or decrease the
5459 minimum value of lambda to be considered in the CMS check."""\
5460 %(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
5461
5462
5463
5464
5465 scan_index = 0
5466 max_diff = 0.0
5467 res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
5468 %('%.3g'%reference)
5469 res_str += "== Asymptotic difference value detected = %s\n"\
5470 %('%.3g'%low_diff_median)
5471 concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
5472
5473
5474 differences_target[(process,resID)]= low_diff_median
5475
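        # The CMS check figure of merit is the largest relative deviation of the running
        # median (over windows of 'group_val' points) from the asymptotic difference.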
5476 while True:
5477 current_vals = CMScheck_values[scan_index:scan_index+group_val]
5478 max_diff = max(max_diff, abs(low_diff_median-
5479 sorted(current_vals)[len(current_vals)//2])/reference)
5480 if (scan_index+group_val)>=len(CMScheck_values):
5481 break
5482 scan_index += 1
5483
5484
5485 cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
5486 CMS_test_threshold*100.0)
5487 res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
5488 concise_repl_dict[process]['cms_check'] = \
5489 "%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
5490
5491 if max_diff>CMS_test_threshold:
5492 failed_procs.append((process,resonance))
5493 res_str += "%s %s %s\n"%(bar('='),
5494 'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
5495 concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
5496 else 'Passed'
5497
5498 if output=='concise_text':
5499
5500 max_proc_size = max(
5501 [len(process) for process in result['ordered_processes']]+[10])
5502
5503 res_str = concise_str
5504 res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
5505 for process in result['ordered_processes']:
5506 res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
5507
5508 if len(checks):
5509 res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
5510 ('.\n' if not failed_procs else ', failed checks are for:\n')
5511 else:
5512             return "\nNo CMS check to perform: the process either has no diagrams or does "+\
5513                    "not feature any massive s-channel resonance."
5514
5515 for process, resonance in failed_procs:
5516 res_str += "> %s, %s\n"%(process, resonance)
5517
5518 if output=='concise_text':
5519 res_str += '\nMore detailed information on this check available with the command:\n'
5520 res_str += ' MG5_aMC>display checks\n'
5521
5522
5523
5524
5525 if not options['show_plot']:
5526 if options['reuse']:
5527 logFile.write(res_str)
5528 logFile.close()
5529 if output.endswith('text'):
5530 return res_str
5531 else:
5532 return failed_procs
5533
5534 fig_output_file = CMS_save_path('pdf', result, model, options,
5535 output_path=output_path)
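    # Never overwrite an existing figure: append a '__<n>__' suffix until the name is free.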
5536 base_fig_name = fig_output_file[:-4]
5537 suffix = 1
5538 while os.path.isfile(fig_output_file):
5539 fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
5540 suffix+=1
5541
5542 process_data_plot_dict={}
5543
5544
5545
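    # Besides the current result, load any additional result files passed through the
    # '--analyze' option (given as '<file>' or '<file>(<title>)') so that they can be
    # overlaid on the same plots.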
5546 all_res = [(result, None)]
5547 for i, add_res in enumerate(options['analyze'].split(',')[1:]):
5548 specs =re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
5549 if specs:
5550 filename = specs.group('filename')
5551 title = specs.group('title')
5552 else:
5553 filename = add_res
5554 title = '#%d'%(i+1)
5555
5556 new_result = save_load_object.load_from_file(filename)
5557 if new_result is None:
5558 raise InvalidCmd('The complex mass scheme check result'+
5559 " file below could not be read.\n %s"%filename)
5560 if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
5561 or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
5562             raise InvalidCmd('The complex mass scheme check result'+
5563 " file below does not seem compatible.\n %s"%filename)
5564 all_res.append((new_result,title))
5565
5566
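    # Gather, for each process and resonance and for each loaded result, the rescaled
    # CMS/NWA curves and their normalised difference to be plotted.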
5567 for process, resID in checks:
5568 data1=[]
5569 data2=[]
5570 info ={}
5571 for res in all_res:
5572 proc_res = res[0][process]
5573 cms_res = proc_res['CMS'][resID]
5574 nwa_res = proc_res['NWA'][resID]
5575 resonance = resonance_str(cms_res['resonance'])
5576 if options['resonances']!=1:
5577 info['title'] = format_title(process, resonance)
5578 else:
5579 info['title'] = format_title(process, '')
5580
5581 cms_born=cms_res['born']
5582 nwa_born=nwa_res['born']
5583 if len(cms_born) != len(lambdaCMS_list) or\
5584 len(nwa_born) != len(lambdaCMS_list):
5585 raise MadGraph5Error, 'Inconsistent list of results w.r.t. the'+\
5586 ' lambdaCMS values specified for process %s'%process
5587 if pert_orders:
5588 cms_finite=cms_res['finite']
5589 nwa_finite=nwa_res['finite']
5590 if len(cms_finite) != len(lambdaCMS_list) or\
5591 len(nwa_finite) != len(lambdaCMS_list):
5592 raise MadGraph5Error, 'Inconsistent list of results w.r.t. the'+\
5593 ' lambdaCMS values specified for process %s'%process
5594
5595 bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
5596 expected=proc_res['born_order'], proc=process, res=resonance)
5597
5598 CMSData = []
5599 NWAData = []
5600 DiffData = []
5601 for idata, lam in enumerate(lambdaCMS_list):
5602 if not pert_orders:
5603 new_cms = cms_born[idata]/lam**bpower
5604 new_nwa = nwa_born[idata]/lam**bpower
5605 else:
5606 new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
5607 new_nwa=nwa_finite[idata]
5608 new_cms /= lam*nwa_born[idata]
5609 new_nwa /= lam*nwa_born[idata]
5610 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5611 CMSData.append(new_cms)
5612 NWAData.append(new_nwa)
5613 DiffData.append(new_diff)
5614 if res[1] is None:
5615 if not pert_orders:
5616 data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
5617 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
5618 else:
5619 data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
5620 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
5621 data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
5622 %('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
5623 ,DiffData])
5624                 data2.append([r'Detected asymptote',[differences_target[(process,resID)]
5625 for i in range(len(lambdaCMS_list))]])
5626 else:
5627 data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' '),CMSData])
5628 data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' '),NWAData])
5629 data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' '),DiffData])
5630
5631 process_data_plot_dict[(process,resID)]=(data1,data2, info)
5632
5633
5634 try:
5635 import matplotlib.pyplot as plt
5636 from matplotlib.backends.backend_pdf import PdfPages
5637 logger.info('Rendering plots... (this can take some time because of the latex labels)')
5638
5639 res_str += \
5640 """\n-----------------------------------------------------------------------------------------------
5641 | In the plots, the Complex Mass Scheme check is successful if the normalized difference |
5642 | between the CMS and NWA result (lower inset) tends to a constant when \lambda goes to zero. |
5643 -----------------------------------------------------------------------------------------------\n"""
5644
5645
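        # Translate the user-specified lambda plotting range into index bounds on
        # lambdaCMS_list (ordered from the largest to the smallest lambda); a
        # non-positive bound leaves the corresponding side unconstrained.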
5646 if lambda_range[1]>0:
5647 min_lambda_index = -1
5648 for i, lam in enumerate(lambdaCMS_list):
5649 if lam<=lambda_range[1]:
5650 min_lambda_index = i
5651 break
5652 else:
5653 min_lambda_index = 0
5654 if lambda_range[0]>0:
5655 max_lambda_index = -1
5656 for i, lam in enumerate(lambdaCMS_list):
5657 if lam<=lambda_range[0]:
5658 max_lambda_index=i-1
5659 break
5660 else:
5661 max_lambda_index=len(lambdaCMS_list)-1
5662
5663 if max_lambda_index==-1 or min_lambda_index==-1 or \
5664 min_lambda_index==max_lambda_index:
5665 raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
5666 (lambda_range[0],lambda_range[1]))
5667
5668 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5669 lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
5670
5671 plt.rc('text', usetex=True)
5672 plt.rc('font', family='serif')
5673 pp=PdfPages(fig_output_file)
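        # Choose the plot colours: a small fixed palette when at most 7 curves appear in
        # the lower inset, otherwise colours sampled from the 'gist_rainbow' colormap.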
5674 if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
5675 colorlist=['b','r','g','k','c','m','y']
5676 else:
5677 import matplotlib.colors as colors
5678 import matplotlib.cm as mplcm
5680
5681
5682 cm = plt.get_cmap('gist_rainbow')
5683 cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
5684 scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
5685
5686 colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
5687
5688
5689
5690
5691
5692
5693 legend_size = 10
5694 for iproc, (process, resID) in enumerate(checks):
5695 data1,data2, info=process_data_plot_dict[(process,resID)]
5696
5697 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5698 for i in range(len(data1)):
5699 data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
5700 for i in range(len(data2)):
5701 data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
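            # One figure per process/resonance: upper inset for the rescaled CMS and NWA
            # curves, lower inset for their normalised difference.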
5702 plt.figure(iproc+1)
5703 plt.subplot(211)
5704 minvalue=1e+99
5705 maxvalue=-1e+99
5706 for i, d1 in enumerate(data1):
5707
5708 color=colorlist[i//2]
5709 data_plot=d1[1]
5710 minvalue=min(min(data_plot),minvalue)
5711 maxvalue=max(max(data_plot),maxvalue)
5712 plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
5713 linestyle=('-' if i%2==0 else '--'),
5714 label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
5715 ymin = minvalue-(maxvalue-minvalue)/5.
5716 ymax = maxvalue+(maxvalue-minvalue)/5.
5717
5718 plt.yscale('linear')
5719 plt.xscale('log')
5720 plt.title(info['title'],fontsize=12,y=1.08)
5721 plt.ylabel(r'$\displaystyle \mathcal{M}$')
5722
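            # Crude legend placement: if the curves lie, on average, in the lower half of
            # the frame at small lambda, put the legend in the upper-left corner,
            # otherwise in the lower-left corner.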
5723 if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
5724 for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
5725 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5726 else:
5727 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5728
5729 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
5730
5731 plt.subplot(212)
5732 minvalue=1e+99
5733 maxvalue=-1e+99
5734
5735 try:
5736                 asymptot_index = [d2[0] for d2 in data2].index('Detected asymptote')
5737 plt.plot(lambdaCMS_list, data2[asymptot_index][1],
5738 color='0.75', marker='', linestyle='-', label='')
5739 except ValueError:
5740 pass
5741
5742 color_ID = -1
5743 for d2 in data2:
5744
5745                 if d2[0]=='Detected asymptote':
5746 continue
5747 color_ID += 1
5748 color=colorlist[color_ID]
5749 data_plot=d2[1]
5750 minvalue=min(min(data_plot),minvalue)
5751 maxvalue=max(max(data_plot),maxvalue)
5752 plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
5753 linestyle='-', label=d2[0])
5754 ymin = minvalue-(maxvalue-minvalue)/5.
5755 ymax = maxvalue+(maxvalue-minvalue)/5.
5756
5757 plt.yscale('linear')
5758 plt.xscale('log')
5759 plt.ylabel(r'$\displaystyle \Delta$')
5760 plt.xlabel(r'$\displaystyle \lambda$')
5761
5762
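            # For the lower inset, first determine which side of the plot (small-lambda
            # 'left' vs large-lambda 'right') is flatter, then apply the same placement
            # heuristic on that side.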
5763 sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
5764 left_stability = sum(abs(s[0]-s[-1]) for s in sd)
5765 sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
5766 right_stability = sum(abs(s[0]-s[-1]) for s in sd)
5767 left_stable = False if right_stability==0.0 else \
5768 (left_stability/right_stability)<0.1
5769
5770 if left_stable:
5771 if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
5772 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5773 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5774 else:
5775 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5776 else:
5777 if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
5778 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5779 plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
5780 else:
5781 plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
5782
5783 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
5784 minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
5785
5786 plt.savefig(pp,format='pdf')
5787
5788 pp.close()
5789
5790 if len(checks)>0:
5791 logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
5792
5793 if sys.platform.startswith('linux'):
5794 misc.call(["xdg-open", fig_output_file])
5795 elif sys.platform.startswith('darwin'):
5796 misc.call(["open", fig_output_file])
5797
5798 plt.close("all")
5799
5800 except Exception as e:
5801 if isinstance(e, ImportError):
5802                 res_str += "\n= Install matplotlib to get a "+\
5803                   "graphical display of the results of the CMS check."
5804 else:
5805                 general_error = "\n= Could not produce the CMS check plot because of "+\
5806                   "the following error: %s"%str(e)
5807 try:
5808 import Tkinter
5809 if isinstance(e, Tkinter.TclError):
5810 res_str += "\n= Plots are not generated because your system"+\
5811 " does not support graphical display."
5812 else:
5813 res_str += general_error
5814 except:
5815 res_str += general_error
5816
5817 if options['reuse']:
5818 logFile.write(res_str)
5819 logFile.close()
5820
5821 if output.endswith('text'):
5822 return res_str
5823 else:
5824 return failed_procs
5825