1
2
3
4
5
6
7
8
9
10
11
12
13
14
15 """Several different checks for processes (and hence models):
16 permutation tests, gauge invariance tests, lorentz invariance
17 tests. Also class for evaluation of Python matrix elements,
18 MatrixElementEvaluator."""
19
20 from __future__ import division
21
22 from __future__ import absolute_import
23 import array
24 import copy
25 import fractions
26 import itertools
27 import logging
28 import math
29 import os
30 import sys
31 import re
32 import shutil
33 import random
34 import glob
35 import re
36 import subprocess
37 import time
38 import datetime
39 import errno
40 import pickle
41
42
43
44 import aloha
45 import aloha.aloha_writers as aloha_writers
46 import aloha.create_aloha as create_aloha
47
48 import madgraph.iolibs.export_python as export_python
49 import madgraph.iolibs.helas_call_writers as helas_call_writers
50 import models.import_ufo as import_ufo
51 import madgraph.iolibs.save_load_object as save_load_object
52 import madgraph.iolibs.file_writers as writers
53
54 import madgraph.core.base_objects as base_objects
55 import madgraph.core.color_algebra as color
56 import madgraph.core.color_amp as color_amp
57 import madgraph.core.helas_objects as helas_objects
58 import madgraph.core.diagram_generation as diagram_generation
59
60 import madgraph.various.rambo as rambo
61 import madgraph.various.misc as misc
62 import madgraph.various.progressbar as pbar
63 import madgraph.various.banner as bannermod
64 import madgraph.various.progressbar as pbar
65
66 import madgraph.loop.loop_diagram_generation as loop_diagram_generation
67 import madgraph.loop.loop_helas_objects as loop_helas_objects
68 import madgraph.loop.loop_base_objects as loop_base_objects
69 import models.check_param_card as check_param_card
70
71 from madgraph.interface.madevent_interface import MadLoopInitializer
72 from madgraph.interface.common_run_interface import AskforEditCard
73 from madgraph import MG5DIR, InvalidCmd, MadGraph5Error
74
75 from madgraph.iolibs.files import cp
76
77 import models.model_reader as model_reader
78 import aloha.template_files.wavefunctions as wavefunctions
79 from aloha.template_files.wavefunctions import \
80 ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
81 import six
82 StringIO = six
83 from six.moves import range
84 from six.moves import zip
85 import io
86 if six.PY3:
87 file = io.FileIO
88
89
90 ADDED_GLOBAL = []
91
92 temp_dir_prefix = "TMP_CHECK"
93
94 pjoin = os.path.join
97 for value in list(to_clean):
98 del globals()[value]
99 to_clean.remove(value)
100
105 """ Just an 'option container' to mimick the interface which is passed to the
106 tests. We put in only what is now used from interface by the test:
107 cmd.options['fortran_compiler']
108 cmd.options['complex_mass_scheme']
109 cmd._mgme_dir"""
110 - def __init__(self, mgme_dir = "", complex_mass_scheme = False,
111 fortran_compiler = 'gfortran' ):
112 self._mgme_dir = mgme_dir
113 self.options = {}
114 self.options['complex_mass_scheme']=complex_mass_scheme
115 self.options['fortran_compiler']=fortran_compiler
116
117
118
119
120
121 logger = logging.getLogger('madgraph.various.process_checks')
126 """boost the set momenta in the 'boost direction' by the 'beta'
127 factor"""
128
129 boost_p = []
130 gamma = 1/ math.sqrt(1 - beta**2)
131 for imp in p:
132 bosst_p = imp[boost_direction]
133 E, px, py, pz = imp
134 boost_imp = []
135
136 boost_imp.append(gamma * E - gamma * beta * bosst_p)
137
138 if boost_direction == 1:
139 boost_imp.append(-gamma * beta * E + gamma * px)
140 else:
141 boost_imp.append(px)
142
143 if boost_direction == 2:
144 boost_imp.append(-gamma * beta * E + gamma * py)
145 else:
146 boost_imp.append(py)
147
148 if boost_direction == 3:
149 boost_imp.append(-gamma * beta * E + gamma * pz)
150 else:
151 boost_imp.append(pz)
152
153 boost_p.append(boost_imp)
154
155 return boost_p
156
161 """Class taking care of matrix element evaluation, storing
162 relevant quantities for speedup."""
163
164 - def __init__(self, model , param_card = None,
165 auth_skipping = False, reuse = True, cmd = FakeInterface()):
166 """Initialize object with stored_quantities, helas_writer,
167 model, etc.
168 auth_skipping = True means that any identical matrix element will be
169 evaluated only once
170 reuse = True means that the matrix element corresponding to a
171 given process can be reused (turn off if you are using
172 different models for the same process)"""
173
174 self.cmd = cmd
175
176
177 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
178
179
180 self.full_model = model_reader.ModelReader(model)
181 try:
182 self.full_model.set_parameters_and_couplings(param_card)
183 except MadGraph5Error:
184 if isinstance(param_card, (str,file)):
185 raise
186 logger.warning('param_card present in the event file not compatible.'+
187 ' We will use the default one.')
188 self.full_model.set_parameters_and_couplings()
189
190 self.auth_skipping = auth_skipping
191 self.reuse = reuse
192 self.cmass_scheme = cmd.options['complex_mass_scheme']
193 self.store_aloha = []
194 self.stored_quantities = {}
195
196
197
198
199 - def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
200 gauge_check=False, auth_skipping=None, output='m2',
201 options=None):
202 """Calculate the matrix element and evaluate it for a phase space point
203 output is either m2, amp, jamp
204 """
205
206 if full_model:
207 self.full_model = full_model
208 process = matrix_element.get('processes')[0]
209 model = process.get('model')
210
211
212 if "matrix_elements" not in self.stored_quantities:
213 self.stored_quantities['matrix_elements'] = []
214 matrix_methods = {}
215
216 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
217 try:
218 if matrix_element not in self.stored_quantities['matrix_elements']:
219 self.stored_quantities['matrix_elements'].append(matrix_element)
220
221 matrix = eval("Matrix_%s()" % process.shell_string(), globals())
222 me_value = matrix.smatrix(p, self.full_model)
223 if output == "m2":
224 return matrix.smatrix(p, self.full_model), matrix.amp2
225 else:
226 m2 = matrix.smatrix(p, self.full_model)
227 return {'m2': m2, output:getattr(matrix, output)}
228 except NameError:
229 pass
230
231 if (auth_skipping or self.auth_skipping) and matrix_element in \
232 self.stored_quantities['matrix_elements']:
233
234 logger.info("Skipping %s, " % process.nice_string() + \
235 "identical matrix element already tested" \
236 )
237 return None
238
239
240 self.stored_quantities['matrix_elements'].append(matrix_element)
241
242
243
244 if "list_colorize" not in self.stored_quantities:
245 self.stored_quantities["list_colorize"] = []
246 if "list_color_basis" not in self.stored_quantities:
247 self.stored_quantities["list_color_basis"] = []
248 if "list_color_matrices" not in self.stored_quantities:
249 self.stored_quantities["list_color_matrices"] = []
250
251 col_basis = color_amp.ColorBasis()
252 new_amp = matrix_element.get_base_amplitude()
253 matrix_element.set('base_amplitude', new_amp)
254 colorize_obj = col_basis.create_color_dict_list(new_amp)
255
256 try:
257
258
259
260 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
261 except ValueError:
262
263
264 self.stored_quantities['list_colorize'].append(colorize_obj)
265 col_basis.build()
266 self.stored_quantities['list_color_basis'].append(col_basis)
267 col_matrix = color_amp.ColorMatrix(col_basis)
268 self.stored_quantities['list_color_matrices'].append(col_matrix)
269 col_index = -1
270
271
272 matrix_element.set('color_basis',
273 self.stored_quantities['list_color_basis'][col_index])
274 matrix_element.set('color_matrix',
275 self.stored_quantities['list_color_matrices'][col_index])
276
277
278 if "used_lorentz" not in self.stored_quantities:
279 self.stored_quantities["used_lorentz"] = []
280
281 me_used_lorentz = set(matrix_element.get_used_lorentz())
282 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
283 if lorentz not in self.store_aloha]
284
285 aloha_model = create_aloha.AbstractALOHAModel(model.get('modelpath'))
286 aloha_model.add_Lorentz_object(model.get('lorentz'))
287 aloha_model.compute_subset(me_used_lorentz)
288
289
290 aloha_routines = []
291 for routine in aloha_model.values():
292 aloha_routines.append(routine.write(output_dir = None,
293 mode='mg5',
294 language = 'Python'))
295 for routine in aloha_model.external_routines:
296 for path in aloha_model.locate_external(routine, 'Python'):
297 aloha_routines.append(open(path).read())
298
299
300 previous_globals = list(globals().keys())
301 for routine in aloha_routines:
302 exec(routine, globals())
303 for key in globals().keys():
304 if key not in previous_globals:
305 ADDED_GLOBAL.append(key)
306
307
308 self.store_aloha.extend(me_used_lorentz)
309
310 exporter = export_python.ProcessExporterPython(matrix_element,
311 self.helas_writer)
312 try:
313 matrix_methods = exporter.get_python_matrix_methods(\
314 gauge_check=gauge_check)
315
316 except helas_call_writers.HelasWriterError as error:
317 logger.info(error)
318 return None
319
320
321
322 if self.reuse:
323
324 exec(matrix_methods[process.shell_string()], globals())
325 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
326 else:
327
328 exec(matrix_methods[process.shell_string()], globals())
329
330 if not p:
331 p, w_rambo = self.get_momenta(process, options)
332
333 exec("data = Matrix_%s()" % process.shell_string(), globals())
334 if output == "m2":
335 return data.smatrix(p, self.full_model), data.amp2
336 else:
337 m2 = data.smatrix(p,self.full_model)
338 return {'m2': m2, output:getattr(data, output)}
339
340 @staticmethod
342 """ Check whether the specified kinematic point passes isolation cuts
343 """
344
345 def Pt(pmom):
346 """ Computes the pt of a 4-momentum"""
347 return math.sqrt(pmom[1]**2+pmom[2]**2)
348
349 def DeltaR(p1,p2):
350 """ Computes the DeltaR between two 4-momenta"""
351
352 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
353 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
354 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
355 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
356
357 phi1=math.atan2(p1[2],p1[1])
358 phi2=math.atan2(p2[2],p2[1])
359 dphi=abs(phi2-phi1)
360
361 dphi=abs(abs(dphi-math.pi)-math.pi)
362
363 return math.sqrt(dphi**2+(eta2-eta1)**2)
364
365 for i, pmom in enumerate(pmoms[2:]):
366
367 if Pt(pmom)<ptcut:
368 return False
369
370 for pmom2 in pmoms[3+i:]:
371 if DeltaR(pmom,pmom2)<drcut:
372 return False
373 return True
374
375
376
377
378 - def get_momenta(self, process, options=None, special_mass=None):
379 """Get a point in phase space for the external states in the given
380 process, with the CM energy given. The incoming particles are
381 assumed to be oriented along the z axis, with particle 1 along the
382 positive z axis.
383 For the CMS check, one must be able to chose the mass of the special
384 resonance particle with id = -1, and the special_mass option allows
385 to specify it."""
386
387 if not options:
388 energy=1000
389 events=None
390 else:
391 energy = options['energy']
392 events = options['events']
393 to_skip = options['skip_evt']
394
395 if not (isinstance(process, base_objects.Process) and \
396 isinstance(energy, (float,int))):
397 raise rambo.RAMBOError("Not correct type for arguments to get_momenta")
398
399
400 sorted_legs = sorted(process.get('legs'), key=lambda l: l.get('number'))
401
402
403 if events:
404 ids = [l.get('id') for l in sorted_legs]
405 import MadSpin.decay as madspin
406 if not hasattr(self, 'event_file') or self.event_file.inputfile.closed:
407 print( "reset")
408 fsock = open(events)
409 self.event_file = madspin.Event(fsock)
410
411 skip = 0
412 while self.event_file.get_next_event() != 'no_event':
413 event = self.event_file.particle
414
415 event_ids = [p['pid'] for p in event.values()]
416 if event_ids == ids:
417 skip += 1
418 if skip > to_skip:
419 break
420 else:
421 raise MadGraph5Error('No compatible events for %s' % ids)
422 p = []
423 for part in event.values():
424 m = part['momentum']
425 p.append([m.E, m.px, m.py, m.pz])
426 fsock.close()
427 return p, 1
428
429 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
430 nfinal = len(sorted_legs) - nincoming
431
432
433 mass = []
434 for l in sorted_legs:
435 if l.get('id') != 0:
436 mass_string = self.full_model.get_particle(l.get('id')).get('mass')
437 mass.append(self.full_model.get('parameter_dict')[mass_string].real)
438 else:
439 if isinstance(special_mass, float):
440 mass.append(special_mass)
441 else:
442 raise Exception("A 'special_mass' option must be specified"+\
443 " in get_momenta when a leg with id=-10 is present (for CMS check)")
444
445
446
447
448
449
450
451
452
453 energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
454
455
456
457
458
459
460
461 if nfinal == 1:
462 p = []
463 energy = mass[-1]
464 p.append([energy/2,0,0,energy/2])
465 p.append([energy/2,0,0,-energy/2])
466 p.append([mass[-1],0,0,0])
467 return p, 1.0
468
469 e2 = energy**2
470 m1 = mass[0]
471 p = []
472
473 masses = rambo.FortranList(nfinal)
474 for i in range(nfinal):
475 masses[i+1] = mass[nincoming + i]
476
477 if nincoming == 1:
478
479 p.append([abs(m1), 0., 0., 0.])
480 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
481
482 for i in range(1, nfinal+1):
483 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
484 p_rambo[(2,i)], p_rambo[(3,i)]]
485 p.append(momi)
486
487 return p, w_rambo
488
489 if nincoming != 2:
490 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
491
492 if nfinal == 1:
493 energy = masses[1]
494 if masses[1] == 0.0:
495 raise rambo.RAMBOError('The kinematic 2 > 1 with the final'+\
496 ' state particle massless is invalid')
497
498 e2 = energy**2
499 m2 = mass[1]
500
501 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
502 2*m1**2*m2**2 + m2**4) / (4*e2))
503 e1 = math.sqrt(mom**2+m1**2)
504 e2 = math.sqrt(mom**2+m2**2)
505
506 p.append([e1, 0., 0., mom])
507 p.append([e2, 0., 0., -mom])
508
509 if nfinal == 1:
510 p.append([energy, 0., 0., 0.])
511 return p, 1.
512
513 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
514
515
516 for i in range(1, nfinal+1):
517 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
518 p_rambo[(2,i)], p_rambo[(3,i)]]
519 p.append(momi)
520
521 return p, w_rambo
522
528 """Class taking care of matrix element evaluation for loop processes."""
529
530 - def __init__(self,cuttools_dir=None, output_path=None, tir_dir={},
531 cmd=FakeInterface(),*args,**kwargs):
532 """Allow for initializing the MG5 root where the temporary fortran
533 output for checks is placed."""
534
535 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
536
537 self.mg_root=self.cmd._mgme_dir
538
539 if output_path is None:
540 self.output_path = self.cmd._mgme_dir
541 else:
542 self.output_path = output_path
543
544 self.cuttools_dir=cuttools_dir
545 self.tir_dir=tir_dir
546 self.loop_optimized_output = cmd.options['loop_optimized_output']
547
548
549 self.proliferate=True
550
551
552
553
554 - def evaluate_matrix_element(self, matrix_element, p=None, options=None,
555 gauge_check=False, auth_skipping=None, output='m2',
556 PS_name = None, MLOptions={}):
557 """Calculate the matrix element and evaluate it for a phase space point
558 Output can only be 'm2. The 'jamp' and 'amp' returned values are just
559 empty lists at this point.
560 If PS_name is not none the written out PS.input will be saved in
561 the file PS.input_<PS_name> as well."""
562
563 process = matrix_element.get('processes')[0]
564 model = process.get('model')
565
566 if options and 'split_orders' in list(options.keys()):
567 split_orders = options['split_orders']
568 else:
569 split_orders = -1
570
571 if "loop_matrix_elements" not in self.stored_quantities:
572 self.stored_quantities['loop_matrix_elements'] = []
573
574 if (auth_skipping or self.auth_skipping) and matrix_element in \
575 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
576
577 logger.info("Skipping %s, " % process.nice_string() + \
578 "identical matrix element already tested" )
579 return None
580
581
582 if not p:
583 p, w_rambo = self.get_momenta(process, options=options)
584
585 if matrix_element in [el[0] for el in \
586 self.stored_quantities['loop_matrix_elements']]:
587 export_dir=self.stored_quantities['loop_matrix_elements'][\
588 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
589 ].index(matrix_element)][1]
590 logger.debug("Reusing generated output %s"%str(export_dir))
591 else:
592 export_dir=pjoin(self.output_path,temp_dir_prefix)
593 if os.path.isdir(export_dir):
594 if not self.proliferate:
595 raise InvalidCmd("The directory %s already exist. Please remove it."%str(export_dir))
596 else:
597 id=1
598 while os.path.isdir(pjoin(self.output_path,\
599 '%s_%i'%(temp_dir_prefix,id))):
600 id+=1
601 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
602
603 if self.proliferate:
604 self.stored_quantities['loop_matrix_elements'].append(\
605 (matrix_element,export_dir))
606
607
608
609 import madgraph.loop.loop_exporters as loop_exporters
610 if self.loop_optimized_output:
611 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
612 else:
613 exporter_class=loop_exporters.LoopProcessExporterFortranSA
614
615 MLoptions = {'clean': True,
616 'complex_mass': self.cmass_scheme,
617 'export_format':'madloop',
618 'mp':True,
619 'SubProc_prefix':'P',
620 'compute_color_flows': not process.get('has_born'),
621 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
622 'cuttools_dir': self.cuttools_dir,
623 'fortran_compiler': self.cmd.options['fortran_compiler'],
624 'output_dependencies': self.cmd.options['output_dependencies']}
625
626 MLoptions.update(self.tir_dir)
627
628 FortranExporter = exporter_class(export_dir, MLoptions)
629 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
630 FortranExporter.copy_template(model)
631 FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
632 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
633 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
634 for c in l]))
635 FortranExporter.convert_model(model,wanted_lorentz,wanted_couplings)
636 FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
637
638 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
639 split_orders=split_orders)
640
641 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
642 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
643
644 if gauge_check:
645 file_path, orig_file_content, new_file_content = \
646 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
647 ['helas_calls_ampb_1.f','loop_matrix.f'])
648 file = open(file_path,'w')
649 file.write(new_file_content)
650 file.close()
651 if self.loop_optimized_output:
652 mp_file_path, mp_orig_file_content, mp_new_file_content = \
653 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
654 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
655 mp_file = open(mp_file_path,'w')
656 mp_file.write(mp_new_file_content)
657 mp_file.close()
658
659
660 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
661 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
662
663
664 if gauge_check:
665 file = open(file_path,'w')
666 file.write(orig_file_content)
667 file.close()
668 if self.loop_optimized_output:
669 mp_file = open(mp_file_path,'w')
670 mp_file.write(mp_orig_file_content)
671 mp_file.close()
672
673
674 if not self.proliferate:
675 shutil.rmtree(export_dir)
676
677 if output == "m2":
678
679
680 return finite_m2, []
681 else:
682 return {'m2': finite_m2, output:[]}
683
684 - def fix_MadLoopParamCard(self,dir_name, mp=False, loop_filter=False,
685 DoubleCheckHelicityFilter=False, MLOptions={}):
686 """ Set parameters in MadLoopParams.dat suited for these checks.MP
687 stands for multiple precision and can either be a bool or an integer
688 to specify the mode."""
689
690
691 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
692 MLCard = bannermod.MadLoopParam(file)
693
694 if isinstance(mp,bool):
695 mode = 4 if mp else 1
696 else:
697 mode = mp
698
699 for key, value in MLOptions.items():
700 if key == "MLReductionLib":
701 if isinstance(value, int):
702 ml_reds = str(value)
703 if isinstance(value,list):
704 if len(value)==0:
705 ml_reds = '1'
706 else:
707 ml_reds="|".join([str(vl) for vl in value])
708 elif isinstance(value, str):
709 ml_reds = value
710 elif isinstance(value, int):
711 ml_reds = str(value)
712 else:
713 raise MadGraph5Error('The argument %s '%str(value)+\
714 ' in fix_MadLoopParamCard must be a string, integer'+\
715 ' or a list.')
716 MLCard.set("MLReductionLib",ml_reds)
717 elif key == 'ImprovePS':
718 MLCard.set('ImprovePSPoint',2 if value else -1)
719 elif key == 'ForceMP':
720 mode = 4
721 elif key in MLCard:
722 MLCard.set(key,value)
723 else:
724 raise Exception('The MadLoop options %s specified in function'%key+\
725 ' fix_MadLoopParamCard does not correspond to an option defined'+\
726 ' MadLoop nor is it specially handled in this function.')
727 if not mode is None:
728 MLCard.set('CTModeRun',mode)
729 MLCard.set('CTModeInit',mode)
730 MLCard.set('UseLoopFilter',loop_filter)
731 MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
732
733 MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
734
735 @classmethod
736 - def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name = None,
737 verbose=True, format='tuple', skip_compilation=False):
738 """Compile and run ./check, then parse the output and return the result
739 for process with id = proc_id and PSpoint if specified.
740 If PS_name is not none the written out PS.input will be saved in
741 the file PS.input_<PS_name> as well"""
742 if verbose:
743 sys.stdout.write('.')
744 sys.stdout.flush()
745
746 shell_name = None
747 directories = misc.glob('P%i_*' % proc_id, pjoin(working_dir, 'SubProcesses'))
748 if directories and os.path.isdir(directories[0]):
749 shell_name = os.path.basename(directories[0])
750
751
752 if not shell_name:
753 logging.info("Directory hasn't been created for process %s: %s", proc, directories)
754 return ((0.0, 0.0, 0.0, 0.0, 0), [])
755
756 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
757
758 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
759 if not skip_compilation:
760
761 if os.path.isfile(pjoin(dir_name,'check')):
762 os.remove(pjoin(dir_name,'check'))
763 try:
764 os.remove(pjoin(dir_name,'check_sa.o'))
765 os.remove(pjoin(dir_name,'loop_matrix.o'))
766 except OSError:
767 pass
768
769 devnull = open(os.devnull, 'w')
770 retcode = subprocess.call(['make','check'],
771 cwd=dir_name, stdout=devnull, stderr=devnull)
772 devnull.close()
773
774 if retcode != 0:
775 logging.info("Error while executing make in %s" % shell_name)
776 return ((0.0, 0.0, 0.0, 0.0, 0), [])
777
778
779 if PSpoint:
780 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
781
782
783 if not PS_name is None:
784 misc.write_PS_input(pjoin(dir_name, \
785 'PS.input_%s'%PS_name),PSpoint)
786
787 try:
788 output = subprocess.Popen('./check',
789 cwd=dir_name,
790 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
791 output.read()
792 output.close()
793 if os.path.exists(pjoin(dir_name,'result.dat')):
794 return cls.parse_check_output(open(pjoin(dir_name,\
795 'result.dat')),format=format)
796 else:
797 logging.warning("Error while looking for file %s"%str(os.path\
798 .join(dir_name,'result.dat')))
799 return ((0.0, 0.0, 0.0, 0.0, 0), [])
800 except IOError:
801 logging.warning("Error while executing ./check in %s" % shell_name)
802 return ((0.0, 0.0, 0.0, 0.0, 0), [])
803
804 @classmethod
806 """Parse the output string and return a pair where first four values are
807 the finite, born, single and double pole of the ME and the fourth is the
808 GeV exponent and the second value is a list of 4 momenta for all particles
809 involved. Return the answer in two possible formats, 'tuple' or 'dict'."""
810
811 res_dict = {'res_p':[],
812 'born':0.0,
813 'finite':0.0,
814 '1eps':0.0,
815 '2eps':0.0,
816 'gev_pow':0,
817 'export_format':'Default',
818 'accuracy':0.0,
819 'return_code':0,
820 'Split_Orders_Names':[],
821 'Loop_SO_Results':[],
822 'Born_SO_Results':[],
823 'Born_kept':[],
824 'Loop_kept':[]
825 }
826 res_p = []
827
828
829
830 if isinstance(output,(file,io.TextIOWrapper)) or isinstance(output,list):
831 text=output
832 elif isinstance(output,(str)) or (six.PY2 and isinstance(output, six.text_type)):
833 text=output.split('\n')
834 elif isinstance(output, bytes):
835 text=output.decode(errors='ignore').split('\n')
836 else:
837 raise MadGraph5Error('Type for argument output not supported in'+\
838 ' parse_check_output: %s' % type(output))
839 for line in text:
840 splitline=line.split()
841 if len(splitline)==0:
842 continue
843 elif splitline[0]=='PS':
844 res_p.append([float(s) for s in splitline[1:]])
845 elif splitline[0]=='ASO2PI':
846 res_dict['alphaS_over_2pi']=float(splitline[1])
847 elif splitline[0]=='BORN':
848 res_dict['born']=float(splitline[1])
849 elif splitline[0]=='FIN':
850 res_dict['finite']=float(splitline[1])
851 elif splitline[0]=='1EPS':
852 res_dict['1eps']=float(splitline[1])
853 elif splitline[0]=='2EPS':
854 res_dict['2eps']=float(splitline[1])
855 elif splitline[0]=='EXP':
856 res_dict['gev_pow']=int(splitline[1])
857 elif splitline[0]=='Export_Format':
858 res_dict['export_format']=splitline[1]
859 elif splitline[0]=='ACC':
860 res_dict['accuracy']=float(splitline[1])
861 elif splitline[0]=='RETCODE':
862 res_dict['return_code']=int(splitline[1])
863 elif splitline[0]=='Split_Orders_Names':
864 res_dict['Split_Orders_Names']=splitline[1:]
865 elif splitline[0] in ['Born_kept', 'Loop_kept']:
866 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
867 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
868
869
870
871
872 res_dict[splitline[0]].append(\
873 ([int(el) for el in splitline[1:]],{}))
874 elif splitline[0]=='SO_Loop':
875 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
876 float(splitline[2])
877 elif splitline[0]=='SO_Born':
878 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
879 float(splitline[2])
880
881 res_dict['res_p'] = res_p
882
883 if format=='tuple':
884 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
885 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
886 else:
887 return res_dict
888
889 @staticmethod
891 """ Changes the file model_functions.f in the SOURCE of the process output
892 so as to change how logarithms are analytically continued and see how
893 it impacts the CMS check."""
894 valid_modes = ['default','recompile']
895 if not (mode in valid_modes or (isinstance(mode, list) and
896 len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
897 raise MadGraph5Error("Mode '%s' not reckonized"%mode+
898 " in function apply_log_tweak.")
899
900 model_path = pjoin(proc_path,'Source','MODEL')
901 directories = misc.glob('P0_*', pjoin(proc_path,'SubProcesses'))
902 if directories and os.path.isdir(directories[0]):
903 exe_path = directories[0]
904 else:
905 raise MadGraph5Error('Could not find a process executable '+\
906 'directory in %s'%proc_dir)
907 bu_path = pjoin(model_path, 'model_functions.f__backUp__')
908
909 if mode=='default':
910
911 if not os.path.isfile(bu_path):
912 raise MadGraph5Error('Back up file %s could not be found.'%bu_path)
913 shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
914 return
915
916 if mode=='recompile':
917 try:
918 os.remove(pjoin(model_path,'model_functions.o'))
919 os.remove(pjoin(proc_path,'lib','libmodel.a'))
920 except:
921 pass
922 misc.compile(cwd=model_path)
923
924 try:
925 os.remove(pjoin(exe_path,'check'))
926 except:
927 pass
928 misc.compile(arg=['check'], cwd=exe_path)
929 return
930
931 if mode[0]==mode[1]:
932 return
933
934
935 mp_prefix = 'MP_'
936 target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
937
938
939 if not os.path.isfile(bu_path):
940 shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
941 model_functions = open(pjoin(model_path,'model_functions.f'),'r')
942
943 new_model_functions = []
944 has_replaced = False
945 just_replaced = False
946 find_one_replacement= False
947 mp_mode = None
948 suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
949 replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
950 for line in model_functions:
951
952 if just_replaced:
953 if not re.match(r'\s{6}', line):
954 continue
955 else:
956 just_replaced = False
957 if mp_mode is None:
958
959 new_model_functions.append(line)
960 if (target_line%mp_prefix).lower() in line.lower():
961 mp_mode = mp_prefix
962 elif (target_line%'').lower() in line.lower():
963 mp_mode = ''
964 else:
965
966 if not has_replaced and re.match(replace_regex%mp_mode,line,
967 re.IGNORECASE):
968
969 if mode[0]=='log':
970 if mp_mode=='':
971 new_line =\
972 """ if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
973 reg%s=log(arg) %s TWOPII
974 else
975 reg%s=log(arg)
976 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
977 else:
978 new_line =\
979 """ if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
980 mp_reg%s=log(arg) %s TWOPII
981 else
982 mp_reg%s=log(arg)
983 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
984 else:
985 new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
986 ('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
987 new_model_functions.append(new_line)
988 just_replaced = True
989 has_replaced = True
990 find_one_replacement = True
991 else:
992 new_model_functions.append(line)
993 if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
994 mp_mode = None
995 has_replaced = False
996
997 if not find_one_replacement:
998 logger.warning('No replacement was found/performed for token '+
999 "'%s->%s'."%(mode[0],mode[1]))
1000 else:
1001 open(pjoin(model_path,'model_functions.f'),'w').\
1002 write(''.join(new_model_functions))
1003 return
1004
1006 """ Modify loop_matrix.f so to have one external massless gauge boson
1007 polarization vector turned into its momentum. It is not a pretty and
1008 flexible solution but it works for this particular case."""
1009
1010 shell_name = None
1011 directories = misc.glob('P0_*', working_dir)
1012 if directories and os.path.isdir(directories[0]):
1013 shell_name = os.path.basename(directories[0])
1014
1015 dir_name = pjoin(working_dir, shell_name)
1016
1017
1018 ind=0
1019 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
1020 file_names[ind])):
1021 ind += 1
1022 if ind==len(file_names):
1023 raise Exception("No helas calls output file found.")
1024
1025 helas_file_name=pjoin(dir_name,file_names[ind])
1026 file = open(pjoin(dir_name,helas_file_name), 'r')
1027
1028 helas_calls_out=""
1029 original_file=""
1030 gaugeVectorRegExp=re.compile(\
1031 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
1032 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
1033 foundGauge=False
1034
1035 for line in file:
1036 helas_calls_out+=line
1037 original_file+=line
1038 if line.find("INCLUDE 'coupl.inc'") != -1 or \
1039 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
1040 helas_calls_out+=" INTEGER WARDINT\n"
1041 if not foundGauge:
1042 res=gaugeVectorRegExp.search(line)
1043 if res!=None:
1044 foundGauge=True
1045 helas_calls_out+=" DO WARDINT=1,4\n"
1046 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
1047 if not mp:
1048 helas_calls_out+=\
1049 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
1050 else:
1051 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1052 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1053 helas_calls_out+=" ENDDO\n"
1054 file.close()
1055
1056 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
1057
1062 """Class taking care of matrix element evaluation and running timing for
1063 loop processes."""
1064
1068
1069 @classmethod
1071 """ Return a dictionary of the parameter of the MadLoopParamCard.
1072 The key is the name of the parameter and the value is the corresponding
1073 string read from the card."""
1074
1075 return bannermod.MadLoopParam(MLCardPath)
1076
1077
1078 @classmethod
1080 """ Set the parameters in MadLoopParamCard to the values specified in
1081 the dictionary params.
1082 The key is the name of the parameter and the value is the corresponding
1083 string to write in the card."""
1084
1085 MLcard = bannermod.MadLoopParam(MLCardPath)
1086 for key,value in params.items():
1087 MLcard.set(key, value, changeifuserset=False)
1088 MLcard.write(MLCardPath, commentdefault=True)
1089
1091 """ Edit loop_matrix.f in order to skip the loop evaluation phase.
1092 Notice this only affects the double precision evaluation which is
1093 normally fine as we do not make the timing check on mp."""
1094
1095 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1096 loop_matrix = file.read()
1097 file.close()
1098
1099 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1100 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1101 if skip else '.FALSE.'), loop_matrix)
1102 file.write(loop_matrix)
1103 file.close()
1104
1106 """ Edit loop_matrix.f in order to set the flag which stops the
1107 execution after booting the program (i.e. reading the color data)."""
1108
1109 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1110 loop_matrix = file.read()
1111 file.close()
1112
1113 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1114 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1115 if bootandstop else '.FALSE.'), loop_matrix)
1116 file.write(loop_matrix)
1117 file.close()
1118
1119 - def setup_process(self, matrix_element, export_dir, reusing = False,
1120 param_card = None, MLOptions={},clean=True):
1121 """ Output the matrix_element in argument and perform the initialization
1122 while providing some details about the output in the dictionary returned.
1123 Returns None if anything fails"""
1124
1125 infos={'Process_output': None,
1126 'HELAS_MODEL_compilation' : None,
1127 'dir_path' : None,
1128 'Initialization' : None,
1129 'Process_compilation' : None}
1130
1131 if not reusing and clean:
1132 if os.path.isdir(export_dir):
1133 clean_up(self.output_path)
1134 if os.path.isdir(export_dir):
1135 raise InvalidCmd(\
1136 "The directory %s already exist. Please remove it."\
1137 %str(export_dir))
1138 else:
1139 if not os.path.isdir(export_dir):
1140 raise InvalidCmd(\
1141 "Could not find the directory %s to reuse."%str(export_dir))
1142
1143
1144 if not reusing and clean:
1145 model = matrix_element['processes'][0].get('model')
1146
1147
1148 import madgraph.loop.loop_exporters as loop_exporters
1149 if self.loop_optimized_output:
1150 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1151 else:
1152 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1153
1154 MLoptions = {'clean': True,
1155 'complex_mass': self.cmass_scheme,
1156 'export_format':'madloop',
1157 'mp':True,
1158 'SubProc_prefix':'P',
1159 'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
1160 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1161 'cuttools_dir': self.cuttools_dir,
1162 'fortran_compiler':self.cmd.options['fortran_compiler'],
1163 'output_dependencies':self.cmd.options['output_dependencies']}
1164
1165 MLoptions.update(self.tir_dir)
1166
1167 start=time.time()
1168 FortranExporter = exporter_class(export_dir, MLoptions)
1169 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1170 FortranExporter.copy_template(model)
1171 FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
1172 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1173 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1174 for c in l]))
1175 FortranExporter.convert_model(self.full_model,wanted_lorentz,wanted_couplings)
1176 infos['Process_output'] = time.time()-start
1177 start=time.time()
1178 FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
1179 infos['HELAS_MODEL_compilation'] = time.time()-start
1180
1181
1182 if param_card != None:
1183 if isinstance(param_card, str):
1184 cp(pjoin(param_card),\
1185 pjoin(export_dir,'Cards','param_card.dat'))
1186 else:
1187 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1188
1189
1190
1191 MadLoopInitializer.fix_PSPoint_in_check(
1192 pjoin(export_dir,'SubProcesses'), read_ps = False, npoints = 4)
1193
1194 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1195 mp = False, loop_filter = True,MLOptions=MLOptions)
1196
1197 shell_name = None
1198 directories = misc.glob('P0_*', pjoin(export_dir, 'SubProcesses'))
1199 if directories and os.path.isdir(directories[0]):
1200 shell_name = os.path.basename(directories[0])
1201 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1202 infos['dir_path']=dir_name
1203
1204
1205
1206 if not MadLoopInitializer.need_MadLoopInit(
1207 export_dir, subproc_prefix='P'):
1208 return infos
1209
1210 attempts = [3,15]
1211
1212 try:
1213 os.remove(pjoin(dir_name,'check'))
1214 os.remove(pjoin(dir_name,'check_sa.o'))
1215 except OSError:
1216 pass
1217
1218 nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
1219 pjoin(export_dir,'SubProcesses'),infos,\
1220 req_files = ['HelFilter.dat','LoopFilter.dat'],
1221 attempts = attempts)
1222 if attempts is None:
1223 logger.error("Could not compile the process %s,"%shell_name+\
1224 " try to generate it via the 'generate' command.")
1225 return None
1226 if nPS_necessary is None:
1227 logger.error("Could not initialize the process %s"%shell_name+\
1228 " with %s PS points."%max(attempts))
1229 return None
1230 elif nPS_necessary > min(attempts):
1231 logger.warning("Could not initialize the process %s"%shell_name+\
1232 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1233
1234 return infos
1235
1236 - def time_matrix_element(self, matrix_element, reusing = False,
1237 param_card = None, keep_folder = False, options=None,
1238 MLOptions = {}):
1239 """ Output the matrix_element in argument and give detail information
1240 about the timing for its output and running"""
1241
1242
1243
1244 make_it_quick=False
1245
1246 if options and 'split_orders' in list(options.keys()):
1247 split_orders = options['split_orders']
1248 else:
1249 split_orders = -1
1250
1251 assert ((not reusing and isinstance(matrix_element, \
1252 helas_objects.HelasMatrixElement)) or (reusing and
1253 isinstance(matrix_element, base_objects.Process)))
1254 if not reusing:
1255 proc_name = matrix_element['processes'][0].shell_string()[2:]
1256 else:
1257 proc_name = matrix_element.shell_string()[2:]
1258
1259 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1260 temp_dir_prefix+"_%s"%proc_name)
1261
1262 res_timings = self.setup_process(matrix_element,export_dir, \
1263 reusing, param_card,MLOptions = MLOptions,clean=True)
1264
1265 if res_timings == None:
1266 return None
1267 dir_name=res_timings['dir_path']
1268
1269 def check_disk_usage(path):
1270 return subprocess.Popen("du -shc -L "+str(path), \
1271 stdout=subprocess.PIPE, shell=True).communicate()[0].decode(errors='ignore').split()[-2]
1272
1273
1274
1275
1276
1277 res_timings['du_source']=check_disk_usage(pjoin(\
1278 export_dir,'Source','*','*.f'))
1279 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1280 res_timings['du_color']=check_disk_usage(pjoin(dir_name,
1281 'MadLoop5_resources','*.dat'))
1282 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1283
1284 if not res_timings['Initialization']==None:
1285 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1286 elif make_it_quick:
1287 time_per_ps_estimate = -1.0
1288 else:
1289
1290
1291 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1292 read_ps = False, npoints = 3, hel_config = -1,
1293 split_orders=split_orders)
1294 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1295 time_per_ps_estimate = run_time/3.0
1296
1297 self.boot_time_setup(dir_name,bootandstop=True)
1298 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1299 res_timings['Booting_time'] = run_time
1300 self.boot_time_setup(dir_name,bootandstop=False)
1301
1302
1303 contributing_hel=0
1304 n_contrib_hel=0
1305 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1306 proc_prefix = proc_prefix_file.read()
1307 proc_prefix_file.close()
1308 helicities = open(pjoin(dir_name,'MadLoop5_resources',
1309 '%sHelFilter.dat'%proc_prefix)).read().split()
1310 for i, hel in enumerate(helicities):
1311 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1312 if contributing_hel==0:
1313 contributing_hel=i+1
1314 n_contrib_hel += 1
1315
1316 if contributing_hel==0:
1317 logger.error("Could not find a contributing helicity "+\
1318 "configuration for process %s."%proc_name)
1319 return None
1320
1321 res_timings['n_contrib_hel']=n_contrib_hel
1322 res_timings['n_tot_hel']=len(helicities)
1323
1324
1325 if not make_it_quick:
1326 target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,50)
1327 else:
1328 target_pspoints_number = 10
1329
1330 logger.info("Checking timing for process %s "%proc_name+\
1331 "with %d PS points."%target_pspoints_number)
1332
1333 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1334 read_ps = False, npoints = target_pspoints_number*2, \
1335 hel_config = contributing_hel, split_orders=split_orders)
1336 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1337
1338 if compile_time == None: return None
1339
1340 res_timings['run_polarized_total']=\
1341 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1342
1343 if make_it_quick:
1344 res_timings['run_unpolarized_total'] = 1.0
1345 res_timings['ram_usage'] = 0.0
1346 else:
1347 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1348 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1349 split_orders=split_orders)
1350 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
1351 checkRam=True)
1352
1353 if compile_time == None: return None
1354 res_timings['run_unpolarized_total']=\
1355 (run_time-res_timings['Booting_time'])/target_pspoints_number
1356 res_timings['ram_usage'] = ram_usage
1357
1358 if not self.loop_optimized_output:
1359 return res_timings
1360
1361
1362
1363
1364
1365 self.skip_loop_evaluation_setup(dir_name,skip=True)
1366
1367 if make_it_quick:
1368 res_timings['run_unpolarized_coefs'] = 1.0
1369 else:
1370 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1371 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1372 split_orders=split_orders)
1373 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1374 if compile_time == None: return None
1375 res_timings['run_unpolarized_coefs']=\
1376 (run_time-res_timings['Booting_time'])/target_pspoints_number
1377
1378 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1379 read_ps = False, npoints = target_pspoints_number*2, \
1380 hel_config = contributing_hel, split_orders=split_orders)
1381 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1382 if compile_time == None: return None
1383 res_timings['run_polarized_coefs']=\
1384 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1385
1386
1387 self.skip_loop_evaluation_setup(dir_name,skip=False)
1388
1389 return res_timings
1390
1391
1392
1393
1394
1395 - def check_matrix_element_stability(self, matrix_element,options=None,
1396 infos_IN = None, param_card = None, keep_folder = False,
1397 MLOptions = {}):
1398 """ Output the matrix_element in argument, run in for nPoints and return
1399 a dictionary containing the stability information on each of these points.
1400 If infos are provided, then the matrix element output is skipped and
1401 reused from a previous run and the content of infos.
1402 """
1403
1404 if not options:
1405 reusing = False
1406 nPoints = 100
1407 split_orders = -1
1408 else:
1409 reusing = options['reuse']
1410 nPoints = options['npoints']
1411 split_orders = options['split_orders']
1412
1413 assert ((not reusing and isinstance(matrix_element, \
1414 helas_objects.HelasMatrixElement)) or (reusing and
1415 isinstance(matrix_element, base_objects.Process)))
1416
1417
1418 def format_PS_point(ps, rotation=0):
1419 """ Write out the specified PS point to the file dir_path/PS.input
1420 while rotating it if rotation!=0. We consider only rotations of 90
1421 but one could think of having rotation of arbitrary angle too.
1422 The first two possibilities, 1 and 2 are a rotation and boost
1423 along the z-axis so that improve_ps can still work.
1424 rotation=0 => No rotation
1425 rotation=1 => Z-axis pi/2 rotation
1426 rotation=2 => Z-axis pi/4 rotation
1427 rotation=3 => Z-axis boost
1428 rotation=4 => (x'=z,y'=-x,z'=-y)
1429 rotation=5 => (x'=-z,y'=y,z'=x)"""
1430 if rotation==0:
1431 p_out=copy.copy(ps)
1432 elif rotation==1:
1433 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1434 elif rotation==2:
1435 sq2 = math.sqrt(2.0)
1436 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1437 elif rotation==3:
1438 p_out = boost_momenta(ps, 3)
1439
1440
1441 elif rotation==4:
1442 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1443 elif rotation==5:
1444 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1445 else:
1446 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1447
1448 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
1449
1450 def pick_PS_point(proc, options):
1451 """ Randomly generate a PS point and make sure it is eligible. Then
1452 return it. Users can edit the cuts here if they want."""
1453
1454 p, w_rambo = self.get_momenta(proc, options)
1455 if options['events']:
1456 return p
1457
1458 while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
1459 p, w_rambo = self.get_momenta(proc, options)
1460
1461
1462
1463
1464 if len(p)==3:
1465 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1466 return p
1467
1468
1469
1470
1471 accuracy_threshold=1.0e-1
1472
1473
1474
1475 num_rotations = 1
1476
1477 if "MLReductionLib" not in MLOptions:
1478 tools=[1]
1479 else:
1480 tools=MLOptions["MLReductionLib"]
1481 tools=list(set(tools))
1482
1483
1484 tool_var={'pjfry':2,'golem':4,'samurai':5,'ninja':6,'collier':7}
1485 for tool in ['pjfry','golem','samurai','ninja','collier']:
1486 tool_dir='%s_dir'%tool
1487 if not tool_dir in self.tir_dir:
1488 continue
1489 tool_libpath=self.tir_dir[tool_dir]
1490 tool_libname="lib%s.a"%tool
1491 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1492 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1493 if tool_var[tool] in tools:
1494 tools.remove(tool_var[tool])
1495 if not tools:
1496 return None
1497
1498
1499 if not reusing:
1500 process = matrix_element['processes'][0]
1501 else:
1502 process = matrix_element
1503 proc_name = process.shell_string()[2:]
1504 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1505 temp_dir_prefix+"_%s"%proc_name)
1506
1507 tools_name=bannermod.MadLoopParam._ID_reduction_tool_map
1508
1509 return_dict={}
1510 return_dict['Stability']={}
1511 infos_save={'Process_output': None,
1512 'HELAS_MODEL_compilation' : None,
1513 'dir_path' : None,
1514 'Initialization' : None,
1515 'Process_compilation' : None}
1516
1517 for tool in tools:
1518 tool_name=tools_name[tool]
1519
1520
1521
1522
1523
1524 DP_stability = []
1525 QP_stability = []
1526
1527 Unstable_PS_points = []
1528
1529 Exceptional_PS_points = []
1530
1531 MLoptions=MLOptions
1532 MLoptions["MLReductionLib"]=tool
1533 clean = (tool==tools[0]) and not nPoints==0
1534 if infos_IN==None or (tool_name not in infos_IN):
1535 infos=infos_IN
1536 else:
1537 infos=infos_IN[tool_name]
1538
1539 if not infos:
1540 infos = self.setup_process(matrix_element,export_dir, \
1541 reusing, param_card,MLoptions,clean)
1542 if not infos:
1543 return None
1544
1545 if clean:
1546 infos_save['Process_output']=infos['Process_output']
1547 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1548 infos_save['dir_path']=infos['dir_path']
1549 infos_save['Process_compilation']=infos['Process_compilation']
1550 else:
1551 if not infos['Process_output']:
1552 infos['Process_output']=infos_save['Process_output']
1553 if not infos['HELAS_MODEL_compilation']:
1554 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1555 if not infos['dir_path']:
1556 infos['dir_path']=infos_save['dir_path']
1557 if not infos['Process_compilation']:
1558 infos['Process_compilation']=infos_save['Process_compilation']
1559
1560 dir_path=infos['dir_path']
1561
1562
1563 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1564 data_i = 0
1565
1566 if reusing:
1567
1568 data_i=0
1569 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1570 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1571 saved_run = save_load_object.load_from_file(pickle_path)
1572 if data_i>0:
1573 logger.info("Loading additional data stored in %s."%
1574 str(pickle_path))
1575 logger.info("Loaded data moved to %s."%str(pjoin(
1576 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1577 shutil.move(pickle_path,
1578 pjoin(dir_path,'LOADED_'+savefile%('%d'%data_i)))
1579 DP_stability.extend(saved_run['DP_stability'])
1580 QP_stability.extend(saved_run['QP_stability'])
1581 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1582 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1583 data_i += 1
1584
1585 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1586 'QP_stability':QP_stability,
1587 'Unstable_PS_points':Unstable_PS_points,
1588 'Exceptional_PS_points':Exceptional_PS_points}
1589
1590 if nPoints==0:
1591 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1592
1593 if data_i>1:
1594 save_load_object.save_to_file(pjoin(dir_path,
1595 savefile%'_0'),return_dict['Stability'][tool_name])
1596 continue
1597 else:
1598 logger.info("ERROR: Not reusing a directory or any pickled"+
1599 " result for tool %s and the number"%tool_name+\
1600 " of point for the check is zero.")
1601 return None
1602
1603 logger.info("Checking stability of process %s "%proc_name+\
1604 "with %d PS points by %s."%(nPoints,tool_name))
1605 if infos['Initialization'] != None:
1606 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1607 sec_needed = int(time_per_ps_estimate*nPoints*4)
1608 else:
1609 sec_needed = 0
1610
1611 progress_bar = None
1612 time_info = False
1613 if sec_needed>5:
1614 time_info = True
1615 logger.info("This check should take about "+\
1616 "%s to run. Started on %s."%(\
1617 str(datetime.timedelta(seconds=sec_needed)),\
1618 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1619 if logger.getEffectiveLevel()<logging.WARNING and \
1620 (sec_needed>5 or infos['Initialization'] == None):
1621 widgets = ['Stability check:', pbar.Percentage(), ' ',
1622 pbar.Bar(),' ', pbar.ETA(), ' ']
1623 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1624 fd=sys.stdout)
1625 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1626 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1627
1628
1629
1630 try:
1631 os.remove(pjoin(dir_path,'check'))
1632 os.remove(pjoin(dir_path,'check_sa.o'))
1633 except OSError:
1634 pass
1635
1636 devnull = open(os.devnull, 'w')
1637 retcode = subprocess.call(['make','check'],
1638 cwd=dir_path, stdout=devnull, stderr=devnull)
1639 devnull.close()
1640 if retcode != 0:
1641 logging.info("Error while executing make in %s" % dir_path)
1642 return None
1643
1644
1645
1646
1647 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1648
1649
1650 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1651 checkerName = 'StabilityCheckDriver.f'
1652 else:
1653 checkerName = 'StabilityCheckDriver_loop_induced.f'
1654
1655 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1656 checkerName),'r') as checkerFile:
1657 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1658 checkerToWrite = checkerFile.read()%{'proc_prefix':
1659 proc_prefix.read()}
1660 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1661 checkerFile.write(checkerToWrite)
1662 checkerFile.close()
1663
1664
1665
1666
1667
1668 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1669 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1670 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1671 os.remove(pjoin(dir_path,'loop_matrix.o'))
1672 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1673 mode='fortran', job_specs = False)
1674
1675
1676
1677
1678 if len(process['legs'])==3:
1679 self.fix_MadLoopParamCard(dir_path, mp=False,
1680 loop_filter=False, DoubleCheckHelicityFilter=True)
1681
1682 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1683 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1684 cwd=dir_path, bufsize=0)
1685 start_index = len(DP_stability)
1686 if progress_bar!=None:
1687 progress_bar.start()
1688
1689
1690 interrupted = False
1691
1692
1693 retry = 0
1694
1695 i=start_index
1696 if options and 'events' in options and options['events']:
1697
1698 import MadSpin.decay as madspin
1699 fsock = open(options['events'])
1700 self.event_file = madspin.Event(fsock)
1701 while i<(start_index+nPoints):
1702
1703 qp_dict={}
1704 dp_dict={}
1705 UPS = None
1706 EPS = None
1707
1708 if retry==0:
1709 p = pick_PS_point(process, options)
1710
1711 try:
1712 if progress_bar!=None:
1713 progress_bar.update(i+1-start_index)
1714
1715 PSPoint = format_PS_point(p,0)
1716 dp_res=[]
1717 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1718 split_orders=split_orders))
1719 dp_dict['CTModeA']=dp_res[-1]
1720 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1721 split_orders=split_orders))
1722 dp_dict['CTModeB']=dp_res[-1]
1723 for rotation in range(1,num_rotations+1):
1724 PSPoint = format_PS_point(p,rotation)
1725 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1726 split_orders=split_orders))
1727 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1728
1729 if any([not res for res in dp_res]):
1730 return None
1731 dp_accuracy =((max(dp_res)-min(dp_res))/
1732 abs(sum(dp_res)/len(dp_res)))
1733 dp_dict['Accuracy'] = dp_accuracy
1734 if dp_accuracy>accuracy_threshold:
1735 if tool in [1,6]:
1736
1737 UPS = [i,p]
1738 qp_res=[]
1739 PSPoint = format_PS_point(p,0)
1740 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1741 split_orders=split_orders))
1742 qp_dict['CTModeA']=qp_res[-1]
1743 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1744 split_orders=split_orders))
1745 qp_dict['CTModeB']=qp_res[-1]
1746 for rotation in range(1,num_rotations+1):
1747 PSPoint = format_PS_point(p,rotation)
1748 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1749 split_orders=split_orders))
1750 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1751
1752 if any([not res for res in qp_res]):
1753 return None
1754
1755 qp_accuracy = ((max(qp_res)-min(qp_res))/
1756 abs(sum(qp_res)/len(qp_res)))
1757 qp_dict['Accuracy']=qp_accuracy
1758 if qp_accuracy>accuracy_threshold:
1759 EPS = [i,p]
1760 else:
1761
1762
1763 UPS = [i,p]
1764
1765 except KeyboardInterrupt:
1766 interrupted = True
1767 break
1768 except IOError as e:
1769 if e.errno == errno.EINTR:
1770 if retry==100:
1771 logger.error("Failed hundred times consecutively because"+
1772 " of system call interruptions.")
1773 raise
1774 else:
1775 logger.debug("Recovered from a system call interruption."+\
1776 "PSpoint #%i, Attempt #%i."%(i,retry+1))
1777
1778 time.sleep(0.5)
1779
1780 retry = retry+1
1781
1782 try:
1783 StabChecker.kill()
1784 except Exception:
1785 pass
1786 StabChecker = subprocess.Popen(\
1787 [pjoin(dir_path,'StabilityCheckDriver')],
1788 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1789 stderr=subprocess.PIPE, cwd=dir_path, bufsize=0)
1790 continue
1791 else:
1792 raise
1793
1794
1795
1796 retry = 0
1797
1798 i=i+1
1799
1800
1801 DP_stability.append(dp_dict)
1802 QP_stability.append(qp_dict)
1803 if not EPS is None:
1804 Exceptional_PS_points.append(EPS)
1805 if not UPS is None:
1806 Unstable_PS_points.append(UPS)
1807
1808 if progress_bar!=None:
1809 progress_bar.finish()
1810 if time_info:
1811 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1812 "%d-%m-%Y %H:%M"))
1813
1814
1815 if not interrupted:
1816 StabChecker.stdin.write('y\n'.encode())
1817 else:
1818 StabChecker.kill()
1819
1820
1821
1822
1823
1824
1825
1826 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1827 return_dict['Stability'][tool_name])
1828
1829 if interrupted:
1830 break
1831
1832 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1833 reusing else matrix_element
1834 return return_dict
1835
1836 @classmethod
1837 def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1838 split_orders=-1):
1839 """ This version of get_me_value is simplified for the purpose of this
1840 class. No compilation is necessary. The CT mode can be specified."""
1841
1842
1843 StabChecker.stdin.write('\x1a'.encode())
1844 StabChecker.stdin.write('1\n'.encode())
1845 StabChecker.stdin.write(('%d\n'%mode).encode())
1846 StabChecker.stdin.write(('%s\n'%PSpoint).encode())
1847 StabChecker.stdin.write(('%.16E\n'%mu_r).encode())
1848 StabChecker.stdin.write(('%d\n'%hel).encode())
1849 StabChecker.stdin.write(('%d\n'%split_orders).encode())
1850
1851
1852 try:
1853
1854 while True:
1855 output = StabChecker.stdout.readline().decode(errors='ignore')
1856
1857 if output != '':
1858 last_non_empty = output
1859 if output==' ##TAG#RESULT_START#TAG##\n':
1860 break
1861
1862 ret_code = StabChecker.poll()
1863 if not ret_code is None:
1864 output = StabChecker.stdout.readline().decode(errors='ignore')
1865 if output != '':
1866 last_non_empty = output
1867 error = StabChecker.stderr.readline().decode(errors='ignore')
1868 raise MadGraph5Error("The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1869 (ret_code, last_non_empty, error))
1870
1871 res = ""
1872 while True:
1873 output = StabChecker.stdout.readline().decode(errors='ignore')
1874 if output != '':
1875 last_non_empty = output
1876 if str(output)==' ##TAG#RESULT_STOP#TAG##\n':
1877 break
1878 else:
1879 res += output
1880 ret_code = StabChecker.poll()
1881 if not ret_code is None:
1882 output = StabChecker.stdout.readline().decode(errors='ignore')
1883 if output != '':
1884 last_non_empty = output
1885 error = StabChecker.stderr.readline().decode(errors='ignore')
1886 raise MadGraph5Error("The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1887 (ret_code, last_non_empty, error))
1888
1889 return cls.parse_check_output(res,format='tuple')[0][0]
1890 except IOError as e:
1891 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1892 raise e
1893
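# Illustrative usage sketch (an editorial assumption, not part of the original
# module): driving the classmethod above by hand. 'madloop_dir' is a hypothetical
# path to a process directory containing a compiled 'StabilityCheckDriver', and
# 'formatted_ps_point' is a PS point already formatted as the driver expects.
# The enclosing class is assumed to be LoopMatrixElementTimer, as instantiated
# by check_profile further below.
def _example_probe_stability_driver(madloop_dir, formatted_ps_point):
    """Start the driver, evaluate one PS point in CTModeA (mode=1), then stop it."""
    checker = subprocess.Popen([pjoin(madloop_dir, 'StabilityCheckDriver')],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, cwd=madloop_dir, bufsize=0)
    try:
        value = LoopMatrixElementTimer.get_me_value(checker, formatted_ps_point, 1)
        # Same termination handshake as in the stability loop above: 'y' exits cleanly.
        checker.stdin.write('y\n'.encode())
        return value
    except Exception:
        checker.kill()
        raise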
1894 def evaluate_helicities(process, param_card = None, mg_root="",
1895 cmsparam=None):
1896 """ Perform a python evaluation of the matrix element independently for
1897 all possible helicity configurations for a fixed number of points N and
1898 returns the average for each in the format [[hel_config, eval],...].
1899 This is used to determine what are the vanishing and dependent helicity
1900 configurations at generation time and accordingly setup the output.
1901 This is not yet implemented at LO."""
1902
1903
1904 assert isinstance(process,base_objects.Process)
1905 assert process.get('perturbation_couplings')==[]
1906
1907 N_eval=50
1908
1909 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
1910 auth_skipping = False, reuse = True)
1911
1912 amplitude = diagram_generation.Amplitude(process)
1913 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
1914
1915 cumulative_helEvals = []
1916
1917 for i in range(N_eval):
1918 p, w_rambo = evaluator.get_momenta(process)
1919 helEvals = evaluator.evaluate_matrix_element(\
1920 matrix_element, p = p, output = 'helEvals')['helEvals']
1921 if cumulative_helEvals==[]:
1922 cumulative_helEvals=copy.copy(helEvals)
1923 else:
1924 cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
1925 enumerate(cumulative_helEvals)]
1926
1927
1928 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
1929
1930
1931
1932 clean_added_globals(ADDED_GLOBAL)
1933
1934 return cumulative_helEvals
1935
1936 def run_multiprocs_no_crossings(function, multiprocess, stored_quantities,
1937 opt=None, options=None):
1938 """A wrapper function for running an iteration of a function over
1939 a multiprocess, without having to first create a process list
1940 (which makes a big difference for very large multiprocesses).
1941 stored_quantities is a dictionary for any quantities that we want
1942 to reuse between runs."""
1943
1944 model = multiprocess.get('model')
1945 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
1946 if not leg.get('state')]
1947 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
1948 if leg.get('state')]
1949
1950 id_anti_id_dict = {}
1951 for id in set(tuple(sum(isids+fsids, []))):
1952 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
1953 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
1954 sorted_ids = []
1955 results = []
1956 for is_prod in itertools.product(*isids):
1957 for fs_prod in itertools.product(*fsids):
1958
1959
1960 if check_already_checked(is_prod, fs_prod, sorted_ids,
1961 multiprocess, model, id_anti_id_dict):
1962 continue
1963
1964 process = multiprocess.get_process_with_legs(base_objects.LegList(\
1965 [base_objects.Leg({'id': id, 'state':False}) for \
1966 id in is_prod] + \
1967 [base_objects.Leg({'id': id, 'state':True}) for \
1968 id in fs_prod]))
1969
1970 if opt is not None:
1971 if isinstance(opt, dict):
1972 try:
1973 value = opt[process.base_string()]
1974 except Exception:
1975 continue
1976 result = function(process, stored_quantities, value, options=options)
1977 else:
1978 result = function(process, stored_quantities, opt, options=options)
1979 else:
1980 result = function(process, stored_quantities, options=options)
1981
1982 if result:
1983 results.append(result)
1984
1985 return results
1986
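# Illustrative sketch (an editorial assumption, not part of the original module):
# the shape of a callback accepted by run_multiprocs_no_crossings above. It is
# called once per generated single process with the shared 'stored_quantities'
# object (the evaluator in the checks below) and the 'options' keyword; any truthy
# return value is collected into the results list.
def _example_count_diagrams(process, stored_quantities, options=None):
    """Toy callback: report the process name and its number of tree diagrams."""
    amplitude = diagram_generation.Amplitude(process)
    return {'process': process.base_string(),
            'n_diagrams': len(amplitude.get('diagrams'))}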
1987
1988
1989
1990
1991 def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
1992 id_anti_id_dict = {}):
1993 """Check if process already checked, if so return True, otherwise add
1994 process and antiprocess to sorted_ids."""
1995
1996
1997 if id_anti_id_dict:
1998 is_ids = [id_anti_id_dict[id] for id in \
1999 is_ids]
2000 else:
2001 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
2002 is_ids]
2003
2004 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
2005 [process.get('id')])
2006
2007 if ids in sorted_ids:
2008
2009 return True
2010
2011
2012 sorted_ids.append(ids)
2013
2014
2015 return False
2016
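# Illustrative sketch (an editorial assumption, not part of the original module):
# how check_already_checked above is meant to be used, mirroring the loop in
# check_processes further below. All candidate processes share one 'sorted_ids'
# list so that crossing-equivalent processes are evaluated only once.
def _example_filter_crossings(processes, model):
    """Toy driver: keep one representative per crossing-equivalent process."""
    sorted_ids, kept = [], []
    for process in processes:
        is_ids = [l.get('id') for l in process.get('legs') if not l.get('state')]
        fs_ids = [l.get('id') for l in process.get('legs') if l.get('state')]
        if check_already_checked(is_ids, fs_ids, sorted_ids, process, model):
            continue
        kept.append(process)
    return kept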
2020 def generate_loop_matrix_element(process_definition, reuse, output_path=None,
2021 cmd=FakeInterface(), proc_name=None, loop_filter=None):
2022 """ Generate a loop matrix element from the process definition, and return
2023 it along with the timing information dictionary.
2024 If reuse is True, it reuses the already output directory if found.
2025 There is the possibility of specifying the proc_name."""
2026
2027 assert isinstance(process_definition,
2028 (base_objects.ProcessDefinition,base_objects.Process))
2029 assert process_definition.get('perturbation_couplings')!=[]
2030
2031 if isinstance(process_definition,base_objects.ProcessDefinition):
2032 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
2033 raise InvalidCmd("This check can only be performed on single "+
2034 " processes. (i.e. without multiparticle labels).")
2035
2036 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2037 if not leg.get('state')]
2038 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2039 if leg.get('state')]
2040
2041
2042 process = process_definition.get_process(isids,fsids)
2043 else:
2044 process = process_definition
2045
2046 if not output_path is None:
2047 root_path = output_path
2048 else:
2049 root_path = cmd._mgme_dir
2050
2051 timing = {'Diagrams_generation': None,
2052 'n_loops': None,
2053 'HelasDiagrams_generation': None,
2054 'n_loop_groups': None,
2055 'n_loop_wfs': None,
2056 'loop_wfs_ranks': None}
2057
2058 if proc_name:
2059 proc_dir = pjoin(root_path,proc_name)
2060 else:
2061 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
2062 '_'.join(process.shell_string().split('_')[1:])))
2063 if reuse and os.path.isdir(proc_dir):
2064 logger.info("Reusing directory %s"%str(proc_dir))
2065
2066 return timing, process
2067
2068 logger.info("Generating p%s"%process_definition.nice_string()[1:])
2069
2070 start=time.time()
2071 try:
2072 amplitude = loop_diagram_generation.LoopAmplitude(process,
2073 loop_filter=loop_filter)
2074 except InvalidCmd:
2075
2076
2077 return time.time()-start, None
2078 if not amplitude.get('diagrams'):
2079
2080 return time.time()-start, None
2081
2082
2083
2084 loop_optimized_output = cmd.options['loop_optimized_output']
2085 timing['Diagrams_generation']=time.time()-start
2086 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2087 start=time.time()
2088
2089 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2090 optimized_output = loop_optimized_output,gen_color=True)
2091
2092
2093
2094 matrix_element.compute_all_analytic_information()
2095 timing['HelasDiagrams_generation']=time.time()-start
2096
2097 if loop_optimized_output:
2098 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2099 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2100 ldiag.get('loop_wavefunctions')]
2101 timing['n_loop_wfs']=len(lwfs)
2102 timing['loop_wfs_ranks']=[]
2103 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2104 for l in lwfs])+1):
2105 timing['loop_wfs_ranks'].append(\
2106 len([1 for l in lwfs if \
2107 l.get_analytic_info('wavefunction_rank')==rank]))
2108
2109 return timing, matrix_element
2110
2111
2112
2113
2114 def check_profile(process_definition, param_card = None,cuttools="",tir={},
2115 options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2116 """For a single loop process, check both its timings and then its stability
2117 in one go without regenerating it."""
2118
2119 if 'reuse' not in options:
2120 keep_folder=False
2121 else:
2122 keep_folder = options['reuse']
2123
2124 model=process_definition.get('model')
2125
2126 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2127 keep_folder,output_path=output_path,cmd=cmd)
2128 reusing = isinstance(matrix_element, base_objects.Process)
2129 options['reuse'] = reusing
2130 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2131 model=model, output_path=output_path, cmd=cmd)
2132
2133 if not myProfiler.loop_optimized_output:
2134 MLoptions={}
2135 else:
2136 MLoptions=MLOptions
2137
2138 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2139 param_card, keep_folder=keep_folder,options=options,
2140 MLOptions = MLoptions)
2141
2142 if timing2 is None:
2143 return None, None
2144 
2145 timing2['reduction_tool'] = MLoptions['MLReductionLib'][0]
2146
2147
2148 timing = dict(list(timing1.items())+list(timing2.items()))
2149 stability = myProfiler.check_matrix_element_stability(matrix_element,
2150 options=options, infos_IN=timing,param_card=param_card,
2151 keep_folder = keep_folder,
2152 MLOptions = MLoptions)
2153 if stability == None:
2154 return None, None
2155 else:
2156 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2157 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2158 return timing, stability
2159
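# Illustrative usage sketch (an editorial assumption, not a documented entry point):
# profiling one loop process definition and pretty-printing the result with
# output_profile defined further below. 'proc_def', 'cuttools_path', 'out_dir' and
# 'interface' are hypothetical stand-ins for objects normally prepared by the
# madgraph command interface.
def _example_profile_and_report(proc_def, cuttools_path, out_dir, interface):
    timing, stability = check_profile(proc_def, cuttools=cuttools_path,
                                      output_path=out_dir, cmd=interface)
    if timing is None:
        return None
    return output_profile(proc_def, stability, timing, out_dir, reusing=False)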
2160
2161
2162
2163 def check_stability(process_definition, param_card = None,cuttools="",tir={},
2164 options=None,nPoints=100, output_path=None,
2165 cmd = FakeInterface(), MLOptions = {}):
2166 """For a single loop process, check the numerical stability of its loop matrix
2167 element evaluation over a sample of phase-space points."""
2168
2169 if "reuse" in options:
2170 reuse=options['reuse']
2171 else:
2172 reuse=False
2173
2175 keep_folder = reuse
2176 model=process_definition.get('model')
2177
2178 timing, matrix_element = generate_loop_matrix_element(process_definition,
2179 reuse, output_path=output_path, cmd=cmd)
2180 reusing = isinstance(matrix_element, base_objects.Process)
2181 options['reuse'] = reusing
2182 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2183 output_path=output_path,model=model,cmd=cmd)
2184
2185 if not myStabilityChecker.loop_optimized_output:
2186 MLoptions = {}
2187 else:
2188 MLoptions = MLOptions
2189
2190 if 'COLLIERComputeUVpoles' not in MLoptions:
2191 MLoptions['COLLIERComputeUVpoles']=False
2192 if 'COLLIERComputeIRpoles' not in MLoptions:
2193 MLoptions['COLLIERComputeIRpoles']=False
2194
2195 if 'COLLIERRequiredAccuracy' not in MLoptions:
2196 MLoptions['COLLIERRequiredAccuracy']=1e-13
2197
2198 if 'COLLIERUseInternalStabilityTest' not in MLoptions:
2199 MLoptions['COLLIERUseInternalStabilityTest']=False
2200
2201
2202
2203 MLoptions['COLLIERGlobalCache'] = 0
2204
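    # The integers appended to MLReductionLib below are MadLoop's reduction-tool codes:
    # 1=CutTools, 2=PJFry++, 3=IREGI, 4=Golem95, 5=Samurai, 6=Ninja, 7=COLLIER.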
2205 if "MLReductionLib" not in MLOptions:
2206 MLoptions["MLReductionLib"] = []
2207 if cuttools:
2208 MLoptions["MLReductionLib"].extend([1])
2209 if "iregi_dir" in tir:
2210 MLoptions["MLReductionLib"].extend([3])
2211 if "pjfry_dir" in tir:
2212 MLoptions["MLReductionLib"].extend([2])
2213 if "golem_dir" in tir:
2214 MLoptions["MLReductionLib"].extend([4])
2215 if "samurai_dir" in tir:
2216 MLoptions["MLReductionLib"].extend([5])
2217 if "ninja_dir" in tir:
2218 MLoptions["MLReductionLib"].extend([6])
2219 if "collier_dir" in tir:
2220 MLoptions["MLReductionLib"].extend([7])
2221
2222 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2223 options=options,param_card=param_card,
2224 keep_folder=keep_folder,
2225 MLOptions=MLoptions)
2226
2227 if stability == None:
2228 return None
2229 else:
2230 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2231 return stability
2232
2233
2234
2235
2236 def check_timing(process_definition, param_card= None, cuttools="",tir={},
2237 output_path=None, options={}, cmd = FakeInterface(),
2238 MLOptions = {}):
2239 """For a single loop process, give a detailed summary of the generation and
2240 execution timing."""
2241
2242 if 'reuse' not in options:
2243 keep_folder = False
2244 else:
2245 keep_folder = options['reuse']
2246 model=process_definition.get('model')
2247 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2248 keep_folder, output_path=output_path, cmd=cmd)
2249 reusing = isinstance(matrix_element, base_objects.Process)
2250 options['reuse'] = reusing
2251 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2252 output_path=output_path, cmd=cmd)
2253
2254 if not myTimer.loop_optimized_output:
2255 MLoptions = {}
2256 else:
2257 MLoptions = MLOptions
2258
2259 if 'COLLIERComputeUVpoles' not in MLoptions:
2260 MLoptions['COLLIERComputeUVpoles']=False
2261 if 'COLLIERComputeIRpoles' not in MLoptions:
2262 MLoptions['COLLIERComputeIRpoles']=False
2263
2264 if 'COLLIERGlobalCache' not in MLoptions:
2265 MLoptions['COLLIERGlobalCache']=-1
2266
2267 if 'MLReductionLib' not in MLoptions or \
2268 len(MLoptions['MLReductionLib'])==0:
2269 MLoptions['MLReductionLib'] = [6]
2270
2271 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2272 keep_folder = keep_folder, options=options,
2273 MLOptions = MLoptions)
2274
2275 if timing2 == None:
2276 return None
2277 else:
2278
2279 res = dict(list(timing1.items())+list(timing2.items()))
2280 res['loop_optimized_output']=myTimer.loop_optimized_output
2281 res['reduction_tool'] = MLoptions['MLReductionLib'][0]
2282 return res
2283
2284
2285
2286
2287 def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2288 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2289 """Check processes by generating them with all possible orderings
2290 of particles (which means different diagram building and Helas
2291 calls), and comparing the resulting matrix element values."""
2292
2293 cmass_scheme = cmd.options['complex_mass_scheme']
2294 if isinstance(processes, base_objects.ProcessDefinition):
2295
2296
2297 multiprocess = processes
2298 model = multiprocess.get('model')
2299
2300
2301 if multiprocess.get('perturbation_couplings')==[]:
2302 evaluator = MatrixElementEvaluator(model,
2303 auth_skipping = True, reuse = False, cmd = cmd)
2304 else:
2305 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2306 model=model, auth_skipping = True,
2307 reuse = False, output_path=output_path, cmd = cmd)
2308
2309 results = run_multiprocs_no_crossings(check_process,
2310 multiprocess,
2311 evaluator,
2312 quick,
2313 options)
2314
2315 if "used_lorentz" not in evaluator.stored_quantities:
2316 evaluator.stored_quantities["used_lorentz"] = []
2317
2318 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2319
2320 clean_up(output_path)
2321
2322 return results, evaluator.stored_quantities["used_lorentz"]
2323
2324 elif isinstance(processes, base_objects.Process):
2325 processes = base_objects.ProcessList([processes])
2326 elif isinstance(processes, base_objects.ProcessList):
2327 pass
2328 else:
2329 raise InvalidCmd("processes is of non-supported format")
2330
2331 if not processes:
2332 raise InvalidCmd("No processes given")
2333
2334 model = processes[0].get('model')
2335
2336
2337 if processes[0].get('perturbation_couplings')==[]:
2338 evaluator = MatrixElementEvaluator(model, param_card,
2339 auth_skipping = True, reuse = False, cmd = cmd)
2340 else:
2341 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2342 model=model,param_card=param_card,
2343 auth_skipping = True, reuse = False,
2344 output_path=output_path, cmd = cmd)
2345
2346
2347
2348 sorted_ids = []
2349 comparison_results = []
2350
2351
2352 for process in processes:
2353
2354
2355 if check_already_checked([l.get('id') for l in process.get('legs') if \
2356 not l.get('state')],
2357 [l.get('id') for l in process.get('legs') if \
2358 l.get('state')],
2359 sorted_ids, process, model):
2360 continue
2361
2362 res = check_process(process, evaluator, quick, options)
2363 if res:
2364 comparison_results.append(res)
2365
2366 if "used_lorentz" not in evaluator.stored_quantities:
2367 evaluator.stored_quantities["used_lorentz"] = []
2368
2369 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2370
2371 clean_up(output_path)
2372
2373 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2374
2375 def check_process(process, evaluator, quick, options):
2376 """Check the Helas calls for a process by generating the process
2377 using all different permutations of the process legs (or, if
2378 quick, using a subset of permutations), and checking that the matrix
2379 element is invariant under this."""
2380
2381 model = process.get('model')
2382
2383
2384 for i, leg in enumerate(process.get('legs')):
2385 leg.set('number', i+1)
2386
2387 logger.info("Checking crossings of %s" % \
2388 process.nice_string().replace('Process:', 'process'))
2389
2390 process_matrix_elements = []
2391
2392
2393
2394 if quick:
2395 leg_positions = [[] for leg in process.get('legs')]
2396 quick = list(range(1,len(process.get('legs')) + 1))
2397
2398 values = []
2399
2400
2401 number_checked=0
2402 for legs in itertools.permutations(process.get('legs')):
2403
2404 order = [l.get('number') for l in legs]
2405 if quick:
2406 found_leg = True
2407 for num in quick:
2408
2409
2410 leg_position = legs.index([l for l in legs if \
2411 l.get('number') == num][0])
2412
2413 if not leg_position in leg_positions[num-1]:
2414 found_leg = False
2415 leg_positions[num-1].append(leg_position)
2416
2417 if found_leg:
2418 continue
2419
2420
2421
2422 if quick and process.get('perturbation_couplings') and number_checked >3:
2423 continue
2424
2425 legs = base_objects.LegList(legs)
2426
2427 if order != list(range(1,len(legs) + 1)):
2428 logger.info("Testing permutation: %s" % \
2429 order)
2430
2431 newproc = copy.copy(process)
2432 newproc.set('legs',legs)
2433
2434
2435 try:
2436 if newproc.get('perturbation_couplings')==[]:
2437 amplitude = diagram_generation.Amplitude(newproc)
2438 else:
2439
2440 loop_base_objects.cutting_method = 'optimal' if \
2441 number_checked%2 == 0 else 'default'
2442 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2443 except InvalidCmd:
2444 result=False
2445 else:
2446 result = amplitude.get('diagrams')
2447
2448 loop_base_objects.cutting_method = 'optimal'
2449
2450 if not result:
2451
2452 logging.info("No diagrams for %s" % \
2453 process.nice_string().replace('Process', 'process'))
2454 break
2455
2456 if order == list(range(1,len(legs) + 1)):
2457
2458 p, w_rambo = evaluator.get_momenta(process, options)
2459
2460
2461 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2462 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2463 gen_color=False)
2464 else:
2465 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2466 optimized_output=evaluator.loop_optimized_output)
2467
2468
2469
2470
2471 if amplitude.get('process').get('has_born'):
2472
2473
2474 if matrix_element in process_matrix_elements:
2475
2476
2477 continue
2478
2479 process_matrix_elements.append(matrix_element)
2480
2481 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2482 options=options)
2483 if res == None:
2484 break
2485
2486 values.append(res[0])
2487 number_checked += 1
2488
2489
2490
2491 if abs(max(values)) + abs(min(values)) > 0 and \
2492 2 * abs(max(values) - min(values)) / \
2493 (abs(max(values)) + abs(min(values))) > 0.01:
2494 break
2495
2496
2497 if not values:
2498 return None
2499
2500
2501
2502 diff = 0
2503 if abs(max(values)) + abs(min(values)) > 0:
2504 diff = 2* abs(max(values) - min(values)) / \
2505 (abs(max(values)) + abs(min(values)))
2506
2507
2508 if process.get('perturbation_couplings'):
2509 passed = diff < 1.e-5
2510 else:
2511 passed = diff < 1.e-8
2512
2513 return {"process": process,
2514 "momenta": p,
2515 "values": values,
2516 "difference": diff,
2517 "passed": passed}
2518
2519 def clean_up(mg_root):
2520 """Clean-up the possible left-over outputs from 'evaluate_matrix_element' of
2521 the LoopMatrixEvaluator (when its argument proliferate is set to true)."""
2522
2523 if mg_root is None:
2524 return
2525
2526 directories = misc.glob('%s*' % temp_dir_prefix, mg_root)
2527 if directories != []:
2528 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2529 for dir in directories:
2530
2531 if os.path.isdir(pjoin(dir,'SubProcesses')):
2532 shutil.rmtree(dir)
2533
2534 def format_output(output, format):
2535 """ Return a string for 'output' in the specified format. If output is
2536 None, it returns 'NA'."""
2537 
2538 if output is not None:
2539 return format%output
2540 else:
2541 return 'NA'
2542 
2543 def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2544 """Present the results from a timing and stability consecutive check"""
2545
2546
2547 opt = timing['loop_optimized_output']
2548
2549 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2550 ' output:\n'
2551 text += output_timings(myprocdef,timing)
2552
2553 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2554 ' output:\n'
2555 text += output_stability(stability,output_path, reusing=reusing)
2556
2557 mode = 'optimized' if opt else 'default'
2558 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2559 %(mode,stability['Process'].shell_string()))
2560 logFile = open(logFilePath, 'w')
2561 logFile.write(text)
2562 logFile.close()
2563 logger.info('Log of this profile check was output to file %s'\
2564 %str(logFilePath))
2565 return text
2566
2567 def output_stability(stability, output_path, reusing=False):
2568 """Present the result of a stability check in a nice format.
2569 The full info is printed out in 'Stability_result_<proc_shell_string>.dat'
2570 under the MadGraph5_aMC@NLO root folder (output_path)"""
2571
2572 def accuracy(eval_list):
2573 """ Compute the accuracy from different evaluations."""
2574 return (2.0*(max(eval_list)-min(eval_list))/
2575 abs(max(eval_list)+min(eval_list)))
2576
2577 def best_estimate(eval_list):
2578 """ Returns the best estimate from different evaluations."""
2579 return (max(eval_list)+min(eval_list))/2.0
2580
2581 def loop_direction_test_power(eval_list):
2582 """ Computes the loop direction test power P, defined as:
2583 P = accuracy(loop_dir_test) / accuracy(all_test)
2584 so that P is large if the loop direction test is effective.
2585 The tuple returned is (log(median(P)),log(min(P)),frac)
2586 where frac is the fraction of events with log10(P) smaller than -3,
2587 i.e. events for which the loop direction test reports an
2588 accuracy three digits better than it really is according to the other
2589 tests."""
2590 powers=[]
2591 for eval in eval_list:
2592 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2593
2594 other_evals = [eval[key] for key in eval.keys() if key not in \
2595 ['CTModeB','Accuracy']]
2596 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2597 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2598
2599 n_fail=0
2600 for p in powers:
2601 if (math.log(p)/math.log(10))<-3:
2602 n_fail+=1
2603
2604 if len(powers)==0:
2605 return (None,None,None)
2606
2607 return (math.log(median(powers))/math.log(10),
2608 math.log(min(powers))/math.log(10),
2609 n_fail/len(powers))
2610
2611 def test_consistency(dp_eval_list, qp_eval_list):
2612 """ Computes the consistency test C from the DP and QP evaluations.
2613 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2614 So a consistent test would have C as close to one as possible.
2615 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2616 consistencies = []
2617 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2618 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2619 if key!='Accuracy']
2620 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2621 if key!='Accuracy']
2622 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2623 accuracy(dp_evals)!=0.0:
2624 consistencies.append(accuracy(dp_evals)/(abs(\
2625 best_estimate(qp_evals)-best_estimate(dp_evals))))
2626
2627 if len(consistencies)==0:
2628 return (None,None,None)
2629
2630 return (math.log(median(consistencies))/math.log(10),
2631 math.log(min(consistencies))/math.log(10),
2632 math.log(max(consistencies))/math.log(10))
2633
2634 def median(orig_list):
2635 """ Find the median of a float list (sorted internally). """
2636 tmp=copy.copy(orig_list)
2637 tmp.sort()
2638 if len(tmp)%2==0:
2639 return (tmp[int((len(tmp)/2)-1)]+tmp[int(len(tmp)/2)])/2.0
2640 else:
2641 return tmp[int((len(tmp)-1)/2)]
2642
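    # For instance, median([3.0, 1.0, 2.0]) gives 2.0 and median([1.0, 2.0, 3.0, 4.0]) gives 2.5.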
2643
2644 f = format_output
2645 opt = stability['loop_optimized_output']
2646
2647 mode = 'optimized' if opt else 'default'
2648 process = stability['Process']
2649 res_str = "Stability checking for %s (%s mode)\n"\
2650 %(process.nice_string()[9:],mode)
2651
2652 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2653 %(mode,process.shell_string())), 'w')
2654
2655 logFile.write('Stability check results\n\n')
2656 logFile.write(res_str)
2657 data_plot_dict={}
2658 accuracy_dict={}
2659 nPSmax=0
2660 max_acc=0.0
2661 min_acc=1.0
2662 if stability['Stability']:
2663 toolnames= list(stability['Stability'].keys())
2664 toolnamestr=" | ".join(tn+
2665 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2666 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2667 for key,stab in stability['Stability'].items()]
2668 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2669 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2670 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
2671 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2672 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2673 len_PS=["%i"%len(evals)+\
2674 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2675 len_PS_str=" | ".join(len_PS)
2676 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2677 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2678 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2679 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
2680 pmedminlist=[]
2681 pfraclist=[]
2682 for key,stab in stability['Stability'].items():
2683 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2684 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2685 pfrac_str = f(pfrac,'%.2e')
2686 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2687 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2688 pmedminlist_str=" | ".join(pmedminlist)
2689 pfraclist_str=" | ".join(pfraclist)
2690 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2691 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2692 len_UPS=["%i"%len(upup)+\
2693 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2694 len_UPS_str=" | ".join(len_UPS)
2695 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2696 res_str_i += \
2697 """
2698 = Legend for the statistics of the stability tests. (all logs below are log_10)
2699 The loop direction test power P is computed as follows:
2700 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2701 So that log(P) is positive if the loop direction test is effective.
2702 The tuple printed out is (log(median(P)),log(min(P)))
2703 The consistency test C is computed when QP evaluations are available:
2704 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2705 So a consistent test would have log(C) as close to zero as possible.
2706 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2707 res_str+=res_str_i
2708 for key in stability['Stability'].keys():
2709 toolname=key
2710 stab=stability['Stability'][key]
2711 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2712
2713 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2714 stab['QP_stability']]
2715 nPS = len(DP_stability)
2716 if nPS>nPSmax:nPSmax=nPS
2717 UPS = stab['Unstable_PS_points']
2718 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2719 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2720 EPS = stab['Exceptional_PS_points']
2721 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2722 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2723 res_str_i = ""
2724
2725 xml_toolname = {'GOLEM95':'GOLEM','IREGI':'IREGI',
2726 'CUTTOOLS':'CUTTOOLS','PJFRY++':'PJFRY',
2727 'NINJA':'NINJA','SAMURAI':'SAMURAI',
2728 'COLLIER':'COLLIER'}[toolname.upper()]
2729 if len(UPS)>0:
2730 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2731 %(len(UPS),nPS,toolname)
2732 prefix = 'DP' if toolname=='CutTools' else ''
2733 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2734 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2735 res_str_i += "|= %s Max accuracy............... %s\n"\
2736 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2737 res_str_i += "|= %s Min accuracy............... %s\n"\
2738 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2739 (pmed,pmin,pfrac)=loop_direction_test_power(\
2740 [stab['DP_stability'][U[0]] for U in UPS])
2741 if toolname=='CutTools':
2742 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2743 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2744 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2745 %f(pfrac,'%.2e')
2746 res_str_i += "|= QP Median accuracy............ %s\n"\
2747 %f(median(UPS_stability_QP),'%.2e')
2748 res_str_i += "|= QP Max accuracy............... %s\n"\
2749 %f(min(UPS_stability_QP),'%.2e')
2750 res_str_i += "|= QP Min accuracy............... %s\n"\
2751 %f(max(UPS_stability_QP),'%.2e')
2752 (pmed,pmin,pfrac)=loop_direction_test_power(\
2753 [stab['QP_stability'][U[0]] for U in UPS])
2754 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2755 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2756 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2757 (pmed,pmin,pmax)=test_consistency(\
2758 [stab['DP_stability'][U[0]] for U in UPS],
2759 [stab['QP_stability'][U[0]] for U in UPS])
2760 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2761 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2762 if len(EPS)==0:
2763 res_str_i += "= Number of Exceptional PS points : 0\n"
2764 if len(EPS)>0:
2765 res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
2766 %(len(EPS),nPS,toolname)
2767 res_str_i += "|= DP Median accuracy............ %s\n"\
2768 %f(median(EPS_stability_DP),'%.2e')
2769 res_str_i += "|= DP Max accuracy............... %s\n"\
2770 %f(min(EPS_stability_DP),'%.2e')
2771 res_str_i += "|= DP Min accuracy............... %s\n"\
2772 %f(max(EPS_stability_DP),'%.2e')
2773 pmed,pmin,pfrac=loop_direction_test_power(\
2774 [stab['DP_stability'][E[0]] for E in EPS])
2775 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2776 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2777 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2778 %f(pfrac,'%.2e')
2779 res_str_i += "|= QP Median accuracy............ %s\n"\
2780 %f(median(EPS_stability_QP),'%.2e')
2781 res_str_i += "|= QP Max accuracy............... %s\n"\
2782 %f(min(EPS_stability_QP),'%.2e')
2783 res_str_i += "|= QP Min accuracy............... %s\n"\
2784 %f(max(EPS_stability_QP),'%.2e')
2785 pmed,pmin,pfrac=loop_direction_test_power(\
2786 [stab['QP_stability'][E[0]] for E in EPS])
2787 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2788 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2789 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2790
2791 logFile.write(res_str_i)
2792
2793 if len(EPS)>0:
2794 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2795 %(len(EPS),toolname))
2796 logFile.write('<EPS_data reduction=%s>\n'%xml_toolname.upper())
2797 for i, eps in enumerate(EPS):
2798 logFile.write('\nEPS #%i\n'%(i+1))
2799 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2800 for p in eps[1]]))
2801 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[eps[0]])
2802 logFile.write(' QP accuracy : %.4e\n'%QP_stability[eps[0]])
2803 logFile.write('</EPS_data>\n')
2804 if len(UPS)>0:
2805 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2806 %(len(UPS),toolname))
2807 logFile.write('<UPS_data reduction=%s>\n'%xml_toolname.upper())
2808 for i, ups in enumerate(UPS):
2809 logFile.write('\nUPS #%i\n'%(i+1))
2810 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2811 for p in ups[1]]))
2812 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[ups[0]])
2813 logFile.write(' QP accuracy : %.4e\n'%QP_stability[ups[0]])
2814 logFile.write('</UPS_data>\n')
2815
2816 logFile.write('\nData entries for the stability plot.\n')
2817 logFile.write('First row is a maximal accuracy delta, second is the '+\
2818 'fraction of events with DP accuracy worse than delta.\n')
2819 logFile.write('<plot_data reduction=%s>\n'%xml_toolname.upper())
2820
2821 if max(DP_stability)>0.0:
2822 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2823 if min_digit_acc>=0:
2824 min_digit_acc = min_digit_acc+1
2825 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
2826 else:
2827 logFile.writelines('%.4e %.4e\n'%(accuracies[i], 0.0) for i in \
2828 range(len(accuracies)))
2829 logFile.write('</plot_data>\n')
2830 res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
2831 ' is output then.'
2832 logFile.write('Perfect accuracy over all the trial PS points.')
2833 res_str +=res_str_i
2834 continue
2835
2836 accuracy_dict[toolname]=accuracies
2837 if max(accuracies) > max_acc: max_acc=max(accuracies)
2838 if min(accuracies) < min_acc: min_acc=min(accuracies)
2839 data_plot=[]
2840 for acc in accuracies:
2841 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2842 /float(len(DP_stability)))
2843 data_plot_dict[toolname]=data_plot
2844
2845 logFile.writelines('%.4e %.4e\n'%(accuracies[i], data_plot[i]) for i in \
2846 range(len(accuracies)))
2847 logFile.write('</plot_data>\n')
2848 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2849 %(nPS,toolname))
2850 logFile.write('First row is DP, second is QP (if available).\n\n')
2851 logFile.write('<accuracies reduction=%s>\n'%xml_toolname.upper())
2852 logFile.writelines('%.4e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2853 else '%.4e\n'%QP_stability[i]) for i in range(nPS))
2854 logFile.write('</accuracies>\n')
2855 res_str+=res_str_i
2856 logFile.close()
2857 res_str += "\n= Stability details of the run are output to the file"+\
2858 " stability_%s_%s.log\n"%(mode,process.shell_string())
2859
2860
2861
2862
2863 if any(isinstance(handler,logging.FileHandler) for handler in \
2864 logging.getLogger('madgraph').handlers):
2865 return res_str
2866
2867 try:
2868 import matplotlib.pyplot as plt
2869 colorlist=['b','r','g','y','m','c','k']
2870 for i,key in enumerate(data_plot_dict.keys()):
2871 color=colorlist[i]
2872 data_plot=data_plot_dict[key]
2873 accuracies=accuracy_dict[key]
2874 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2875 label=key)
2876 plt.axis([min_acc,max_acc,\
2877 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2878 plt.yscale('log')
2879 plt.xscale('log')
2880 plt.title('Stability plot for %s (%s mode, %d points)'%\
2881 (process.nice_string()[9:],mode,nPSmax))
2882 plt.ylabel('Fraction of events')
2883 plt.xlabel('Maximal precision')
2884 plt.legend()
2885 if not reusing:
2886 logger.info('Some stability statistics will be displayed once you '+\
2887 'close the plot window')
2888 plt.show()
2889 else:
2890 fig_output_file = str(pjoin(output_path,
2891 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2892 logger.info('Stability plot output to file %s. '%fig_output_file)
2893 plt.savefig(fig_output_file)
2894 return res_str
2895 except Exception as e:
2896 if isinstance(e, ImportError):
2897 res_str += "\n= Install matplotlib to get a "+\
2898 "graphical display of the results of this check."
2899 else:
2900 res_str += "\n= Could not produce the stability plot because of "+\
2901 "the following error: %s"%str(e)
2902 return res_str
2903
2904 def output_timings(process, timings):
2905 """Present the result of a timings check in a nice format."""
2906
2907
2908 f = format_output
2909 loop_optimized_output = timings['loop_optimized_output']
2910 reduction_tool = bannermod.MadLoopParam._ID_reduction_tool_map[
2911 timings['reduction_tool']]
2912
2913 res_str = "%s \n"%process.nice_string()
2914 try:
2915 gen_total = timings['HELAS_MODEL_compilation']+\
2916 timings['HelasDiagrams_generation']+\
2917 timings['Process_output']+\
2918 timings['Diagrams_generation']+\
2919 timings['Process_compilation']+\
2920 timings['Initialization']
2921 except TypeError:
2922 gen_total = None
2923 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2924 res_str += "|= Diagrams generation....... %s\n"\
2925 %f(timings['Diagrams_generation'],'%.3gs')
2926 res_str += "|= Helas Diagrams generation. %s\n"\
2927 %f(timings['HelasDiagrams_generation'],'%.3gs')
2928 res_str += "|= Process output............ %s\n"\
2929 %f(timings['Process_output'],'%.3gs')
2930 res_str += "|= HELAS+model compilation... %s\n"\
2931 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2932 res_str += "|= Process compilation....... %s\n"\
2933 %f(timings['Process_compilation'],'%.3gs')
2934 res_str += "|= Initialization............ %s\n"\
2935 %f(timings['Initialization'],'%.3gs')
2936
2937 res_str += "\n= Reduction tool tested...... %s\n"%reduction_tool
2938 res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
2939 %(timings['run_unpolarized_total']*1000.0)
2940 if loop_optimized_output:
2941 coef_time=timings['run_unpolarized_coefs']*1000.0
2942 loop_time=(timings['run_unpolarized_total']-\
2943 timings['run_unpolarized_coefs'])*1000.0
2944 total=coef_time+loop_time
2945 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2946 %(coef_time,int(round(100.0*coef_time/total)))
2947 res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
2948 %(loop_time,int(round(100.0*loop_time/total)))
2949 res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
2950 %(timings['run_polarized_total']*1000.0)
2951 if loop_optimized_output:
2952 coef_time=timings['run_polarized_coefs']*1000.0
2953 loop_time=(timings['run_polarized_total']-\
2954 timings['run_polarized_coefs'])*1000.0
2955 total=coef_time+loop_time
2956 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2957 %(coef_time,int(round(100.0*coef_time/total)))
2958 res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
2959 %(loop_time,int(round(100.0*loop_time/total)))
2960 res_str += "\n= Miscellaneous ========================\n"
2961 res_str += "|= Number of hel. computed... %s/%s\n"\
2962 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2963 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2964 if loop_optimized_output:
2965 res_str += "|= Number of loop groups..... %s\n"\
2966 %f(timings['n_loop_groups'],'%d')
2967 res_str += "|= Number of loop wfs........ %s\n"\
2968 %f(timings['n_loop_wfs'],'%d')
2969 if timings['loop_wfs_ranks']!=None:
2970 for i, r in enumerate(timings['loop_wfs_ranks']):
2971 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2972 res_str += "|= Loading time (Color data). ~%.3gms\n"\
2973 %(timings['Booting_time']*1000.0)
2974 res_str += "|= Maximum RAM usage (rss)... %s\n"\
2975 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
2976 res_str += "\n= Output disk size =====================\n"
2977 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
2978 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
2979 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
2980 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
2981
2982 return res_str
2983
2984 def output_comparisons(comparison_results):
2985 """Present the results of a comparison in a nice list format.
2986 In short mode, return the number of failed processes.
2987 """
2988 proc_col_size = 17
2989 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
2990 if pert_coupl:
2991 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
2992 else:
2993 process_header = "Process"
2994
2995 if len(process_header) + 1 > proc_col_size:
2996 proc_col_size = len(process_header) + 1
2997
2998 for proc in comparison_results:
2999 if len(proc['process'].base_string()) + 1 > proc_col_size:
3000 proc_col_size = len(proc['process'].base_string()) + 1
3001
3002 col_size = 18
3003
3004 pass_proc = 0
3005 fail_proc = 0
3006 no_check_proc = 0
3007
3008 failed_proc_list = []
3009 no_check_proc_list = []
3010
3011 res_str = fixed_string_length(process_header, proc_col_size) + \
3012 fixed_string_length("Min element", col_size) + \
3013 fixed_string_length("Max element", col_size) + \
3014 fixed_string_length("Relative diff.", col_size) + \
3015 "Result"
3016
3017 for result in comparison_results:
3018 proc = result['process'].base_string()
3019 values = result['values']
3020
3021 if len(values) <= 1:
3022 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3023 " * No permutations, process not checked *"
3024 no_check_proc += 1
3025 no_check_proc_list.append(result['process'].nice_string())
3026 continue
3027
3028 passed = result['passed']
3029
3030 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3031 fixed_string_length("%1.10e" % min(values), col_size) + \
3032 fixed_string_length("%1.10e" % max(values), col_size) + \
3033 fixed_string_length("%1.10e" % result['difference'],
3034 col_size)
3035 if passed:
3036 pass_proc += 1
3037 res_str += "Passed"
3038 else:
3039 fail_proc += 1
3040 failed_proc_list.append(result['process'].nice_string())
3041 res_str += "Failed"
3042
3043 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3044 (pass_proc, pass_proc + fail_proc,
3045 fail_proc, pass_proc + fail_proc)
3046
3047 if fail_proc != 0:
3048 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3049 if no_check_proc != 0:
3050 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3051
3052 return res_str
3053
3054 def fixed_string_length(mystr, length):
3055 """Helper function to fix the length of a string by cutting it
3056 or adding extra space."""
3057
3058 if len(mystr) > length:
3059 return mystr[0:length]
3060 else:
3061 return mystr + " " * (length - len(mystr))
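# For example, fixed_string_length("abc", 6) returns "abc   " (padded to 6 characters),
# while fixed_string_length("abcdef", 3) returns "abc" (truncated to 3 characters).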
3062
3063
3064
3065
3066
3067 def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
3068 options=None, output_path=None, cmd = FakeInterface()):
3069 """Check gauge invariance of the processes by using the BRS check.
3070 For one of the massless external bosons (e.g. gluon or photon),
3071 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
3072 """
3073 cmass_scheme = cmd.options['complex_mass_scheme']
3074 if isinstance(processes, base_objects.ProcessDefinition):
3075
3076
3077 multiprocess = processes
3078
3079 model = multiprocess.get('model')
3080
3081 if multiprocess.get('perturbation_couplings')==[]:
3082 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
3083 auth_skipping = True, reuse = False)
3084 else:
3085 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3086 cmd=cmd,model=model, param_card=param_card,
3087 auth_skipping = False, reuse = False,
3088 output_path=output_path)
3089
3090 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
3091
3092 logger.info('Set all widths to zero for non-complex-mass-scheme checks')
3093 for particle in evaluator.full_model.get('particles'):
3094 if particle.get('width') != 'ZERO':
3095 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3096 results = run_multiprocs_no_crossings(check_gauge_process,
3097 multiprocess,
3098 evaluator,
3099 options=options
3100 )
3101
3102 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3103
3104 clean_up(output_path)
3105
3106 return results
3107
3108 elif isinstance(processes, base_objects.Process):
3109 processes = base_objects.ProcessList([processes])
3110 elif isinstance(processes, base_objects.ProcessList):
3111 pass
3112 else:
3113 raise InvalidCmd("processes is of non-supported format")
3114
3115 assert processes, "No processes given"
3116
3117 model = processes[0].get('model')
3118
3119
3120 if processes[0].get('perturbation_couplings')==[]:
3121 evaluator = MatrixElementEvaluator(model, param_card,
3122 auth_skipping = True, reuse = False,
3123 cmd = cmd)
3124 else:
3125 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3126 model=model, param_card=param_card,
3127 auth_skipping = False, reuse = False,
3128 output_path=output_path, cmd = cmd)
3129 comparison_results = []
3130 comparison_explicit_flip = []
3131
3132
3133 for process in processes:
3134
3135
3136
3137
3138
3139
3140
3141
3142
3143 result = check_gauge_process(process, evaluator,options=options)
3144 if result:
3145 comparison_results.append(result)
3146
3147 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3148
3149 clean_up(output_path)
3150
3151 return comparison_results
3152
3154 def check_gauge_process(process, evaluator, options=None):
3155 """Check gauge invariance for the process, unless it is already done."""
3156
3157 model = process.get('model')
3158
3159
3160 found_gauge = False
3161 for i, leg in enumerate(process.get('legs')):
3162 part = model.get_particle(leg.get('id'))
3163 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3164 found_gauge = True
3165 break
3166 if not found_gauge:
3167 logger.info("No Ward identity for %s" % \
3168 process.nice_string().replace('Process', 'process'))
3169
3170 return None
3171
3172 for i, leg in enumerate(process.get('legs')):
3173 leg.set('number', i+1)
3174
3175 logger.info("Checking Ward identities for %s" % \
3176 process.nice_string().replace('Process', 'process'))
3177
3178 legs = process.get('legs')
3179
3180
3181 try:
3182 if process.get('perturbation_couplings')==[]:
3183 amplitude = diagram_generation.Amplitude(process)
3184 else:
3185 amplitude = loop_diagram_generation.LoopAmplitude(process)
3186 except InvalidCmd:
3187 logging.info("No diagrams for %s" % \
3188 process.nice_string().replace('Process', 'process'))
3189 return None
3190 if not amplitude.get('diagrams'):
3191
3192 logging.info("No diagrams for %s" % \
3193 process.nice_string().replace('Process', 'process'))
3194 return None
3195
3196 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3197 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3198 gen_color = False)
3199 else:
3200 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3201 optimized_output=evaluator.loop_optimized_output)
3202
3203
3204
3205
3206
3207
3208
3209
3210 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3211 output='jamp', options=options)
3212
3213 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3214 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3215 gen_color = False)
3216
3217 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3218 output='jamp', options=options)
3219
3220 if mvalue and mvalue['m2']:
3221 return {'process':process,'value':mvalue,'brs':brsvalue}
3222
3223 def output_gauge(comparison_results, output='text'):
3224 """Present the results of a comparison in a nice list format"""
3225
3226 proc_col_size = 17
3227
3228 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3229
3230
3231 if pert_coupl:
3232 threshold=1e-5
3233 else:
3234 threshold=1e-10
3235
3236 if pert_coupl:
3237 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3238 else:
3239 process_header = "Process"
3240
3241 if len(process_header) + 1 > proc_col_size:
3242 proc_col_size = len(process_header) + 1
3243
3244 for one_comp in comparison_results:
3245 proc = one_comp['process'].base_string()
3246 mvalue = one_comp['value']
3247 brsvalue = one_comp['brs']
3248 if len(proc) + 1 > proc_col_size:
3249 proc_col_size = len(proc) + 1
3250
3251 col_size = 18
3252
3253 pass_proc = 0
3254 fail_proc = 0
3255
3256 failed_proc_list = []
3257 no_check_proc_list = []
3258
3259 res_str = fixed_string_length(process_header, proc_col_size) + \
3260 fixed_string_length("matrix", col_size) + \
3261 fixed_string_length("BRS", col_size) + \
3262 fixed_string_length("ratio", col_size) + \
3263 "Result"
3264
3265 for one_comp in comparison_results:
3266 proc = one_comp['process'].base_string()
3267 mvalue = one_comp['value']
3268 brsvalue = one_comp['brs']
3269 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3270 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3271 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3272 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3273 fixed_string_length("%1.10e" % ratio, col_size)
3274
3275 if ratio > threshold:
3276 fail_proc += 1
3277 proc_succeed = False
3278 failed_proc_list.append(proc)
3279 res_str += "Failed"
3280 else:
3281 pass_proc += 1
3282 proc_succeed = True
3283 res_str += "Passed"
3284
3285
3286
3287
3288
3289 if len(mvalue['jamp'])!=0:
3290 for k in range(len(mvalue['jamp'][0])):
3291 m_sum = 0
3292 brs_sum = 0
3293
3294 for j in range(len(mvalue['jamp'])):
3295
3296 m_sum += abs(mvalue['jamp'][j][k])**2
3297 brs_sum += abs(brsvalue['jamp'][j][k])**2
3298
3299
3300 if not m_sum:
3301 continue
3302 ratio = abs(brs_sum) / abs(m_sum)
3303
3304 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3305 fixed_string_length("%1.10e" % m_sum, col_size) + \
3306 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3307 fixed_string_length("%1.10e" % ratio, col_size)
3308
3309 if ratio > 1e-15:
3310 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3311 fail_proc += 1
3312 pass_proc -= 1
3313 failed_proc_list.append(proc)
3314 res_str += tmp_str + "Failed"
3315 elif not proc_succeed:
3316 res_str += tmp_str + "Passed"
3317
3318
3319 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3320 (pass_proc, pass_proc + fail_proc,
3321 fail_proc, pass_proc + fail_proc)
3322
3323 if fail_proc != 0:
3324 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3325
3326 if output=='text':
3327 return res_str
3328 else:
3329 return fail_proc
3330
3331
3332
3333 def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3334 reuse = False, output_path=None, cmd = FakeInterface()):
3335 """ Check if the squared matrix element (summed over helicities) is Lorentz
3336 invariant by boosting the momenta with different values."""
3337
3338 cmass_scheme = cmd.options['complex_mass_scheme']
3339 if isinstance(processes, base_objects.ProcessDefinition):
3340
3341
3342 multiprocess = processes
3343 model = multiprocess.get('model')
3344
3345 if multiprocess.get('perturbation_couplings')==[]:
3346 evaluator = MatrixElementEvaluator(model,
3347 cmd= cmd, auth_skipping = False, reuse = True)
3348 else:
3349 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3350 model=model, auth_skipping = False, reuse = True,
3351 output_path=output_path, cmd = cmd)
3352
3353 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3354
3355 logger.info('Set all widths to zero for non-complex-mass-scheme checks')
3356 for particle in evaluator.full_model.get('particles'):
3357 if particle.get('width') != 'ZERO':
3358 evaluator.full_model.get('parameter_dict')[\
3359 particle.get('width')] = 0.
3360
3361 results = run_multiprocs_no_crossings(check_lorentz_process,
3362 multiprocess,
3363 evaluator,
3364 options=options)
3365
3366 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3367
3368 clean_up(output_path)
3369
3370 return results
3371
3372 elif isinstance(processes, base_objects.Process):
3373 processes = base_objects.ProcessList([processes])
3374 elif isinstance(processes, base_objects.ProcessList):
3375 pass
3376 else:
3377 raise InvalidCmd("processes is of non-supported format")
3378
3379 assert processes, "No processes given"
3380
3381 model = processes[0].get('model')
3382
3383
3384 if processes[0].get('perturbation_couplings')==[]:
3385 evaluator = MatrixElementEvaluator(model, param_card,
3386 auth_skipping = False, reuse = True,
3387 cmd=cmd)
3388 else:
3389 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3390 model=model,param_card=param_card,
3391 auth_skipping = False, reuse = True,
3392 output_path=output_path, cmd = cmd)
3393
3394 comparison_results = []
3395
3396
3397 for process in processes:
3398
3399
3400
3401
3402
3403
3404
3405
3406
3407 result = check_lorentz_process(process, evaluator,options=options)
3408 if result:
3409 comparison_results.append(result)
3410
3411 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3412
3413 clean_up(output_path)
3414
3415 return comparison_results
3416
3418 def check_lorentz_process(process, evaluator, options=None):
3419 """Check Lorentz invariance for the process, unless it is already done."""
3420
3421 amp_results = []
3422 model = process.get('model')
3423
3424 for i, leg in enumerate(process.get('legs')):
3425 leg.set('number', i+1)
3426
3427 logger.info("Checking lorentz transformations for %s" % \
3428 process.nice_string().replace('Process:', 'process'))
3429
3430 legs = process.get('legs')
3431
3432
3433 try:
3434 if process.get('perturbation_couplings')==[]:
3435 amplitude = diagram_generation.Amplitude(process)
3436 else:
3437 amplitude = loop_diagram_generation.LoopAmplitude(process)
3438 except InvalidCmd:
3439 logging.info("No diagrams for %s" % \
3440 process.nice_string().replace('Process', 'process'))
3441 return None
3442
3443 if not amplitude.get('diagrams'):
3444
3445 logging.info("No diagrams for %s" % \
3446 process.nice_string().replace('Process', 'process'))
3447 return None
3448
3449
3450 p, w_rambo = evaluator.get_momenta(process, options)
3451
3452
3453 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3454 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3455 gen_color = True)
3456 else:
3457 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3458 optimized_output = evaluator.loop_optimized_output)
3459
3460 MLOptions = {'ImprovePS':True,'ForceMP':True}
3461 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3462 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3463 auth_skipping = True, options=options)
3464 else:
3465 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3466 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3467 options = options)
3468
3469 if data and data['m2']:
3470 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3471 results = [data]
3472 else:
3473 results = [('Original evaluation',data)]
3474 else:
3475 return {'process':process, 'results':'pass'}
3476
3477
3478
3479
3480 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3481 for boost in range(1,4):
3482 boost_p = boost_momenta(p, boost)
3483 results.append(evaluator.evaluate_matrix_element(matrix_element,
3484 p=boost_p,output='jamp'))
3485 else:
3486
3487 boost_p = boost_momenta(p, 3)
3488 results.append(('Z-axis boost',
3489 evaluator.evaluate_matrix_element(matrix_element, options=options,
3490 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3491
3492
3493
3494
3495 if not options['events']:
3496 boost_p = boost_momenta(p, 1)
3497 results.append(('X-axis boost',
3498 evaluator.evaluate_matrix_element(matrix_element, options=options,
3499 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3500 boost_p = boost_momenta(p, 2)
3501 results.append(('Y-axis boost',
3502 evaluator.evaluate_matrix_element(matrix_element,options=options,
3503 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3504
3505
3506 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3507 results.append(('Z-axis pi/2 rotation',
3508 evaluator.evaluate_matrix_element(matrix_element,options=options,
3509 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3510
3511 sq2 = math.sqrt(2.0)
3512 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3513 results.append(('Z-axis pi/4 rotation',
3514 evaluator.evaluate_matrix_element(matrix_element,options=options,
3515 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
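# For reference, both rotated configurations follow from a rotation about the
# z-axis by an angle theta, which leaves E and pz untouched and maps
# px' = px*cos(theta) - py*sin(theta), py' = px*sin(theta) + py*cos(theta):
# theta = pi/2 gives (px', py') = (-py, px) and theta = pi/4 gives
# (px', py') = ((px-py)/sqrt(2), (px+py)/sqrt(2)), which is exactly what the
# two list comprehensions above implement.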
3516
3517
3518 return {'process': process, 'results': results}
3519
3520
3521
3522
3523 def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3524 options=None, tir={}, output_path=None,
3525 cuttools="", reuse=False, cmd = FakeInterface()):
3526 """Check gauge invariance of the processes by flipping
3527 the gauge of the model
3528 """
3529
3530 mg_root = cmd._mgme_dir
3531
3532 cmass_scheme = cmd.options['complex_mass_scheme']
3533
3534 if isinstance(processes_unit, base_objects.ProcessDefinition):
3535
3536
3537 multiprocess_unit = processes_unit
3538 model = multiprocess_unit.get('model')
3539
3540
3541
3542 loop_optimized_bu = cmd.options['loop_optimized_output']
3543 if processes_unit.get('squared_orders'):
3544 if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
3545 cmd.options['loop_optimized_output'] = True
3546 else:
3547 raise InvalidCmd("The gauge test cannot be performed for "+
3548 " a process with more than QCD corrections and which"+
3549 " specifies squared order constraints.")
3550 else:
3551 cmd.options['loop_optimized_output'] = False
3552
3553 aloha.unitary_gauge = True
3554 if processes_unit.get('perturbation_couplings')==[]:
3555 evaluator = MatrixElementEvaluator(model, param_card,
3556 cmd=cmd,auth_skipping = False, reuse = True)
3557 else:
3558 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3559 cmd=cmd, model=model,
3560 param_card=param_card,
3561 auth_skipping = False,
3562 output_path=output_path,
3563 reuse = False)
3564 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
3565 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3566 for particle in evaluator.full_model.get('particles'):
3567 if particle.get('width') != 'ZERO':
3568 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3569
3570 output_u = run_multiprocs_no_crossings(get_value,
3571 multiprocess_unit,
3572 evaluator,
3573 options=options)
3574
3575 clean_added_globals(ADDED_GLOBAL)
3576
3577 if processes_unit.get('perturbation_couplings')!=[]:
3578 clean_up(output_path)
3579
3580 momentum = {}
3581 for data in output_u:
3582 momentum[data['process']] = data['p']
3583
3584 multiprocess_feynm = processes_feynm
3585 model = multiprocess_feynm.get('model')
3586
3587
3588 aloha.unitary_gauge = False
3589
3590
3591 cmd.options['loop_optimized_output'] = True
3592 if processes_feynm.get('perturbation_couplings')==[]:
3593 evaluator = MatrixElementEvaluator(model, param_card,
3594 cmd= cmd, auth_skipping = False, reuse = False)
3595 else:
3596 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3597 cmd= cmd, model=model,
3598 param_card=param_card,
3599 auth_skipping = False,
3600 output_path=output_path,
3601 reuse = False)
3602
3603 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3604
3605 for particle in evaluator.full_model.get('particles'):
3606 if particle.get('width') != 'ZERO':
3607 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3608
3609 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3610 evaluator, momentum,
3611 options=options)
3612 output = [processes_unit]
3613 for data in output_f:
3614 local_dico = {}
3615 local_dico['process'] = data['process']
3616 local_dico['value_feynm'] = data['value']
3617 local_dico['value_unit'] = [d['value'] for d in output_u
3618 if d['process'] == data['process']][0]
3619 output.append(local_dico)
3620
3621 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3622
3623 clean_up(output_path)
3624
3625
3626 cmd.options['loop_optimized_output'] = loop_optimized_bu
3627
3628 return output
3629
3630
3631
3632
3633 else:
3634 raise InvalidCmd("processes is of an unsupported format")
3635
3641 """Check complex mass scheme consistency in the offshell region of s-channels
3642 detected for this process, by varying the expansion paramer consistently
3643 with the corresponding width and making sure that the difference between
3644 the complex mass-scheme and the narrow-width approximation is higher order.
3645 """
3646
3647 if not isinstance(process_line, str):
3648 raise InvalidCmd("Proces definition must be given as a stirng for this check")
3649
3650
3651 cmd.do_set('complex_mass_scheme False', log=False)
3652
3653 multiprocess_nwa = cmd.extract_process(process_line)
3654
3655
3656 has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
3657 'decays.py'))
3658
3659
3660 missing_perturbations = cmd._curr_model.get_coupling_orders()-\
3661 set(multiprocess_nwa.get('perturbation_couplings'))
3662
3663 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3664 len(missing_perturbations)>0:
3665 logger.warning("------------------------------------------------------")
3666 logger.warning("The process considered does not specify the following "+
3667 "type of loops to be included : %s"%str(list(missing_perturbations)))
3668 logger.warning("Consequently, the CMS check will be unsuccessful if the"+
3669 " process involves any resonating particle whose LO decay is "+
3670 "mediated by one of these orders.")
3671 logger.warning("You can use the syntax '[virt=all]' to automatically"+
3672 " include all loops supported by the model.")
3673 logger.warning("------------------------------------------------------")
3674
3675 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3676 len(multiprocess_nwa.get('legs'))<=4:
3677 logger.warning("------------------------------------------------------")
3678 logger.warning("Processes with four or less external states are typically not"+\
3679 " sensitive to incorrect Complex Mass Scheme implementations.")
3680 logger.warning("You can test this sensitivity by making sure that the"+
3681 " same check on the leading-order counterpart of this process *fails*"+
3682 " when using the option '--diff_lambda_power=2'.")
3683 logger.warning("If it does not, then consider adding a massless "+
3684 "gauge vector to the external states.")
3685 logger.warning("------------------------------------------------------")
3686
3687 if options['recompute_width']=='auto':
3688 if multiprocess_nwa.get('perturbation_couplings')!=[]:
3689
3690 options['recompute_width'] = 'first_time'
3691 else:
3692 options['recompute_width'] = 'never'
3693
3694
3695 if options['recompute_width'] in ['first_time', 'always'] and \
3696 not has_FRdecay and not 'cached_widths' in options:
3697 logger.info('The LO widths will need to be recomputed but the '+
3698 'model considered does not appear to have a decay module.\nThe widths'+
3699 ' will need to be computed numerically, which will slow down the test.\n'+
3700 'Consider using a param_card already specifying correct LO widths and'+
3701 " adding the option --recompute_width=never when doing this check.")
3702
3703 if options['recompute_width']=='never' and \
3704 any(order in multiprocess_nwa.get('perturbation_couplings') for order in
3705 options['expansion_orders']):
3706 logger.warning('You chose not to recompute the widths while including'+
3707 ' loop corrections. The check will be successful only if the widths'+\
3708 ' specified in the default param_card are LO-accurate (remember that'+\
3709 ' the default values of alpha_s and aEWM1 are 0.1 and 10.0,'+\
3710 ' respectively).')
3711
3712
3713
3714
3715
3716 if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
3717 modelname = cmd._curr_model.get('modelpath+restriction')
3718 with misc.MuteLogger(['madgraph'], ['INFO']):
3719 model = import_ufo.import_model(modelname, decay=True,
3720 complex_mass_scheme=False)
3721 multiprocess_nwa.set('model', model)
3722
3723 run_options = copy.deepcopy(options)
3724
3725
3726 if options['seed'] > 0:
3727 random.seed(options['seed'])
3728
3729
3730 run_options['param_card'] = param_card
3731 if isinstance(cmd, FakeInterface):
3732 raise MadGraph5Error("Check CMS cannot be run with a FakeInterface.")
3733 run_options['cmd'] = cmd
3734 run_options['MLOptions'] = MLOptions
3735 if output_path:
3736 run_options['output_path'] = output_path
3737 else:
3738 run_options['output_path'] = cmd._mgme_dir
3739
3740
3741 run_options['has_FRdecay'] = has_FRdecay
3742
3743
3744 if 'cached_widths' not in run_options:
3745 run_options['cached_widths'] = {}
3746
3747
3748 run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
3749
3750 if options['tweak']['name']:
3751 logger.info("Now running the CMS check for tweak '%s'"\
3752 %options['tweak']['name'])
3753
3754 model = multiprocess_nwa.get('model')
3755
3756 for particle in model.get('particles'):
3757 mass_param = model.get_parameter(particle.get('mass'))
3758 if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
3759 if model.get('name') not in ['sm','loop_sm']:
3760 logger.warning("The mass '%s' of particle '%s' is not an external"%\
3761 (model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
3762 " parameter as required by this check. \nMG5_aMC will try to"+\
3763 " modify the model to remedy the situation. No guarantee.")
3764 status = model.change_electroweak_mode(set(['mz','mw','alpha']))
3765 if not status:
3766 raise InvalidCmd('The EW scheme could apparently not be changed'+\
3767 ' so as to have the W-boson mass external. The check cannot'+\
3768 ' proceed.')
3769 break
3770
3771 veto_orders = [order for order in model.get('coupling_orders') if \
3772 order not in options['expansion_orders']]
3773 if len(veto_orders)>0:
3774 logger.warning('You did not define any parameter scaling rule for the'+\
3775 " coupling orders %s. They will be "%','.join(veto_orders)+\
3776 "forced to zero in the tests. Consider adding the scaling rule to"+\
3777 "avoid this. (see option '--cms' in 'help check')")
3778 for order in veto_orders:
3779 multiprocess_nwa.get('orders')[order] = 0
3780 multiprocess_nwa.set('perturbation_couplings', [order for order in
3781 multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
3782
3783 if multiprocess_nwa.get('perturbation_couplings')==[]:
3784 evaluator = MatrixElementEvaluator(model, param_card,
3785 cmd=cmd,auth_skipping = False, reuse = True)
3786 else:
3787 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3788 cmd=cmd, model=model,
3789 param_card=param_card,
3790 auth_skipping = False,
3791 output_path=output_path,
3792 reuse = False)
3793
3794 cached_information = []
3795 output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3796 multiprocess_nwa,
3797 evaluator,
3798
3799
3800
3801
3802
3803 opt = cached_information,
3804 options=run_options)
3805
3806
3807 clean_added_globals(ADDED_GLOBAL)
3808
3809
3810 cmd.do_set('complex_mass_scheme True', log=False)
3811
3812
3813 multiprocess_cms = cmd.extract_process(process_line)
3814 model = multiprocess_cms.get('model')
3815
3816 if len(veto_orders)>0:
3817 for order in veto_orders:
3818 multiprocess_cms.get('orders')[order] = 0
3819 multiprocess_cms.set('perturbation_couplings', [order for order in
3820 multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
3821
3822 if multiprocess_cms.get('perturbation_couplings')==[]:
3823 evaluator = MatrixElementEvaluator(model, param_card,
3824 cmd=cmd,auth_skipping = False, reuse = True)
3825 else:
3826 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3827 cmd=cmd, model=model,
3828 param_card=param_card,
3829 auth_skipping = False,
3830 output_path=output_path,
3831 reuse = False)
3832
3833 output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3834 multiprocess_cms,
3835 evaluator,
3836
3837 opt = dict(cached_information),
3838 options=run_options)
3839
3840 if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
3841
3842 clean_up(output_path)
3843
3844
3845
3846
3847 result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
3848
3849 result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
3850 for i, proc_res in enumerate(output_nwa):
3851 result['ordered_processes'].append(proc_res[0])
3852 result[proc_res[0]] = {
3853 'NWA':proc_res[1]['resonances_result'],
3854 'CMS':output_cms[i][1]['resonances_result'],
3855 'born_order':proc_res[1]['born_order'],
3856 'loop_order':proc_res[1]['loop_order']}
3857
3858
3859
3860 options['cached_widths'] = run_options['cached_widths']
3861
3862
3863 result['recompute_width'] = options['recompute_width']
3864 result['has_FRdecay'] = has_FRdecay
3865 result['widths_computed'] = []
3866 cached_widths = sorted(list(options['cached_widths'].items()), key=lambda el: \
3867 abs(el[0][0]))
3868 for (pdg, lambda_value), width in cached_widths:
3869 if lambda_value != 1.0:
3870 continue
3871 result['widths_computed'].append((model.get_particle(pdg).get_name(),
3872 width))
3873
3874
3875 clean_added_globals(ADDED_GLOBAL)
3876
3877 return result
3878
3883 """Check CMS for the process in argument. The options 'opt' is quite important.
3884 When opt is a list, it means that we are doing NWA and we are filling the
3885 list with the following tuple
3886 ('proc_name',({'ParticlePDG':ParticlePDG,
3887 'FinalStateMothersNumbers':set([]),
3888 'PS_point_used':[]},...))
3889 When opt is a dictionary, we are in the CMS mode and it will be reused then.
3890 """
3891
3892
3893
3894 NLO = process.get('perturbation_couplings') != []
3895
3896 def glue_momenta(production, decay):
3897 """ Merge together the kinematics for the production of particle
3898 positioned last in the 'production' array with the 1>N 'decay' kinematic'
3899 provided where the decay particle is first."""
3900
3901 from MadSpin.decay import momentum
3902
3903 full = production[:-1]
3904
3905
3906
3907
3908
3909 for p in decay[1:]:
3910 bp = momentum(*p).boost(momentum(*production[-1]))
3911 full.append([bp.E,bp.px,bp.py,bp.pz])
3912
3913 return full
3914
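# A minimal illustration of glue_momenta (hypothetical numbers): with a
# production kinematics [p_a, p_b, P_res], where P_res is the momentum assigned
# to the resonance, and a decay kinematics [P_rest, d_1, d_2] generated in the
# resonance rest frame, the returned list is [p_a, p_b, boost(d_1), boost(d_2)],
# each decay product being boosted (via MadSpin's momentum class) so that they
# recombine into P_res.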
3915 def find_resonances(diagrams):
3916 """ Find all the resonances in the matrix element in argument """
3917
3918 model = process['model']
3919 resonances_found = []
3920
3921 for ll, diag in enumerate(diagrams):
3922 for amp in diag.get('amplitudes'):
3923
3924
3925 s_channels, t_channels = amp.\
3926 get_s_and_t_channels(process.get_ninitial(), model, 0)
3927
3928
3929 replacement_dict = {}
3930 for s_channel in s_channels:
3931 new_resonance = {
3932 'ParticlePDG':s_channel.get('legs')[-1].get('id'),
3933 'FSMothersNumbers':[],
3934 'PS_point_used':[]}
3935 for leg in s_channel.get('legs')[:-1]:
3936 if leg.get('number')>0:
3937 new_resonance['FSMothersNumbers'].append(
3938 leg.get('number'))
3939 else:
3940 try:
3941 new_resonance['FSMothersNumbers'].extend(
3942 replacement_dict[leg.get('number')])
3943 except KeyError:
3944 raise Exception('The following diagram '+\
3945 'is malformed:'+diag.nice_string())
3946
3947 replacement_dict[s_channel.get('legs')[-1].get('number')] = \
3948 new_resonance['FSMothersNumbers']
3949 new_resonance['FSMothersNumbers'] = set(
3950 new_resonance['FSMothersNumbers'])
3951 if new_resonance not in resonances_found:
3952 resonances_found.append(new_resonance)
3953
3954
3955 kept_resonances = []
3956 for resonance in resonances_found:
3957
3958 if resonance['ParticlePDG'] == 0:
3959 continue
3960
3961
3962 if abs(resonance['ParticlePDG']) in \
3963 [abs(l.get('id')) for l in process.get('legs')]:
3964 continue
3965
3966 mass_string = evaluator.full_model.get_particle(
3967 resonance['ParticlePDG']).get('mass')
3968 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
3969
3970 if mass==0.0:
3971 continue
3972
3973 width_string = evaluator.full_model.get_particle(
3974 resonance['ParticlePDG']).get('width')
3975 width = evaluator.full_model.get('parameter_dict')[width_string].real
3976
3977
3978 if width==0.0:
3979 continue
3980
3981 final_state_energy = sum(
3982 evaluator.full_model.get('parameter_dict')[
3983 evaluator.full_model.get_particle(l.get('id')).get('mass')].real
3984 for l in process.get('legs') if l.get('number') in
3985 resonance['FSMothersNumbers'])
3986
3987
3988 special_mass = (1.0 + options['offshellness'])*mass
3989
3990
3991 if special_mass<final_state_energy:
3992 raise InvalidCmd('The offshellness specified (%s) is such'\
3993 %options['offshellness']+' that the resulting kinematics are '+\
3994 'impossible for resonance %s %s.'%(evaluator.full_model.
3995 get_particle(resonance['ParticlePDG']).get_name(),
3996 str(list(resonance['FSMothersNumbers']))))
3997 continue
3998
3999
4000 kept_resonances.append(resonance)
4001
4002 for resonance in kept_resonances:
4003
4004 set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
4005
4006 return tuple(kept_resonances)
4007
4008 def set_PSpoint(resonance, force_other_res_offshell=[],
4009 allow_energy_increase=1.5, isolation_cuts=True):
4010 """ Starting from the specified resonance, construct a phase space point
4011 for it and possibly also enforce other resonances to be onshell. Possibly
4012 allow to progressively increase enregy by steps of the integer specified
4013 (negative float to forbid it) and possible enforce default isolation cuts
4014 as well."""
4015
4016 def invmass(momenta):
4017 """ Computes the invariant mass of a list of momenta."""
4018 ptot = [sum(p[i] for p in momenta) for i in range(4)]
4019 return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
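# For instance, two back-to-back massless momenta [E,0,0,E] and [E,0,0,-E]
# give ptot = [2E,0,0,0], so invmass returns 2E, the centre-of-mass energy.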
4020
4021 model = evaluator.full_model
4022 def getmass(pdg):
4023 """ Returns the mass of a particle given the current model and its
4024 pdg given in argument."""
4025 return model.get('parameter_dict')[
4026 model.get_particle(pdg).get('mass')].real
4027
4028 N_trials = 0
4029 max_trial = 1e4
4030 nstep_for_energy_increase = 1e3
4031 PS_point_found = None
4032 if options['offshellness'] > 0.0:
4033 offshellness = options['offshellness']
4034 else:
4035
4036
4037
4038
4039 offshellness = (0.25*(options['offshellness']+1.0))-1.0
4040
4041
4042
4043
4044 if options['offshellness'] < 0.0:
4045 energy_increase = math.sqrt(allow_energy_increase)
4046 else:
4047 energy_increase = allow_energy_increase
4048
4049 other_res_offshell = [res for res in force_other_res_offshell if
4050 res!=resonance]
4051
4052
4053
4054 all_other_res_masses = [getmass(res['ParticlePDG'])
4055 for res in other_res_offshell]
4056 resonance_mass = getmass(resonance['ParticlePDG'])
4057
4058 str_res = '%s %s'%(model.get_particle(
4059 resonance['ParticlePDG']).get_name(),
4060 str(list(resonance['FSMothersNumbers'])))
4061 leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
4062
4063
4064
4065 daughter_masses = sum(getmass(leg_number_to_leg[\
4066 number].get('id')) for number in resonance['FSMothersNumbers'])
4067 min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
4068
4069
4070
4071 min_energy = max(sum(getmass(l.get('id')) for l in \
4072 process.get('legs') if l.get('state')==True),
4073 sum(getmass(l.get('id')) for l in \
4074 process.get('legs') if l.get('state')==False))
4075
4076
4077
4078 daughter_offshellnesses = [(1.0+options['offshellness'])*mass
4079 for i, mass in enumerate(all_other_res_masses) if
4080 other_res_offshell[i]['FSMothersNumbers'].issubset(
4081 resonance['FSMothersNumbers'])]
4082
4083 if options['offshellness'] >= 0.0:
4084
4085 if len(daughter_offshellnesses)>0:
4086 max_mass = max(daughter_offshellnesses)
4087
4088 offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
4089 options['offshellness'])
4090
4091 max_mass = max([(1.0+options['offshellness'])*mass for mass in \
4092 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4093
4094
4095
4096 target = max(min_energy*1.2,max_mass*2.0)
4097 if target > options['energy']:
4098 logger.warning("The user-defined energy %f seems "%options['energy']+
4099 " insufficient to reach the minimum propagator invariant mass "+
4100 "%f required for the chosen offshellness %f."%(max_mass,
4101 options['offshellness']) + " Energy reset to %f."%target)
4102 options['energy'] = target
4103
4104 else:
4105 if len(daughter_offshellnesses) > 0:
4106 min_mass = min(daughter_offshellnesses)
4107
4108 offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
4109 options['offshellness'])
4110
4111
4112
4113 if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
4114 msg = 'The resonance %s cannot accommodate'%str_res+\
4115 ' an offshellness of %f because the daughter'%options['offshellness']+\
4116 ' masses sum to %f.'%daughter_masses
4117 if options['offshellness']<min_offshellnes:
4118 msg += ' Try again with an offshellness of at least'+\
4119 ' %f (i.e. smaller in absolute value).'%min_offshellnes
4120 else:
4121 msg += ' Try again with a smaller offshellness (in absolute value).'
4122 raise InvalidCmd(msg)
4123
4124 min_mass = min([(1.0+options['offshellness'])*mass for mass in \
4125 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4126
4127
4128 if 2.0*min_mass < options['energy']:
4129 new_energy = max(min_energy*1.2, 2.0*min_mass)
4130 logger.warning("The user-defined energy %f seems "%options['energy']+
4131 " too large to not overshoot the maximum propagator invariant mass "+
4132 "%f required for the chosen offshellness %f."%(min_mass,
4133 options['offshellness']) + " Energy reset to %f."%new_energy)
4134 options['energy'] = new_energy
4135
4136 if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
4137 logger.debug("The target energy is not compatible with the mass"+
4138 " of the external states for this process (%f). It is "%min_mass+
4139 "unlikely that a valid kinematic configuration will be found.")
4140
4141 if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
4142 options['offshellness']>0.0 and offshellness>options['offshellness']:
4143 logger.debug("Offshellness increased to %f"%offshellness+
4144 " so as to try to find a kinematical configuration with"+
4145 " offshellness at least equal to %f"%options['offshellness']+
4146 " for all resonances.")
4147
4148 start_energy = options['energy']
4149 while N_trials<max_trial:
4150 N_trials += 1
4151 if N_trials%nstep_for_energy_increase==0:
4152 if allow_energy_increase > 0.0:
4153 old_offshellness = offshellness
4154 if offshellness > 0.0:
4155 options['energy'] *= energy_increase
4156 offshellness *= energy_increase
4157 else:
4158 options['energy'] = max(options['energy']/energy_increase,
4159 min_energy*1.2)
4160 offshellness = max(min_offshellnes,
4161 ((offshellness+1.0)/energy_increase)-1.0)
4162 if old_offshellness!=offshellness:
4163 logger.debug('Trying to find a valid kinematic'+\
4164 " configuration for resonance '%s'"%str_res+\
4165 ' with increased offshellness %f'%offshellness)
4166
4167 candidate = get_PSpoint_for_resonance(resonance, offshellness)
4168 pass_offshell_test = True
4169 for i, res in enumerate(other_res_offshell):
4170
4171 if offshellness > 0.0:
4172 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
4173 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4174 pass_offshell_test = False
4175 break
4176 else:
4177 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
4178 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4179 pass_offshell_test = False
4180 break
4181 if not pass_offshell_test:
4182 continue
4183
4184 if isolation_cuts:
4185
4186 if not evaluator.pass_isolation_cuts(candidate,
4187 ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
4188 continue
4189 PS_point_found = candidate
4190 break
4191
4192
4193 options['energy'] = start_energy
4194
4195 if PS_point_found is None:
4196 err_msg = 'Could not find a valid PS point in %d'%max_trial+\
4197 ' trials. Try increasing the energy, modifying the offshellness '+\
4198 'or relaxing some constraints.'
4199 if options['offshellness']<0.0:
4200 err_msg += ' Try with a positive offshellness instead (or a '+\
4201 'negative one of smaller absolute value).'
4202 raise InvalidCmd(err_msg)
4203 else:
4204
4205
4206 resonance['offshellnesses'] = []
4207 all_other_res_masses = [resonance_mass] + all_other_res_masses
4208 other_res_offshell = [resonance] + other_res_offshell
4209 for i, res in enumerate(other_res_offshell):
4210 if i==0:
4211 res_str = 'self'
4212 else:
4213 res_str = '%s %s'%(model.get_particle(
4214 res['ParticlePDG']).get_name(),
4215 str(list(res['FSMothersNumbers'])))
4216 resonance['offshellnesses'].append((res_str,(
4217 (invmass([PS_point_found[j-1] for j in
4218 res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
4219
4220 resonance['PS_point_used'] = PS_point_found
4221
4222 def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
4223 """ Assigns a kinematic configuration to the resonance dictionary
4224 given in argument."""
4225
4226
4227 mass_string = evaluator.full_model.get_particle(
4228 resonance['ParticlePDG']).get('mass')
4229 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
4230
4231
4232 special_mass = (1.0 + offshellness)*mass
4233
4234
4235 prod_proc = base_objects.Process({'legs':base_objects.LegList(
4236 copy.copy(leg) for leg in process.get('legs') if
4237 leg.get('number') not in resonance['FSMothersNumbers'])})
4238
4239
4240
4241 prod_proc.get('legs').append(base_objects.Leg({
4242 'number':max(l.get('number') for l in process.get('legs'))+1,
4243 'state':True,
4244 'id':0}))
4245
4246 decay_proc = base_objects.Process({'legs':base_objects.LegList(
4247 copy.copy(leg) for leg in process.get('legs') if leg.get('number')
4248 in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
4249
4250
4251
4252
4253 decay_proc.get('legs').insert(0,base_objects.Leg({
4254 'number':-1,
4255 'state':False,
4256 'id':0}))
4257 prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
4258 special_mass=special_mass)[0]
4259 decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
4260 special_mass=special_mass)[0]
4261 momenta = glue_momenta(prod_kinematic,decay_kinematic)
4262
4263
4264
4265 ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
4266 for i in range(len(prod_proc.get('legs'))-1)]
4267
4268 ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
4269 momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
4270
4271
4272 return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
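# Sketch of the reassembly above (hypothetical labelling): for a 2 > 3 process
# whose resonance decays into final-state legs {4,5}, prod_proc carries legs
# (1,2,3,R) and decay_proc carries legs (R,4,5); the production kinematics
# supplies the momenta of legs 1,2,3 and the glued decay kinematics supplies
# those of legs 4,5, which are then sorted back into the external leg order
# 1..5 before being returned.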
4273
4274
4275
4276 @misc.mute_logger()
4277 def get_width(PDG, lambdaCMS, param_card):
4278 """ Returns the width to use for particle with absolute PDG 'PDG' and
4279 for the the lambdaCMS value 'lambdaCMS' using the cache if possible."""
4280
4281
4282
4283 if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
4284 return 0.0
4285
4286 particle = evaluator.full_model.get_particle(PDG)
4287
4288
4289
4290 if particle.get('ghost') or particle.get('goldstone'):
4291 return 0.0
4292
4293
4294 if particle.get('width')=='ZERO':
4295 return 0.0
4296
4297 if (PDG,lambdaCMS) in options['cached_widths']:
4298 return options['cached_widths'][(PDG,lambdaCMS)]
4299
4300 if options['recompute_width'] == 'never':
4301 width = evaluator.full_model.\
4302 get('parameter_dict')[particle.get('width')].real
4303 else:
4304
4305 if aloha.complex_mass:
4306 raise MadGraph5Error("The width for particle with PDG %d and"%PDG+\
4307 " lambdaCMS=%f should have already been "%lambdaCMS+\
4308 "computed during the NWA run.")
4309
4310
4311 if options['recompute_width'] in ['always','first_time']:
4312 particle_name = particle.get_name()
4313 with misc.TMP_directory(dir=options['output_path']) as path:
4314 param_card.write(pjoin(path,'tmp.dat'))
4315
4316
4317
4318 command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
4319 ' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
4320 ' --precision_channel=0.001'
4321
4322 param_card.write(pjoin(options['output_path'],'tmp.dat'))
4323
4324
4325
4326 orig_model = options['cmd']._curr_model
4327 orig_helas_model = options['cmd']._curr_helas_model
4328 options['cmd'].do_compute_widths(command, evaluator.full_model)
4329
4330 options['cmd']._curr_model = orig_model
4331 options['cmd']._curr_helas_model = orig_helas_model
4332
4333
4334 evaluator.full_model.set_parameters_and_couplings(
4335 param_card=param_card)
4336 try:
4337 tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
4338 except:
4339 raise MadGraph5Error('Error occured during width '+\
4340 'computation with command:\n compute_widths %s'%command)
4341 width = tmp_param_card['decay'].get(PDG).value
4342
4343
4344
4345
4346
4347
4348
4349 if options['recompute_width'] in ['never','first_time']:
4350
4351 for lam in options['lambdaCMS']:
4352 options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
4353 else:
4354 options['cached_widths'][(PDG,lambdaCMS)] = width
4355
4356 return options['cached_widths'][(PDG,lambdaCMS)]
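# Note on the cache filling above: in the 'never' and 'first_time' modes the
# width is assumed to scale linearly with lambdaCMS, so one evaluation is
# rescaled as width*(lam/lambdaCMS) for all other lambda values (e.g. a width
# of 2.0 GeV obtained at lambdaCMS=1.0 would be cached as 0.2 GeV for
# lam=0.1), whereas the 'always' mode recomputes it for every lambdaCMS.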
4357
4358 def get_order(diagrams, diagsName):
4359 """Compute the common summed of coupling orders used for this cms check
4360 in the diagrams specified. When inconsistency occurs, use orderName
4361 in the warning message if throwm."""
4362
4363 orders = set([])
4364 for diag in diagrams:
4365 diag_orders = diag.calculate_orders()
4366 orders.add(sum((diag_orders[order] if order in diag_orders else 0)
4367 for order in options['expansion_orders']))
4368 if len(orders)>1:
4369 logger.warning(warning_msg%('%s '%diagsName,str(orders)))
4370 return min(list(orders))
4371 else:
4372 return list(orders)[0]
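# Illustrative example (hypothetical orders): with expansion_orders = ['QED']
# and Born diagrams all carrying {'QED':2,'QCD':0}, the set built above is {2}
# and get_order returns 2; if one diagram instead carried {'QED':4}, the set
# would be {2,4}, the warning would be emitted and the minimum, 2, returned.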
4373
4374 MLoptions = copy.copy(options['MLOptions'])
4375
4376 MLoptions['DoubleCheckHelicityFilter'] = False
4377
4378
4379 for tweak in options['tweak']['custom']:
4380 if tweak.startswith('seed'):
4381 try:
4382 new_seed = int(tweak[4:])
4383 except ValueError:
4384 raise MadGraph5Error("Seed '%s' is not of the right format 'seed<int>'."%tweak)
4385 random.seed(new_seed)
4386
4387 mode = 'CMS' if aloha.complex_mass else 'NWA'
4388 for i, leg in enumerate(process.get('legs')):
4389 leg.set('number', i+1)
4390
4391 logger.info("Running CMS check for process %s (now doing %s scheme)" % \
4392 ( process.nice_string().replace('Process:', 'process'), mode))
4393
4394 proc_dir = None
4395 resonances = None
4396 warning_msg = "All %sdiagrams do not share the same sum of orders "+\
4397 "%s; found %%s."%(','.join(options['expansion_orders']))+\
4398 " This potentially problematic for the CMS check."
4399 if NLO:
4400
4401
4402
4403 if options['name']=='auto':
4404 proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
4405 temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
4406 ('_' if process.get('perturbation_couplings') else '')+
4407 '_'.join(process.get('perturbation_couplings')),mode)
4408 else:
4409 proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
4410 temp_dir_prefix,options['name'], mode)
4411
4412 timing, matrix_element = generate_loop_matrix_element(process,
4413 options['reuse'], output_path=options['output_path'],
4414 cmd = options['cmd'], proc_name=proc_name,
4415 loop_filter=options['loop_filter'])
4416 if matrix_element is None:
4417
4418 return None
4419
4420 reusing = isinstance(matrix_element, base_objects.Process)
4421 proc_dir = pjoin(options['output_path'],proc_name)
4422
4423
4424 infos = evaluator.setup_process(matrix_element, proc_dir,
4425 reusing = reusing, param_card = options['param_card'],
4426 MLOptions=MLoptions)
4427
4428 evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
4429 mp = None, loop_filter = True,MLOptions=MLoptions)
4430
4431
4432 tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
4433 if os.path.isfile(tmp_card_backup):
4434
4435 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4436 " Now reverting 'param_card.dat' to its original value.")
4437 shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
4438 else:
4439
4440 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
4441
4442 tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
4443 'model_functions.f__TemporaryBackup__')
4444 if os.path.isfile(tmp_modelfunc_backup):
4445
4446 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4447 " Now reverting 'model_functions.f' to its original value.")
4448 shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
4449 'model_functions.f'))
4450 evaluator.apply_log_tweak(proc_dir, 'recompile')
4451 else:
4452
4453 shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
4454 tmp_modelfunc_backup)
4455
4456
4457 MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
4458 read_ps = True, npoints = 1, hel_config = options['helicity'],
4459 split_orders=options['split_orders'])
4460
4461
4462
4463 for dir in misc.glob('P*_*', pjoin(proc_dir,'SubProcesses')):
4464 if not re.search(r'.*P\d+_\w*$', dir) or not os.path.isdir(dir):
4465 continue
4466 try:
4467 os.remove(pjoin(dir,'check'))
4468 os.remove(pjoin(dir,'check_sa.o'))
4469 except OSError:
4470 pass
4471
4472 with open(os.devnull, 'w') as devnull:
4473 retcode = subprocess.call(['make','check'],
4474 cwd=dir, stdout=devnull, stderr=devnull)
4475 if retcode != 0:
4476 raise MadGraph5Error("Compilation error with "+\
4477 "'make check' in %s"%dir)
4478
4479
4480 pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
4481 if reusing:
4482
4483
4484 if not os.path.isfile(pkl_path):
4485 raise InvalidCmd('The folder %s could'%proc_dir+\
4486 " not be reused because the resonance specification file "+
4487 "'resonance_specs.pkl' is missing.")
4488 else:
4489 proc_name, born_order, loop_order, resonances = \
4490 save_load_object.load_from_file(pkl_path)
4491
4492
4493 for res in resonances:
4494 set_PSpoint(res, force_other_res_offshell=resonances)
4495
4496
4497 if isinstance(opt, list):
4498 opt.append((proc_name, resonances))
4499 else:
4500 resonances = opt
4501 else:
4502 helas_born_diagrams = matrix_element.get_born_diagrams()
4503 if len(helas_born_diagrams)==0:
4504 logger.warning('The CMS check for loop-induced processes is '+\
4505 'not yet available (nor is it very interesting).')
4506 return None
4507 born_order = get_order(helas_born_diagrams,'Born')
4508 loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
4509
4510
4511 if isinstance(opt, list):
4512 opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
4513 resonances = opt[-1][1]
4514 else:
4515 resonances = opt
4516
4517
4518 save_load_object.save_to_file(pkl_path, (process.base_string(),
4519 born_order, loop_order,resonances))
4520
4521 else:
4522
4523 try:
4524 amplitude = diagram_generation.Amplitude(process)
4525 except InvalidCmd:
4526 logging.info("No diagrams for %s" % \
4527 process.nice_string().replace('Process', 'process'))
4528 return None
4529 if not amplitude.get('diagrams'):
4530
4531 logging.info("No diagrams for %s" % \
4532 process.nice_string().replace('Process', 'process'))
4533 return None
4534
4535 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4536 gen_color=True)
4537 diagrams = matrix_element.get('diagrams')
4538 born_order = get_order(diagrams,'Born')
4539
4540 loop_order = -1
4541
4542 if isinstance(opt, list):
4543 opt.append((process.base_string(),find_resonances(diagrams)))
4544 resonances = opt[-1][1]
4545 else:
4546 resonances= opt
4547
4548 if len(resonances)==0:
4549 logger.info("No resonance found for process %s."\
4550 %process.base_string())
4551 return None
4552
4553
4554 if not options['cached_param_card'][mode][0]:
4555 if NLO:
4556 param_card = check_param_card.ParamCard(
4557 pjoin(proc_dir,'Cards','param_card.dat'))
4558 else:
4559 param_card = check_param_card.ParamCard(
4560 StringIO.StringIO(evaluator.full_model.write_param_card()))
4561 options['cached_param_card'][mode][0] = param_card
4562 name2block, _ = param_card.analyze_param_card()
4563 options['cached_param_card'][mode][1] = name2block
4564
4565 else:
4566 param_card = options['cached_param_card'][mode][0]
4567 name2block = options['cached_param_card'][mode][1]
4568
4569
4570 if loop_order != -1 and (loop_order+born_order)%2 != 0:
4571 raise MadGraph5Error('The summed squared matrix element '+\
4572 " order '%d' is not even."%(loop_order+born_order))
4573 result = {'born_order':born_order,
4574 'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)/2),
4575 'resonances_result':[]}
4576
4577
4578 if NLO:
4579 try:
4580 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
4581 pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
4582 except:
4583 pass
4584
4585
4586 had_log_tweaks=False
4587 if NLO:
4588 for tweak in options['tweak']['custom']:
4589 if tweak.startswith('seed'):
4590 continue
4591 try:
4592 logstart, logend = tweak.split('->')
4593 except:
4594 raise MadGraph5Error("Tweak '%s' not recognized."%tweak)
4595 if logstart in ['logp','logm', 'log'] and \
4596 logend in ['logp','logm', 'log']:
4597 if NLO:
4598 evaluator.apply_log_tweak(proc_dir, [logstart, logend])
4599 had_log_tweaks = True
4600 else:
4601 raise MadGraph5Error("Tweak '%s' not recognized."%tweak)
4602 if had_log_tweaks:
4603 evaluator.apply_log_tweak(proc_dir, 'recompile')
4604
4605
4606 if options['resonances']=='all':
4607 resonances_to_run = resonances
4608 elif isinstance(options['resonances'],int):
4609 resonances_to_run = resonances[:options['resonances']]
4610 elif isinstance(options['resonances'],list):
4611 resonances_to_run = []
4612 for res in resonances:
4613 for res_selection in options['resonances']:
4614 if abs(res['ParticlePDG'])==res_selection[0] and \
4615 res['FSMothersNumbers']==set(res_selection[1]):
4616 resonances_to_run.append(res)
4617 break
4618 else:
4619 raise InvalidCmd("Resonance selection '%s' not reckognized"%\
4620 str(options['resonances']))
4621
4622
4623
4624 if NLO and options['show_plot']:
4625 widgets = ['ME evaluations:', pbar.Percentage(), ' ',
4626 pbar.Bar(),' ', pbar.ETA(), ' ']
4627 progress_bar = pbar.ProgressBar(widgets=widgets,
4628 maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
4629 progress_bar.update(0)
4630
4631 sys.stdout.flush()
4632 else:
4633 progress_bar = None
4634
4635 for resNumber, res in enumerate(resonances_to_run):
4636
4637
4638 result['resonances_result'].append({'resonance':res,'born':[]})
4639 if NLO:
4640 result['resonances_result'][-1]['finite'] = []
4641
4642 for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
4643
4644
4645 new_param_card = check_param_card.ParamCard(param_card)
4646
4647 for param, replacement in options['expansion_parameters'].items():
4648
4649
4650 orig_param = param.replace('__tmpprefix__','')
4651 if orig_param not in name2block:
4652
4653
4654
4655 continue
4656 for block, lhaid in name2block[orig_param]:
4657 orig_value = float(param_card[block].get(lhaid).value)
4658 new_value = eval(replacement,
4659 {param:orig_value,'lambdacms':lambdaCMS})
4660 new_param_card[block].get(lhaid).value=new_value
4661
4662
4663
4664
4665
4666
4667
4668
4669 evaluator.full_model.set_parameters_and_couplings(
4670 param_card=new_param_card)
4671
4672 for decay in new_param_card['decay'].keys():
4673 if mode=='CMS':
4674 new_width = get_width(abs(decay[0]), lambdaCMS,
4675 new_param_card)
4676 else:
4677 new_width = 0.0
4678 new_param_card['decay'].get(decay).value= new_width
4679
4680
4681 evaluator.full_model.set_parameters_and_couplings(
4682 param_card=new_param_card)
4683 if NLO:
4684 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4685
4686
4687 if lambdaCMS==1.0 and mode=='CMS' and \
4688 options['recompute_width'] in ['always','first_time']:
4689 new_param_card.write(pjoin(proc_dir,
4690 'Cards','param_card.dat_recomputed_widths'))
4691
4692
4693
4694 if mode=='NWA' and (options['recompute_width']=='always' or (
4695 options['recompute_width']=='first_time' and lambdaCMS==1.0)):
4696
4697 tmp_param_card = check_param_card.ParamCard(new_param_card)
4698
4699
4700 for decay in new_param_card['decay'].keys():
4701 particle_name = evaluator.full_model.get_particle(\
4702 abs(decay[0])).get_name()
4703 new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
4704 tmp_param_card['decay'].get(decay).value = new_width
4705 if not options['has_FRdecay'] and new_width != 0.0 and \
4706 (abs(decay[0]),lambdaCMS) not in options['cached_widths']:
4707 logger.info('Numerically computed width of particle'+\
4708 ' %s for lambda=%.4g: %-9.6g GeV'%
4709 (particle_name,lambdaCMS,new_width))
4710
4711
4712
4713 if lambdaCMS==1.0 and NLO:
4714 tmp_param_card.write(pjoin(proc_dir,
4715 'Cards','param_card.dat_recomputed_widths'))
4716
4717
4718 for param, replacement in options['tweak']['params'].items():
4719
4720
4721 orig_param = param.replace('__tmpprefix__','')
4722
4723 if orig_param.lower() == 'allwidths':
4724
4725 for decay in new_param_card['decay'].keys():
4726 orig_value = float(new_param_card['decay'].get(decay).value)
4727 new_value = eval(replacement,
4728 {param:orig_value,'lambdacms':lambdaCMS})
4729 new_param_card['decay'].get(decay).value = new_value
4730 continue
4731 if orig_param not in name2block:
4732
4733
4734 continue
4735 for block, lhaid in name2block[orig_param]:
4736 orig_value = float(new_param_card[block].get(lhaid).value)
4737 new_value = eval(replacement,
4738 {param:orig_value,'lambdacms':lambdaCMS})
4739 new_param_card[block].get(lhaid).value=new_value
4740
4741 if options['tweak']['params']:
4742
4743 evaluator.full_model.set_parameters_and_couplings(
4744 param_card=new_param_card)
4745 if NLO:
4746 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4747
4748
4749 if NLO:
4750 ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
4751 proc_dir, PSpoint=res['PS_point_used'], verbose=False,
4752 format='dict', skip_compilation=True)
4753
4754
4755
4756
4757 result['resonances_result'][-1]['born'].append(ME_res['born'])
4758 result['resonances_result'][-1]['finite'].append(
4759 ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
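# The 'finite' entry returned by MadLoop appears to be normalised to the Born
# times alphaS/(2*pi), so the product stored above reconstructs the absolute
# finite part of the virtual contribution alongside the plain Born value.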
4760 else:
4761 ME_res = evaluator.evaluate_matrix_element(matrix_element,
4762 p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
4763 result['resonances_result'][-1]['born'].append(ME_res)
4764 if not progress_bar is None:
4765 progress_bar.update(resNumber*len(options['lambdaCMS'])+\
4766 (lambdaNumber+1))
4767
4768 sys.stdout.flush()
4769
4770
4771 log_reversed = False
4772 for tweak in options['tweak']['custom']:
4773 if tweak.startswith('log') and had_log_tweaks:
4774 if log_reversed:
4775 continue
4776 if NLO:
4777 evaluator.apply_log_tweak(proc_dir, 'default')
4778 evaluator.apply_log_tweak(proc_dir, 'recompile')
4779 log_reversed = True
4780
4781
4782 evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
4783 if NLO:
4784 try:
4785 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
4786 pjoin(proc_dir,'Cards','param_card.dat'))
4787 except:
4788 param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4789
4790
4791
4792 try:
4793 os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
4794 os.remove(pjoin(proc_dir,'Source','MODEL',
4795 'model_functions.f__TemporaryBackup__'))
4796 except:
4797 pass
4798
4799 return (process.nice_string().replace('Process:', '').strip(),result)
4800
4801 def get_value(process, evaluator, p=None, options=None):
4802 """Return the value/momentum for a phase space point"""
4803
4804 for i, leg in enumerate(process.get('legs')):
4805 leg.set('number', i+1)
4806
4807 logger.info("Checking %s in %s gauge" % \
4808 ( process.nice_string().replace('Process:', 'process'),
4809 'unitary' if aloha.unitary_gauge else 'feynman'))
4810
4811 legs = process.get('legs')
4812
4813
4814 try:
4815 if process.get('perturbation_couplings')==[]:
4816 amplitude = diagram_generation.Amplitude(process)
4817 else:
4818 amplitude = loop_diagram_generation.LoopAmplitude(process)
4819 except InvalidCmd:
4820 logging.info("No diagrams for %s" % \
4821 process.nice_string().replace('Process', 'process'))
4822 return None
4823
4824 if not amplitude.get('diagrams'):
4825
4826 logging.info("No diagrams for %s" % \
4827 process.nice_string().replace('Process', 'process'))
4828 return None
4829
4830 if not p:
4831
4832 p, w_rambo = evaluator.get_momenta(process, options)
4833
4834
4835 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
4836 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4837 gen_color = True)
4838 else:
4839 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
4840 gen_color = True, optimized_output = evaluator.loop_optimized_output)
4841
4842 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
4843 output='jamp',options=options)
4844
4845 if mvalue and mvalue['m2']:
4846 return {'process':process.base_string(),'value':mvalue,'p':p}
4847
4849 """Present the results of a comparison in a nice list format for loop
4850 processes. It detail the results from each lorentz transformation performed.
4851 """
4852
4853 process = comparison_results[0]['process']
4854 results = comparison_results[0]['results']
4855
4856
4857 threshold_rotations = 1e-6
4858
4859
4860
4861 threshold_boosts = 1e-3
4862 res_str = "%s" % process.base_string()
4863
4864 transfo_col_size = 17
4865 col_size = 18
4866 transfo_name_header = 'Transformation name'
4867
4868 if len(transfo_name_header) + 1 > transfo_col_size:
4869 transfo_col_size = len(transfo_name_header) + 1
4870
4871 for transfo_name, value in results:
4872 if len(transfo_name) + 1 > transfo_col_size:
4873 transfo_col_size = len(transfo_name) + 1
4874
4875 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
4876 fixed_string_length("Value", col_size) + \
4877 fixed_string_length("Relative diff.", col_size) + "Result"
4878
4879 ref_value = results[0]
4880 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
4881 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
4882
4883
4884 all_pass = True
4885 for res in results[1:]:
4886 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
4887 threshold_rotations
4888 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
4889 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
4890 this_pass = rel_diff <= threshold
4891 if not this_pass:
4892 all_pass = False
4893 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
4894 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
4895 fixed_string_length("%1.10e" % rel_diff, col_size) + \
4896 ("Passed" if this_pass else "Failed")
4897 if all_pass:
4898 res_str += '\n' + 'Summary: passed'
4899 else:
4900 res_str += '\n' + 'Summary: failed'
4901
4902 return res_str
4903
4905 """Present the results of a comparison in a nice list format
4906 if output='fail' return the number of failed process -- for test--
4907 """
4908
4909
4910 if comparison_results[0]['process']['perturbation_couplings']!=[]:
4911 return output_lorentz_inv_loop(comparison_results, output)
4912
4913 proc_col_size = 17
4914
4915 threshold=1e-10
4916 process_header = "Process"
4917
4918 if len(process_header) + 1 > proc_col_size:
4919 proc_col_size = len(process_header) + 1
4920
4921 for proc, values in comparison_results:
4922 if len(proc) + 1 > proc_col_size:
4923 proc_col_size = len(proc) + 1
4924
4925 col_size = 18
4926
4927 pass_proc = 0
4928 fail_proc = 0
4929 no_check_proc = 0
4930
4931 failed_proc_list = []
4932 no_check_proc_list = []
4933
4934 res_str = fixed_string_length(process_header, proc_col_size) + \
4935 fixed_string_length("Min element", col_size) + \
4936 fixed_string_length("Max element", col_size) + \
4937 fixed_string_length("Relative diff.", col_size) + \
4938 "Result"
4939
4940 for one_comp in comparison_results:
4941 proc = one_comp['process'].base_string()
4942 data = one_comp['results']
4943
4944 if data == 'pass':
4945 no_check_proc += 1
4946 no_check_proc_list.append(proc)
4947 continue
4948
4949 values = [data[i]['m2'] for i in range(len(data))]
4950
4951 min_val = min(values)
4952 max_val = max(values)
4953 diff = (max_val - min_val) / abs(max_val)
4954
4955 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4956 fixed_string_length("%1.10e" % min_val, col_size) + \
4957 fixed_string_length("%1.10e" % max_val, col_size) + \
4958 fixed_string_length("%1.10e" % diff, col_size)
4959
4960 if diff < threshold:
4961 pass_proc += 1
4962 proc_succeed = True
4963 res_str += "Passed"
4964 else:
4965 fail_proc += 1
4966 proc_succeed = False
4967 failed_proc_list.append(proc)
4968 res_str += "Failed"
4969
4970
4971
4972
4973
4974 if len(data[0]['jamp'])!=0:
4975 for k in range(len(data[0]['jamp'][0])):
4976 sum = [0] * len(data)
4977
4978 for j in range(len(data[0]['jamp'])):
4979
4980 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
4981 sum = [sum[i] + values[i] for i in range(len(values))]
4982
4983
4984 min_val = min(sum)
4985 max_val = max(sum)
4986 if not max_val:
4987 continue
4988 diff = (max_val - min_val) / max_val
4989
4990 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
4991 fixed_string_length("%1.10e" % min_val, col_size) + \
4992 fixed_string_length("%1.10e" % max_val, col_size) + \
4993 fixed_string_length("%1.10e" % diff, col_size)
4994
4995 if diff > 1e-10:
4996 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4997 fail_proc += 1
4998 pass_proc -= 1
4999 failed_proc_list.append(proc)
5000 res_str += tmp_str + "Failed"
5001 elif not proc_succeed:
5002 res_str += tmp_str + "Passed"
5003
5004
5005
5006 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5007 (pass_proc, pass_proc + fail_proc,
5008 fail_proc, pass_proc + fail_proc)
5009
5010 if fail_proc != 0:
5011 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5012 if no_check_proc:
5013 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5014
5015 if output == 'text':
5016 return res_str
5017 else:
5018 return fail_proc
5019
5021 """Present the results of a comparison in a nice list format
5022 if output='fail' return the number of failed process -- for test--
5023 """
5024
5025 proc_col_size = 17
5026
5027
5028
5029 pert_coupl = comparison_results[0]['perturbation_couplings']
5030 comparison_results = comparison_results[1:]
5031
5032 if pert_coupl:
5033 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
5034 else:
5035 process_header = "Process"
5036
5037 if len(process_header) + 1 > proc_col_size:
5038 proc_col_size = len(process_header) + 1
5039
5040 for data in comparison_results:
5041 proc = data['process']
5042 if len(proc) + 1 > proc_col_size:
5043 proc_col_size = len(proc) + 1
5044
5045 pass_proc = 0
5046 fail_proc = 0
5047 no_check_proc = 0
5048
5049 failed_proc_list = []
5050 no_check_proc_list = []
5051
5052 col_size = 18
5053
5054 res_str = fixed_string_length(process_header, proc_col_size) + \
5055 fixed_string_length("Unitary", col_size) + \
5056 fixed_string_length("Feynman", col_size) + \
5057 fixed_string_length("Relative diff.", col_size) + \
5058 "Result"
5059
5060 for one_comp in comparison_results:
5061 proc = one_comp['process']
5062 data = [one_comp['value_unit'], one_comp['value_feynm']]
5063
5064
5065 if data[0] == 'pass':
5066 no_check_proc += 1
5067 no_check_proc_list.append(proc)
5068 continue
5069
5070 values = [data[i]['m2'] for i in range(len(data))]
5071
5072 min_val = min(values)
5073 max_val = max(values)
5074
5075
5076 diff = (max_val - min_val) / abs(max_val)
5077
5078 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
5079 fixed_string_length("%1.10e" % values[0], col_size) + \
5080 fixed_string_length("%1.10e" % values[1], col_size) + \
5081 fixed_string_length("%1.10e" % diff, col_size)
5082
5083 if diff < 1e-8:
5084 pass_proc += 1
5085 proc_succeed = True
5086 res_str += "Passed"
5087 else:
5088 fail_proc += 1
5089 proc_succeed = False
5090 failed_proc_list.append(proc)
5091 res_str += "Failed"
5092
5093
5094
5095
5096
5097 if len(data[0]['jamp'])>0:
5098 for k in range(len(data[0]['jamp'][0])):
5099 sum = [0, 0]
5100
5101 for j in range(len(data[0]['jamp'])):
5102
5103 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
5104 sum = [sum[i] + values[i] for i in range(len(values))]
5105
5106
5107 min_val = min(sum)
5108 max_val = max(sum)
5109 if not max_val:
5110 continue
5111 diff = (max_val - min_val) / max_val
5112
5113 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
5114 fixed_string_length("%1.10e" % sum[0], col_size) + \
5115 fixed_string_length("%1.10e" % sum[1], col_size) + \
5116 fixed_string_length("%1.10e" % diff, col_size)
5117
5118 if diff > 1e-10:
5119 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
5120 fail_proc += 1
5121 pass_proc -= 1
5122 failed_proc_list.append(proc)
5123 res_str += tmp_str + "Failed"
5124 elif not proc_succeed:
5125 res_str += tmp_str + "Passed"
5126
5127
5128
5129 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5130 (pass_proc, pass_proc + fail_proc,
5131 fail_proc, pass_proc + fail_proc)
5132
5133 if fail_proc != 0:
5134 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5135 if no_check_proc:
5136 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5137
5138
5139 if output == 'text':
5140 return res_str
5141 else:
5142 return fail_proc
5143
5144 def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
5145 """Creates a suitable filename for saving these results."""
5146
5147 if opts['name']=='auto' and opts['analyze']!='None':
5148
5149 return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
5150 [0],extension)
5151
5152 if opts['name']!='auto':
5153 basename = opts['name']
5154 else:
5155 prefix = 'cms_check_'
5156
5157 if len(cms_res['ordered_processes'])==1:
5158 proc = cms_res['ordered_processes'][0]
5159 replacements = [('=>','gt'),('<=','lt'),('/','_no_'),
5160 (' ',''),('+','p'),('-','m'),
5161 ('~','x'), ('>','_'),('=','eq'),('^2','squared')]
5162
5163 try:
5164 proc=proc[:proc.index('[')]
5165 except ValueError:
5166 pass
5167
5168 for key, value in replacements:
5169 proc = proc.replace(key,value)
5170
5171 basename =prefix+proc+'_%s_'%used_model.get('name')+\
5172 ( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
5173 cms_res['perturbation_orders']!=[] else '')
5174
5175 else:
5176 basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
5177
5178 suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
5179 if output_path:
5180 return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
5181 else:
5182 return '%s%s.%s'%(basename,suffix,extension)
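# As an illustration (hypothetical options): a single process 'e+ e- > w+ w-'
# checked in the 'sm' model with no perturbation orders and no tweak would,
# after the replacement table above, yield a basename like
# 'cms_check_epem_wpwm_sm_' and hence a file such as
# 'cms_check_epem_wpwm_sm_.pkl' when extension='pkl'.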
5183
5185 """ Outputs nicely the outcome of the complex mass scheme check performed
5186 by varying the width in the offshell region of resonances found for eahc process.
5187 Output just specifies whether text should be returned or a list of failed
5188 processes. Use 'concise_text' for a consise report of the results."""
5189
5190 pert_orders=result['perturbation_orders']
5191
5192
5193
5194
5195
5196
5197
5198
5199
5200 diff_lambda_power = options['diff_lambda_power']
5201
5202
5203
5204
5205
5206
5207
5208 if 'has_FRdecay' in result:
5209 has_FRdecay = result['has_FRdecay']
5210 else:
5211 has_FRdecay = False
5212
5213 if not pert_orders:
5214 CMS_test_threshold = 1e-3
5215 else:
5216
5217
5218
5219
5220
5221
5222 if not has_FRdecay and ('recomputed_with' not in result or \
5223 result['recompute_width'] in ['always','first_time']):
5224 CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
5225 else:
5226
5227
5228 CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
5229
5230
5231
5232
5233 consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
5234
5235
5236 group_val = 3
5237
5238
5239
5240
5241 diff_zero_threshold = 1e-3
5242
5243
5244 lambda_range = options['lambda_plot_range']
5245
5246
5247
5248
5249
5250
5251
5252
5253
5254
5255 res_str = ''
5256
5257 concise_str = ''
5258 concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
5259 concise_repl_dict = {'Header':{'process':'Process',
                                   'asymptot':'Asymptote',
                                   'cms_check':'Deviation from asymptote',
5262 'status':'Result'}}
5263
5264
5265
5266
5267
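    # How particle names are rendered in plot titles: 'built-in' uses the dictionary
    # below, while 'model' falls back to the texname attribute of the model particles.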
5268 useLatexParticleName = 'built-in'
5269 name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
5270 'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
5271 'mu+':r'\mu^+',
5272 'mu-':r'\mu^-',
5273 'ta+':r'\tau^+',
5274 'ta-':r'\tau^-'}
5275 for p in ['e','m','t']:
5276 d = {'e':'e','m':r'\mu','t':r'\tau'}
5277 name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
5278 name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
5279
5280 for p in ['u','d','c','s','b','t']:
5281 name2tex[p]=p
5282 name2tex['%s~'%p]=r'\bar{%s}'%p
5283
5284 def format_particle_name(particle, latex=useLatexParticleName):
5285 p_name = particle
5286 if latex=='model':
5287 try:
5288 texname = model.get_particle(particle).get('texname')
5289 if texname and texname!='none':
5290 p_name = r'$\displaystyle %s$'%texname
5291 except:
5292 pass
5293 elif latex=='built-in':
5294 try:
5295 p_name = r'$\displaystyle %s$'%name2tex[particle]
5296 except:
5297 pass
5298 return p_name
5299
5300 def resonance_str(resonance, latex=useLatexParticleName):
5301 """ Provides a concise string to characterize the resonance """
5302 particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
5303 mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
5304 return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
5305 ','.join(mothersID))
5306
5307 def format_title(process, resonance):
5308 """ Format the plot title given the process and resonance """
5309
5310 process_string = []
5311 for particle in process.split():
5312 if '<=' in particle:
5313 particle = particle.replace('<=',r'$\displaystyle <=$')
5314 if '^2' in particle:
5315 particle = particle.replace('^2',r'$\displaystyle ^2$')
5316 if particle=='$$':
5317 process_string.append(r'\$\$')
5318 continue
5319 if particle=='>':
5320 process_string.append(r'$\displaystyle \rightarrow$')
5321 continue
5322 if particle=='/':
5323 process_string.append(r'$\displaystyle /$')
5324 continue
5325 process_string.append(format_particle_name(particle))
5326
5327 if resonance=='':
5328 return r'CMS check for %s' %(' '.join(process_string))
5329 else:
5330 return r'CMS check for %s ( resonance %s )'\
5331 %(' '.join(process_string),resonance)
5332
5333 def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
5334 proc=None, res=None):
5335 """ Guess the lambda scaling from a list of ME values and return it.
5336 Also compare with the expected result if specified and trigger a
5337 warning if not in agreement."""
5338
5339 bpowers = []
5340 for i, lambdaCMS in enumerate(lambda_values[1:]):
5341 bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
5342 lambda_values[0]/lambdaCMS)))
5343
5344
5345 bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
5346 key = lambda elem: elem[1], reverse=True)[0][0]
5347 if not expected:
5348 return bpower
5349 if bpower != expected:
            logger.warning('The apparent scaling of the squared amplitude '+
               'seems inconsistent with the expected value '+
               '(%i vs %i). %i will be used.'%(expected,bpower,bpower)+
               ' This happened for process %s and resonance %s.'%(proc, res))
5354 return bpower
5355
5356 def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
5357 """ Checks if the values passed in argument are stable and return the
5358 stability check outcome warning if it is not precise enough. """
5359
5360 values = sorted([
5361 abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
5362 i, val in enumerate(ME_values)])
5363 median = values[len(values)//2]
5364 max_diff = max(abs(values[0]-median),abs(values[-1]-median))
5365 stability = max_diff/median
5366 stab_threshold = 1e-2
5367 if stability >= stab_threshold:
5368 return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
5369 %(values_name, stability)
5370 else:
5371 return None
5372
5373 if options['analyze']=='None':
5374 if options['reuse']:
5375 save_path = CMS_save_path('pkl', result, model, options,
5376 output_path=output_path)
5377 buff = "\nThe results of this check have been stored on disk and its "+\
5378 "analysis can be rerun at anytime with the MG5aMC command:\n "+\
5379 " check cms --analyze=%s\n"%save_path
5380 res_str += buff
5381 concise_str += buff
5382 save_load_object.save_to_file(save_path, result)
5383 elif len(result['ordered_processes'])>0:
            buff = "\nUse the following syntax if you want to store "+\
5385 "the raw results on disk.\n"+\
5386 " check cms -reuse <proc_def> <options>\n"
5387 res_str += buff
5388 concise_str += buff
5389
5390
5391
5392
5393
5394 checks = []
5395 for process in result['ordered_processes']:
5396 checks.extend([(process,resID) for resID in \
5397 range(len(result[process]['CMS']))])
5398
5399 if options['reuse']:
5400 logFile = open(CMS_save_path(
5401 'log', result, model, options, output_path=output_path),'w')
5402
5403 lambdaCMS_list=result['lambdaCMS']
5404
5405
5406 failed_procs = []
5407
5408
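    # Separator line made of 47 copies of the given character; note that bar(2) is
    # used further down as the integer 94 (= 2*47) when centring the process title.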
5409 bar = lambda char: char*47
5410
5411
5412 if 'widths_computed' in result:
5413 res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
5414 if result['recompute_width'] == 'never':
5415 res_str += '| Widths extracted from the param_card.dat'
5416 else:
5417 res_str += '| Widths computed %s'%('analytically' if has_FRdecay
5418 else 'numerically')
5419 if result['recompute_width'] == 'first_time':
                res_str += r' for \lambda = 1'
            elif result['recompute_width'] == 'always':
                res_str += r' for all \lambda values'
5423 res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
5424 for particle_name, width in result['widths_computed']:
5425 res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
5426 res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
5427
5428
5429
5430
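    # Number of smallest-lambda points used for the numerical stability checks below.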
5431 nstab_points=group_val
5432
5433 differences_target = {}
5434 for process, resID in checks:
5435
5436
5437 concise_repl_dict[process] = {'process':process,
5438 'asymptot':'N/A',
5439 'cms_check':'N/A',
5440 'status':'N/A'}
5441 proc_res = result[process]
5442 cms_res = proc_res['CMS'][resID]
5443 nwa_res = proc_res['NWA'][resID]
5444 resonance = resonance_str(cms_res['resonance'], latex='none')
5445 cms_born=cms_res['born']
5446 nwa_born=nwa_res['born']
5447
5448 res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
5449
5450 proc_title = "%s (resonance %s)"%(process,resonance)
5451 centering = (bar(2)+8-len(proc_title))//2
5452 res_str += "%s%s\n"%(' '*centering,proc_title)
5453
5454 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5455
5456
5457 if diff_lambda_power!=1:
5458 res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
5459 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5460
5461 born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
5462 expected=proc_res['born_order'], proc=process, res=resonance)
5463 stab_cms_born = check_stability(cms_born[-nstab_points:],
5464 lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
5465 if stab_cms_born:
5466 res_str += stab_cms_born
5467 stab_nwa_born = check_stability(nwa_born[-nstab_points:],
5468 lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
5469 if stab_nwa_born:
5470 res_str += stab_nwa_born
5471
        res_str += "== Kinematic configuration in GeV (E,px,py,pz)\n"
5473 for i, p in enumerate(cms_res['resonance']['PS_point_used']):
5474 res_str += " | p%-2.d = "%(i+1)
5475 for pi in p:
5476 res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
5477 res_str += "\n"
5478
5479 res_str += "== Offshellnesses of all detected resonances\n"
5480 for res_name, offshellness in cms_res['resonance']['offshellnesses']:
5481 res_str += " | %-15s = %f\n"%(res_name, offshellness)
5482 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5483
5484 if not pert_orders:
5485 res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
5486 else:
5487 cms_finite=cms_res['finite']
5488 nwa_finite=nwa_res['finite']
5489 loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
5490 expected=proc_res['loop_order'], proc=process, res=resonance)
5491 res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
5492 %(born_power,loop_power)
5493 stab_cms_finite = check_stability(cms_finite[-nstab_points:],
5494 lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
5495 if stab_cms_finite:
5496 res_str += stab_cms_finite
5497 stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
5498 lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
5499 if stab_nwa_finite:
5500 res_str += stab_nwa_finite
5501
5502 CMSData = []
5503 NWAData = []
5504 DiffData = []
5505 for idata, lam in enumerate(lambdaCMS_list):
5506 if not pert_orders:
5507 new_cms=cms_born[idata]/(lam**born_power)
5508 new_nwa=nwa_born[idata]/(lam**born_power)
5509 else:
5510 new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
5511 new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
5512 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5513 CMSData.append(new_cms)
5514 NWAData.append(new_nwa)
5515 DiffData.append(new_diff)
5516
5517
5518
5519
5520
5521
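        # First estimate of the asymptotic CMS-NWA difference: the median of the
        # difference values after discarding the first and last 20% of lambda points.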
5522 trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
5523 low_diff_median = sorted(DiffData[trim_range:-trim_range])\
5524 [(len(DiffData)-2*trim_range)//2]
5525
5526
5527
5528
5529
5530
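        # Scan the difference values, starting from the largest lambdas, until their
        # running median settles close to the asymptotic estimate; only the data from
        # that point onwards enters the CMS check.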
5531 current_median = 0
5532
5533 scan_index = 0
5534 reference = abs(sorted(NWAData)[len(NWAData)//2])
5535 if low_diff_median!= 0.0:
5536 if abs(reference/low_diff_median)<diff_zero_threshold:
5537 reference = abs(low_diff_median)
5538 while True:
5539 scanner = DiffData[scan_index:group_val+scan_index]
5540 current_median = sorted(scanner)[len(scanner)//2]
5541
5542
            if abs(current_median-low_diff_median)/reference<\
                                                 consideration_threshold:
                break
            scan_index += 1
            if (group_val+scan_index)>=len(DiffData):


                logger.warning('The median scanning failed during the CMS check '+
                  'for process %s. '%proc_title+\
                  'This means that the difference plot has no stable '+\
                  'intermediate region and MG5_aMC will arbitrarily consider the '+\
                  'left half of the values.')
                scan_index = -1
                break
5557
5558 if scan_index == -1:
5559 cms_check_data_range = len(DiffData)//2
5560 else:
5561 cms_check_data_range = scan_index + group_val
5562
5563 res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
5564 %(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
5565 len(lambdaCMS_list)-scan_index)
5566
5567 CMScheck_values = DiffData[cms_check_data_range:]
5568
5569
5570
5571
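        # Within the retained range, scan from the smallest-lambda end towards larger
        # lambda to detect a possible numerically unstable tail.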
5572 if scan_index >= 0:
5573
5574 scan_index = len(CMScheck_values)
5575 used_group_val = max(3,group_val)
5576 unstability_found = True
5577 while True:
5578 scanner = CMScheck_values[scan_index-used_group_val:scan_index]
5579 maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
                if maxdiff/reference<consideration_threshold:
                    break
                if (scan_index-used_group_val)==0:


                    unstability_found = False
                    break
5587
5588 scan_index -= 1
5589
5590
5591 if unstability_found:
5592 unstab_check=CMScheck_values[scan_index:]
5593 relative_array = [val > CMScheck_values[scan_index-1] for
5594 val in unstab_check]
5595 upper = relative_array.count(True)
5596 lower = relative_array.count(False)
5597 if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
5598 logger.warning(
5599 """For process %s, a numerically unstable region was detected starting from lambda < %.1e.
Look at the plot in this region (and possibly add more points using the option --lambdaCMS).
5601 If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or decrease the
5602 minimum value of lambda to be considered in the CMS check."""\
5603 %(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
5604
5605
5606
5607
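        # The CMS check figure of merit is the largest deviation of the running median
        # from the asymptotic difference over the retained range, normalised to the
        # reference value.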
5608 scan_index = 0
5609 max_diff = 0.0
5610 res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
5611 %('%.3g'%reference)
5612 res_str += "== Asymptotic difference value detected = %s\n"\
5613 %('%.3g'%low_diff_median)
5614 concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
5615
5616
5617 differences_target[(process,resID)]= low_diff_median
5618
5619 while True:
5620 current_vals = CMScheck_values[scan_index:scan_index+group_val]
5621 max_diff = max(max_diff, abs(low_diff_median-
5622 sorted(current_vals)[len(current_vals)//2])/reference)
5623 if (scan_index+group_val)>=len(CMScheck_values):
5624 break
5625 scan_index += 1
5626
5627
5628 cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
5629 CMS_test_threshold*100.0)
5630 res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
5631 concise_repl_dict[process]['cms_check'] = \
5632 "%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
5633
5634 if max_diff>CMS_test_threshold:
5635 failed_procs.append((process,resonance))
5636 res_str += "%s %s %s\n"%(bar('='),
5637 'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
5638 concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
5639 else 'Passed'
5640
5641 if output=='concise_text':
5642
5643 max_proc_size = max(
5644 [len(process) for process in result['ordered_processes']]+[10])
5645
5646 res_str = concise_str
5647 res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
5648 for process in result['ordered_processes']:
5649 res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
5650
5651 if len(checks):
5652 res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
5653 ('.\n' if not failed_procs else ', failed checks are for:\n')
5654 else:
        return "\nNo CMS check to perform, the process either has no diagram or does "+\
               "not feature any massive s-channel resonance."
5657
5658 for process, resonance in failed_procs:
5659 res_str += "> %s, %s\n"%(process, resonance)
5660
5661 if output=='concise_text':
5662 res_str += '\nMore detailed information on this check available with the command:\n'
5663 res_str += ' MG5_aMC>display checks\n'
5664
5665
5666
5667
5668 if not options['show_plot']:
5669 if options['reuse']:
5670 logFile.write(res_str)
5671 logFile.close()
5672 if output.endswith('text'):
5673 return res_str
5674 else:
5675 return failed_procs
5676
5677 fig_output_file = CMS_save_path('pdf', result, model, options,
5678 output_path=output_path)
5679 base_fig_name = fig_output_file[:-4]
5680 suffix = 1
5681 while os.path.isfile(fig_output_file):
5682 fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
5683 suffix+=1
5684
5685 process_data_plot_dict={}
5686
5687
5688
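    # Collect any additional stored results to overlay on the plots; extra entries of
    # the '--analyze' option may optionally carry a title in parentheses.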
5689 all_res = [(result, None)]
5690 for i, add_res in enumerate(options['analyze'].split(',')[1:]):
5691 specs =re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
5692 if specs:
5693 filename = specs.group('filename')
5694 title = specs.group('title')
5695 else:
5696 filename = add_res
5697 title = '#%d'%(i+1)
5698
5699 new_result = save_load_object.load_from_file(filename)
5700 if new_result is None:
5701 raise InvalidCmd('The complex mass scheme check result'+
5702 " file below could not be read.\n %s"%filename)
5703 if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
5704 or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
            raise InvalidCmd('The complex mass scheme check result'+
                " file below does not seem compatible.\n %s"%filename)
5707 all_res.append((new_result,title))
5708
5709
5710 for process, resID in checks:
5711 data1=[]
5712 data2=[]
5713 info ={}
5714 for res in all_res:
5715 proc_res = res[0][process]
5716 cms_res = proc_res['CMS'][resID]
5717 nwa_res = proc_res['NWA'][resID]
5718 resonance = resonance_str(cms_res['resonance'])
5719 if options['resonances']!=1:
5720 info['title'] = format_title(process, resonance)
5721 else:
5722 info['title'] = format_title(process, '')
5723
5724 cms_born=cms_res['born']
5725 nwa_born=nwa_res['born']
5726 if len(cms_born) != len(lambdaCMS_list) or\
5727 len(nwa_born) != len(lambdaCMS_list):
5728 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5729 ' lambdaCMS values specified for process %s'%process)
5730 if pert_orders:
5731 cms_finite=cms_res['finite']
5732 nwa_finite=nwa_res['finite']
5733 if len(cms_finite) != len(lambdaCMS_list) or\
5734 len(nwa_finite) != len(lambdaCMS_list):
5735 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5736 ' lambdaCMS values specified for process %s'%process)
5737
5738 bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
5739 expected=proc_res['born_order'], proc=process, res=resonance)
5740
5741 CMSData = []
5742 NWAData = []
5743 DiffData = []
5744 for idata, lam in enumerate(lambdaCMS_list):
5745 if not pert_orders:
5746 new_cms = cms_born[idata]/lam**bpower
5747 new_nwa = nwa_born[idata]/lam**bpower
5748 else:
5749 new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
5750 new_nwa=nwa_finite[idata]
5751 new_cms /= lam*nwa_born[idata]
5752 new_nwa /= lam*nwa_born[idata]
5753 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5754 CMSData.append(new_cms)
5755 NWAData.append(new_nwa)
5756 DiffData.append(new_diff)
5757 if res[1] is None:
5758 if not pert_orders:
5759 data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
5760 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
5761 else:
5762 data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
5763 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
5764 data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
5765 %('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
5766 ,DiffData])
            data2.append([r'Detected asymptote',[differences_target[(process,resID)]
5768 for i in range(len(lambdaCMS_list))]])
5769 else:
            data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' ').replace('#',r'\#'), CMSData])
            data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' ').replace('#',r'\#'), NWAData])
            data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' ').replace('#',r'\#'), DiffData])
5773
5774 process_data_plot_dict[(process,resID)]=(data1,data2, info)
5775
5776
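    # Render the plots with matplotlib when available; if the import or the rendering
    # fails, only the text report (or the list of failed processes) is returned.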
5777 try:
5778 import matplotlib.pyplot as plt
5779 from matplotlib.backends.backend_pdf import PdfPages
5780 logger.info('Rendering plots... (this can take some time because of the latex labels)')
5781
5782 res_str += \
5783 """\n-----------------------------------------------------------------------------------------------
5784 | In the plots, the Complex Mass Scheme check is successful if the normalized difference |
5785 | between the CMS and NWA result (lower inset) tends to a constant when \lambda goes to zero. |
5786 -----------------------------------------------------------------------------------------------\n"""
5787
5788
5789 if lambda_range[1]>0:
5790 min_lambda_index = -1
5791 for i, lam in enumerate(lambdaCMS_list):
5792 if lam<=lambda_range[1]:
5793 min_lambda_index = i
5794 break
5795 else:
5796 min_lambda_index = 0
5797 if lambda_range[0]>0:
5798 max_lambda_index = -1
5799 for i, lam in enumerate(lambdaCMS_list):
5800 if lam<=lambda_range[0]:
5801 max_lambda_index=i-1
5802 break
5803 else:
5804 max_lambda_index=len(lambdaCMS_list)-1
5805
5806 if max_lambda_index==-1 or min_lambda_index==-1 or \
5807 min_lambda_index==max_lambda_index:
5808 raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
5809 (lambda_range[0],lambda_range[1]))
5810
5811 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5812 lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
5813
5814 plt.rc('text', usetex=True)
5815 plt.rc('font', family='serif')
5816 pp=PdfPages(fig_output_file)
5817 if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
5818 colorlist=['b','r','g','k','c','m','y']
5819 else:
            import matplotlib.colors as colors
            import matplotlib.cm as mplcm
5823
5824
5825 cm = plt.get_cmap('gist_rainbow')
5826 cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
5827 scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
5828
5829 colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
5830
5831
5832
5833
5834
5835
5836 legend_size = 10
5837 for iproc, (process, resID) in enumerate(checks):
5838 data1,data2, info=process_data_plot_dict[(process,resID)]
5839
5840 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5841 for i in range(len(data1)):
5842 data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
5843 for i in range(len(data2)):
5844 data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
5845 plt.figure(iproc+1)
5846 plt.subplot(211)
5847 minvalue=1e+99
5848 maxvalue=-1e+99
5849 for i, d1 in enumerate(data1):
5850
5851 color=colorlist[i//2]
5852 data_plot=d1[1]
5853 minvalue=min(min(data_plot),minvalue)
5854 maxvalue=max(max(data_plot),maxvalue)
5855 plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
5856 linestyle=('-' if i%2==0 else '--'),
5857 label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
5858 ymin = minvalue-(maxvalue-minvalue)/5.
5859 ymax = maxvalue+(maxvalue-minvalue)/5.
5860
5861 plt.yscale('linear')
5862 plt.xscale('log')
5863 plt.title(info['title'],fontsize=12,y=1.08)
5864 plt.ylabel(r'$\displaystyle \mathcal{M}$')
5865
5866 if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
5867 for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
5868 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5869 else:
5870 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5871
5872 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
5873
5874 plt.subplot(212)
5875 minvalue=1e+99
5876 maxvalue=-1e+99
5877
5878 try:
                asymptot_index = [d2[0] for d2 in data2].index('Detected asymptote')
5880 plt.plot(lambdaCMS_list, data2[asymptot_index][1],
5881 color='0.75', marker='', linestyle='-', label='')
5882 except ValueError:
5883 pass
5884
5885 color_ID = -1
5886 for d2 in data2:
5887
                if d2[0]=='Detected asymptote':
5889 continue
5890 color_ID += 1
5891 color=colorlist[color_ID]
5892 data_plot=d2[1]
5893 minvalue=min(min(data_plot),minvalue)
5894 maxvalue=max(max(data_plot),maxvalue)
5895 plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
5896 linestyle='-', label=d2[0])
5897 ymin = minvalue-(maxvalue-minvalue)/5.
5898 ymax = maxvalue+(maxvalue-minvalue)/5.
5899
5900 plt.yscale('linear')
5901 plt.xscale('log')
5902 plt.ylabel(r'$\displaystyle \Delta$')
5903 plt.xlabel(r'$\displaystyle \lambda$')
5904
5905
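            # Put the legend of the lower inset on the side where the curves are
            # flatter, comparing the value spread on the small-lambda (left) half of
            # the plot with that on the large-lambda (right) half.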
5906 sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
5907 left_stability = sum(abs(s[0]-s[-1]) for s in sd)
5908 sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
5909 right_stability = sum(abs(s[0]-s[-1]) for s in sd)
5910 left_stable = False if right_stability==0.0 else \
5911 (left_stability/right_stability)<0.1
5912
5913 if left_stable:
5914 if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
5915 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5916 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5917 else:
5918 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5919 else:
5920 if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
5921 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5922 plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
5923 else:
5924 plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
5925
5926 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
5927 minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
5928
5929 plt.savefig(pp,format='pdf')
5930
5931 pp.close()
5932
5933 if len(checks)>0:
5934 logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
5935
5936 if sys.platform.startswith('linux'):
5937 misc.call(["xdg-open", fig_output_file])
5938 elif sys.platform.startswith('darwin'):
5939 misc.call(["open", fig_output_file])
5940
5941 plt.close("all")
5942
5943 except Exception as e:
5944 if isinstance(e, ImportError):
5945 res_str += "\n= Install matplotlib to get a "+\
5946 "graphical display of the results of the cms check."
5947 else:
5948 general_error = "\n= Could not produce the cms check plot because of "+\
5949 "the following error: %s"%str(e)
5950 try:
5951 import six.moves.tkinter
5952 if isinstance(e, six.moves.tkinter.TclError):
5953 res_str += "\n= Plots are not generated because your system"+\
5954 " does not support graphical display."
5955 else:
5956 res_str += general_error
5957 except:
5958 res_str += general_error
5959
5960 if options['reuse']:
5961 logFile.write(res_str)
5962 logFile.close()
5963
5964 if output.endswith('text'):
5965 return res_str
5966 else:
5967 return failed_procs
5968