15 """Several different checks for processes (and hence models):
16 permutation tests, gauge invariance tests, lorentz invariance
17 tests. Also class for evaluation of Python matrix elements,
18 MatrixElementEvaluator."""
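# Illustrative usage sketch (not executed here; assumes a UFO model already
# imported via models.import_ufo and a generated HelasMatrixElement `me`):
#
#     evaluator = MatrixElementEvaluator(model, param_card=None, reuse=True)
#     p, weight = evaluator.get_momenta(me.get('processes')[0])
#     m2, amp2 = evaluator.evaluate_matrix_element(me, p=p, output='m2')
#
# The loop-level variants below follow the same pattern but drive the Fortran
# MadLoop output instead of the Python matrix elements.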
from __future__ import division
from __future__ import absolute_import

import array
import copy
import fractions
import itertools
import logging
import math
import os
import sys
import re
import shutil
import random
import glob
import subprocess
import time
import datetime
import errno
import pickle

import aloha
import aloha.aloha_writers as aloha_writers
import aloha.create_aloha as create_aloha

import madgraph.iolibs.export_python as export_python
import madgraph.iolibs.helas_call_writers as helas_call_writers
import models.import_ufo as import_ufo
import madgraph.iolibs.save_load_object as save_load_object
import madgraph.iolibs.file_writers as writers

import madgraph.core.base_objects as base_objects
import madgraph.core.color_algebra as color
import madgraph.core.color_amp as color_amp
import madgraph.core.helas_objects as helas_objects
import madgraph.core.diagram_generation as diagram_generation

import madgraph.various.rambo as rambo
import madgraph.various.misc as misc
import madgraph.various.progressbar as pbar
import madgraph.various.banner as bannermod

import madgraph.loop.loop_diagram_generation as loop_diagram_generation
import madgraph.loop.loop_helas_objects as loop_helas_objects
import madgraph.loop.loop_base_objects as loop_base_objects
import models.check_param_card as check_param_card

from madgraph.interface.madevent_interface import MadLoopInitializer
from madgraph.interface.common_run_interface import AskforEditCard
from madgraph import MG5DIR, InvalidCmd, MadGraph5Error

from madgraph.iolibs.files import cp

import models.model_reader as model_reader
import aloha.template_files.wavefunctions as wavefunctions
from aloha.template_files.wavefunctions import \
    ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
import six
StringIO = six
from six.moves import range
from six.moves import zip
import io
if six.PY3:
    file = io.FileIO


ADDED_GLOBAL = []

temp_dir_prefix = "TMP_CHECK"

pjoin = os.path.join
def clean_added_globals(to_clean):
    for value in list(to_clean):
        del globals()[value]
        to_clean.remove(value)
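# Note: evaluate_matrix_element() exec's the ALOHA routines and the generated
# Matrix_<proc> classes directly into this module's global namespace so they
# can be reused across calls; ADDED_GLOBAL keeps track of those names so that
# clean_added_globals(ADDED_GLOBAL) can restore a pristine namespace afterwards.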
105 """ Just an 'option container' to mimick the interface which is passed to the
106 tests. We put in only what is now used from interface by the test:
107 cmd.options['fortran_compiler']
108 cmd.options['complex_mass_scheme']
109 cmd._mgme_dir"""
110 - def __init__(self, mgme_dir = "", complex_mass_scheme = False,
111 fortran_compiler = 'gfortran' ):
112 self._mgme_dir = mgme_dir
113 self.options = {}
114 self.options['complex_mass_scheme']=complex_mass_scheme
115 self.options['fortran_compiler']=fortran_compiler


logger = logging.getLogger('madgraph.various.process_checks')


def boost_momenta(p, boost_direction=1, beta=0.5):
    """Boost the set of momenta p along the axis 'boost_direction' by the
    'beta' factor."""

    boost_p = []
    gamma = 1. / math.sqrt(1 - beta**2)
    for imp in p:
        boost_comp = imp[boost_direction]
        E, px, py, pz = imp
        boost_imp = []
        boost_imp.append(gamma * E - gamma * beta * boost_comp)
        if boost_direction == 1:
            boost_imp.append(-gamma * beta * E + gamma * px)
        else:
            boost_imp.append(px)
        if boost_direction == 2:
            boost_imp.append(-gamma * beta * E + gamma * py)
        else:
            boost_imp.append(py)
        if boost_direction == 3:
            boost_imp.append(-gamma * beta * E + gamma * pz)
        else:
            boost_imp.append(pz)
        boost_p.append(boost_imp)

    return boost_p
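# For a boost with velocity beta along axis i, the components transform as
#   E'   = gamma * (E - beta * p_i)
#   p_i' = gamma * (p_i - beta * E)
# with gamma = 1/sqrt(1 - beta**2), while the transverse components are
# unchanged; this is exactly what the branches above implement for i = 1, 2, 3.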


class MatrixElementEvaluator(object):
    """Class taking care of matrix element evaluation, storing
    relevant quantities for speedup."""

    def __init__(self, model, param_card=None,
                 auth_skipping=False, reuse=True, cmd=FakeInterface()):
        """Initialize object with stored_quantities, helas_writer,
        model, etc.
        auth_skipping = True means that any identical matrix element will be
        evaluated only once.
        reuse = True means that the matrix element corresponding to a
        given process can be reused (turn off if you are using
        different models for the same process)."""
173
174 self.cmd = cmd
175
176
177 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
178
179
180 self.full_model = model_reader.ModelReader(model)
181 try:
182 self.full_model.set_parameters_and_couplings(param_card)
183 except MadGraph5Error:
184 if isinstance(param_card, (str,file)):
185 raise
186 logger.warning('param_card present in the event file not compatible.'+
187 ' We will use the default one.')
188 self.full_model.set_parameters_and_couplings()
189
190 self.auth_skipping = auth_skipping
191 self.reuse = reuse
192 self.cmass_scheme = cmd.options['complex_mass_scheme']
193 self.store_aloha = []
194 self.stored_quantities = {}
195
196
197
198
    def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
                                gauge_check=False, auth_skipping=None, output='m2',
                                options=None):
202 """Calculate the matrix element and evaluate it for a phase space point
203 output is either m2, amp, jamp
204 """
205
206 if full_model:
207 self.full_model = full_model
208 process = matrix_element.get('processes')[0]
209 model = process.get('model')
210
211
212 if "matrix_elements" not in self.stored_quantities:
213 self.stored_quantities['matrix_elements'] = []
214 matrix_methods = {}
215
216 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
217 if matrix_element not in self.stored_quantities['matrix_elements']:
218 self.stored_quantities['matrix_elements'].append(matrix_element)
219
220 matrix = eval("Matrix_%s()" % process.shell_string(), globals())
221 me_value = matrix.smatrix(p, self.full_model)
222 if output == "m2":
223 return matrix.smatrix(p, self.full_model), matrix.amp2
224 else:
225 m2 = matrix.smatrix(p, self.full_model)
226 return {'m2': m2, output:getattr(matrix, output)}
227
228 if (auth_skipping or self.auth_skipping) and matrix_element in \
229 self.stored_quantities['matrix_elements']:
230
231 logger.info("Skipping %s, " % process.nice_string() + \
232 "identical matrix element already tested" \
233 )
234 return None
235
236
237 self.stored_quantities['matrix_elements'].append(matrix_element)
238
239
240
241 if "list_colorize" not in self.stored_quantities:
242 self.stored_quantities["list_colorize"] = []
243 if "list_color_basis" not in self.stored_quantities:
244 self.stored_quantities["list_color_basis"] = []
245 if "list_color_matrices" not in self.stored_quantities:
246 self.stored_quantities["list_color_matrices"] = []
247
248 col_basis = color_amp.ColorBasis()
249 new_amp = matrix_element.get_base_amplitude()
250 matrix_element.set('base_amplitude', new_amp)
251 colorize_obj = col_basis.create_color_dict_list(new_amp)
252
253 try:
254
255
256
257 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
258 except ValueError:
259
260
261 self.stored_quantities['list_colorize'].append(colorize_obj)
262 col_basis.build()
263 self.stored_quantities['list_color_basis'].append(col_basis)
264 col_matrix = color_amp.ColorMatrix(col_basis)
265 self.stored_quantities['list_color_matrices'].append(col_matrix)
266 col_index = -1
267
268
269 matrix_element.set('color_basis',
270 self.stored_quantities['list_color_basis'][col_index])
271 matrix_element.set('color_matrix',
272 self.stored_quantities['list_color_matrices'][col_index])
273
274
275 if "used_lorentz" not in self.stored_quantities:
276 self.stored_quantities["used_lorentz"] = []
277
278 me_used_lorentz = set(matrix_element.get_used_lorentz())
279 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
280 if lorentz not in self.store_aloha]
281
282 aloha_model = create_aloha.AbstractALOHAModel(model.get('modelpath'))
283 aloha_model.add_Lorentz_object(model.get('lorentz'))
284 aloha_model.compute_subset(me_used_lorentz)
285
286
287 aloha_routines = []
288 for routine in aloha_model.values():
289 aloha_routines.append(routine.write(output_dir = None,
290 mode='mg5',
291 language = 'Python'))
292 for routine in aloha_model.external_routines:
293 for path in aloha_model.locate_external(routine, 'Python'):
294 aloha_routines.append(open(path).read())
295
296
297 previous_globals = list(globals().keys())
298 for routine in aloha_routines:
299 exec(routine, globals())
300 for key in globals().keys():
301 if key not in previous_globals:
302 ADDED_GLOBAL.append(key)
303
304
305 self.store_aloha.extend(me_used_lorentz)
306
307 exporter = export_python.ProcessExporterPython(matrix_element,
308 self.helas_writer)
309 try:
310 matrix_methods = exporter.get_python_matrix_methods(\
311 gauge_check=gauge_check)
312
313 except helas_call_writers.HelasWriterError as error:
314 logger.info(error)
315 return None
316
317
318
319 if self.reuse:
320
321 exec(matrix_methods[process.shell_string()], globals())
322 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
323 else:
324
325 exec(matrix_methods[process.shell_string()], globals())
326
327 if not p:
328 p, w_rambo = self.get_momenta(process, options)
329
330 exec("data = Matrix_%s()" % process.shell_string(), globals())
331 if output == "m2":
332 return data.smatrix(p, self.full_model), data.amp2
333 else:
334 m2 = data.smatrix(p,self.full_model)
335 return {'m2': m2, output:getattr(data, output)}
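    # evaluate_matrix_element() thus returns either the pair
    # (smatrix_value, amp2_list) when output == 'm2', or a dictionary
    # {'m2': smatrix_value, '<output>': <attribute>} where '<output>' can for
    # instance be 'jamp' or 'amp' to retrieve the corresponding arrays from
    # the generated Matrix_<proc> instance.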
336
    @staticmethod
    def pass_isolation_cuts(pmoms, ptcut=50.0, drcut=0.5):
        """ Check whether the specified kinematic point passes isolation cuts
        """
341
342 def Pt(pmom):
343 """ Computes the pt of a 4-momentum"""
344 return math.sqrt(pmom[1]**2+pmom[2]**2)
345
346 def DeltaR(p1,p2):
347 """ Computes the DeltaR between two 4-momenta"""
348
349 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
350 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
351 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
352 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
353
354 phi1=math.atan2(p1[2],p1[1])
355 phi2=math.atan2(p2[2],p2[1])
356 dphi=abs(phi2-phi1)
357
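            # Fold dphi into [0, pi]: abs(abs(dphi - pi) - pi) maps any angle
            # difference in [0, 2*pi] onto the equivalent smallest separation.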
358 dphi=abs(abs(dphi-math.pi)-math.pi)
359
360 return math.sqrt(dphi**2+(eta2-eta1)**2)
361
362 for i, pmom in enumerate(pmoms[2:]):
363
364 if Pt(pmom)<ptcut:
365 return False
366
367 for pmom2 in pmoms[3+i:]:
368 if DeltaR(pmom,pmom2)<drcut:
369 return False
370 return True
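    # The isolation requirement applied above: every final-state momentum
    # (pmoms[2:], i.e. skipping the two initial states) must have pT > ptcut,
    # and every pair of final-state momenta must be separated by DeltaR > drcut.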
371
372
373
374
    def get_momenta(self, process, options=None, special_mass=None):
        """Get a point in phase space for the external states in the given
        process, with the CM energy given. The incoming particles are
        assumed to be oriented along the z axis, with particle 1 along the
        positive z axis.
        For the CMS check, one must be able to choose the mass of the special
        resonance particle with id = -1, and the special_mass option allows
        one to specify it."""
383
384 if not options:
385 energy=1000
386 events=None
387 else:
388 energy = options['energy']
389 events = options['events']
390 to_skip = options['skip_evt']
391
392 if not (isinstance(process, base_objects.Process) and \
393 isinstance(energy, (float,int))):
394 raise rambo.RAMBOError("Not correct type for arguments to get_momenta")
395
396
397 sorted_legs = sorted(process.get('legs'), key=lambda l: l.get('number'))
398
399
400 if events:
401 ids = [l.get('id') for l in sorted_legs]
402 import MadSpin.decay as madspin
403 if not hasattr(self, 'event_file') or self.event_file.inputfile.closed:
404 print( "reset")
405 fsock = open(events)
406 self.event_file = madspin.Event(fsock)
407
408 skip = 0
409 while self.event_file.get_next_event() != 'no_event':
410 event = self.event_file.particle
411
412 event_ids = [p['pid'] for p in event.values()]
413 if event_ids == ids:
414 skip += 1
415 if skip > to_skip:
416 break
417 else:
418 raise MadGraph5Error('No compatible events for %s' % ids)
419 p = []
420 for part in event.values():
421 m = part['momentum']
422 p.append([m.E, m.px, m.py, m.pz])
423 fsock.close()
424 return p, 1
425
426 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
427 nfinal = len(sorted_legs) - nincoming
428
429
430 mass = []
431 for l in sorted_legs:
432 if l.get('id') != 0:
433 mass_string = self.full_model.get_particle(l.get('id')).get('mass')
434 mass.append(self.full_model.get('parameter_dict')[mass_string].real)
435 else:
436 if isinstance(special_mass, float):
437 mass.append(special_mass)
438 else:
439 raise Exception("A 'special_mass' option must be specified"+\
440 " in get_momenta when a leg with id=-10 is present (for CMS check)")
441
442
443
444
445
446
447
448
449
450 energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
451
452
453
454
455
456
457
458 if nfinal == 1:
459 p = []
460 energy = mass[-1]
461 p.append([energy/2,0,0,energy/2])
462 p.append([energy/2,0,0,-energy/2])
463 p.append([mass[-1],0,0,0])
464 return p, 1.0
465
466 e2 = energy**2
467 m1 = mass[0]
468 p = []
469
470 masses = rambo.FortranList(nfinal)
471 for i in range(nfinal):
472 masses[i+1] = mass[nincoming + i]
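        # rambo.FortranList mimics Fortran 1-based indexing, hence masses[i+1];
        # similarly p_rambo below is addressed as p_rambo[(mu, i)] with mu=1..4
        # (px, py, pz, E) and i the final-state particle index starting at 1.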
473
474 if nincoming == 1:
475
476 p.append([abs(m1), 0., 0., 0.])
477 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
478
479 for i in range(1, nfinal+1):
480 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
481 p_rambo[(2,i)], p_rambo[(3,i)]]
482 p.append(momi)
483
484 return p, w_rambo
485
486 if nincoming != 2:
487 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
488
489 if nfinal == 1:
490 energy = masses[1]
491 if masses[1] == 0.0:
492 raise rambo.RAMBOError('The kinematic 2 > 1 with the final'+\
493 ' state particle massless is invalid')
494
495 e2 = energy**2
496 m2 = mass[1]
497
498 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
499 2*m1**2*m2**2 + m2**4) / (4*e2))
500 e1 = math.sqrt(mom**2+m1**2)
501 e2 = math.sqrt(mom**2+m2**2)
502
503 p.append([e1, 0., 0., mom])
504 p.append([e2, 0., 0., -mom])
505
506 if nfinal == 1:
507 p.append([energy, 0., 0., 0.])
508 return p, 1.
509
510 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
511
512
513 for i in range(1, nfinal+1):
514 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
515 p_rambo[(2,i)], p_rambo[(3,i)]]
516 p.append(momi)
517
518 return p, w_rambo
519
525 """Class taking care of matrix element evaluation for loop processes."""
526
527 - def __init__(self,cuttools_dir=None, output_path=None, tir_dir={},
528 cmd=FakeInterface(),*args,**kwargs):
529 """Allow for initializing the MG5 root where the temporary fortran
530 output for checks is placed."""
531
532 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
533
534 self.mg_root=self.cmd._mgme_dir
535
536 if output_path is None:
537 self.output_path = self.cmd._mgme_dir
538 else:
539 self.output_path = output_path
540
541 self.cuttools_dir=cuttools_dir
542 self.tir_dir=tir_dir
543 self.loop_optimized_output = cmd.options['loop_optimized_output']
544
545
546 self.proliferate=True
547
548
549
550
    def evaluate_matrix_element(self, matrix_element, p=None, options=None,
                                gauge_check=False, auth_skipping=None, output='m2',
                                PS_name=None, MLOptions={}):
        """Calculate the matrix element and evaluate it for a phase space point.
        Output can only be 'm2'. The 'jamp' and 'amp' returned values are just
        empty lists at this point.
        If PS_name is not None the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""
559
560 process = matrix_element.get('processes')[0]
561 model = process.get('model')
562
563 if options and 'split_orders' in list(options.keys()):
564 split_orders = options['split_orders']
565 else:
566 split_orders = -1
567
568 if "loop_matrix_elements" not in self.stored_quantities:
569 self.stored_quantities['loop_matrix_elements'] = []
570
571 if (auth_skipping or self.auth_skipping) and matrix_element in \
572 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
573
574 logger.info("Skipping %s, " % process.nice_string() + \
575 "identical matrix element already tested" )
576 return None
577
578
579 if not p:
580 p, w_rambo = self.get_momenta(process, options=options)
581
582 if matrix_element in [el[0] for el in \
583 self.stored_quantities['loop_matrix_elements']]:
584 export_dir=self.stored_quantities['loop_matrix_elements'][\
585 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
586 ].index(matrix_element)][1]
587 logger.debug("Reusing generated output %s"%str(export_dir))
588 else:
589 export_dir=pjoin(self.output_path,temp_dir_prefix)
590 if os.path.isdir(export_dir):
591 if not self.proliferate:
                    raise InvalidCmd("The directory %s already exists. Please remove it."%str(export_dir))
593 else:
594 id=1
595 while os.path.isdir(pjoin(self.output_path,\
596 '%s_%i'%(temp_dir_prefix,id))):
597 id+=1
598 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
599
600 if self.proliferate:
601 self.stored_quantities['loop_matrix_elements'].append(\
602 (matrix_element,export_dir))
603
604
605
606 import madgraph.loop.loop_exporters as loop_exporters
607 if self.loop_optimized_output:
608 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
609 else:
610 exporter_class=loop_exporters.LoopProcessExporterFortranSA
611
612 MLoptions = {'clean': True,
613 'complex_mass': self.cmass_scheme,
614 'export_format':'madloop',
615 'mp':True,
616 'SubProc_prefix':'P',
617 'compute_color_flows': not process.get('has_born'),
618 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
619 'cuttools_dir': self.cuttools_dir,
620 'fortran_compiler': self.cmd.options['fortran_compiler'],
621 'output_dependencies': self.cmd.options['output_dependencies']}
622
623 MLoptions.update(self.tir_dir)
624
625 FortranExporter = exporter_class(export_dir, MLoptions)
626 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
627 FortranExporter.copy_template(model)
628 FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
629 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
630 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
631 for c in l]))
632 FortranExporter.convert_model(model,wanted_lorentz,wanted_couplings)
633 FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
634
635 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
636 split_orders=split_orders)
637
638 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
639 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
640
641 if gauge_check:
642 file_path, orig_file_content, new_file_content = \
643 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
644 ['helas_calls_ampb_1.f','loop_matrix.f'])
645 file = open(file_path,'w')
646 file.write(new_file_content)
647 file.close()
648 if self.loop_optimized_output:
649 mp_file_path, mp_orig_file_content, mp_new_file_content = \
650 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
651 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
652 mp_file = open(mp_file_path,'w')
653 mp_file.write(mp_new_file_content)
654 mp_file.close()
655
656
657 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
658 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
659
660
661 if gauge_check:
662 file = open(file_path,'w')
663 file.write(orig_file_content)
664 file.close()
665 if self.loop_optimized_output:
666 mp_file = open(mp_file_path,'w')
667 mp_file.write(mp_orig_file_content)
668 mp_file.close()
669
670
671 if not self.proliferate:
672 shutil.rmtree(export_dir)
673
674 if output == "m2":
675
676
677 return finite_m2, []
678 else:
679 return {'m2': finite_m2, output:[]}
680
    def fix_MadLoopParamCard(self, dir_name, mp=False, loop_filter=False,
                             DoubleCheckHelicityFilter=False, MLOptions={}):
        """ Set parameters in MadLoopParams.dat suited for these checks. MP
        stands for multiple precision and can either be a bool or an integer
        to specify the mode."""
686
687
688 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
689 MLCard = bannermod.MadLoopParam(file)
690
691 if isinstance(mp,bool):
692 mode = 4 if mp else 1
693 else:
694 mode = mp
695
696 for key, value in MLOptions.items():
697 if key == "MLReductionLib":
698 if isinstance(value, int):
699 ml_reds = str(value)
700 if isinstance(value,list):
701 if len(value)==0:
702 ml_reds = '1'
703 else:
704 ml_reds="|".join([str(vl) for vl in value])
705 elif isinstance(value, str):
706 ml_reds = value
707 elif isinstance(value, int):
708 ml_reds = str(value)
709 else:
710 raise MadGraph5Error('The argument %s '%str(value)+\
711 ' in fix_MadLoopParamCard must be a string, integer'+\
712 ' or a list.')
713 MLCard.set("MLReductionLib",ml_reds)
714 elif key == 'ImprovePS':
715 MLCard.set('ImprovePSPoint',2 if value else -1)
716 elif key == 'ForceMP':
717 mode = 4
718 elif key in MLCard:
719 MLCard.set(key,value)
720 else:
                raise Exception('The MadLoop option %s specified in function'%key+\
                    ' fix_MadLoopParamCard does not correspond to an option defined in'+\
                    ' MadLoop, nor is it specially handled in this function.')
724 if not mode is None:
725 MLCard.set('CTModeRun',mode)
726 MLCard.set('CTModeInit',mode)
727 MLCard.set('UseLoopFilter',loop_filter)
728 MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
729
730 MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
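    # Illustrative call (a sketch; the keys shown are the ones handled above):
    #
    #     self.fix_MadLoopParamCard(pjoin(export_dir, 'Cards'), mp=True,
    #         MLOptions={'MLReductionLib': [6, 1], 'ImprovePS': True})
    #
    # 'MLReductionLib' may be an int, a string or a list of reduction-tool ids,
    # 'ImprovePS' toggles ImprovePSPoint, and 'ForceMP' forces the quadruple
    # precision mode (CTMode 4).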
731
732 @classmethod
    def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name=None,
                     verbose=True, format='tuple', skip_compilation=False):
        """Compile and run ./check, then parse the output and return the result
        for the process with id = proc_id and the PSpoint if specified.
        If PS_name is not None the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""
739 if verbose:
740 sys.stdout.write('.')
741 sys.stdout.flush()
742
743 shell_name = None
744 directories = misc.glob('P%i_*' % proc_id, pjoin(working_dir, 'SubProcesses'))
745 if directories and os.path.isdir(directories[0]):
746 shell_name = os.path.basename(directories[0])
747
748
749 if not shell_name:
750 logging.info("Directory hasn't been created for process %s: %s", proc, directories)
751 return ((0.0, 0.0, 0.0, 0.0, 0), [])
752
753 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
754
755 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
756 if not skip_compilation:
757
758 if os.path.isfile(pjoin(dir_name,'check')):
759 os.remove(pjoin(dir_name,'check'))
760 try:
761 os.remove(pjoin(dir_name,'check_sa.o'))
762 os.remove(pjoin(dir_name,'loop_matrix.o'))
763 except OSError:
764 pass
765
766 devnull = open(os.devnull, 'w')
767 retcode = subprocess.call(['make','check'],
768 cwd=dir_name, stdout=devnull, stderr=devnull)
769 devnull.close()
770
771 if retcode != 0:
772 logging.info("Error while executing make in %s" % shell_name)
773 return ((0.0, 0.0, 0.0, 0.0, 0), [])
774
775
776 if PSpoint:
777 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
778
779
780 if not PS_name is None:
781 misc.write_PS_input(pjoin(dir_name, \
782 'PS.input_%s'%PS_name),PSpoint)
783
784 try:
785 output = subprocess.Popen('./check',
786 cwd=dir_name,
787 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
788 output.read()
789 output.close()
790 if os.path.exists(pjoin(dir_name,'result.dat')):
791 return cls.parse_check_output(open(pjoin(dir_name,\
792 'result.dat')),format=format)
793 else:
794 logging.warning("Error while looking for file %s"%str(os.path\
795 .join(dir_name,'result.dat')))
796 return ((0.0, 0.0, 0.0, 0.0, 0), [])
797 except IOError:
798 logging.warning("Error while executing ./check in %s" % shell_name)
799 return ((0.0, 0.0, 0.0, 0.0, 0), [])
800
801 @classmethod
803 """Parse the output string and return a pair where first four values are
804 the finite, born, single and double pole of the ME and the fourth is the
805 GeV exponent and the second value is a list of 4 momenta for all particles
806 involved. Return the answer in two possible formats, 'tuple' or 'dict'."""
807
808 res_dict = {'res_p':[],
809 'born':0.0,
810 'finite':0.0,
811 '1eps':0.0,
812 '2eps':0.0,
813 'gev_pow':0,
814 'export_format':'Default',
815 'accuracy':0.0,
816 'return_code':0,
817 'Split_Orders_Names':[],
818 'Loop_SO_Results':[],
819 'Born_SO_Results':[],
820 'Born_kept':[],
821 'Loop_kept':[]
822 }
823 res_p = []
824
825
826
827 if isinstance(output,(file,io.TextIOWrapper)) or isinstance(output,list):
828 text=output
829 elif isinstance(output,(str)) or (six.PY2 and isinstance(output, six.text_type)):
830 text=output.split('\n')
831 elif isinstance(output, bytes):
832 text=output.decode().split('\n')
833 else:
834 raise MadGraph5Error('Type for argument output not supported in'+\
835 ' parse_check_output: %s' % type(output))
836 for line in text:
837 splitline=line.split()
838 if len(splitline)==0:
839 continue
840 elif splitline[0]=='PS':
841 res_p.append([float(s) for s in splitline[1:]])
842 elif splitline[0]=='ASO2PI':
843 res_dict['alphaS_over_2pi']=float(splitline[1])
844 elif splitline[0]=='BORN':
845 res_dict['born']=float(splitline[1])
846 elif splitline[0]=='FIN':
847 res_dict['finite']=float(splitline[1])
848 elif splitline[0]=='1EPS':
849 res_dict['1eps']=float(splitline[1])
850 elif splitline[0]=='2EPS':
851 res_dict['2eps']=float(splitline[1])
852 elif splitline[0]=='EXP':
853 res_dict['gev_pow']=int(splitline[1])
854 elif splitline[0]=='Export_Format':
855 res_dict['export_format']=splitline[1]
856 elif splitline[0]=='ACC':
857 res_dict['accuracy']=float(splitline[1])
858 elif splitline[0]=='RETCODE':
859 res_dict['return_code']=int(splitline[1])
860 elif splitline[0]=='Split_Orders_Names':
861 res_dict['Split_Orders_Names']=splitline[1:]
862 elif splitline[0] in ['Born_kept', 'Loop_kept']:
863 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
864 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
865
866
867
868
869 res_dict[splitline[0]].append(\
870 ([int(el) for el in splitline[1:]],{}))
871 elif splitline[0]=='SO_Loop':
872 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
873 float(splitline[2])
874 elif splitline[0]=='SO_Born':
875 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
876 float(splitline[2])
877
878 res_dict['res_p'] = res_p
879
880 if format=='tuple':
881 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
882 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
883 else:
884 return res_dict
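    # parse_check_output() consumes the tokens written by the MadLoop driver in
    # result.dat, e.g. lines starting with PS, BORN, FIN, 1EPS, 2EPS, EXP, ACC,
    # RETCODE and the optional split-order blocks (Split_Orders_Names, SO_Loop,
    # SO_Born, ...). With format='tuple' it returns
    #     ((finite, born, 1eps, 2eps, gev_pow), [p1, p2, ...])
    # and with any other format the full dictionary built above.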
885
886 @staticmethod
888 """ Changes the file model_functions.f in the SOURCE of the process output
889 so as to change how logarithms are analytically continued and see how
890 it impacts the CMS check."""
891 valid_modes = ['default','recompile']
892 if not (mode in valid_modes or (isinstance(mode, list) and
893 len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
            raise MadGraph5Error("Mode '%s' not recognized"%mode+
                                 " in function apply_log_tweak.")
896
897 model_path = pjoin(proc_path,'Source','MODEL')
898 directories = misc.glob('P0_*', pjoin(proc_path,'SubProcesses'))
899 if directories and os.path.isdir(directories[0]):
900 exe_path = directories[0]
901 else:
            raise MadGraph5Error('Could not find a process executable '+\
                                 'directory in %s'%proc_path)
904 bu_path = pjoin(model_path, 'model_functions.f__backUp__')
905
906 if mode=='default':
907
908 if not os.path.isfile(bu_path):
909 raise MadGraph5Error('Back up file %s could not be found.'%bu_path)
910 shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
911 return
912
913 if mode=='recompile':
914 try:
915 os.remove(pjoin(model_path,'model_functions.o'))
916 os.remove(pjoin(proc_path,'lib','libmodel.a'))
917 except:
918 pass
919 misc.compile(cwd=model_path)
920
921 try:
922 os.remove(pjoin(exe_path,'check'))
923 except:
924 pass
925 misc.compile(arg=['check'], cwd=exe_path)
926 return
927
928 if mode[0]==mode[1]:
929 return
930
931
932 mp_prefix = 'MP_'
933 target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
934
935
936 if not os.path.isfile(bu_path):
937 shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
938 model_functions = open(pjoin(model_path,'model_functions.f'),'r')
939
940 new_model_functions = []
941 has_replaced = False
942 just_replaced = False
943 find_one_replacement= False
944 mp_mode = None
945 suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
946 replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
947 for line in model_functions:
948
949 if just_replaced:
950 if not re.match(r'\s{6}', line):
951 continue
952 else:
953 just_replaced = False
954 if mp_mode is None:
955
956 new_model_functions.append(line)
957 if (target_line%mp_prefix).lower() in line.lower():
958 mp_mode = mp_prefix
959 elif (target_line%'').lower() in line.lower():
960 mp_mode = ''
961 else:
962
963 if not has_replaced and re.match(replace_regex%mp_mode,line,
964 re.IGNORECASE):
965
966 if mode[0]=='log':
967 if mp_mode=='':
968 new_line =\
969 """ if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
970 reg%s=log(arg) %s TWOPII
971 else
972 reg%s=log(arg)
973 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
974 else:
975 new_line =\
976 """ if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
977 mp_reg%s=log(arg) %s TWOPII
978 else
979 mp_reg%s=log(arg)
980 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
981 else:
982 new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
983 ('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
984 new_model_functions.append(new_line)
985 just_replaced = True
986 has_replaced = True
987 find_one_replacement = True
988 else:
989 new_model_functions.append(line)
990 if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
991 mp_mode = None
992 has_replaced = False
993
994 if not find_one_replacement:
995 logger.warning('No replacement was found/performed for token '+
996 "'%s->%s'."%(mode[0],mode[1]))
997 else:
998 open(pjoin(model_path,'model_functions.f'),'w').\
999 write(''.join(new_model_functions))
1000 return
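    # In other words, apply_log_tweak(proc_path, ['log', 'logp']) rewrites the
    # analytic continuation of reg_log (and its quadruple-precision mp_ variant)
    # so that log(arg) picks up +TWOPII (or -TWOPII for 'logm') on the relevant
    # branch, 'recompile' rebuilds libmodel.a and the check executable, and
    # 'default' restores the backed-up model_functions.f.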
1001
1003 """ Modify loop_matrix.f so to have one external massless gauge boson
1004 polarization vector turned into its momentum. It is not a pretty and
1005 flexible solution but it works for this particular case."""
1006
1007 shell_name = None
1008 directories = misc.glob('P0_*', working_dir)
1009 if directories and os.path.isdir(directories[0]):
1010 shell_name = os.path.basename(directories[0])
1011
1012 dir_name = pjoin(working_dir, shell_name)
1013
1014
1015 ind=0
1016 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
1017 file_names[ind])):
1018 ind += 1
1019 if ind==len(file_names):
1020 raise Exception("No helas calls output file found.")
1021
1022 helas_file_name=pjoin(dir_name,file_names[ind])
1023 file = open(pjoin(dir_name,helas_file_name), 'r')
1024
1025 helas_calls_out=""
1026 original_file=""
1027 gaugeVectorRegExp=re.compile(\
1028 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
1029 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
1030 foundGauge=False
1031
1032 for line in file:
1033 helas_calls_out+=line
1034 original_file+=line
1035 if line.find("INCLUDE 'coupl.inc'") != -1 or \
1036 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
1037 helas_calls_out+=" INTEGER WARDINT\n"
1038 if not foundGauge:
1039 res=gaugeVectorRegExp.search(line)
1040 if res!=None:
1041 foundGauge=True
1042 helas_calls_out+=" DO WARDINT=1,4\n"
1043 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
1044 if not mp:
1045 helas_calls_out+=\
1046 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
1047 else:
1048 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1049 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1050 helas_calls_out+=" ENDDO\n"
1051 file.close()
1052
1053 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
1054
1059 """Class taking care of matrix element evaluation and running timing for
1060 loop processes."""
1061
1065
1066 @classmethod
1068 """ Return a dictionary of the parameter of the MadLoopParamCard.
1069 The key is the name of the parameter and the value is the corresponding
1070 string read from the card."""
1071
1072 return bannermod.MadLoopParam(MLCardPath)
1073
1074
1075 @classmethod
1077 """ Set the parameters in MadLoopParamCard to the values specified in
1078 the dictionary params.
1079 The key is the name of the parameter and the value is the corresponding
1080 string to write in the card."""
1081
1082 MLcard = bannermod.MadLoopParam(MLCardPath)
1083 for key,value in params.items():
1084 MLcard.set(key, value, changeifuserset=False)
1085 MLcard.write(MLCardPath, commentdefault=True)
1086
1088 """ Edit loop_matrix.f in order to skip the loop evaluation phase.
1089 Notice this only affects the double precision evaluation which is
1090 normally fine as we do not make the timing check on mp."""
1091
1092 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1093 loop_matrix = file.read()
1094 file.close()
1095
1096 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1097 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1098 if skip else '.FALSE.'), loop_matrix)
1099 file.write(loop_matrix)
1100 file.close()
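    # Both this helper and boot_time_setup below work by patching compile-time
    # flags in loop_matrix.f through a simple regex substitution (SKIPLOOPEVAL
    # here, BOOTANDSTOP below), so the process must be re-made for the change
    # to take effect.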
1101
1103 """ Edit loop_matrix.f in order to set the flag which stops the
1104 execution after booting the program (i.e. reading the color data)."""
1105
1106 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1107 loop_matrix = file.read()
1108 file.close()
1109
1110 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1111 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1112 if bootandstop else '.FALSE.'), loop_matrix)
1113 file.write(loop_matrix)
1114 file.close()
1115
    def setup_process(self, matrix_element, export_dir, reusing=False,
                      param_card=None, MLOptions={}, clean=True):
        """ Output the matrix_element given in argument and perform the initialization
        while providing some details about the output in the dictionary returned.
        Returns None if anything fails."""
1121
1122 infos={'Process_output': None,
1123 'HELAS_MODEL_compilation' : None,
1124 'dir_path' : None,
1125 'Initialization' : None,
1126 'Process_compilation' : None}
1127
1128 if not reusing and clean:
1129 if os.path.isdir(export_dir):
1130 clean_up(self.output_path)
1131 if os.path.isdir(export_dir):
1132 raise InvalidCmd(\
1133 "The directory %s already exist. Please remove it."\
1134 %str(export_dir))
1135 else:
1136 if not os.path.isdir(export_dir):
1137 raise InvalidCmd(\
1138 "Could not find the directory %s to reuse."%str(export_dir))
1139
1140
1141 if not reusing and clean:
1142 model = matrix_element['processes'][0].get('model')
1143
1144
1145 import madgraph.loop.loop_exporters as loop_exporters
1146 if self.loop_optimized_output:
1147 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1148 else:
1149 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1150
1151 MLoptions = {'clean': True,
1152 'complex_mass': self.cmass_scheme,
1153 'export_format':'madloop',
1154 'mp':True,
1155 'SubProc_prefix':'P',
1156 'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
1157 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1158 'cuttools_dir': self.cuttools_dir,
1159 'fortran_compiler':self.cmd.options['fortran_compiler'],
1160 'output_dependencies':self.cmd.options['output_dependencies']}
1161
1162 MLoptions.update(self.tir_dir)
1163
1164 start=time.time()
1165 FortranExporter = exporter_class(export_dir, MLoptions)
1166 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1167 FortranExporter.copy_template(model)
1168 FortranExporter.generate_subprocess_directory(matrix_element, FortranModel)
1169 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1170 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1171 for c in l]))
1172 FortranExporter.convert_model(self.full_model,wanted_lorentz,wanted_couplings)
1173 infos['Process_output'] = time.time()-start
1174 start=time.time()
1175 FortranExporter.finalize(matrix_element,"",self.cmd.options, ['nojpeg'])
1176 infos['HELAS_MODEL_compilation'] = time.time()-start
1177
1178
1179 if param_card != None:
1180 if isinstance(param_card, str):
1181 cp(pjoin(param_card),\
1182 pjoin(export_dir,'Cards','param_card.dat'))
1183 else:
1184 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1185
1186
1187
1188 MadLoopInitializer.fix_PSPoint_in_check(
1189 pjoin(export_dir,'SubProcesses'), read_ps = False, npoints = 4)
1190
1191 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1192 mp = False, loop_filter = True,MLOptions=MLOptions)
1193
1194 shell_name = None
1195 directories = misc.glob('P0_*', pjoin(export_dir, 'SubProcesses'))
1196 if directories and os.path.isdir(directories[0]):
1197 shell_name = os.path.basename(directories[0])
1198 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1199 infos['dir_path']=dir_name
1200
1201
1202
1203 if not MadLoopInitializer.need_MadLoopInit(
1204 export_dir, subproc_prefix='P'):
1205 return infos
1206
1207 attempts = [3,15]
1208
1209 try:
1210 os.remove(pjoin(dir_name,'check'))
1211 os.remove(pjoin(dir_name,'check_sa.o'))
1212 except OSError:
1213 pass
1214
1215 nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
1216 pjoin(export_dir,'SubProcesses'),infos,\
1217 req_files = ['HelFilter.dat','LoopFilter.dat'],
1218 attempts = attempts)
1219 if attempts is None:
1220 logger.error("Could not compile the process %s,"%shell_name+\
1221 " try to generate it via the 'generate' command.")
1222 return None
1223 if nPS_necessary is None:
1224 logger.error("Could not initialize the process %s"%shell_name+\
1225 " with %s PS points."%max(attempts))
1226 return None
1227 elif nPS_necessary > min(attempts):
1228 logger.warning("Could not initialize the process %s"%shell_name+\
1229 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1230
1231 return infos
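    # On success setup_process() returns the `infos` dictionary with the timing
    # entries filled in where applicable ('Process_output',
    # 'HELAS_MODEL_compilation', 'Process_compilation', 'Initialization')
    # together with 'dir_path', the SubProcesses/P0_* directory in which
    # MadLoop was initialized.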
1232
    def time_matrix_element(self, matrix_element, reusing=False,
                            param_card=None, keep_folder=False, options=None,
                            MLOptions={}):
        """ Output the matrix_element given in argument and give detailed
        information about the timing of its output and running."""
1238
1239
1240
1241 make_it_quick=False
1242
1243 if options and 'split_orders' in list(options.keys()):
1244 split_orders = options['split_orders']
1245 else:
1246 split_orders = -1
1247
1248 assert ((not reusing and isinstance(matrix_element, \
1249 helas_objects.HelasMatrixElement)) or (reusing and
1250 isinstance(matrix_element, base_objects.Process)))
1251 if not reusing:
1252 proc_name = matrix_element['processes'][0].shell_string()[2:]
1253 else:
1254 proc_name = matrix_element.shell_string()[2:]
1255
1256 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1257 temp_dir_prefix+"_%s"%proc_name)
1258
1259 res_timings = self.setup_process(matrix_element,export_dir, \
1260 reusing, param_card,MLOptions = MLOptions,clean=True)
1261
1262 if res_timings == None:
1263 return None
1264 dir_name=res_timings['dir_path']
1265
1266 def check_disk_usage(path):
1267 return subprocess.Popen("du -shc -L "+str(path), \
1268 stdout=subprocess.PIPE, shell=True).communicate()[0].decode().split()[-2]
1269
1270
1271
1272
1273
1274 res_timings['du_source']=check_disk_usage(pjoin(\
1275 export_dir,'Source','*','*.f'))
1276 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1277 res_timings['du_color']=check_disk_usage(pjoin(dir_name,
1278 'MadLoop5_resources','*.dat'))
1279 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1280
1281 if not res_timings['Initialization']==None:
1282 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1283 elif make_it_quick:
1284 time_per_ps_estimate = -1.0
1285 else:
1286
1287
1288 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1289 read_ps = False, npoints = 3, hel_config = -1,
1290 split_orders=split_orders)
1291 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1292 time_per_ps_estimate = run_time/3.0
1293
1294 self.boot_time_setup(dir_name,bootandstop=True)
1295 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1296 res_timings['Booting_time'] = run_time
1297 self.boot_time_setup(dir_name,bootandstop=False)
1298
1299
1300 contributing_hel=0
1301 n_contrib_hel=0
1302 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1303 proc_prefix = proc_prefix_file.read()
1304 proc_prefix_file.close()
1305 helicities = open(pjoin(dir_name,'MadLoop5_resources',
1306 '%sHelFilter.dat'%proc_prefix)).read().split()
1307 for i, hel in enumerate(helicities):
1308 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1309 if contributing_hel==0:
1310 contributing_hel=i+1
1311 n_contrib_hel += 1
1312
1313 if contributing_hel==0:
1314 logger.error("Could not find a contributing helicity "+\
1315 "configuration for process %s."%proc_name)
1316 return None
1317
1318 res_timings['n_contrib_hel']=n_contrib_hel
1319 res_timings['n_tot_hel']=len(helicities)
1320
1321
1322 if not make_it_quick:
1323 target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,50)
1324 else:
1325 target_pspoints_number = 10
1326
1327 logger.info("Checking timing for process %s "%proc_name+\
1328 "with %d PS points."%target_pspoints_number)
1329
1330 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1331 read_ps = False, npoints = target_pspoints_number*2, \
1332 hel_config = contributing_hel, split_orders=split_orders)
1333 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1334
1335 if compile_time == None: return None
1336
1337 res_timings['run_polarized_total']=\
1338 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1339
1340 if make_it_quick:
1341 res_timings['run_unpolarized_total'] = 1.0
1342 res_timings['ram_usage'] = 0.0
1343 else:
1344 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1345 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1346 split_orders=split_orders)
1347 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
1348 checkRam=True)
1349
1350 if compile_time == None: return None
1351 res_timings['run_unpolarized_total']=\
1352 (run_time-res_timings['Booting_time'])/target_pspoints_number
1353 res_timings['ram_usage'] = ram_usage
1354
1355 if not self.loop_optimized_output:
1356 return res_timings
1357
1358
1359
1360
1361
1362 self.skip_loop_evaluation_setup(dir_name,skip=True)
1363
1364 if make_it_quick:
1365 res_timings['run_unpolarized_coefs'] = 1.0
1366 else:
1367 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1368 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1369 split_orders=split_orders)
1370 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1371 if compile_time == None: return None
1372 res_timings['run_unpolarized_coefs']=\
1373 (run_time-res_timings['Booting_time'])/target_pspoints_number
1374
1375 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1376 read_ps = False, npoints = target_pspoints_number*2, \
1377 hel_config = contributing_hel, split_orders=split_orders)
1378 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1379 if compile_time == None: return None
1380 res_timings['run_polarized_coefs']=\
1381 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1382
1383
1384 self.skip_loop_evaluation_setup(dir_name,skip=False)
1385
1386 return res_timings
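    # The res_timings dictionary returned above collects, besides the
    # setup_process entries, the disk usage ('du_source', 'du_process',
    # 'du_color', 'du_exe'), the helicity bookkeeping ('n_contrib_hel',
    # 'n_tot_hel'), the booting time and the per-point running times
    # ('run_polarized_total', 'run_unpolarized_total' and, for the optimized
    # output, the corresponding '_coefs' timings).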
1387
1388
1389
1390
1391
    def check_matrix_element_stability(self, matrix_element, options=None,
                                       infos_IN=None, param_card=None, keep_folder=False,
                                       MLOptions={}):
        """ Output the matrix_element given in argument, run it for nPoints and return
        a dictionary containing the stability information on each of these points.
        If infos are provided, then the matrix element output is skipped and
        reused from a previous run together with the content of infos.
        """
1400
1401 if not options:
1402 reusing = False
1403 nPoints = 100
1404 split_orders = -1
1405 else:
1406 reusing = options['reuse']
1407 nPoints = options['npoints']
1408 split_orders = options['split_orders']
1409
1410 assert ((not reusing and isinstance(matrix_element, \
1411 helas_objects.HelasMatrixElement)) or (reusing and
1412 isinstance(matrix_element, base_objects.Process)))
1413
1414
1415 def format_PS_point(ps, rotation=0):
1416 """ Write out the specified PS point to the file dir_path/PS.input
1417 while rotating it if rotation!=0. We consider only rotations of 90
1418 but one could think of having rotation of arbitrary angle too.
1419 The first two possibilities, 1 and 2 are a rotation and boost
1420 along the z-axis so that improve_ps can still work.
1421 rotation=0 => No rotation
1422 rotation=1 => Z-axis pi/2 rotation
1423 rotation=2 => Z-axis pi/4 rotation
1424 rotation=3 => Z-axis boost
1425 rotation=4 => (x'=z,y'=-x,z'=-y)
1426 rotation=5 => (x'=-z,y'=y,z'=x)"""
1427 if rotation==0:
1428 p_out=copy.copy(ps)
1429 elif rotation==1:
1430 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1431 elif rotation==2:
1432 sq2 = math.sqrt(2.0)
1433 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1434 elif rotation==3:
1435 p_out = boost_momenta(ps, 3)
1436
1437
1438 elif rotation==4:
1439 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1440 elif rotation==5:
1441 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1442 else:
1443 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1444
1445 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
1446
1447 def pick_PS_point(proc, options):
1448 """ Randomly generate a PS point and make sure it is eligible. Then
1449 return it. Users can edit the cuts here if they want."""
1450
1451 p, w_rambo = self.get_momenta(proc, options)
1452 if options['events']:
1453 return p
1454
1455 while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
1456 p, w_rambo = self.get_momenta(proc, options)
1457
1458
1459
1460
1461 if len(p)==3:
1462 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1463 return p
1464
1465
1466
1467
1468 accuracy_threshold=1.0e-1
1469
1470
1471
1472 num_rotations = 1
1473
1474 if "MLReductionLib" not in MLOptions:
1475 tools=[1]
1476 else:
1477 tools=MLOptions["MLReductionLib"]
1478 tools=list(set(tools))
1479
1480
1481 tool_var={'pjfry':2,'golem':4,'samurai':5,'ninja':6,'collier':7}
1482 for tool in ['pjfry','golem','samurai','ninja','collier']:
1483 tool_dir='%s_dir'%tool
1484 if not tool_dir in self.tir_dir:
1485 continue
1486 tool_libpath=self.tir_dir[tool_dir]
1487 tool_libname="lib%s.a"%tool
1488 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1489 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1490 if tool_var[tool] in tools:
1491 tools.remove(tool_var[tool])
1492 if not tools:
1493 return None
1494
1495
1496 if not reusing:
1497 process = matrix_element['processes'][0]
1498 else:
1499 process = matrix_element
1500 proc_name = process.shell_string()[2:]
1501 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1502 temp_dir_prefix+"_%s"%proc_name)
1503
1504 tools_name=bannermod.MadLoopParam._ID_reduction_tool_map
1505
1506 return_dict={}
1507 return_dict['Stability']={}
1508 infos_save={'Process_output': None,
1509 'HELAS_MODEL_compilation' : None,
1510 'dir_path' : None,
1511 'Initialization' : None,
1512 'Process_compilation' : None}
1513
1514 for tool in tools:
1515 tool_name=tools_name[tool]
1516
1517
1518
1519
1520
1521 DP_stability = []
1522 QP_stability = []
1523
1524 Unstable_PS_points = []
1525
1526 Exceptional_PS_points = []
1527
1528 MLoptions=MLOptions
1529 MLoptions["MLReductionLib"]=tool
1530 clean = (tool==tools[0]) and not nPoints==0
1531 if infos_IN==None or (tool_name not in infos_IN):
1532 infos=infos_IN
1533 else:
1534 infos=infos_IN[tool_name]
1535
1536 if not infos:
1537 infos = self.setup_process(matrix_element,export_dir, \
1538 reusing, param_card,MLoptions,clean)
1539 if not infos:
1540 return None
1541
1542 if clean:
1543 infos_save['Process_output']=infos['Process_output']
1544 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1545 infos_save['dir_path']=infos['dir_path']
1546 infos_save['Process_compilation']=infos['Process_compilation']
1547 else:
1548 if not infos['Process_output']:
1549 infos['Process_output']=infos_save['Process_output']
1550 if not infos['HELAS_MODEL_compilation']:
1551 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1552 if not infos['dir_path']:
1553 infos['dir_path']=infos_save['dir_path']
1554 if not infos['Process_compilation']:
1555 infos['Process_compilation']=infos_save['Process_compilation']
1556
1557 dir_path=infos['dir_path']
1558
1559
1560 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1561 data_i = 0
1562
1563 if reusing:
1564
1565 data_i=0
1566 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1567 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1568 saved_run = save_load_object.load_from_file(pickle_path)
1569 if data_i>0:
1570 logger.info("Loading additional data stored in %s."%
1571 str(pickle_path))
1572 logger.info("Loaded data moved to %s."%str(pjoin(
1573 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1574 shutil.move(pickle_path,
1575 pjoin(dir_path,'LOADED_'+savefile%('%d'%data_i)))
1576 DP_stability.extend(saved_run['DP_stability'])
1577 QP_stability.extend(saved_run['QP_stability'])
1578 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1579 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1580 data_i += 1
1581
1582 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1583 'QP_stability':QP_stability,
1584 'Unstable_PS_points':Unstable_PS_points,
1585 'Exceptional_PS_points':Exceptional_PS_points}
1586
1587 if nPoints==0:
1588 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1589
1590 if data_i>1:
1591 save_load_object.save_to_file(pjoin(dir_path,
1592 savefile%'_0'),return_dict['Stability'][tool_name])
1593 continue
1594 else:
1595 logger.info("ERROR: Not reusing a directory or any pickled"+
1596 " result for tool %s and the number"%tool_name+\
1597 " of point for the check is zero.")
1598 return None
1599
1600 logger.info("Checking stability of process %s "%proc_name+\
1601 "with %d PS points by %s."%(nPoints,tool_name))
1602 if infos['Initialization'] != None:
1603 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1604 sec_needed = int(time_per_ps_estimate*nPoints*4)
1605 else:
1606 sec_needed = 0
1607
1608 progress_bar = None
1609 time_info = False
1610 if sec_needed>5:
1611 time_info = True
1612 logger.info("This check should take about "+\
1613 "%s to run. Started on %s."%(\
1614 str(datetime.timedelta(seconds=sec_needed)),\
1615 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1616 if logger.getEffectiveLevel()<logging.WARNING and \
1617 (sec_needed>5 or infos['Initialization'] == None):
1618 widgets = ['Stability check:', pbar.Percentage(), ' ',
1619 pbar.Bar(),' ', pbar.ETA(), ' ']
1620 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1621 fd=sys.stdout)
1622 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1623 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1624
1625
1626
1627 try:
1628 os.remove(pjoin(dir_path,'check'))
1629 os.remove(pjoin(dir_path,'check_sa.o'))
1630 except OSError:
1631 pass
1632
1633 devnull = open(os.devnull, 'w')
1634 retcode = subprocess.call(['make','check'],
1635 cwd=dir_path, stdout=devnull, stderr=devnull)
1636 devnull.close()
1637 if retcode != 0:
1638 logging.info("Error while executing make in %s" % dir_path)
1639 return None
1640
1641
1642
1643
1644 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1645
1646
1647 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1648 checkerName = 'StabilityCheckDriver.f'
1649 else:
1650 checkerName = 'StabilityCheckDriver_loop_induced.f'
1651
1652 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1653 checkerName),'r') as checkerFile:
1654 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1655 checkerToWrite = checkerFile.read()%{'proc_prefix':
1656 proc_prefix.read()}
1657 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1658 checkerFile.write(checkerToWrite)
1659 checkerFile.close()
1660
1661
1662
1663
1664
1665 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1666 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1667 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1668 os.remove(pjoin(dir_path,'loop_matrix.o'))
1669 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1670 mode='fortran', job_specs = False)
1671
1672
1673
1674
1675 if len(process['legs'])==3:
1676 self.fix_MadLoopParamCard(dir_path, mp=False,
1677 loop_filter=False, DoubleCheckHelicityFilter=True)
1678
1679 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1680 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1681 cwd=dir_path, bufsize=0)
1682 start_index = len(DP_stability)
1683 if progress_bar!=None:
1684 progress_bar.start()
1685
1686
1687 interrupted = False
1688
1689
1690 retry = 0
1691
1692 i=start_index
1693 if options and 'events' in options and options['events']:
1694
1695 import MadSpin.decay as madspin
1696 fsock = open(options['events'])
1697 self.event_file = madspin.Event(fsock)
1698 while i<(start_index+nPoints):
1699
1700 qp_dict={}
1701 dp_dict={}
1702 UPS = None
1703 EPS = None
1704
1705 if retry==0:
1706 p = pick_PS_point(process, options)
1707
1708 try:
1709 if progress_bar!=None:
1710 progress_bar.update(i+1-start_index)
1711
1712 PSPoint = format_PS_point(p,0)
1713 dp_res=[]
1714 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1715 split_orders=split_orders))
1716 dp_dict['CTModeA']=dp_res[-1]
1717 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1718 split_orders=split_orders))
1719 dp_dict['CTModeB']=dp_res[-1]
1720 for rotation in range(1,num_rotations+1):
1721 PSPoint = format_PS_point(p,rotation)
1722 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1723 split_orders=split_orders))
1724 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1725
1726 if any([not res for res in dp_res]):
1727 return None
1728 dp_accuracy =((max(dp_res)-min(dp_res))/
1729 abs(sum(dp_res)/len(dp_res)))
1730 dp_dict['Accuracy'] = dp_accuracy
1731 if dp_accuracy>accuracy_threshold:
1732 if tool in [1,6]:
1733
1734 UPS = [i,p]
1735 qp_res=[]
1736 PSPoint = format_PS_point(p,0)
1737 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1738 split_orders=split_orders))
1739 qp_dict['CTModeA']=qp_res[-1]
1740 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1741 split_orders=split_orders))
1742 qp_dict['CTModeB']=qp_res[-1]
1743 for rotation in range(1,num_rotations+1):
1744 PSPoint = format_PS_point(p,rotation)
1745 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1746 split_orders=split_orders))
1747 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1748
1749 if any([not res for res in qp_res]):
1750 return None
1751
1752 qp_accuracy = ((max(qp_res)-min(qp_res))/
1753 abs(sum(qp_res)/len(qp_res)))
1754 qp_dict['Accuracy']=qp_accuracy
1755 if qp_accuracy>accuracy_threshold:
1756 EPS = [i,p]
1757 else:
1758
1759
1760 UPS = [i,p]
1761
1762 except KeyboardInterrupt:
1763 interrupted = True
1764 break
1765 except IOError as e:
1766 if e.errno == errno.EINTR:
1767 if retry==100:
1768 logger.error("Failed hundred times consecutively because"+
1769 " of system call interruptions.")
1770 raise
1771 else:
1772 logger.debug("Recovered from a system call interruption."+\
1773 "PSpoint #%i, Attempt #%i."%(i,retry+1))
1774
1775 time.sleep(0.5)
1776
1777 retry = retry+1
1778
1779 try:
1780 StabChecker.kill()
1781 except Exception:
1782 pass
1783 StabChecker = subprocess.Popen(\
1784 [pjoin(dir_path,'StabilityCheckDriver')],
1785 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1786 stderr=subprocess.PIPE, cwd=dir_path, bufsize=0)
1787 continue
1788 else:
1789 raise
1790
1791
1792
1793 retry = 0
1794
1795 i=i+1
1796
1797
1798 DP_stability.append(dp_dict)
1799 QP_stability.append(qp_dict)
1800 if not EPS is None:
1801 Exceptional_PS_points.append(EPS)
1802 if not UPS is None:
1803 Unstable_PS_points.append(UPS)
1804
1805 if progress_bar!=None:
1806 progress_bar.finish()
1807 if time_info:
1808 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1809 "%d-%m-%Y %H:%M"))
1810
1811
1812 if not interrupted:
1813 StabChecker.stdin.write('y\n'.encode())
1814 else:
1815 StabChecker.kill()
1816
1817
1818
1819
1820
1821
1822
1823 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1824 return_dict['Stability'][tool_name])
1825
1826 if interrupted:
1827 break
1828
1829 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1830 reusing else matrix_element
1831 return return_dict
1832
1833 @classmethod
1834 def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1835 split_orders=-1):
1836 """ This version of get_me_value is simplified for the purpose of this
1837 class. No compilation is necessary. The CT mode can be specified."""
1838
1839
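# Inputs are piped to the StabilityCheckDriver in a fixed order: a leading
# control byte ('\x1a', presumably acting as a prompt/flush marker for the
# driver), the command '1', the CT mode, the formatted PS point, mu_r, the
# helicity index and the split_orders index.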
1840 StabChecker.stdin.write('\x1a'.encode())
1841 StabChecker.stdin.write('1\n'.encode())
1842 StabChecker.stdin.write(('%d\n'%mode).encode())
1843 StabChecker.stdin.write(('%s\n'%PSpoint).encode())
1844 StabChecker.stdin.write(('%.16E\n'%mu_r).encode())
1845 StabChecker.stdin.write(('%d\n'%hel).encode())
1846 StabChecker.stdin.write(('%d\n'%split_orders).encode())
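# The driver prints its result between the '##TAG#RESULT_START#TAG##' and
# '##TAG#RESULT_STOP#TAG##' markers on stdout; everything in between is
# collected into 'res' and parsed below.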
1847
1848
1849 try:
1850
1851 while True:
1852 output = StabChecker.stdout.readline().decode()
1853
1854 if output != '':
1855 last_non_empty = output
1856 if output==' ##TAG#RESULT_START#TAG##\n':
1857 break
1858
1859 ret_code = StabChecker.poll()
1860 if not ret_code is None:
1861 output = StabChecker.stdout.readline().decode()
1862 if output != '':
1863 last_non_empty = output
1864 error = StabChecker.stderr.readline().decode()
1865 raise MadGraph5Error("The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1866 (ret_code, last_non_empty, error))
1867
1868 res = ""
1869 while True:
1870 output = StabChecker.stdout.readline().decode()
1871 if output != '':
1872 last_non_empty = output
1873 if str(output)==' ##TAG#RESULT_STOP#TAG##\n':
1874 break
1875 else:
1876 res += output
1877 ret_code = StabChecker.poll()
1878 if not ret_code is None:
1879 output = StabChecker.stdout.readline().decode()
1880 if output != '':
1881 last_non_empty = output
1882 error = StabChecker.stderr.readline().decode()
1883 raise MadGraph5Error("The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1884 (ret_code, last_non_empty, error))
1885
1886 return cls.parse_check_output(res,format='tuple')[0][0]
1887 except IOError as e:
1888 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1889 raise e
1890
1893 """ Perform a python evaluation of the matrix element independently for
1894 all possible helicity configurations for a fixed number of points N and
1895 return the average for each in the format [[hel_config, eval],...].
1896 This is used to determine which helicity configurations vanish or are
1897 dependent at generation time, so that the output can be set up accordingly.
1898 This is not yet implemented at LO."""
1899
1900
1901 assert isinstance(process,base_objects.Process)
1902 assert process.get('perturbation_couplings')==[]
1903
1904 N_eval=50
1905
1906 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
1907 auth_skipping = False, reuse = True)
1908
1909 amplitude = diagram_generation.Amplitude(process)
1910 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
1911
1912 cumulative_helEvals = []
1913
1914 for i in range(N_eval):
1915 p, w_rambo = evaluator.get_momenta(process)
1916 helEvals = evaluator.evaluate_matrix_element(\
1917 matrix_element, p = p, output = 'helEvals')['helEvals']
1918 if cumulative_helEvals==[]:
1919 cumulative_helEvals=copy.copy(helEvals)
1920 else:
1921 cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
1922 enumerate(cumulative_helEvals)]
1923
1924
1925 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
1926
1927
1928
1929 clean_added_globals(ADDED_GLOBAL)
1930
1931 return cumulative_helEvals
1932
1933 def run_multiprocs_no_crossings(function, multiprocess, stored_quantities,
1934 opt=None, options=None):
1935 """A wrapper function for running an iteration of a function over
1936 a multiprocess, without having to first create a process list
1937 (which makes a big difference for very large multiprocesses).
1938 stored_quantities is a dictionary for any quantities that we want
1939 to reuse between runs."""
1940
1941 model = multiprocess.get('model')
1942 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
1943 if not leg.get('state')]
1944 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
1945 if leg.get('state')]
1946
1947 id_anti_id_dict = {}
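# Map every external PDG id to its antiparticle and back, so that conjugated
# legs can be recognised when testing whether a crossing was already checked.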
1948 for id in set(tuple(sum(isids+fsids, []))):
1949 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
1950 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
1951 sorted_ids = []
1952 results = []
1953 for is_prod in itertools.product(*isids):
1954 for fs_prod in itertools.product(*fsids):
1955
1956
1957 if check_already_checked(is_prod, fs_prod, sorted_ids,
1958 multiprocess, model, id_anti_id_dict):
1959 continue
1960
1961 process = multiprocess.get_process_with_legs(base_objects.LegList(\
1962 [base_objects.Leg({'id': id, 'state':False}) for \
1963 id in is_prod] + \
1964 [base_objects.Leg({'id': id, 'state':True}) for \
1965 id in fs_prod]))
1966
1967 if opt is not None:
1968 if isinstance(opt, dict):
1969 try:
1970 value = opt[process.base_string()]
1971 except Exception:
1972 continue
1973 result = function(process, stored_quantities, value, options=options)
1974 else:
1975 result = function(process, stored_quantities, opt, options=options)
1976 else:
1977 result = function(process, stored_quantities, options=options)
1978
1979 if result:
1980 results.append(result)
1981
1982 return results
1983
1984
1985
1986
1987
1988 def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
1989 id_anti_id_dict = {}):
1990 """Check if process already checked, if so return True, otherwise add
1991 process and antiprocess to sorted_ids."""
1992
1993
1994 if id_anti_id_dict:
1995 is_ids = [id_anti_id_dict[id] for id in \
1996 is_ids]
1997 else:
1998 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
1999 is_ids]
2000
2001 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
2002 [process.get('id')])
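# Canonical key: initial-state ids are conjugated, then all external ids are
# sorted and the process id appended, so different crossings of the same set
# of external particles produce the same key.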
2003
2004 if ids in sorted_ids:
2005
2006 return True
2007
2008
2009 sorted_ids.append(ids)
2010
2011
2012 return False
2013
2016 def generate_loop_matrix_element(process_definition, reuse, output_path=None,
2017 cmd=FakeInterface(), proc_name=None, loop_filter=None):
2019 """ Generate a loop matrix element from the process definition, and return
2020 it along with the timing information dictionary.
2021 If reuse is True, it reuses the already output directory if found.
2022 There is the possibility of specifying the proc_name."""
2023
2024 assert isinstance(process_definition,
2025 (base_objects.ProcessDefinition,base_objects.Process))
2026 assert process_definition.get('perturbation_couplings')!=[]
2027
2028 if isinstance(process_definition,base_objects.ProcessDefinition):
2029 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
2030 raise InvalidCmd("This check can only be performed on single "+
2031 " processes. (i.e. without multiparticle labels).")
2032
2033 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2034 if not leg.get('state')]
2035 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2036 if leg.get('state')]
2037
2038
2039 process = process_definition.get_process(isids,fsids)
2040 else:
2041 process = process_definition
2042
2043 if not output_path is None:
2044 root_path = output_path
2045 else:
2046 root_path = cmd._mgme_dir
2047
2048 timing = {'Diagrams_generation': None,
2049 'n_loops': None,
2050 'HelasDiagrams_generation': None,
2051 'n_loop_groups': None,
2052 'n_loop_wfs': None,
2053 'loop_wfs_ranks': None}
2054
2055 if proc_name:
2056 proc_dir = pjoin(root_path,proc_name)
2057 else:
2058 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
2059 '_'.join(process.shell_string().split('_')[1:])))
2060 if reuse and os.path.isdir(proc_dir):
2061 logger.info("Reusing directory %s"%str(proc_dir))
2062
2063 return timing, process
2064
2065 logger.info("Generating p%s"%process_definition.nice_string()[1:])
2066
2067 start=time.time()
2068 try:
2069 amplitude = loop_diagram_generation.LoopAmplitude(process,
2070 loop_filter=loop_filter)
2071 except InvalidCmd:
2072
2073
2074 return time.time()-start, None
2075 if not amplitude.get('diagrams'):
2076
2077 return time.time()-start, None
2078
2079
2080
2081 loop_optimized_output = cmd.options['loop_optimized_output']
2082 timing['Diagrams_generation']=time.time()-start
2083 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2084 start=time.time()
2085
2086 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2087 optimized_output = loop_optimized_output,gen_color=True)
2088
2089
2090
2091 matrix_element.compute_all_analytic_information()
2092 timing['HelasDiagrams_generation']=time.time()-start
2093
2094 if loop_optimized_output:
2095 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2096 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2097 ldiag.get('loop_wavefunctions')]
2098 timing['n_loop_wfs']=len(lwfs)
2099 timing['loop_wfs_ranks']=[]
2100 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2101 for l in lwfs])+1):
2102 timing['loop_wfs_ranks'].append(\
2103 len([1 for l in lwfs if \
2104 l.get_analytic_info('wavefunction_rank')==rank]))
2105
2106 return timing, matrix_element
2107
2108
2109
2110
2111 def check_profile(process_definition, param_card = None,cuttools="",tir={},
2112 options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2113 """For a single loop process, check both its timings and then its stability
2114 in one go without regenerating it."""
2115
2116 if 'reuse' not in options:
2117 keep_folder=False
2118 else:
2119 keep_folder = options['reuse']
2120
2121 model=process_definition.get('model')
2122
2123 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2124 keep_folder,output_path=output_path,cmd=cmd)
2125 reusing = isinstance(matrix_element, base_objects.Process)
2126 options['reuse'] = reusing
2127 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2128 model=model, output_path=output_path, cmd=cmd)
2129
2130 if not myProfiler.loop_optimized_output:
2131 MLoptions={}
2132 else:
2133 MLoptions=MLOptions
2134
2135 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2136 param_card, keep_folder=keep_folder,options=options,
2137 MLOptions = MLoptions)
2138
2139 if timing2 == None:
2140 return None, None
2141 
2142 timing2['reduction_tool'] = MLoptions['MLReductionLib'][0]
2143
2144
2145 timing = dict(list(timing1.items())+list(timing2.items()))
2146 stability = myProfiler.check_matrix_element_stability(matrix_element,
2147 options=options, infos_IN=timing,param_card=param_card,
2148 keep_folder = keep_folder,
2149 MLOptions = MLoptions)
2150 if stability == None:
2151 return None, None
2152 else:
2153 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2154 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2155 return timing, stability
2156
2157
2158
2159
2160 def check_stability(process_definition, param_card = None,cuttools="",tir={},
2161 options=None,nPoints=100, output_path=None,
2162 cmd = FakeInterface(), MLOptions = {}):
2163 """For a single loop process, check the numerical stability of its loop
2164 matrix element evaluation over a sample of nPoints phase-space points."""
2165
2166 if "reuse" in options:
2167 reuse=options['reuse']
2168 else:
2169 reuse=False
2170
2172 keep_folder = reuse
2173 model=process_definition.get('model')
2174
2175 timing, matrix_element = generate_loop_matrix_element(process_definition,
2176 reuse, output_path=output_path, cmd=cmd)
2177 reusing = isinstance(matrix_element, base_objects.Process)
2178 options['reuse'] = reusing
2179 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2180 output_path=output_path,model=model,cmd=cmd)
2181
2182 if not myStabilityChecker.loop_optimized_output:
2183 MLoptions = {}
2184 else:
2185 MLoptions = MLOptions
2186
2187 if 'COLLIERComputeUVpoles' not in MLoptions:
2188 MLoptions['COLLIERComputeUVpoles']=False
2189 if 'COLLIERComputeIRpoles' not in MLoptions:
2190 MLoptions['COLLIERComputeIRpoles']=False
2191
2192 if 'COLLIERRequiredAccuracy' not in MLoptions:
2193 MLoptions['COLLIERRequiredAccuracy']=1e-13
2194
2195 if 'COLLIERUseInternalStabilityTest' not in MLoptions:
2196 MLoptions['COLLIERUseInternalStabilityTest']=False
2197
2198
2199
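# The COLLIER global cache is switched off here, presumably so that rotated
# copies of the same kinematic point are recomputed rather than served from
# the cache.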
2200 MLoptions['COLLIERGlobalCache'] = 0
2201
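# Integer codes used in MLReductionLib below: 1=CutTools, 2=PJFry++, 3=IREGI,
# 4=Golem95, 5=Samurai, 6=Ninja, 7=COLLIER.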
2202 if "MLReductionLib" not in MLOptions:
2203 MLoptions["MLReductionLib"] = []
2204 if cuttools:
2205 MLoptions["MLReductionLib"].extend([1])
2206 if "iregi_dir" in tir:
2207 MLoptions["MLReductionLib"].extend([3])
2208 if "pjfry_dir" in tir:
2209 MLoptions["MLReductionLib"].extend([2])
2210 if "golem_dir" in tir:
2211 MLoptions["MLReductionLib"].extend([4])
2212 if "samurai_dir" in tir:
2213 MLoptions["MLReductionLib"].extend([5])
2214 if "ninja_dir" in tir:
2215 MLoptions["MLReductionLib"].extend([6])
2216 if "collier_dir" in tir:
2217 MLoptions["MLReductionLib"].extend([7])
2218
2219 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2220 options=options,param_card=param_card,
2221 keep_folder=keep_folder,
2222 MLOptions=MLoptions)
2223
2224 if stability == None:
2225 return None
2226 else:
2227 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2228 return stability
2229
2230
2231
2232
2233 def check_timing(process_definition, param_card= None, cuttools="",tir={},
2234 output_path=None, options={}, cmd = FakeInterface(),
2235 MLOptions = {}):
2236 """For a single loop process, give a detailed summary of the generation and
2237 execution timing."""
2238
2239 if 'reuse' not in options:
2240 keep_folder = False
2241 else:
2242 keep_folder = options['reuse']
2243 model=process_definition.get('model')
2244 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2245 keep_folder, output_path=output_path, cmd=cmd)
2246 reusing = isinstance(matrix_element, base_objects.Process)
2247 options['reuse'] = reusing
2248 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2249 output_path=output_path, cmd=cmd)
2250
2251 if not myTimer.loop_optimized_output:
2252 MLoptions = {}
2253 else:
2254 MLoptions = MLOptions
2255
2256 if 'COLLIERComputeUVpoles' not in MLoptions:
2257 MLoptions['COLLIERComputeUVpoles']=False
2258 if 'COLLIERComputeIRpoles' not in MLoptions:
2259 MLoptions['COLLIERComputeIRpoles']=False
2260
2261 if 'COLLIERGlobalCache' not in MLoptions:
2262 MLoptions['COLLIERGlobalCache']=-1
2263
2264 if 'MLReductionLib' not in MLoptions or \
2265 len(MLoptions['MLReductionLib'])==0:
2266 MLoptions['MLReductionLib'] = [6]
2267
2268 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2269 keep_folder = keep_folder, options=options,
2270 MLOptions = MLoptions)
2271
2272 if timing2 == None:
2273 return None
2274 else:
2275
2276 res = dict(list(timing1.items())+list(timing2.items()))
2277 res['loop_optimized_output']=myTimer.loop_optimized_output
2278 res['reduction_tool'] = MLoptions['MLReductionLib'][0]
2279 return res
2280
2281
2282
2283
2284 def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2285 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2286 """Check processes by generating them with all possible orderings
2287 of particles (which means different diagram building and Helas
2288 calls), and comparing the resulting matrix element values."""
2289
2290 cmass_scheme = cmd.options['complex_mass_scheme']
2291 if isinstance(processes, base_objects.ProcessDefinition):
2292
2293
2294 multiprocess = processes
2295 model = multiprocess.get('model')
2296
2297
2298 if multiprocess.get('perturbation_couplings')==[]:
2299 evaluator = MatrixElementEvaluator(model,
2300 auth_skipping = True, reuse = False, cmd = cmd)
2301 else:
2302 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2303 model=model, auth_skipping = True,
2304 reuse = False, output_path=output_path, cmd = cmd)
2305
2306 results = run_multiprocs_no_crossings(check_process,
2307 multiprocess,
2308 evaluator,
2309 quick,
2310 options)
2311
2312 if "used_lorentz" not in evaluator.stored_quantities:
2313 evaluator.stored_quantities["used_lorentz"] = []
2314
2315 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2316
2317 clean_up(output_path)
2318
2319 return results, evaluator.stored_quantities["used_lorentz"]
2320
2321 elif isinstance(processes, base_objects.Process):
2322 processes = base_objects.ProcessList([processes])
2323 elif isinstance(processes, base_objects.ProcessList):
2324 pass
2325 else:
2326 raise InvalidCmd("processes is of non-supported format")
2327
2328 if not processes:
2329 raise InvalidCmd("No processes given")
2330
2331 model = processes[0].get('model')
2332
2333
2334 if processes[0].get('perturbation_couplings')==[]:
2335 evaluator = MatrixElementEvaluator(model, param_card,
2336 auth_skipping = True, reuse = False, cmd = cmd)
2337 else:
2338 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2339 model=model,param_card=param_card,
2340 auth_skipping = True, reuse = False,
2341 output_path=output_path, cmd = cmd)
2342
2343
2344
2345 sorted_ids = []
2346 comparison_results = []
2347
2348
2349 for process in processes:
2350
2351
2352 if check_already_checked([l.get('id') for l in process.get('legs') if \
2353 not l.get('state')],
2354 [l.get('id') for l in process.get('legs') if \
2355 l.get('state')],
2356 sorted_ids, process, model):
2357 continue
2358
2359 res = check_process(process, evaluator, quick, options)
2360 if res:
2361 comparison_results.append(res)
2362
2363 if "used_lorentz" not in evaluator.stored_quantities:
2364 evaluator.stored_quantities["used_lorentz"] = []
2365
2366 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2367
2368 clean_up(output_path)
2369
2370 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2371
2372 def check_process(process, evaluator, quick, options=None):
2373 """Check the helas calls for a process by generating the process
2374 using all different permutations of the process legs (or, if
2375 quick, use a subset of permutations), and check that the matrix
2376 element is invariant under this."""
2377
2378 model = process.get('model')
2379
2380
2381 for i, leg in enumerate(process.get('legs')):
2382 leg.set('number', i+1)
2383
2384 logger.info("Checking crossings of %s" % \
2385 process.nice_string().replace('Process:', 'process'))
2386
2387 process_matrix_elements = []
2388
2389
2390
2391 if quick:
2392 leg_positions = [[] for leg in process.get('legs')]
2393 quick = list(range(1,len(process.get('legs')) + 1))
2394
2395 values = []
2396
2397
2398 number_checked=0
2399 for legs in itertools.permutations(process.get('legs')):
2400
2401 order = [l.get('number') for l in legs]
2402 if quick:
2403 found_leg = True
2404 for num in quick:
2405
2406
2407 leg_position = legs.index([l for l in legs if \
2408 l.get('number') == num][0])
2409
2410 if not leg_position in leg_positions[num-1]:
2411 found_leg = False
2412 leg_positions[num-1].append(leg_position)
2413
2414 if found_leg:
2415 continue
2416
2417
2418
2419 if quick and process.get('perturbation_couplings') and number_checked >3:
2420 continue
2421
2422 legs = base_objects.LegList(legs)
2423
2424 if order != list(range(1,len(legs) + 1)):
2425 logger.info("Testing permutation: %s" % \
2426 order)
2427
2428 newproc = copy.copy(process)
2429 newproc.set('legs',legs)
2430
2431
2432 try:
2433 if newproc.get('perturbation_couplings')==[]:
2434 amplitude = diagram_generation.Amplitude(newproc)
2435 else:
2436
2437 loop_base_objects.cutting_method = 'optimal' if \
2438 number_checked%2 == 0 else 'default'
2439 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2440 except InvalidCmd:
2441 result=False
2442 else:
2443 result = amplitude.get('diagrams')
2444
2445 loop_base_objects.cutting_method = 'optimal'
2446
2447 if not result:
2448
2449 logging.info("No diagrams for %s" % \
2450 process.nice_string().replace('Process', 'process'))
2451 break
2452
2453 if order == list(range(1,len(legs) + 1)):
2454
2455 p, w_rambo = evaluator.get_momenta(process, options)
2456
2457
2458 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2459 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2460 gen_color=False)
2461 else:
2462 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2463 optimized_output=evaluator.loop_optimized_output)
2464
2465
2466
2467
2468 if amplitude.get('process').get('has_born'):
2469
2470
2471 if matrix_element in process_matrix_elements:
2472
2473
2474 continue
2475
2476 process_matrix_elements.append(matrix_element)
2477
2478 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2479 options=options)
2480 if res == None:
2481 break
2482
2483 values.append(res[0])
2484 number_checked += 1
2485
2486
2487
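# Bail out early if the evaluations already disagree at the percent level;
# the final pass/fail threshold applied below is much tighter (1e-5 or 1e-8).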
2488 if abs(max(values)) + abs(min(values)) > 0 and \
2489 2 * abs(max(values) - min(values)) / \
2490 (abs(max(values)) + abs(min(values))) > 0.01:
2491 break
2492
2493
2494 if not values:
2495 return None
2496
2497
2498
2499 diff = 0
2500 if abs(max(values)) + abs(min(values)) > 0:
2501 diff = 2* abs(max(values) - min(values)) / \
2502 (abs(max(values)) + abs(min(values)))
2503
2504
2505 if process.get('perturbation_couplings'):
2506 passed = diff < 1.e-5
2507 else:
2508 passed = diff < 1.e-8
2509
2510 return {"process": process,
2511 "momenta": p,
2512 "values": values,
2513 "difference": diff,
2514 "passed": passed}
2515
2516 def clean_up(mg_root):
2517 """Clean up the possible left-over outputs from 'evaluate_matrix_element' of
2518 the LoopMatrixEvaluator (when its argument proliferate is set to True)."""
2519 
2520 if mg_root is None:
2521 return
2522 
2523 directories = misc.glob('%s*' % temp_dir_prefix, mg_root)
2524 if directories != []:
2525 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2526 for dir in directories:
2527
2528 if os.path.isdir(pjoin(dir,'SubProcesses')):
2529 shutil.rmtree(dir)
2530
2539
2540 def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2541 """Present the results from a timing and stability consecutive check"""
2542
2543
2544 opt = timing['loop_optimized_output']
2545
2546 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2547 ' output:\n'
2548 text += output_timings(myprocdef,timing)
2549
2550 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2551 ' output:\n'
2552 text += output_stability(stability,output_path, reusing=reusing)
2553
2554 mode = 'optimized' if opt else 'default'
2555 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2556 %(mode,stability['Process'].shell_string()))
2557 logFile = open(logFilePath, 'w')
2558 logFile.write(text)
2559 logFile.close()
2560 logger.info('Log of this profile check was output to file %s'\
2561 %str(logFilePath))
2562 return text
2563
2564 def output_stability(stability, output_path, reusing=False):
2565 """Present the result of a stability check in a nice format.
2566 The full info is written to 'stability_<mode>_<proc_shell_string>.log'
2567 under the output_path folder."""
2568
2569 def accuracy(eval_list):
2570 """ Compute the accuracy from different evaluations."""
2571 return (2.0*(max(eval_list)-min(eval_list))/
2572 abs(max(eval_list)+min(eval_list)))
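# e.g. accuracy([1.0, 1.0001]) ~ 1e-4: twice the spread divided by the midpoint.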
2573
2574 def best_estimate(eval_list):
2575 """ Returns the best estimate from different evaluations."""
2576 return (max(eval_list)+min(eval_list))/2.0
2577
2578 def loop_direction_test_power(eval_list):
2579 """ Computes the loop direction test power P as follows:
2580 P = accuracy(loop_dir_test) / accuracy(all_test)
2581 so that P is large if the loop direction test is effective.
2582 The tuple returned is (log(median(P)),log(min(P)),frac)
2583 where frac is the fraction of events with powers smaller than -3,
2584 i.e. events for which the loop direction test reports an accuracy
2585 three digits better than it really is according to the other
2586 tests."""
2587 powers=[]
2588 for eval in eval_list:
2589 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2590
2591 other_evals = [eval[key] for key in eval.keys() if key not in \
2592 ['CTModeB','Accuracy']]
2593 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2594 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2595
2596 n_fail=0
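# Count evaluations where the loop direction test overestimates the accuracy
# by more than three orders of magnitude (log10(P) < -3).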
2597 for p in powers:
2598 if (math.log(p)/math.log(10))<-3:
2599 n_fail+=1
2600
2601 if len(powers)==0:
2602 return (None,None,None)
2603
2604 return (math.log(median(powers))/math.log(10),
2605 math.log(min(powers))/math.log(10),
2606 n_fail/len(powers))
2607
2608 def test_consistency(dp_eval_list, qp_eval_list):
2609 """ Computes the consistency test C from the DP and QP evaluations.
2610 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2611 So a consistent test would have C as close to one as possible.
2612 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2613 consistencies = []
2614 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2615 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2616 if key!='Accuracy']
2617 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2618 if key!='Accuracy']
2619 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2620 accuracy(dp_evals)!=0.0:
2621 consistencies.append(accuracy(dp_evals)/(abs(\
2622 best_estimate(qp_evals)-best_estimate(dp_evals))))
2623
2624 if len(consistencies)==0:
2625 return (None,None,None)
2626
2627 return (math.log(median(consistencies))/math.log(10),
2628 math.log(min(consistencies))/math.log(10),
2629 math.log(max(consistencies))/math.log(10))
2630
2631 def median(orig_list):
2632 """ Find the median of a float list (the list is sorted internally). """
2633 tmp=copy.copy(orig_list)
2634 tmp.sort()
2635 if len(tmp)%2==0:
2636 return (tmp[int((len(tmp)/2)-1)]+tmp[int(len(tmp)/2)])/2.0
2637 else:
2638 return tmp[int((len(tmp)-1)/2)]
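# e.g. median([3.0, 1.0, 2.0]) == 2.0 and median([1.0, 2.0, 3.0, 4.0]) == 2.5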
2639
2640
2641 f = format_output
2642 opt = stability['loop_optimized_output']
2643
2644 mode = 'optimized' if opt else 'default'
2645 process = stability['Process']
2646 res_str = "Stability checking for %s (%s mode)\n"\
2647 %(process.nice_string()[9:],mode)
2648
2649 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2650 %(mode,process.shell_string())), 'w')
2651
2652 logFile.write('Stability check results\n\n')
2653 logFile.write(res_str)
2654 data_plot_dict={}
2655 accuracy_dict={}
2656 nPSmax=0
2657 max_acc=0.0
2658 min_acc=1.0
2659 if stability['Stability']:
2660 toolnames= list(stability['Stability'].keys())
2661 toolnamestr=" | ".join(tn+
2662 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2663 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2664 for key,stab in stability['Stability'].items()]
2665 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2666 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2667 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
2668 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2669 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2670 len_PS=["%i"%len(evals)+\
2671 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2672 len_PS_str=" | ".join(len_PS)
2673 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2674 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2675 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2676 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
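# Note: smaller 'Accuracy' values are better, so 'Max accuracy' reports the
# minimum of the recorded values and 'Min accuracy' the maximum.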
2677 pmedminlist=[]
2678 pfraclist=[]
2679 for key,stab in stability['Stability'].items():
2680 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2681 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2682 pfrac_str = f(pfrac,'%.2e')
2683 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2684 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2685 pmedminlist_str=" | ".join(pmedminlist)
2686 pfraclist_str=" | ".join(pfraclist)
2687 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2688 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2689 len_UPS=["%i"%len(upup)+\
2690 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2691 len_UPS_str=" | ".join(len_UPS)
2692 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2693 res_str_i += \
2694 """
2695 = Legend for the statistics of the stability tests. (all logs below are log_10)
2696 The loop direction test power P is computed as follows:
2697 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2698 So that log(P) is positive if the loop direction test is effective.
2699 The tuple printed out is (log(median(P)),log(min(P)))
2700 The consistency test C is computed when QP evaluations are available:
2701 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2702 So a consistent test would have log(C) as close to zero as possible.
2703 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2704 res_str+=res_str_i
2705 for key in stability['Stability'].keys():
2706 toolname=key
2707 stab=stability['Stability'][key]
2708 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2709
2710 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2711 stab['QP_stability']]
2712 nPS = len(DP_stability)
2713 if nPS>nPSmax:nPSmax=nPS
2714 UPS = stab['Unstable_PS_points']
2715 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2716 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2717 EPS = stab['Exceptional_PS_points']
2718 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2719 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2720 res_str_i = ""
2721
2722 xml_toolname = {'GOLEM95':'GOLEM','IREGI':'IREGI',
2723 'CUTTOOLS':'CUTTOOLS','PJFRY++':'PJFRY',
2724 'NINJA':'NINJA','SAMURAI':'SAMURAI',
2725 'COLLIER':'COLLIER'}[toolname.upper()]
2726 if len(UPS)>0:
2727 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2728 %(len(UPS),nPS,toolname)
2729 prefix = 'DP' if toolname=='CutTools' else ''
2730 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2731 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2732 res_str_i += "|= %s Max accuracy............... %s\n"\
2733 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2734 res_str_i += "|= %s Min accuracy............... %s\n"\
2735 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2736 (pmed,pmin,pfrac)=loop_direction_test_power(\
2737 [stab['DP_stability'][U[0]] for U in UPS])
2738 if toolname=='CutTools':
2739 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2740 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2741 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2742 %f(pfrac,'%.2e')
2743 res_str_i += "|= QP Median accuracy............ %s\n"\
2744 %f(median(UPS_stability_QP),'%.2e')
2745 res_str_i += "|= QP Max accuracy............... %s\n"\
2746 %f(min(UPS_stability_QP),'%.2e')
2747 res_str_i += "|= QP Min accuracy............... %s\n"\
2748 %f(max(UPS_stability_QP),'%.2e')
2749 (pmed,pmin,pfrac)=loop_direction_test_power(\
2750 [stab['QP_stability'][U[0]] for U in UPS])
2751 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2752 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2753 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2754 (pmed,pmin,pmax)=test_consistency(\
2755 [stab['DP_stability'][U[0]] for U in UPS],
2756 [stab['QP_stability'][U[0]] for U in UPS])
2757 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2758 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2759 if len(EPS)==0:
2760 res_str_i += "= Number of Exceptional PS points : 0\n"
2761 if len(EPS)>0:
2762 res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
2763 %(len(EPS),nPS,toolname)
2764 res_str_i += "|= DP Median accuracy............ %s\n"\
2765 %f(median(EPS_stability_DP),'%.2e')
2766 res_str_i += "|= DP Max accuracy............... %s\n"\
2767 %f(min(EPS_stability_DP),'%.2e')
2768 res_str_i += "|= DP Min accuracy............... %s\n"\
2769 %f(max(EPS_stability_DP),'%.2e')
2770 pmed,pmin,pfrac=loop_direction_test_power(\
2771 [stab['DP_stability'][E[0]] for E in EPS])
2772 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2773 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2774 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2775 %f(pfrac,'%.2e')
2776 res_str_i += "|= QP Median accuracy............ %s\n"\
2777 %f(median(EPS_stability_QP),'%.2e')
2778 res_str_i += "|= QP Max accuracy............... %s\n"\
2779 %f(min(EPS_stability_QP),'%.2e')
2780 res_str_i += "|= QP Min accuracy............... %s\n"\
2781 %f(max(EPS_stability_QP),'%.2e')
2782 pmed,pmin,pfrac=loop_direction_test_power(\
2783 [stab['QP_stability'][E[0]] for E in EPS])
2784 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2785 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2786 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2787
2788 logFile.write(res_str_i)
2789
2790 if len(EPS)>0:
2791 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2792 %(len(EPS),toolname))
2793 logFile.write('<EPS_data reduction=%s>\n'%xml_toolname.upper())
2794 for i, eps in enumerate(EPS):
2795 logFile.write('\nEPS #%i\n'%(i+1))
2796 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2797 for p in eps[1]]))
2798 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[eps[0]])
2799 logFile.write(' QP accuracy : %.4e\n'%QP_stability[eps[0]])
2800 logFile.write('</EPS_data>\n')
2801 if len(UPS)>0:
2802 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2803 %(len(UPS),toolname))
2804 logFile.write('<UPS_data reduction=%s>\n'%xml_toolname.upper())
2805 for i, ups in enumerate(UPS):
2806 logFile.write('\nUPS #%i\n'%(i+1))
2807 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2808 for p in ups[1]]))
2809 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[ups[0]])
2810 logFile.write(' QP accuracy : %.4e\n'%QP_stability[ups[0]])
2811 logFile.write('</UPS_data>\n')
2812
2813 logFile.write('\nData entries for the stability plot.\n')
2814 logFile.write('First row is a maximal accuracy delta, second is the '+\
2815 'fraction of events with DP accuracy worse than delta.\n')
2816 logFile.write('<plot_data reduction=%s>\n'%xml_toolname.upper())
2817
2818 if max(DP_stability)>0.0:
2819 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2820 if min_digit_acc>=0:
2821 min_digit_acc = min_digit_acc+1
2822 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
2823 else:
2824 logFile.writelines('%.4e %.4e\n'%(accuracies[i], 0.0) for i in \
2825 range(len(accuracies)))
2826 logFile.write('</plot_data>\n')
2827 res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
2828 ' is output then.'
2829 logFile.write('Perfect accuracy over all the trial PS points.')
2830 res_str +=res_str_i
2831 continue
2832
2833 accuracy_dict[toolname]=accuracies
2834 if max(accuracies) > max_acc: max_acc=max(accuracies)
2835 if min(accuracies) < min_acc: min_acc=min(accuracies)
2836 data_plot=[]
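# For each accuracy threshold, record the fraction of PS points whose DP
# accuracy estimate is worse than that threshold (the curve plotted below).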
2837 for acc in accuracies:
2838 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2839 /float(len(DP_stability)))
2840 data_plot_dict[toolname]=data_plot
2841
2842 logFile.writelines('%.4e %.4e\n'%(accuracies[i], data_plot[i]) for i in \
2843 range(len(accuracies)))
2844 logFile.write('</plot_data>\n')
2845 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2846 %(nPS,toolname))
2847 logFile.write('First row is DP, second is QP (if available).\n\n')
2848 logFile.write('<accuracies reduction=%s>\n'%xml_toolname.upper())
2849 logFile.writelines('%.4e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2850 else '%.4e\n'%QP_stability[i]) for i in range(nPS))
2851 logFile.write('</accuracies>\n')
2852 res_str+=res_str_i
2853 logFile.close()
2854 res_str += "\n= Stability details of the run are output to the file"+\
2855 " stability_%s_%s.log\n"%(mode,process.shell_string())
2856
2857
2858
2859
2860 if any(isinstance(handler,logging.FileHandler) for handler in \
2861 logging.getLogger('madgraph').handlers):
2862 return res_str
2863
2864 try:
2865 import matplotlib.pyplot as plt
2866 colorlist=['b','r','g','y','m','c','k']
2867 for i,key in enumerate(data_plot_dict.keys()):
2868 color=colorlist[i]
2869 data_plot=data_plot_dict[key]
2870 accuracies=accuracy_dict[key]
2871 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2872 label=key)
2873 plt.axis([min_acc,max_acc,\
2874 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2875 plt.yscale('log')
2876 plt.xscale('log')
2877 plt.title('Stability plot for %s (%s mode, %d points)'%\
2878 (process.nice_string()[9:],mode,nPSmax))
2879 plt.ylabel('Fraction of events')
2880 plt.xlabel('Maximal precision')
2881 plt.legend()
2882 if not reusing:
2883 logger.info('Some stability statistics will be displayed once you '+\
2884 'close the plot window')
2885 plt.show()
2886 else:
2887 fig_output_file = str(pjoin(output_path,
2888 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2889 logger.info('Stability plot output to file %s. '%fig_output_file)
2890 plt.savefig(fig_output_file)
2891 return res_str
2892 except Exception as e:
2893 if isinstance(e, ImportError):
2894 res_str += "\n= Install matplotlib to get a "+\
2895 "graphical display of the results of this check."
2896 else:
2897 res_str += "\n= Could not produce the stability plot because of "+\
2898 "the following error: %s"%str(e)
2899 return res_str
2900
2901 def output_timings(process, timings):
2902 """Present the result of a timings check in a nice format."""
2903
2904
2905 f = format_output
2906 loop_optimized_output = timings['loop_optimized_output']
2907 reduction_tool = bannermod.MadLoopParam._ID_reduction_tool_map[
2908 timings['reduction_tool']]
2909
2910 res_str = "%s \n"%process.nice_string()
2911 try:
2912 gen_total = timings['HELAS_MODEL_compilation']+\
2913 timings['HelasDiagrams_generation']+\
2914 timings['Process_output']+\
2915 timings['Diagrams_generation']+\
2916 timings['Process_compilation']+\
2917 timings['Initialization']
2918 except TypeError:
2919 gen_total = None
2920 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2921 res_str += "|= Diagrams generation....... %s\n"\
2922 %f(timings['Diagrams_generation'],'%.3gs')
2923 res_str += "|= Helas Diagrams generation. %s\n"\
2924 %f(timings['HelasDiagrams_generation'],'%.3gs')
2925 res_str += "|= Process output............ %s\n"\
2926 %f(timings['Process_output'],'%.3gs')
2927 res_str += "|= HELAS+model compilation... %s\n"\
2928 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2929 res_str += "|= Process compilation....... %s\n"\
2930 %f(timings['Process_compilation'],'%.3gs')
2931 res_str += "|= Initialization............ %s\n"\
2932 %f(timings['Initialization'],'%.3gs')
2933
2934 res_str += "\n= Reduction tool tested...... %s\n"%reduction_tool
2935 res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
2936 %(timings['run_unpolarized_total']*1000.0)
2937 if loop_optimized_output:
2938 coef_time=timings['run_unpolarized_coefs']*1000.0
2939 loop_time=(timings['run_unpolarized_total']-\
2940 timings['run_unpolarized_coefs'])*1000.0
2941 total=coef_time+loop_time
2942 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2943 %(coef_time,int(round(100.0*coef_time/total)))
2944 res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
2945 %(loop_time,int(round(100.0*loop_time/total)))
2946 res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
2947 %(timings['run_polarized_total']*1000.0)
2948 if loop_optimized_output:
2949 coef_time=timings['run_polarized_coefs']*1000.0
2950 loop_time=(timings['run_polarized_total']-\
2951 timings['run_polarized_coefs'])*1000.0
2952 total=coef_time+loop_time
2953 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2954 %(coef_time,int(round(100.0*coef_time/total)))
2955 res_str += "|= Loop evaluation time...... %.3gms (%d%%)\n"\
2956 %(loop_time,int(round(100.0*loop_time/total)))
2957 res_str += "\n= Miscellaneous ========================\n"
2958 res_str += "|= Number of hel. computed... %s/%s\n"\
2959 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2960 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2961 if loop_optimized_output:
2962 res_str += "|= Number of loop groups..... %s\n"\
2963 %f(timings['n_loop_groups'],'%d')
2964 res_str += "|= Number of loop wfs........ %s\n"\
2965 %f(timings['n_loop_wfs'],'%d')
2966 if timings['loop_wfs_ranks']!=None:
2967 for i, r in enumerate(timings['loop_wfs_ranks']):
2968 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2969 res_str += "|= Loading time (Color data). ~%.3gms\n"\
2970 %(timings['Booting_time']*1000.0)
2971 res_str += "|= Maximum RAM usage (rss)... %s\n"\
2972 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
2973 res_str += "\n= Output disk size =====================\n"
2974 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
2975 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
2976 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
2977 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
2978
2979 return res_str
2980
2982 """Present the results of a comparison in a nice list format.
2983 In short mode, return the number of failed processes.
2984 """
2985 proc_col_size = 17
2986 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
2987 if pert_coupl:
2988 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
2989 else:
2990 process_header = "Process"
2991
2992 if len(process_header) + 1 > proc_col_size:
2993 proc_col_size = len(process_header) + 1
2994
2995 for proc in comparison_results:
2996 if len(proc['process'].base_string()) + 1 > proc_col_size:
2997 proc_col_size = len(proc['process'].base_string()) + 1
2998
2999 col_size = 18
3000
3001 pass_proc = 0
3002 fail_proc = 0
3003 no_check_proc = 0
3004
3005 failed_proc_list = []
3006 no_check_proc_list = []
3007
3008 res_str = fixed_string_length(process_header, proc_col_size) + \
3009 fixed_string_length("Min element", col_size) + \
3010 fixed_string_length("Max element", col_size) + \
3011 fixed_string_length("Relative diff.", col_size) + \
3012 "Result"
3013
3014 for result in comparison_results:
3015 proc = result['process'].base_string()
3016 values = result['values']
3017
3018 if len(values) <= 1:
3019 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3020 " * No permutations, process not checked *"
3021 no_check_proc += 1
3022 no_check_proc_list.append(result['process'].nice_string())
3023 continue
3024
3025 passed = result['passed']
3026
3027 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3028 fixed_string_length("%1.10e" % min(values), col_size) + \
3029 fixed_string_length("%1.10e" % max(values), col_size) + \
3030 fixed_string_length("%1.10e" % result['difference'],
3031 col_size)
3032 if passed:
3033 pass_proc += 1
3034 res_str += "Passed"
3035 else:
3036 fail_proc += 1
3037 failed_proc_list.append(result['process'].nice_string())
3038 res_str += "Failed"
3039
3040 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3041 (pass_proc, pass_proc + fail_proc,
3042 fail_proc, pass_proc + fail_proc)
3043
3044 if fail_proc != 0:
3045 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3046 if no_check_proc != 0:
3047 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3048
3049 return res_str
3050
3051 def fixed_string_length(mystr, length):
3052 """Helper function to fix the length of a string by cutting it
3053 or adding extra space."""
3054
3055 if len(mystr) > length:
3056 return mystr[0:length]
3057 else:
3058 return mystr + " " * (length - len(mystr))
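# e.g. fixed_string_length('abc', 5) == 'abc  ' and fixed_string_length('abcdef', 4) == 'abcd'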
3059
3060
3061
3062
3063
3064 def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
3065 options=None, output_path=None, cmd = FakeInterface()):
3066 """Check gauge invariance of the processes by using the BRS check.
3067 For one of the massless external bosons (e.g. gluon or photon),
3068 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
3069 """
3070 cmass_scheme = cmd.options['complex_mass_scheme']
3071 if isinstance(processes, base_objects.ProcessDefinition):
3072
3073
3074 multiprocess = processes
3075
3076 model = multiprocess.get('model')
3077
3078 if multiprocess.get('perturbation_couplings')==[]:
3079 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
3080 auth_skipping = True, reuse = False)
3081 else:
3082 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3083 cmd=cmd,model=model, param_card=param_card,
3084 auth_skipping = False, reuse = False,
3085 output_path=output_path)
3086
3087 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
3088
3089 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3090 for particle in evaluator.full_model.get('particles'):
3091 if particle.get('width') != 'ZERO':
3092 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3093 results = run_multiprocs_no_crossings(check_gauge_process,
3094 multiprocess,
3095 evaluator,
3096 options=options
3097 )
3098
3099 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3100
3101 clean_up(output_path)
3102
3103 return results
3104
3105 elif isinstance(processes, base_objects.Process):
3106 processes = base_objects.ProcessList([processes])
3107 elif isinstance(processes, base_objects.ProcessList):
3108 pass
3109 else:
3110 raise InvalidCmd("processes is of non-supported format")
3111
3112 assert processes, "No processes given"
3113
3114 model = processes[0].get('model')
3115
3116
3117 if processes[0].get('perturbation_couplings')==[]:
3118 evaluator = MatrixElementEvaluator(model, param_card,
3119 auth_skipping = True, reuse = False,
3120 cmd = cmd)
3121 else:
3122 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3123 model=model, param_card=param_card,
3124 auth_skipping = False, reuse = False,
3125 output_path=output_path, cmd = cmd)
3126 comparison_results = []
3127 comparison_explicit_flip = []
3128
3129
3130 for process in processes:
3131
3132
3133
3134
3135
3136
3137
3138
3139
3140 result = check_gauge_process(process, evaluator,options=options)
3141 if result:
3142 comparison_results.append(result)
3143
3144 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3145
3146 clean_up(output_path)
3147
3148 return comparison_results
3149
3150 def check_gauge_process(process, evaluator, options=None):
3152 """Check gauge invariance for the process, unless it is already done."""
3153
3154 model = process.get('model')
3155
3156
3157 found_gauge = False
3158 for i, leg in enumerate(process.get('legs')):
3159 part = model.get_particle(leg.get('id'))
3160 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3161 found_gauge = True
3162 break
3163 if not found_gauge:
3164 logger.info("No Ward identity for %s" % \
3165 process.nice_string().replace('Process', 'process'))
3166
3167 return None
3168
3169 for i, leg in enumerate(process.get('legs')):
3170 leg.set('number', i+1)
3171
3172 logger.info("Checking Ward identities for %s" % \
3173 process.nice_string().replace('Process', 'process'))
3174
3175 legs = process.get('legs')
3176
3177
3178 try:
3179 if process.get('perturbation_couplings')==[]:
3180 amplitude = diagram_generation.Amplitude(process)
3181 else:
3182 amplitude = loop_diagram_generation.LoopAmplitude(process)
3183 except InvalidCmd:
3184 logging.info("No diagrams for %s" % \
3185 process.nice_string().replace('Process', 'process'))
3186 return None
3187 if not amplitude.get('diagrams'):
3188
3189 logging.info("No diagrams for %s" % \
3190 process.nice_string().replace('Process', 'process'))
3191 return None
3192
3193 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3194 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3195 gen_color = False)
3196 else:
3197 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3198 optimized_output=evaluator.loop_optimized_output)
3199
3200
3201
3202
3203
3204
3205
3206
3207 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3208 output='jamp', options=options)
3209
3210 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3211 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3212 gen_color = False)
3213
3214 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3215 output='jamp', options=options)
3216
3217 if mvalue and mvalue['m2']:
3218 return {'process':process,'value':mvalue,'brs':brsvalue}
3219
3221 """Present the results of a comparison in a nice list format"""
3222
3223 proc_col_size = 17
3224
3225 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3226
3227
3228 if pert_coupl:
3229 threshold=1e-5
3230 else:
3231 threshold=1e-10
3232
3233 if pert_coupl:
3234 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3235 else:
3236 process_header = "Process"
3237
3238 if len(process_header) + 1 > proc_col_size:
3239 proc_col_size = len(process_header) + 1
3240
3241 for one_comp in comparison_results:
3242 proc = one_comp['process'].base_string()
3243 mvalue = one_comp['value']
3244 brsvalue = one_comp['brs']
3245 if len(proc) + 1 > proc_col_size:
3246 proc_col_size = len(proc) + 1
3247
3248 col_size = 18
3249
3250 pass_proc = 0
3251 fail_proc = 0
3252
3253 failed_proc_list = []
3254 no_check_proc_list = []
3255
3256 res_str = fixed_string_length(process_header, proc_col_size) + \
3257 fixed_string_length("matrix", col_size) + \
3258 fixed_string_length("BRS", col_size) + \
3259 fixed_string_length("ratio", col_size) + \
3260 "Result"
3261
3262 for one_comp in comparison_results:
3263 proc = one_comp['process'].base_string()
3264 mvalue = one_comp['value']
3265 brsvalue = one_comp['brs']
3266 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3267 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3268 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3269 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3270 fixed_string_length("%1.10e" % ratio, col_size)
3271
3272 if ratio > threshold:
3273 fail_proc += 1
3274 proc_succeed = False
3275 failed_proc_list.append(proc)
3276 res_str += "Failed"
3277 else:
3278 pass_proc += 1
3279 proc_succeed = True
3280 res_str += "Passed"
3281
3282
3283
3284
3285
3286 if len(mvalue['jamp'])!=0:
3287 for k in range(len(mvalue['jamp'][0])):
3288 m_sum = 0
3289 brs_sum = 0
3290
3291 for j in range(len(mvalue['jamp'])):
3292
3293 m_sum += abs(mvalue['jamp'][j][k])**2
3294 brs_sum += abs(brsvalue['jamp'][j][k])**2
3295
3296
3297 if not m_sum:
3298 continue
3299 ratio = abs(brs_sum) / abs(m_sum)
3300
3301 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3302 fixed_string_length("%1.10e" % m_sum, col_size) + \
3303 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3304 fixed_string_length("%1.10e" % ratio, col_size)
3305
3306 if ratio > 1e-15:
3307 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3308 fail_proc += 1
3309 pass_proc -= 1
3310 failed_proc_list.append(proc)
3311 res_str += tmp_str + "Failed"
3312 elif not proc_succeed:
3313 res_str += tmp_str + "Passed"
3314
3315
3316 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3317 (pass_proc, pass_proc + fail_proc,
3318 fail_proc, pass_proc + fail_proc)
3319
3320 if fail_proc != 0:
3321 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3322
3323 if output=='text':
3324 return res_str
3325 else:
3326 return fail_proc
3327
3328
3329
3330 def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3331 reuse = False, output_path=None, cmd = FakeInterface()):
3332 """ Check whether the squared matrix element (summed over helicities) is
3333 Lorentz invariant by boosting the momenta by different values."""
3334
3335 cmass_scheme = cmd.options['complex_mass_scheme']
3336 if isinstance(processes, base_objects.ProcessDefinition):
3337
3338
3339 multiprocess = processes
3340 model = multiprocess.get('model')
3341
3342 if multiprocess.get('perturbation_couplings')==[]:
3343 evaluator = MatrixElementEvaluator(model,
3344 cmd= cmd, auth_skipping = False, reuse = True)
3345 else:
3346 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3347 model=model, auth_skipping = False, reuse = True,
3348 output_path=output_path, cmd = cmd)
3349
3350 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3351
3352 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3353 for particle in evaluator.full_model.get('particles'):
3354 if particle.get('width') != 'ZERO':
3355 evaluator.full_model.get('parameter_dict')[\
3356 particle.get('width')] = 0.
3357
3358 results = run_multiprocs_no_crossings(check_lorentz_process,
3359 multiprocess,
3360 evaluator,
3361 options=options)
3362
3363 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3364
3365 clean_up(output_path)
3366
3367 return results
3368
3369 elif isinstance(processes, base_objects.Process):
3370 processes = base_objects.ProcessList([processes])
3371 elif isinstance(processes, base_objects.ProcessList):
3372 pass
3373 else:
3374 raise InvalidCmd("processes is of non-supported format")
3375
3376 assert processes, "No processes given"
3377
3378 model = processes[0].get('model')
3379
3380
3381 if processes[0].get('perturbation_couplings')==[]:
3382 evaluator = MatrixElementEvaluator(model, param_card,
3383 auth_skipping = False, reuse = True,
3384 cmd=cmd)
3385 else:
3386 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3387 model=model,param_card=param_card,
3388 auth_skipping = False, reuse = True,
3389 output_path=output_path, cmd = cmd)
3390
3391 comparison_results = []
3392
3393
3394 for process in processes:
3395
3396
3397
3398
3399
3400
3401
3402
3403
3404 result = check_lorentz_process(process, evaluator,options=options)
3405 if result:
3406 comparison_results.append(result)
3407
3408 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3409
3410 clean_up(output_path)
3411
3412 return comparison_results
3413
3414 def check_lorentz_process(process, evaluator, options=None):
3416 """Check Lorentz invariance for the process, unless it is already done."""
3417
3418 amp_results = []
3419 model = process.get('model')
3420
3421 for i, leg in enumerate(process.get('legs')):
3422 leg.set('number', i+1)
3423
3424 logger.info("Checking lorentz transformations for %s" % \
3425 process.nice_string().replace('Process:', 'process'))
3426
3427 legs = process.get('legs')
3428
3429
3430 try:
3431 if process.get('perturbation_couplings')==[]:
3432 amplitude = diagram_generation.Amplitude(process)
3433 else:
3434 amplitude = loop_diagram_generation.LoopAmplitude(process)
3435 except InvalidCmd:
3436 logging.info("No diagrams for %s" % \
3437 process.nice_string().replace('Process', 'process'))
3438 return None
3439
3440 if not amplitude.get('diagrams'):
3441
3442 logging.info("No diagrams for %s" % \
3443 process.nice_string().replace('Process', 'process'))
3444 return None
3445
3446
3447 p, w_rambo = evaluator.get_momenta(process, options)
3448
3449
3450 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3451 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3452 gen_color = True)
3453 else:
3454 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3455 optimized_output = evaluator.loop_optimized_output)
3456
3457 MLOptions = {'ImprovePS':True,'ForceMP':True}
3458 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3459 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3460 auth_skipping = True, options=options)
3461 else:
3462 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3463 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3464 options = options)
3465
3466 if data and data['m2']:
3467 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3468 results = [data]
3469 else:
3470 results = [('Original evaluation',data)]
3471 else:
3472 return {'process':process, 'results':'pass'}
3473
3474
3475
3476
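# Tree-level: boost along each of the three axes and compare. The loop branch
# below instead uses a z boost, optional x/y boosts (when not running on
# events) and two z-axis rotations.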
3477 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3478 for boost in range(1,4):
3479 boost_p = boost_momenta(p, boost)
3480 results.append(evaluator.evaluate_matrix_element(matrix_element,
3481 p=boost_p,output='jamp'))
3482 else:
3483
3484 boost_p = boost_momenta(p, 3)
3485 results.append(('Z-axis boost',
3486 evaluator.evaluate_matrix_element(matrix_element, options=options,
3487 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3488
3489
3490
3491
3492 if not options['events']:
3493 boost_p = boost_momenta(p, 1)
3494 results.append(('X-axis boost',
3495 evaluator.evaluate_matrix_element(matrix_element, options=options,
3496 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3497 boost_p = boost_momenta(p, 2)
3498 results.append(('Y-axis boost',
3499 evaluator.evaluate_matrix_element(matrix_element,options=options,
3500 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3501
3502
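    # Rotation by pi/2 about the z axis: (E, px, py, pz) -> (E, -py, px, pz).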
3503 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3504 results.append(('Z-axis pi/2 rotation',
3505 evaluator.evaluate_matrix_element(matrix_element,options=options,
3506 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3507
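    # Rotation by pi/4 about the z axis:
    #   px' = (px - py)/sqrt(2),  py' = (px + py)/sqrt(2).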
3508 sq2 = math.sqrt(2.0)
3509 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3510 results.append(('Z-axis pi/4 rotation',
3511 evaluator.evaluate_matrix_element(matrix_element,options=options,
3512 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
3513
3514
3515 return {'process': process, 'results': results}
3516
3517
3518
3519
3520 def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3521 options=None, tir={}, output_path=None,
3522 cuttools="", reuse=False, cmd = FakeInterface()):
3523 """Check gauge invariance of the processes by flipping
3524 the gauge of the model
3525 """
3526
3527 mg_root = cmd._mgme_dir
3528
3529 cmass_scheme = cmd.options['complex_mass_scheme']
3530
3531 if isinstance(processes_unit, base_objects.ProcessDefinition):
3532
3533
3534 multiprocess_unit = processes_unit
3535 model = multiprocess_unit.get('model')
3536
3537
3538
3539 loop_optimized_bu = cmd.options['loop_optimized_output']
3540 if processes_unit.get('squared_orders'):
3541 if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
3542 cmd.options['loop_optimized_output'] = True
3543 else:
3544                 raise InvalidCmd("The gauge test cannot be performed for "+
3545                     "a process with loop corrections beyond QCD which also"+
3546                     " specifies squared order constraints.")
3547 else:
3548 cmd.options['loop_optimized_output'] = False
3549
3550 aloha.unitary_gauge = True
3551 if processes_unit.get('perturbation_couplings')==[]:
3552 evaluator = MatrixElementEvaluator(model, param_card,
3553 cmd=cmd,auth_skipping = False, reuse = True)
3554 else:
3555 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3556 cmd=cmd, model=model,
3557 param_card=param_card,
3558 auth_skipping = False,
3559 output_path=output_path,
3560 reuse = False)
3561 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
3562             logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3563 for particle in evaluator.full_model.get('particles'):
3564 if particle.get('width') != 'ZERO':
3565 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3566
3567 output_u = run_multiprocs_no_crossings(get_value,
3568 multiprocess_unit,
3569 evaluator,
3570 options=options)
3571
3572 clean_added_globals(ADDED_GLOBAL)
3573
3574 if processes_unit.get('perturbation_couplings')!=[]:
3575 clean_up(output_path)
3576
3577 momentum = {}
3578 for data in output_u:
3579 momentum[data['process']] = data['p']
3580
3581 multiprocess_feynm = processes_feynm
3582 model = multiprocess_feynm.get('model')
3583
3584
3585 aloha.unitary_gauge = False
3586
3587
3588 cmd.options['loop_optimized_output'] = True
3589 if processes_feynm.get('perturbation_couplings')==[]:
3590 evaluator = MatrixElementEvaluator(model, param_card,
3591 cmd= cmd, auth_skipping = False, reuse = False)
3592 else:
3593 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3594 cmd= cmd, model=model,
3595 param_card=param_card,
3596 auth_skipping = False,
3597 output_path=output_path,
3598 reuse = False)
3599
3600 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3601
3602 for particle in evaluator.full_model.get('particles'):
3603 if particle.get('width') != 'ZERO':
3604 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3605
3606 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3607 evaluator, momentum,
3608 options=options)
3609 output = [processes_unit]
3610 for data in output_f:
3611 local_dico = {}
3612 local_dico['process'] = data['process']
3613 local_dico['value_feynm'] = data['value']
3614 local_dico['value_unit'] = [d['value'] for d in output_u
3615 if d['process'] == data['process']][0]
3616 output.append(local_dico)
3617
3618 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3619
3620 clean_up(output_path)
3621
3622
3623 cmd.options['loop_optimized_output'] = loop_optimized_bu
3624
3625 return output
3626
3627
3628
3629
3630 else:
3631         raise InvalidCmd("processes is of unsupported format")
3632
3638 """Check complex mass scheme consistency in the offshell region of s-channels
3639     detected for this process, by varying the expansion parameter consistently
3640 with the corresponding width and making sure that the difference between
3641 the complex mass-scheme and the narrow-width approximation is higher order.
3642 """
3643
3644 if not isinstance(process_line, str):
3645         raise InvalidCmd("Process definition must be given as a string for this check")
3646
3647
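    # First pass: run with the complex mass scheme turned off (NWA setup).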
3648 cmd.do_set('complex_mass_scheme False', log=False)
3649
3650 multiprocess_nwa = cmd.extract_process(process_line)
3651
3652
3653 has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
3654 'decays.py'))
3655
3656
3657 missing_perturbations = cmd._curr_model.get_coupling_orders()-\
3658 set(multiprocess_nwa.get('perturbation_couplings'))
3659
3660 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3661 len(missing_perturbations)>0:
3662 logger.warning("------------------------------------------------------")
3663 logger.warning("The process considered does not specify the following "+
3664         "types of loops to be included: %s"%str(list(missing_perturbations)))
3665 logger.warning("Consequently, the CMS check will be unsuccessful if the"+
3666 " process involves any resonating particle whose LO decay is "+
3667 "mediated by one of these orders.")
3668 logger.warning("You can use the syntax '[virt=all]' to automatically"+
3669 " include all loops supported by the model.")
3670 logger.warning("------------------------------------------------------")
3671
3672 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3673 len(multiprocess_nwa.get('legs'))<=4:
3674 logger.warning("------------------------------------------------------")
3675         logger.warning("Processes with four or fewer external states are typically not"+\
3676 " sensitive to incorrect Complex Mass Scheme implementations.")
3677 logger.warning("You can test this sensitivity by making sure that the"+
3678 " same check on the leading-order counterpart of this process *fails*"+
3679 " when using the option '--diff_lambda_power=2'.")
3680 logger.warning("If it does not, then consider adding a massless "+
3681 "gauge vector to the external states.")
3682 logger.warning("------------------------------------------------------")
3683
3684 if options['recompute_width']=='auto':
3685 if multiprocess_nwa.get('perturbation_couplings')!=[]:
3686
3687 options['recompute_width'] = 'first_time'
3688 else:
3689 options['recompute_width'] = 'never'
3690
3691
3692 if options['recompute_width'] in ['first_time', 'always'] and \
3693 not has_FRdecay and not 'cached_widths' in options:
3694 logger.info('The LO widths will need to be recomputed but the '+
3695 'model considered does not appear to have a decay module.\nThe widths'+
3696         ' will therefore be computed numerically, which will slow down the test.\n'+
3697 'Consider using a param_card already specifying correct LO widths and'+
3698 " adding the option --recompute_width=never when doing this check.")
3699
3700 if options['recompute_width']=='never' and \
3701 any(order in multiprocess_nwa.get('perturbation_couplings') for order in
3702 options['expansion_orders']):
3703 logger.warning('You chose not to recompute the widths while including'+
3704 ' loop corrections. The check will be successful only if the width'+\
3705 ' specified in the default param_card is LO accurate (Remember that'+\
3706 ' the default values of alpha_s and awem1 are set to 0.1 and 10.0'+\
3707          ' respectively).')
3708
3709
3710
3711
3712
3713 if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
3714 modelname = cmd._curr_model.get('modelpath+restriction')
3715 with misc.MuteLogger(['madgraph'], ['INFO']):
3716 model = import_ufo.import_model(modelname, decay=True,
3717 complex_mass_scheme=False)
3718 multiprocess_nwa.set('model', model)
3719
3720 run_options = copy.deepcopy(options)
3721
3722
3723 if options['seed'] > 0:
3724 random.seed(options['seed'])
3725
3726
3727 run_options['param_card'] = param_card
3728 if isinstance(cmd, FakeInterface):
3729 raise MadGraph5Error("Check CMS cannot be run with a FakeInterface.")
3730 run_options['cmd'] = cmd
3731 run_options['MLOptions'] = MLOptions
3732 if output_path:
3733 run_options['output_path'] = output_path
3734 else:
3735 run_options['output_path'] = cmd._mgme_dir
3736
3737
3738 run_options['has_FRdecay'] = has_FRdecay
3739
3740
3741 if 'cached_widths' not in run_options:
3742 run_options['cached_widths'] = {}
3743
3744
3745 run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
3746
3747 if options['tweak']['name']:
3748 logger.info("Now running the CMS check for tweak '%s'"\
3749 %options['tweak']['name'])
3750
3751 model = multiprocess_nwa.get('model')
3752
3753 for particle in model.get('particles'):
3754 mass_param = model.get_parameter(particle.get('mass'))
3755 if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
3756 if model.get('name') not in ['sm','loop_sm']:
3757 logger.warning("The mass '%s' of particle '%s' is not an external"%\
3758 (model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
3759 " parameter as required by this check. \nMG5_aMC will try to"+\
3760 " modify the model to remedy the situation. No guarantee.")
3761 status = model.change_electroweak_mode(set(['mz','mw','alpha']))
3762 if not status:
3763 raise InvalidCmd('The EW scheme could apparently not be changed'+\
3764 ' so as to have the W-boson mass external. The check cannot'+\
3765 ' proceed.')
3766 break
3767
3768 veto_orders = [order for order in model.get('coupling_orders') if \
3769 order not in options['expansion_orders']]
3770 if len(veto_orders)>0:
3771 logger.warning('You did not define any parameter scaling rule for the'+\
3772 " coupling orders %s. They will be "%','.join(veto_orders)+\
3773           "forced to zero in the tests. Consider adding the scaling rule to "+\
3774 "avoid this. (see option '--cms' in 'help check')")
3775 for order in veto_orders:
3776             multiprocess_nwa.get('orders')[order] = 0
3777 multiprocess_nwa.set('perturbation_couplings', [order for order in
3778 multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
3779
3780 if multiprocess_nwa.get('perturbation_couplings')==[]:
3781 evaluator = MatrixElementEvaluator(model, param_card,
3782 cmd=cmd,auth_skipping = False, reuse = True)
3783 else:
3784 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3785 cmd=cmd, model=model,
3786 param_card=param_card,
3787 auth_skipping = False,
3788 output_path=output_path,
3789 reuse = False)
3790
3791 cached_information = []
3792 output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3793 multiprocess_nwa,
3794 evaluator,
3795
3796
3797
3798
3799
3800 opt = cached_information,
3801 options=run_options)
3802
3803
3804 clean_added_globals(ADDED_GLOBAL)
3805
3806
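    # Second pass: repeat the run with the complex mass scheme turned on.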
3807 cmd.do_set('complex_mass_scheme True', log=False)
3808
3809
3810 multiprocess_cms = cmd.extract_process(process_line)
3811 model = multiprocess_cms.get('model')
3812
3813 if len(veto_orders)>0:
3814 for order in veto_orders:
3815             multiprocess_cms.get('orders')[order] = 0
3816 multiprocess_cms.set('perturbation_couplings', [order for order in
3817 multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
3818
3819 if multiprocess_cms.get('perturbation_couplings')==[]:
3820 evaluator = MatrixElementEvaluator(model, param_card,
3821 cmd=cmd,auth_skipping = False, reuse = True)
3822 else:
3823 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3824 cmd=cmd, model=model,
3825 param_card=param_card,
3826 auth_skipping = False,
3827 output_path=output_path,
3828 reuse = False)
3829
3830 output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3831 multiprocess_cms,
3832 evaluator,
3833
3834 opt = dict(cached_information),
3835 options=run_options)
3836
3837 if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
3838
3839 clean_up(output_path)
3840
3841
3842
3843
3844 result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
3845
3846 result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
3847 for i, proc_res in enumerate(output_nwa):
3848 result['ordered_processes'].append(proc_res[0])
3849 result[proc_res[0]] = {
3850 'NWA':proc_res[1]['resonances_result'],
3851 'CMS':output_cms[i][1]['resonances_result'],
3852 'born_order':proc_res[1]['born_order'],
3853 'loop_order':proc_res[1]['loop_order']}
3854
3855
3856
3857 options['cached_widths'] = run_options['cached_widths']
3858
3859
3860 result['recompute_width'] = options['recompute_width']
3861 result['has_FRdecay'] = has_FRdecay
3862 result['widths_computed'] = []
3863 cached_widths = sorted(list(options['cached_widths'].items()), key=lambda el: \
3864 abs(el[0][0]))
3865 for (pdg, lambda_value), width in cached_widths:
3866 if lambda_value != 1.0:
3867 continue
3868 result['widths_computed'].append((model.get_particle(pdg).get_name(),
3869 width))
3870
3871
3872 clean_added_globals(ADDED_GLOBAL)
3873
3874 return result
3875
3880     """Check CMS for the process in argument. The option 'opt' is quite important.
3881 When opt is a list, it means that we are doing NWA and we are filling the
3882 list with the following tuple
3883 ('proc_name',({'ParticlePDG':ParticlePDG,
3884                    'FSMothersNumbers':set([]),
3885 'PS_point_used':[]},...))
3886 When opt is a dictionary, we are in the CMS mode and it will be reused then.
3887 """
3888
3889
3890
3891 NLO = process.get('perturbation_couplings') != []
3892
3893 def glue_momenta(production, decay):
3894         """ Merge together the kinematics for the production of the particle
3895         positioned last in the 'production' array with the 1>N 'decay' kinematics
3896         provided, where the decaying particle comes first."""
3897
3898 from MadSpin.decay import momentum
3899
3900 full = production[:-1]
3901
3902
3903
3904
3905
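        # Boost each decay product with the momentum of the decaying particle,
        # taken from the last entry of the production kinematics.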
3906 for p in decay[1:]:
3907 bp = momentum(*p).boost(momentum(*production[-1]))
3908 full.append([bp.E,bp.px,bp.py,bp.pz])
3909
3910 return full
3911
3912 def find_resonances(diagrams):
3913 """ Find all the resonances in the matrix element in argument """
3914
3915 model = process['model']
3916 resonances_found = []
3917
3918 for ll, diag in enumerate(diagrams):
3919 for amp in diag.get('amplitudes'):
3920
3921
3922 s_channels, t_channels = amp.\
3923 get_s_and_t_channels(process.get_ninitial(), model, 0)
3924
3925
3926 replacement_dict = {}
3927 for s_channel in s_channels:
3928 new_resonance = {
3929 'ParticlePDG':s_channel.get('legs')[-1].get('id'),
3930 'FSMothersNumbers':[],
3931 'PS_point_used':[]}
3932 for leg in s_channel.get('legs')[:-1]:
3933 if leg.get('number')>0:
3934 new_resonance['FSMothersNumbers'].append(
3935 leg.get('number'))
3936 else:
3937 try:
3938 new_resonance['FSMothersNumbers'].extend(
3939 replacement_dict[leg.get('number')])
3940 except KeyError:
3941 raise Exception('The following diagram '+\
3942                                 'is malformed: '+diag.nice_string())
3943
3944 replacement_dict[s_channel.get('legs')[-1].get('number')] = \
3945 new_resonance['FSMothersNumbers']
3946 new_resonance['FSMothersNumbers'] = set(
3947 new_resonance['FSMothersNumbers'])
3948 if new_resonance not in resonances_found:
3949 resonances_found.append(new_resonance)
3950
3951
3952 kept_resonances = []
3953 for resonance in resonances_found:
3954
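            # Skip placeholder propagators carrying a null PDG code.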
3955 if resonance['ParticlePDG'] == 0:
3956 continue
3957
3958
3959 if abs(resonance['ParticlePDG']) in \
3960 [abs(l.get('id')) for l in process.get('legs')]:
3961 continue
3962
3963 mass_string = evaluator.full_model.get_particle(
3964 resonance['ParticlePDG']).get('mass')
3965 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
3966
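            # Massless particles have no resonant region to probe; skip them.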
3967 if mass==0.0:
3968 continue
3969
3970 width_string = evaluator.full_model.get_particle(
3971 resonance['ParticlePDG']).get('width')
3972 width = evaluator.full_model.get('parameter_dict')[width_string].real
3973
3974
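            # Likewise skip stable particles, whose width vanishes.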
3975 if width==0.0:
3976 continue
3977
3978 final_state_energy = sum(
3979 evaluator.full_model.get('parameter_dict')[
3980 evaluator.full_model.get_particle(l.get('id')).get('mass')].real
3981 for l in process.get('legs') if l.get('number') in
3982 resonance['FSMothersNumbers'])
3983
3984
3985 special_mass = (1.0 + options['offshellness'])*mass
3986
3987
3988 if special_mass<final_state_energy:
3989 raise InvalidCmd('The offshellness specified (%s) is such'\
3990 %options['offshellness']+' that the resulting kinematic is '+\
3991 'impossible for resonance %s %s.'%(evaluator.full_model.
3992 get_particle(resonance['ParticlePDG']).get_name(),
3993 str(list(resonance['FSMothersNumbers']))))
3994 continue
3995
3996
3997 kept_resonances.append(resonance)
3998
3999 for resonance in kept_resonances:
4000
4001 set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
4002
4003 return tuple(kept_resonances)
4004
4005 def set_PSpoint(resonance, force_other_res_offshell=[],
4006 allow_energy_increase=1.5, isolation_cuts=True):
4007 """ Starting from the specified resonance, construct a phase space point
4008         for it and possibly also enforce other resonances to be offshell. Possibly
4009         allow the energy to be progressively increased by the factor specified
4010         (negative float to forbid it) and possibly enforce default isolation cuts
4011         as well."""
4012
4013 def invmass(momenta):
4014 """ Computes the invariant mass of a list of momenta."""
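            # Momenta are stored as [E, px, py, pz]; the (+,-,-,-) metric is used.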
4015 ptot = [sum(p[i] for p in momenta) for i in range(4)]
4016 return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
4017
4018 model = evaluator.full_model
4019 def getmass(pdg):
4020 """ Returns the mass of a particle given the current model and its
4021 pdg given in argument."""
4022 return model.get('parameter_dict')[
4023 model.get_particle(pdg).get('mass')].real
4024
4025 N_trials = 0
4026 max_trial = 1e4
4027 nstep_for_energy_increase = 1e3
4028 PS_point_found = None
4029 if options['offshellness'] > 0.0:
4030 offshellness = options['offshellness']
4031 else:
4032
4033
4034
4035
4036 offshellness = (0.25*(options['offshellness']+1.0))-1.0
4037
4038
4039
4040
4041 if options['offshellness'] < 0.0:
4042 energy_increase = math.sqrt(allow_energy_increase)
4043 else:
4044 energy_increase = allow_energy_increase
4045
4046 other_res_offshell = [res for res in force_other_res_offshell if
4047 res!=resonance]
4048
4049
4050
4051 all_other_res_masses = [getmass(res['ParticlePDG'])
4052 for res in other_res_offshell]
4053 resonance_mass = getmass(resonance['ParticlePDG'])
4054
4055 str_res = '%s %s'%(model.get_particle(
4056 resonance['ParticlePDG']).get_name(),
4057 str(list(resonance['FSMothersNumbers'])))
4058 leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
4059
4060
4061
4062 daughter_masses = sum(getmass(leg_number_to_leg[\
4063 number].get('id')) for number in resonance['FSMothersNumbers'])
4064 min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
4065
4066
4067
4068 min_energy = max(sum(getmass(l.get('id')) for l in \
4069 process.get('legs') if l.get('state')==True),
4070 sum(getmass(l.get('id')) for l in \
4071 process.get('legs') if l.get('state')==False))
4072
4073
4074
4075 daughter_offshellnesses = [(1.0+options['offshellness'])*mass
4076 for i, mass in enumerate(all_other_res_masses) if
4077 other_res_offshell[i]['FSMothersNumbers'].issubset(
4078 resonance['FSMothersNumbers'])]
4079
4080 if options['offshellness'] >= 0.0:
4081
4082 if len(daughter_offshellnesses)>0:
4083 max_mass = max(daughter_offshellnesses)
4084
4085 offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
4086 options['offshellness'])
4087
4088 max_mass = max([(1.0+options['offshellness'])*mass for mass in \
4089 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4090
4091
4092
4093 target = max(min_energy*1.2,max_mass*2.0)
4094 if target > options['energy']:
4095 logger.warning("The user-defined energy %f seems "%options['energy']+
4096             "insufficient to reach the minimum propagator invariant mass "+
4097 "%f required for the chosen offshellness %f."%(max_mass,
4098 options['offshellness']) + " Energy reset to %f."%target)
4099 options['energy'] = target
4100
4101 else:
4102 if len(daughter_offshellnesses) > 0:
4103 min_mass = min(daughter_offshellnesses)
4104
4105 offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
4106 options['offshellness'])
4107
4108
4109
4110 if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
4111                 msg = 'The resonance %s cannot accommodate'%str_res+\
4112 ' an offshellness of %f because the daughter'%options['offshellness']+\
4113 ' masses are %f.'%daughter_masses
4114 if options['offshellness']<min_offshellnes:
4115                     msg += ' Try again with an offshellness of at least'+\
4116                      ' %f (i.e. smaller in absolute value).'%min_offshellnes
4117 else:
4118                     msg += ' Try again with a smaller offshellness (in absolute value).'
4119 raise InvalidCmd(msg)
4120
4121 min_mass = min([(1.0+options['offshellness'])*mass for mass in \
4122 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4123
4124
4125 if 2.0*min_mass < options['energy']:
4126 new_energy = max(min_energy*1.2, 2.0*min_mass)
4127 logger.warning("The user-defined energy %f seems "%options['energy']+
4128             "too large and would overshoot the maximum propagator invariant mass "+
4129 "%f required for the chosen offshellness %f."%(min_mass,
4130 options['offshellness']) + " Energy reset to %f."%new_energy)
4131 options['energy'] = new_energy
4132
4133 if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
4134 logger.debug("The target energy is not compatible with the mass"+
4135 " of the external states for this process (%f). It is "%min_mass+
4136 "unlikely that a valid kinematic configuration will be found.")
4137
4138 if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
4139 options['offshellness']>0.0 and offshellness>options['offshellness']:
4140 logger.debug("Offshellness increased to %f"%offshellness+
4141 " so as to try to find a kinematical configuration with"+
4142 " offshellness at least equal to %f"%options['offshellness']+
4143 " for all resonances.")
4144
4145 start_energy = options['energy']
4146 while N_trials<max_trial:
4147 N_trials += 1
4148 if N_trials%nstep_for_energy_increase==0:
4149 if allow_energy_increase > 0.0:
4150 old_offshellness = offshellness
4151 if offshellness > 0.0:
4152 options['energy'] *= energy_increase
4153 offshellness *= energy_increase
4154 else:
4155 options['energy'] = max(options['energy']/energy_increase,
4156 min_energy*1.2)
4157 offshellness = max(min_offshellnes,
4158 ((offshellness+1.0)/energy_increase)-1.0)
4159 if old_offshellness!=offshellness:
4160 logger.debug('Trying to find a valid kinematic'+\
4161 " configuration for resonance '%s'"%str_res+\
4162 ' with increased offshellness %f'%offshellness)
4163
4164 candidate = get_PSpoint_for_resonance(resonance, offshellness)
4165 pass_offshell_test = True
4166 for i, res in enumerate(other_res_offshell):
4167
4168 if offshellness > 0.0:
4169 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
4170 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4171 pass_offshell_test = False
4172 break
4173 else:
4174 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
4175 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4176 pass_offshell_test = False
4177 break
4178 if not pass_offshell_test:
4179 continue
4180
4181 if isolation_cuts:
4182
4183 if not evaluator.pass_isolation_cuts(candidate,
4184 ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
4185 continue
4186 PS_point_found = candidate
4187 break
4188
4189
4190 options['energy'] = start_energy
4191
4192 if PS_point_found is None:
4193 err_msg = 'Could not find a valid PS point in %d'%max_trial+\
4194 ' trials. Try increasing the energy, modify the offshellness '+\
4195 'or relax some constraints.'
4196 if options['offshellness']<0.0:
4197                 err_msg += ' Try with a positive offshellness instead (or a '+\
4198                     'negative one of smaller absolute value).'
4199 raise InvalidCmd(err_msg)
4200 else:
4201
4202
4203 resonance['offshellnesses'] = []
4204 all_other_res_masses = [resonance_mass] + all_other_res_masses
4205 other_res_offshell = [resonance] + other_res_offshell
4206 for i, res in enumerate(other_res_offshell):
4207 if i==0:
4208 res_str = 'self'
4209 else:
4210 res_str = '%s %s'%(model.get_particle(
4211 res['ParticlePDG']).get_name(),
4212 str(list(res['FSMothersNumbers'])))
4213 resonance['offshellnesses'].append((res_str,(
4214 (invmass([PS_point_found[j-1] for j in
4215 res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
4216
4217 resonance['PS_point_used'] = PS_point_found
4218
4219 def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
4220 """ Assigns a kinematic configuration to the resonance dictionary
4221 given in argument."""
4222
4223
4224 mass_string = evaluator.full_model.get_particle(
4225 resonance['ParticlePDG']).get('mass')
4226 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
4227
4228
4229 special_mass = (1.0 + offshellness)*mass
4230
4231
4232 prod_proc = base_objects.Process({'legs':base_objects.LegList(
4233 copy.copy(leg) for leg in process.get('legs') if
4234 leg.get('number') not in resonance['FSMothersNumbers'])})
4235
4236
4237
4238 prod_proc.get('legs').append(base_objects.Leg({
4239 'number':max(l.get('number') for l in process.get('legs'))+1,
4240 'state':True,
4241 'id':0}))
4242
4243 decay_proc = base_objects.Process({'legs':base_objects.LegList(
4244 copy.copy(leg) for leg in process.get('legs') if leg.get('number')
4245 in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
4246
4247
4248
4249
4250 decay_proc.get('legs').insert(0,base_objects.Leg({
4251 'number':-1,
4252 'state':False,
4253 'id':0}))
4254 prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
4255 special_mass=special_mass)[0]
4256 decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
4257 special_mass=special_mass)[0]
4258 momenta = glue_momenta(prod_kinematic,decay_kinematic)
4259
4260
4261
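        # Put the glued momenta back into the original leg-number ordering of the
        # full process before returning them.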
4262 ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
4263 for i in range(len(prod_proc.get('legs'))-1)]
4264
4265 ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
4266 momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
4267
4268
4269 return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
4270
4271
4272
4273 @misc.mute_logger()
4274 def get_width(PDG, lambdaCMS, param_card):
4275 """ Returns the width to use for particle with absolute PDG 'PDG' and
4276         for the lambdaCMS value 'lambdaCMS', using the cache if possible."""
4277
4278
4279
4280 if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
4281 return 0.0
4282
4283 particle = evaluator.full_model.get_particle(PDG)
4284
4285
4286
4287 if particle.get('ghost') or particle.get('goldstone'):
4288 return 0.0
4289
4290
4291 if particle.get('width')=='ZERO':
4292 return 0.0
4293
4294 if (PDG,lambdaCMS) in options['cached_widths']:
4295 return options['cached_widths'][(PDG,lambdaCMS)]
4296
4297 if options['recompute_width'] == 'never':
4298 width = evaluator.full_model.\
4299 get('parameter_dict')[particle.get('width')].real
4300 else:
4301
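            # Widths are (re)computed during the NWA pass only; the CMS pass is
            # expected to find them already cached.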
4302 if aloha.complex_mass:
4303 raise MadGraph5Error("The width for particle with PDG %d and"%PDG+\
4304 " lambdaCMS=%f should have already been "%lambdaCMS+\
4305 "computed during the NWA run.")
4306
4307
4308 if options['recompute_width'] in ['always','first_time']:
4309 particle_name = particle.get_name()
4310 with misc.TMP_directory(dir=options['output_path']) as path:
4311 param_card.write(pjoin(path,'tmp.dat'))
4312
4313
4314
4315 command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
4316 ' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
4317 ' --precision_channel=0.001'
4318
4319 param_card.write(pjoin(options['output_path'],'tmp.dat'))
4320
4321
4322
4323 orig_model = options['cmd']._curr_model
4324 orig_helas_model = options['cmd']._curr_helas_model
4325 options['cmd'].do_compute_widths(command, evaluator.full_model)
4326
4327 options['cmd']._curr_model = orig_model
4328 options['cmd']._curr_helas_model = orig_helas_model
4329
4330
4331 evaluator.full_model.set_parameters_and_couplings(
4332 param_card=param_card)
4333 try:
4334 tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
4335 except:
4336                     raise MadGraph5Error('Error occurred during width '+\
4337 'computation with command:\n compute_widths %s'%command)
4338 width = tmp_param_card['decay'].get(PDG).value
4339
4340
4341
4342
4343
4344
4345
4346 if options['recompute_width'] in ['never','first_time']:
4347
4348 for lam in options['lambdaCMS']:
4349 options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
4350 else:
4351 options['cached_widths'][(PDG,lambdaCMS)] = width
4352
4353 return options['cached_widths'][(PDG,lambdaCMS)]
4354
4355 def get_order(diagrams, diagsName):
4356         """Compute the common sum of coupling orders used for this CMS check
4357         in the diagrams specified. When an inconsistency occurs, use diagsName
4358         in the warning message thrown."""
4359
4360 orders = set([])
4361 for diag in diagrams:
4362 diag_orders = diag.calculate_orders()
4363 orders.add(sum((diag_orders[order] if order in diag_orders else 0)
4364 for order in options['expansion_orders']))
4365 if len(orders)>1:
4366             logger.warning(warning_msg%('%s '%diagsName,str(orders)))
4367 return min(list(orders))
4368 else:
4369 return list(orders)[0]
4370
4371 MLoptions = copy.copy(options['MLOptions'])
4372
4373 MLoptions['DoubleCheckHelicityFilter'] = False
4374
4375
4376 for tweak in options['tweak']['custom']:
4377 if tweak.startswith('seed'):
4378 try:
4379 new_seed = int(tweak[4:])
4380 except ValueError:
4381 raise MadGraph5Error("Seed '%s' is not of the right format 'seed<int>'."%tweak)
4382 random.seed(new_seed)
4383
4384 mode = 'CMS' if aloha.complex_mass else 'NWA'
4385 for i, leg in enumerate(process.get('legs')):
4386 leg.set('number', i+1)
4387
4388 logger.info("Running CMS check for process %s (now doing %s scheme)" % \
4389 ( process.nice_string().replace('Process:', 'process'), mode))
4390
4391 proc_dir = None
4392 resonances = None
4393 warning_msg = "All %sdiagrams do not share the same sum of orders "+\
4394 "%s; found %%s."%(','.join(options['expansion_orders']))+\
4395       " This is potentially problematic for the CMS check."
4396 if NLO:
4397
4398
4399
4400 if options['name']=='auto':
4401 proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
4402 temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
4403 ('_' if process.get('perturbation_couplings') else '')+
4404 '_'.join(process.get('perturbation_couplings')),mode)
4405 else:
4406 proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
4407 temp_dir_prefix,options['name'], mode)
4408
4409 timing, matrix_element = generate_loop_matrix_element(process,
4410 options['reuse'], output_path=options['output_path'],
4411 cmd = options['cmd'], proc_name=proc_name,
4412 loop_filter=options['loop_filter'])
4413 if matrix_element is None:
4414
4415 return None
4416
4417 reusing = isinstance(matrix_element, base_objects.Process)
4418 proc_dir = pjoin(options['output_path'],proc_name)
4419
4420
4421 infos = evaluator.setup_process(matrix_element, proc_dir,
4422 reusing = reusing, param_card = options['param_card'],
4423 MLOptions=MLoptions)
4424
4425 evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
4426 mp = None, loop_filter = True,MLOptions=MLoptions)
4427
4428
4429 tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
4430 if os.path.isfile(tmp_card_backup):
4431
4432 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4433 " Now reverting 'param_card.dat' to its original value.")
4434 shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
4435 else:
4436
4437 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
4438
4439 tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
4440 'model_functions.f__TemporaryBackup__')
4441 if os.path.isfile(tmp_modelfunc_backup):
4442
4443 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4444 " Now reverting 'model_functions.f' to its original value.")
4445 shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
4446 'model_functions.f'))
4447 evaluator.apply_log_tweak(proc_dir, 'recompile')
4448 else:
4449
4450 shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
4451 tmp_modelfunc_backup)
4452
4453
4454 MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
4455 read_ps = True, npoints = 1, hel_config = options['helicity'],
4456 split_orders=options['split_orders'])
4457
4458
4459
4460 for dir in misc.glob('P*_*', pjoin(proc_dir,'SubProcesses')):
4461 if not (re.search(r'.*P\d+_\w*$', dir) or not os.path.isdir(dir)):
4462 continue
4463 try:
4464 os.remove(pjoin(dir,'check'))
4465 os.remove(pjoin(dir,'check_sa.o'))
4466 except OSError:
4467 pass
4468
4469 with open(os.devnull, 'w') as devnull:
4470 retcode = subprocess.call(['make','check'],
4471 cwd=dir, stdout=devnull, stderr=devnull)
4472 if retcode != 0:
4473 raise MadGraph5Error("Compilation error with "+\
4474 "'make check' in %s"%dir)
4475
4476
4477 pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
4478 if reusing:
4479
4480
4481 if not os.path.isfile(pkl_path):
4482 raise InvalidCmd('The folder %s could'%proc_dir+\
4483 " not be reused because the resonance specification file "+
4484 "'resonance_specs.pkl' is missing.")
4485 else:
4486 proc_name, born_order, loop_order, resonances = \
4487 save_load_object.load_from_file(pkl_path)
4488
4489
4490 for res in resonances:
4491 set_PSpoint(res, force_other_res_offshell=resonances)
4492
4493
4494 if isinstance(opt, list):
4495 opt.append((proc_name, resonances))
4496 else:
4497 resonances = opt
4498 else:
4499 helas_born_diagrams = matrix_element.get_born_diagrams()
4500 if len(helas_born_diagrams)==0:
4501             logger.warning('The CMS check for loop-induced processes is '+\
4502 'not yet available (nor is it very interesting).')
4503 return None
4504 born_order = get_order(helas_born_diagrams,'Born')
4505 loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
4506
4507
4508 if isinstance(opt, list):
4509 opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
4510 resonances = opt[-1][1]
4511 else:
4512 resonances = opt
4513
4514
4515 save_load_object.save_to_file(pkl_path, (process.base_string(),
4516 born_order, loop_order,resonances))
4517
4518 else:
4519
4520 try:
4521 amplitude = diagram_generation.Amplitude(process)
4522 except InvalidCmd:
4523 logging.info("No diagrams for %s" % \
4524 process.nice_string().replace('Process', 'process'))
4525 return None
4526 if not amplitude.get('diagrams'):
4527
4528 logging.info("No diagrams for %s" % \
4529 process.nice_string().replace('Process', 'process'))
4530 return None
4531
4532 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4533 gen_color=True)
4534 diagrams = matrix_element.get('diagrams')
4535 born_order = get_order(diagrams,'Born')
4536
4537 loop_order = -1
4538
4539 if isinstance(opt, list):
4540 opt.append((process.base_string(),find_resonances(diagrams)))
4541 resonances = opt[-1][1]
4542 else:
4543 resonances= opt
4544
4545 if len(resonances)==0:
4546 logger.info("No resonance found for process %s."\
4547 %process.base_string())
4548 return None
4549
4550
4551 if not options['cached_param_card'][mode][0]:
4552 if NLO:
4553 param_card = check_param_card.ParamCard(
4554 pjoin(proc_dir,'Cards','param_card.dat'))
4555 else:
4556 param_card = check_param_card.ParamCard(
4557 StringIO.StringIO(evaluator.full_model.write_param_card()))
4558 options['cached_param_card'][mode][0] = param_card
4559 name2block, _ = param_card.analyze_param_card()
4560 options['cached_param_card'][mode][1] = name2block
4561
4562 else:
4563 param_card = options['cached_param_card'][mode][0]
4564 name2block = options['cached_param_card'][mode][1]
4565
4566
4567 if loop_order != -1 and (loop_order+born_order)%2 != 0:
4568 raise MadGraph5Error('The summed squared matrix element '+\
4569 " order '%d' is not even."%(loop_order+born_order))
4570 result = {'born_order':born_order,
4571 'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)/2),
4572 'resonances_result':[]}
4573
4574
4575 if NLO:
4576 try:
4577 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
4578 pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
4579 except:
4580 pass
4581
4582
4583 had_log_tweaks=False
4584 if NLO:
4585 for tweak in options['tweak']['custom']:
4586 if tweak.startswith('seed'):
4587 continue
4588 try:
4589 logstart, logend = tweak.split('->')
4590 except:
4591                 raise MadGraph5Error("Tweak '%s' not recognized."%tweak)
4592 if logstart in ['logp','logm', 'log'] and \
4593 logend in ['logp','logm', 'log']:
4594 if NLO:
4595 evaluator.apply_log_tweak(proc_dir, [logstart, logend])
4596 had_log_tweaks = True
4597 else:
4598                 raise MadGraph5Error("Tweak '%s' not recognized."%tweak)
4599 if had_log_tweaks:
4600 evaluator.apply_log_tweak(proc_dir, 'recompile')
4601
4602
4603 if options['resonances']=='all':
4604 resonances_to_run = resonances
4605 elif isinstance(options['resonances'],int):
4606 resonances_to_run = resonances[:options['resonances']]
4607 elif isinstance(options['resonances'],list):
4608 resonances_to_run = []
4609 for res in resonances:
4610 for res_selection in options['resonances']:
4611 if abs(res['ParticlePDG'])==res_selection[0] and \
4612 res['FSMothersNumbers']==set(res_selection[1]):
4613 resonances_to_run.append(res)
4614 break
4615 else:
4616         raise InvalidCmd("Resonance selection '%s' not recognized"%\
4617 str(options['resonances']))
4618
4619
4620
4621 if NLO and options['show_plot']:
4622 widgets = ['ME evaluations:', pbar.Percentage(), ' ',
4623 pbar.Bar(),' ', pbar.ETA(), ' ']
4624 progress_bar = pbar.ProgressBar(widgets=widgets,
4625 maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
4626 progress_bar.update(0)
4627
4628 sys.stdout.flush()
4629 else:
4630 progress_bar = None
4631
4632 for resNumber, res in enumerate(resonances_to_run):
4633
4634
4635 result['resonances_result'].append({'resonance':res,'born':[]})
4636 if NLO:
4637 result['resonances_result'][-1]['finite'] = []
4638
4639 for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
4640
4641
4642 new_param_card = check_param_card.ParamCard(param_card)
4643
4644 for param, replacement in options['expansion_parameters'].items():
4645
4646
4647 orig_param = param.replace('__tmpprefix__','')
4648 if orig_param not in name2block:
4649
4650
4651
4652 continue
4653 for block, lhaid in name2block[orig_param]:
4654 orig_value = float(param_card[block].get(lhaid).value)
4655 new_value = eval(replacement,
4656 {param:orig_value,'lambdacms':lambdaCMS})
4657 new_param_card[block].get(lhaid).value=new_value
4658
4659
4660
4661
4662
4663
4664
4665
4666 evaluator.full_model.set_parameters_and_couplings(
4667 param_card=new_param_card)
4668
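            # Rescale all widths: in CMS mode use the width matching this lambdaCMS
            # value, in NWA mode set every width to zero.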
4669 for decay in new_param_card['decay'].keys():
4670 if mode=='CMS':
4671 new_width = get_width(abs(decay[0]), lambdaCMS,
4672 new_param_card)
4673 else:
4674 new_width = 0.0
4675 new_param_card['decay'].get(decay).value= new_width
4676
4677
4678 evaluator.full_model.set_parameters_and_couplings(
4679 param_card=new_param_card)
4680 if NLO:
4681 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4682
4683
4684 if lambdaCMS==1.0 and mode=='CMS' and \
4685 options['recompute_width'] in ['always','first_time']:
4686 new_param_card.write(pjoin(proc_dir,
4687 'Cards','param_card.dat_recomputed_widths'))
4688
4689
4690
4691 if mode=='NWA' and (options['recompute_width']=='always' or (
4692 options['recompute_width']=='first_time' and lambdaCMS==1.0)):
4693
4694 tmp_param_card = check_param_card.ParamCard(new_param_card)
4695
4696
4697 for decay in new_param_card['decay'].keys():
4698 particle_name = evaluator.full_model.get_particle(\
4699 abs(decay[0])).get_name()
4700 new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
4701 tmp_param_card['decay'].get(decay).value = new_width
4702 if not options['has_FRdecay'] and new_width != 0.0 and \
4703 (abs(decay[0]),lambdaCMS) not in options['cached_widths']:
4704 logger.info('Numerically computed width of particle'+\
4705 ' %s for lambda=%.4g : %-9.6gGeV'%
4706 (particle_name,lambdaCMS,new_width))
4707
4708
4709
4710 if lambdaCMS==1.0 and NLO:
4711 tmp_param_card.write(pjoin(proc_dir,
4712 'Cards','param_card.dat_recomputed_widths'))
4713
4714
4715 for param, replacement in options['tweak']['params'].items():
4716
4717
4718 orig_param = param.replace('__tmpprefix__','')
4719
4720 if orig_param.lower() == 'allwidths':
4721
4722 for decay in new_param_card['decay'].keys():
4723 orig_value = float(new_param_card['decay'].get(decay).value)
4724 new_value = eval(replacement,
4725 {param:orig_value,'lambdacms':lambdaCMS})
4726 new_param_card['decay'].get(decay).value = new_value
4727 continue
4728 if orig_param not in name2block:
4729
4730
4731 continue
4732 for block, lhaid in name2block[orig_param]:
4733 orig_value = float(new_param_card[block].get(lhaid).value)
4734 new_value = eval(replacement,
4735 {param:orig_value,'lambdacms':lambdaCMS})
4736 new_param_card[block].get(lhaid).value=new_value
4737
4738 if options['tweak']['params']:
4739
4740 evaluator.full_model.set_parameters_and_couplings(
4741 param_card=new_param_card)
4742 if NLO:
4743 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4744
4745
4746 if NLO:
4747 ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
4748 proc_dir, PSpoint=res['PS_point_used'], verbose=False,
4749 format='dict', skip_compilation=True)
4750
4751
4752
4753
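                # The finite part returned here is normalised to the Born and to
                # alpha_S/(2*pi); undo that normalisation before storing it.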
4754 result['resonances_result'][-1]['born'].append(ME_res['born'])
4755 result['resonances_result'][-1]['finite'].append(
4756 ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
4757 else:
4758 ME_res = evaluator.evaluate_matrix_element(matrix_element,
4759 p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
4760 result['resonances_result'][-1]['born'].append(ME_res)
4761 if not progress_bar is None:
4762 progress_bar.update(resNumber*len(options['lambdaCMS'])+\
4763 (lambdaNumber+1))
4764
4765 sys.stdout.flush()
4766
4767
4768 log_reversed = False
4769 for tweak in options['tweak']['custom']:
4770 if tweak.startswith('log') and had_log_tweaks:
4771 if log_reversed:
4772 continue
4773 if NLO:
4774 evaluator.apply_log_tweak(proc_dir, 'default')
4775 evaluator.apply_log_tweak(proc_dir, 'recompile')
4776 log_reversed = True
4777
4778
4779 evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
4780 if NLO:
4781 try:
4782 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
4783 pjoin(proc_dir,'Cards','param_card.dat'))
4784 except:
4785 param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4786
4787
4788
4789 try:
4790 os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
4791 os.remove(pjoin(proc_dir,'Source','MODEL',
4792 'model_functions.f__TemporaryBackup__'))
4793 except:
4794 pass
4795
4796 return (process.nice_string().replace('Process:', '').strip(),result)
4797
4798 def get_value(process, evaluator, p=None, options=None):
4799 """Return the value/momentum for a phase space point"""
4800
4801 for i, leg in enumerate(process.get('legs')):
4802 leg.set('number', i+1)
4803
4804 logger.info("Checking %s in %s gauge" % \
4805 ( process.nice_string().replace('Process:', 'process'),
4806 'unitary' if aloha.unitary_gauge else 'feynman'))
4807
4808 legs = process.get('legs')
4809
4810
4811 try:
4812 if process.get('perturbation_couplings')==[]:
4813 amplitude = diagram_generation.Amplitude(process)
4814 else:
4815 amplitude = loop_diagram_generation.LoopAmplitude(process)
4816 except InvalidCmd:
4817 logging.info("No diagrams for %s" % \
4818 process.nice_string().replace('Process', 'process'))
4819 return None
4820
4821 if not amplitude.get('diagrams'):
4822
4823 logging.info("No diagrams for %s" % \
4824 process.nice_string().replace('Process', 'process'))
4825 return None
4826
4827 if not p:
4828
4829 p, w_rambo = evaluator.get_momenta(process, options)
4830
4831
4832 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
4833 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4834 gen_color = True)
4835 else:
4836 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
4837 gen_color = True, optimized_output = evaluator.loop_optimized_output)
4838
4839 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
4840 output='jamp',options=options)
4841
4842 if mvalue and mvalue['m2']:
4843 return {'process':process.base_string(),'value':mvalue,'p':p}
4844
4846 """Present the results of a comparison in a nice list format for loop
4847     processes. It details the results from each Lorentz transformation performed.
4848 """
4849
4850 process = comparison_results[0]['process']
4851 results = comparison_results[0]['results']
4852
4853
4854 threshold_rotations = 1e-6
4855
4856
4857
4858 threshold_boosts = 1e-3
4859 res_str = "%s" % process.base_string()
4860
4861 transfo_col_size = 17
4862 col_size = 18
4863 transfo_name_header = 'Transformation name'
4864
4865 if len(transfo_name_header) + 1 > transfo_col_size:
4866 transfo_col_size = len(transfo_name_header) + 1
4867
4868 for transfo_name, value in results:
4869 if len(transfo_name) + 1 > transfo_col_size:
4870 transfo_col_size = len(transfo_name) + 1
4871
4872 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
4873 fixed_string_length("Value", col_size) + \
4874 fixed_string_length("Relative diff.", col_size) + "Result"
4875
4876 ref_value = results[0]
4877 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
4878 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
4879
4880
4881 all_pass = True
4882 for res in results[1:]:
4883 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
4884 threshold_rotations
4885 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
4886 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
4887 this_pass = rel_diff <= threshold
4888 if not this_pass:
4889 all_pass = False
4890 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
4891 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
4892 fixed_string_length("%1.10e" % rel_diff, col_size) + \
4893 ("Passed" if this_pass else "Failed")
4894 if all_pass:
4895 res_str += '\n' + 'Summary: passed'
4896 else:
4897 res_str += '\n' + 'Summary: failed'
4898
4899 return res_str
4900
4902 """Present the results of a comparison in a nice list format
4903     if output='fail' return the number of failed processes -- for test --
4904 """
4905
4906
4907 if comparison_results[0]['process']['perturbation_couplings']!=[]:
4908 return output_lorentz_inv_loop(comparison_results, output)
4909
4910 proc_col_size = 17
4911
4912 threshold=1e-10
4913 process_header = "Process"
4914
4915 if len(process_header) + 1 > proc_col_size:
4916 proc_col_size = len(process_header) + 1
4917
4918     for one_comp in comparison_results:
4919         proc = one_comp['process'].base_string()
4920         proc_col_size = max(proc_col_size, len(proc) + 1)
4921
4922 col_size = 18
4923
4924 pass_proc = 0
4925 fail_proc = 0
4926 no_check_proc = 0
4927
4928 failed_proc_list = []
4929 no_check_proc_list = []
4930
4931 res_str = fixed_string_length(process_header, proc_col_size) + \
4932 fixed_string_length("Min element", col_size) + \
4933 fixed_string_length("Max element", col_size) + \
4934 fixed_string_length("Relative diff.", col_size) + \
4935 "Result"
4936
4937 for one_comp in comparison_results:
4938 proc = one_comp['process'].base_string()
4939 data = one_comp['results']
4940
4941 if data == 'pass':
4942 no_check_proc += 1
4943 no_check_proc_list.append(proc)
4944 continue
4945
4946 values = [data[i]['m2'] for i in range(len(data))]
4947
4948 min_val = min(values)
4949 max_val = max(values)
4950 diff = (max_val - min_val) / abs(max_val)
4951
4952 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4953 fixed_string_length("%1.10e" % min_val, col_size) + \
4954 fixed_string_length("%1.10e" % max_val, col_size) + \
4955 fixed_string_length("%1.10e" % diff, col_size)
4956
4957 if diff < threshold:
4958 pass_proc += 1
4959 proc_succeed = True
4960 res_str += "Passed"
4961 else:
4962 fail_proc += 1
4963 proc_succeed = False
4964 failed_proc_list.append(proc)
4965 res_str += "Failed"
4966
4967
4968
4969
4970
4971 if len(data[0]['jamp'])!=0:
4972 for k in range(len(data[0]['jamp'][0])):
4973 sum = [0] * len(data)
4974
4975 for j in range(len(data[0]['jamp'])):
4976
4977 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
4978 sum = [sum[i] + values[i] for i in range(len(values))]
4979
4980
4981 min_val = min(sum)
4982 max_val = max(sum)
4983 if not max_val:
4984 continue
4985 diff = (max_val - min_val) / max_val
4986
4987 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
4988 fixed_string_length("%1.10e" % min_val, col_size) + \
4989 fixed_string_length("%1.10e" % max_val, col_size) + \
4990 fixed_string_length("%1.10e" % diff, col_size)
4991
4992 if diff > 1e-10:
4993 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4994 fail_proc += 1
4995 pass_proc -= 1
4996 failed_proc_list.append(proc)
4997 res_str += tmp_str + "Failed"
4998 elif not proc_succeed:
4999 res_str += tmp_str + "Passed"
5000
5001
5002
5003 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5004 (pass_proc, pass_proc + fail_proc,
5005 fail_proc, pass_proc + fail_proc)
5006
5007 if fail_proc != 0:
5008 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5009 if no_check_proc:
5010 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5011
5012 if output == 'text':
5013 return res_str
5014 else:
5015 return fail_proc
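# Illustrative usage sketch (assumes a 'comparisons' list of the form produced by
# the Lorentz-invariance check above):
#
#     report   = output_lorentz_inv(comparisons, output='text')
#     n_failed = output_lorentz_inv(comparisons, output='fail')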
5016
5018 """Present the results of a comparison in a nice list format
5019     if output='fail' return the number of failed processes -- for test --
5020 """
5021
5022 proc_col_size = 17
5023
5024
5025
5026 pert_coupl = comparison_results[0]['perturbation_couplings']
5027 comparison_results = comparison_results[1:]
5028
5029 if pert_coupl:
5030 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
5031 else:
5032 process_header = "Process"
5033
5034 if len(process_header) + 1 > proc_col_size:
5035 proc_col_size = len(process_header) + 1
5036
5037 for data in comparison_results:
5038 proc = data['process']
5039 if len(proc) + 1 > proc_col_size:
5040 proc_col_size = len(proc) + 1
5041
5042 pass_proc = 0
5043 fail_proc = 0
5044 no_check_proc = 0
5045
5046 failed_proc_list = []
5047 no_check_proc_list = []
5048
5049 col_size = 18
5050
5051 res_str = fixed_string_length(process_header, proc_col_size) + \
5052 fixed_string_length("Unitary", col_size) + \
5053 fixed_string_length("Feynman", col_size) + \
5054 fixed_string_length("Relative diff.", col_size) + \
5055 "Result"
5056
5057 for one_comp in comparison_results:
5058 proc = one_comp['process']
5059 data = [one_comp['value_unit'], one_comp['value_feynm']]
5060
5061
5062 if data[0] == 'pass':
5063 no_check_proc += 1
5064 no_check_proc_list.append(proc)
5065 continue
5066
5067 values = [data[i]['m2'] for i in range(len(data))]
5068
5069 min_val = min(values)
5070 max_val = max(values)
5071
5072
5073 diff = (max_val - min_val) / abs(max_val)
5074
5075 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
5076 fixed_string_length("%1.10e" % values[0], col_size) + \
5077 fixed_string_length("%1.10e" % values[1], col_size) + \
5078 fixed_string_length("%1.10e" % diff, col_size)
5079
5080 if diff < 1e-8:
5081 pass_proc += 1
5082 proc_succeed = True
5083 res_str += "Passed"
5084 else:
5085 fail_proc += 1
5086 proc_succeed = False
5087 failed_proc_list.append(proc)
5088 res_str += "Failed"
5089
5090
5091
5092
5093
5094 if len(data[0]['jamp'])>0:
5095 for k in range(len(data[0]['jamp'][0])):
5096 sum = [0, 0]
5097
5098 for j in range(len(data[0]['jamp'])):
5099
5100 values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
5101 sum = [sum[i] + values[i] for i in range(len(values))]
5102
5103
5104 min_val = min(sum)
5105 max_val = max(sum)
5106 if not max_val:
5107 continue
5108 diff = (max_val - min_val) / max_val
5109
5110 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
5111 fixed_string_length("%1.10e" % sum[0], col_size) + \
5112 fixed_string_length("%1.10e" % sum[1], col_size) + \
5113 fixed_string_length("%1.10e" % diff, col_size)
5114
5115 if diff > 1e-10:
5116 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
5117 fail_proc += 1
5118 pass_proc -= 1
5119 failed_proc_list.append(proc)
5120 res_str += tmp_str + "Failed"
5121 elif not proc_succeed:
5122 res_str += tmp_str + "Passed"
5123
5124
5125
5126 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5127 (pass_proc, pass_proc + fail_proc,
5128 fail_proc, pass_proc + fail_proc)
5129
5130 if fail_proc != 0:
5131 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5132 if no_check_proc:
5133 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5134
5135
5136 if output == 'text':
5137 return res_str
5138 else:
5139 return fail_proc
5140
5141 def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
5142 """Creates a suitable filename for saving these results."""
5143
5144 if opts['name']=='auto' and opts['analyze']!='None':
5145
5146 return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
5147 [0],extension)
5148
5149 if opts['name']!='auto':
5150 basename = opts['name']
5151 else:
5152 prefix = 'cms_check_'
5153
5154 if len(cms_res['ordered_processes'])==1:
5155 proc = cms_res['ordered_processes'][0]
5156 replacements = [('=>','gt'),('<=','lt'),('/','_no_'),
5157 (' ',''),('+','p'),('-','m'),
5158 ('~','x'), ('>','_'),('=','eq'),('^2','squared')]
5159
5160 try:
5161 proc=proc[:proc.index('[')]
5162 except ValueError:
5163 pass
5164
5165 for key, value in replacements:
5166 proc = proc.replace(key,value)
5167
5168             basename = prefix+proc+'_%s_'%used_model.get('name')+\
5169 ( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
5170 cms_res['perturbation_orders']!=[] else '')
5171
5172 else:
5173 basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
5174
5175 suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
5176 if output_path:
5177 return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
5178 else:
5179 return '%s%s.%s'%(basename,suffix,extension)
5180
5182 """ Outputs nicely the outcome of the complex mass scheme check performed
5183     by varying the width in the offshell region of resonances found for each process.
5184     Output just specifies whether text should be returned or a list of failed
5185     processes. Use 'concise_text' for a concise report of the results."""
5186
5187 pert_orders=result['perturbation_orders']
5188
5189
5190
5191
5192
5193
5194
5195
5196
5197 diff_lambda_power = options['diff_lambda_power']
5198
5199
5200
5201
5202
5203
5204
5205 if 'has_FRdecay' in result:
5206 has_FRdecay = result['has_FRdecay']
5207 else:
5208 has_FRdecay = False
5209
5210 if not pert_orders:
5211 CMS_test_threshold = 1e-3
5212 else:
5213
5214
5215
5216
5217
5218
5219         if not has_FRdecay and ('recompute_width' not in result or \
5220 result['recompute_width'] in ['always','first_time']):
5221 CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
5222 else:
5223
5224
5225 CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
5226
5227
5228
5229
5230 consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
5231
5232
5233 group_val = 3
5234
5235
5236
5237
5238 diff_zero_threshold = 1e-3
5239
5240
5241 lambda_range = options['lambda_plot_range']
5242
5243
5244
5245
5246
5247
5248
5249
5250
5251
5252 res_str = ''
5253
5254 concise_str = ''
5255 concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
5256 concise_repl_dict = {'Header':{'process':'Process',
'asymptot':'Asymptote',
'cms_check':'Deviation from asymptote',
5259 'status':'Result'}}
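# Note: 'concise_data' is a two-stage format string. The first '%' substitution only fills in
# the width of the process column, e.g. concise_data % (max_proc_size+4) can give
# '%(process)-18s%(asymptot)-15s%(cms_check)-25s%(status)-25s\n', and that result is then
# %-formatted a second time with one of the concise_repl_dict row dictionaries.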
5260
5261
5262
5263
5264
5265 useLatexParticleName = 'built-in'
5266 name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
5267 'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
5268 'mu+':r'\mu^+',
5269 'mu-':r'\mu^-',
5270 'ta+':r'\tau^+',
5271 'ta-':r'\tau^-'}
5272 for p in ['e','m','t']:
5273 d = {'e':'e','m':r'\mu','t':r'\tau'}
5274 name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
5275 name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
5276
5277 for p in ['u','d','c','s','b','t']:
5278 name2tex[p]=p
5279 name2tex['%s~'%p]=r'\bar{%s}'%p
5280
5281 def format_particle_name(particle, latex=useLatexParticleName):
5282 p_name = particle
5283 if latex=='model':
5284 try:
5285 texname = model.get_particle(particle).get('texname')
5286 if texname and texname!='none':
5287 p_name = r'$\displaystyle %s$'%texname
5288 except:
5289 pass
5290 elif latex=='built-in':
5291 try:
5292 p_name = r'$\displaystyle %s$'%name2tex[particle]
5293 except:
5294 pass
5295 return p_name
5296
5297 def resonance_str(resonance, latex=useLatexParticleName):
5298 """ Provides a concise string to characterize the resonance """
5299 particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
5300 mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
5301 return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
5302 ','.join(mothersID))
5303
5304 def format_title(process, resonance):
5305 """ Format the plot title given the process and resonance """
5306
5307 process_string = []
5308 for particle in process.split():
5309 if '<=' in particle:
5310 particle = particle.replace('<=',r'$\displaystyle <=$')
5311 if '^2' in particle:
5312 particle = particle.replace('^2',r'$\displaystyle ^2$')
5313 if particle=='$$':
5314 process_string.append(r'\$\$')
5315 continue
5316 if particle=='>':
5317 process_string.append(r'$\displaystyle \rightarrow$')
5318 continue
5319 if particle=='/':
5320 process_string.append(r'$\displaystyle /$')
5321 continue
5322 process_string.append(format_particle_name(particle))
5323
5324 if resonance=='':
5325 return r'CMS check for %s' %(' '.join(process_string))
5326 else:
5327 return r'CMS check for %s ( resonance %s )'\
5328 %(' '.join(process_string),resonance)
5329
5330 def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
5331 proc=None, res=None):
5332 """ Guess the lambda scaling from a list of ME values and return it.
5333 Also compare with the expected result if specified and trigger a
5334 warning if not in agreement."""
5335
5336 bpowers = []
5337 for i, lambdaCMS in enumerate(lambda_values[1:]):
5338 bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
5339 lambda_values[0]/lambdaCMS)))
5340
5341
5342 bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
5343 key = lambda elem: elem[1], reverse=True)[0][0]
5344 if not expected:
5345 return bpower
5346 if bpower != expected:
logger.warning('The apparent scaling of the squared amplitude '+
    'is inconsistent with the expected value '+
    '(%i expected vs %i detected). %i will be used.'%(expected,bpower,bpower)+
    ' This happened for process %s and resonance %s.'%(proc, res))
5351 return bpower
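# Worked example (hypothetical numbers): with lambda_values = [1.0, 0.1, 0.01] and
# ME_values_list = [4.0, 0.04, 4.0e-4], every ratio gives
# round(log(4.0/0.04, 1.0/0.1)) = round(log(4.0/4.0e-4, 1.0/0.01)) = 2,
# so the most frequent exponent, 2, is returned as the lambda scaling power.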
5352
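# The helper below rescales every value back to the first lambda point using the expected
# lambda^n scaling, so exactly scaling data collapses onto a constant; the largest excursion
# of the rescaled values from their median, relative to that median, is then compared against
# a 1% stability threshold and a warning string is returned when it is exceeded.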
5353 def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
5354 """ Checks if the values passed in argument are stable and return the
5355 stability check outcome warning if it is not precise enough. """
5356
5357 values = sorted([
5358 abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
5359 i, val in enumerate(ME_values)])
5360 median = values[len(values)//2]
5361 max_diff = max(abs(values[0]-median),abs(values[-1]-median))
5362 stability = max_diff/median
5363 stab_threshold = 1e-2
5364 if stability >= stab_threshold:
5365 return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
5366 %(values_name, stability)
5367 else:
5368 return None
5369
5370 if options['analyze']=='None':
5371 if options['reuse']:
5372 save_path = CMS_save_path('pkl', result, model, options,
5373 output_path=output_path)
buff = "\nThe results of this check have been stored on disk and their "+\
"analysis can be rerun at any time with the MG5aMC command:\n "+\
" check cms --analyze=%s\n"%save_path
5377 res_str += buff
5378 concise_str += buff
5379 save_load_object.save_to_file(save_path, result)
5380 elif len(result['ordered_processes'])>0:
buff = "\nUse the following syntax if you want to store "+\
5382 "the raw results on disk.\n"+\
5383 " check cms -reuse <proc_def> <options>\n"
5384 res_str += buff
5385 concise_str += buff
5386
5387
5388
5389
5390
5391 checks = []
5392 for process in result['ordered_processes']:
5393 checks.extend([(process,resID) for resID in \
5394 range(len(result[process]['CMS']))])
5395
5396 if options['reuse']:
5397 logFile = open(CMS_save_path(
5398 'log', result, model, options, output_path=output_path),'w')
5399
5400 lambdaCMS_list=result['lambdaCMS']
5401
5402
5403 failed_procs = []
5404
5405
5406 bar = lambda char: char*47
5407
5408
5409 if 'widths_computed' in result:
5410 res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
5411 if result['recompute_width'] == 'never':
5412 res_str += '| Widths extracted from the param_card.dat'
5413 else:
5414 res_str += '| Widths computed %s'%('analytically' if has_FRdecay
5415 else 'numerically')
5416 if result['recompute_width'] == 'first_time':
5417 res_str += ' for \lambda = 1'
5418 elif result['recompute_width'] == 'always':
5419 res_str += ' for all \lambda values'
5420 res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
5421 for particle_name, width in result['widths_computed']:
5422 res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
5423 res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
5424
5425
5426
5427
5428 nstab_points=group_val
5429
5430 differences_target = {}
5431 for process, resID in checks:
5432
5433
5434 concise_repl_dict[process] = {'process':process,
5435 'asymptot':'N/A',
5436 'cms_check':'N/A',
5437 'status':'N/A'}
5438 proc_res = result[process]
5439 cms_res = proc_res['CMS'][resID]
5440 nwa_res = proc_res['NWA'][resID]
5441 resonance = resonance_str(cms_res['resonance'], latex='none')
5442 cms_born=cms_res['born']
5443 nwa_born=nwa_res['born']
5444
5445 res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
5446
5447 proc_title = "%s (resonance %s)"%(process,resonance)
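# bar(2) evaluates to 2*47 = 94, i.e. the combined width of the two '=' bars, so bar(2)+8 is
# the full width of the banner printed just above and the title is centered within it.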
5448 centering = (bar(2)+8-len(proc_title))//2
5449 res_str += "%s%s\n"%(' '*centering,proc_title)
5450
5451 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5452
5453
5454 if diff_lambda_power!=1:
5455 res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
5456 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5457
5458 born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
5459 expected=proc_res['born_order'], proc=process, res=resonance)
5460 stab_cms_born = check_stability(cms_born[-nstab_points:],
5461 lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
5462 if stab_cms_born:
5463 res_str += stab_cms_born
5464 stab_nwa_born = check_stability(nwa_born[-nstab_points:],
5465 lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
5466 if stab_nwa_born:
5467 res_str += stab_nwa_born
5468
res_str += "== Kinematic configuration in GeV (E,px,py,pz)\n"
5470 for i, p in enumerate(cms_res['resonance']['PS_point_used']):
5471 res_str += " | p%-2.d = "%(i+1)
5472 for pi in p:
5473 res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
5474 res_str += "\n"
5475
5476 res_str += "== Offshellnesses of all detected resonances\n"
5477 for res_name, offshellness in cms_res['resonance']['offshellnesses']:
5478 res_str += " | %-15s = %f\n"%(res_name, offshellness)
5479 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5480
5481 if not pert_orders:
5482 res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
5483 else:
5484 cms_finite=cms_res['finite']
5485 nwa_finite=nwa_res['finite']
5486 loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
5487 expected=proc_res['loop_order'], proc=process, res=resonance)
5488 res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
5489 %(born_power,loop_power)
5490 stab_cms_finite = check_stability(cms_finite[-nstab_points:],
5491 lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
5492 if stab_cms_finite:
5493 res_str += stab_cms_finite
5494 stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
5495 lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
5496 if stab_nwa_finite:
5497 res_str += stab_nwa_finite
5498
5499 CMSData = []
5500 NWAData = []
5501 DiffData = []
5502 for idata, lam in enumerate(lambdaCMS_list):
5503 if not pert_orders:
5504 new_cms=cms_born[idata]/(lam**born_power)
5505 new_nwa=nwa_born[idata]/(lam**born_power)
5506 else:
5507 new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
5508 new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
5509 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5510 CMSData.append(new_cms)
5511 NWAData.append(new_nwa)
5512 DiffData.append(new_diff)
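# At this point, for each lambda value, the normalised quantities are:
#   tree level : CMS_i = cms_born_i / lambda^born_power,   NWA_i = nwa_born_i / lambda^born_power
#   loop level : CMS_i = (cms_finite_i + cms_born_i - nwa_born_i) / (lambda * nwa_born_i),
#                NWA_i = nwa_finite_i / (lambda * nwa_born_i)
# and the tested difference Diff_i = (CMS_i - NWA_i) / lambda^diff_lambda_power should tend
# to a constant as lambda goes to zero if the complex mass scheme is implemented consistently.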
5513
5514
5515
5516
5517
5518
5519 trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
5520 low_diff_median = sorted(DiffData[trim_range:-trim_range])\
5521 [(len(DiffData)-2*trim_range)//2]
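# The plateau estimate 'low_diff_median' is the median of the central ~60% of the difference
# values: trim_range drops the outer 20% of points on each side before taking the median.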
5522
5523
5524
5525
5526
5527
5528 current_median = 0
5529
5530 scan_index = 0
5531 reference = abs(sorted(NWAData)[len(NWAData)//2])
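# Scan for the onset of the plateau: starting from the first (largest) lambda values, slide a
# window of group_val consecutive difference values towards smaller lambda until the window
# median agrees with the plateau estimate within consideration_threshold (relative to
# 'reference'). Everything from that window onwards defines the data range used for the test.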
5532 if low_diff_median!= 0.0:
5533 if abs(reference/low_diff_median)<diff_zero_threshold:
5534 reference = abs(low_diff_median)
5535 while True:
5536 scanner = DiffData[scan_index:group_val+scan_index]
5537 current_median = sorted(scanner)[len(scanner)//2]
5538
5539
5540 if abs(current_median-low_diff_median)/reference<\
5541 consideration_threshold:
5542 break;
5543 scan_index += 1
5544 if (group_val+scan_index)>=len(DiffData):
5545
5546
logger.warning('The median scanning failed during the CMS check '+
    'for process %s. '%proc_title+\
    'This means that the difference plot has no stable '+\
    'intermediate region and MG5_aMC will arbitrarily consider the '+\
    'left half of the values.')
5552 scan_index = -1
5553 break;
5554
5555 if scan_index == -1:
5556 cms_check_data_range = len(DiffData)//2
5557 else:
5558 cms_check_data_range = scan_index + group_val
5559
5560 res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
5561 %(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
5562 len(lambdaCMS_list)-scan_index)
5563
5564 CMScheck_values = DiffData[cms_check_data_range:]
5565
5566
5567
5568
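# If a plateau was found, also inspect the small-lambda tail of the considered range for
# numerical instabilities: walk the scan window back to the last group of points still
# compatible with the plateau and, if the values beyond it scatter on both sides of the last
# stable point instead of drifting off monotonically to one side, issue the warning below.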
5569 if scan_index >= 0:
5570
5571 scan_index = len(CMScheck_values)
5572 used_group_val = max(3,group_val)
5573 unstability_found = True
5574 while True:
5575 scanner = CMScheck_values[scan_index-used_group_val:scan_index]
5576 maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
5577 if maxdiff/reference<consideration_threshold:
5578 break;
5579 if (scan_index-used_group_val)==0:
5580
5581
5582 unstability_found = False
5583 break;
5584
5585 scan_index -= 1
5586
5587
5588 if unstability_found:
5589 unstab_check=CMScheck_values[scan_index:]
5590 relative_array = [val > CMScheck_values[scan_index-1] for
5591 val in unstab_check]
5592 upper = relative_array.count(True)
5593 lower = relative_array.count(False)
5594 if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
5595 logger.warning(
"""For process %s, a numerically unstable region was detected starting from lambda < %.1e.
Look at the plot in this region (and possibly add more points there using the option --lambdaCMS).
If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or decrease the
minimum value of lambda considered in the CMS check."""\
%(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
5601
5602
5603
5604
5605 scan_index = 0
5606 max_diff = 0.0
5607 res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
5608 %('%.3g'%reference)
5609 res_str += "== Asymptotic difference value detected = %s\n"\
5610 %('%.3g'%low_diff_median)
5611 concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
5612
5613
5614 differences_target[(process,resID)]= low_diff_median
5615
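# Figure of merit of the check: slide a window of group_val points over the considered
# small-lambda range and record the largest relative deviation of the windowed median from the
# plateau value; this 'max_diff' is what gets compared against CMS_test_threshold below.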
5616 while True:
5617 current_vals = CMScheck_values[scan_index:scan_index+group_val]
5618 max_diff = max(max_diff, abs(low_diff_median-
5619 sorted(current_vals)[len(current_vals)//2])/reference)
5620 if (scan_index+group_val)>=len(CMScheck_values):
5621 break
5622 scan_index += 1
5623
5624
5625 cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
5626 CMS_test_threshold*100.0)
5627 res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
5628 concise_repl_dict[process]['cms_check'] = \
5629 "%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
5630
5631 if max_diff>CMS_test_threshold:
5632 failed_procs.append((process,resonance))
5633 res_str += "%s %s %s\n"%(bar('='),
5634 'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
5635 concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
5636 else 'Passed'
5637
5638 if output=='concise_text':
5639
5640 max_proc_size = max(
5641 [len(process) for process in result['ordered_processes']]+[10])
5642
5643 res_str = concise_str
5644 res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
5645 for process in result['ordered_processes']:
5646 res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
5647
5648 if len(checks):
5649 res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
5650 ('.\n' if not failed_procs else ', failed checks are for:\n')
5651 else:
return "\nNo CMS check to perform, the process either has no diagrams or does "+\
"not feature any massive s-channel resonance."
5654
5655 for process, resonance in failed_procs:
5656 res_str += "> %s, %s\n"%(process, resonance)
5657
5658 if output=='concise_text':
5659 res_str += '\nMore detailed information on this check available with the command:\n'
5660 res_str += ' MG5_aMC>display checks\n'
5661
5662
5663
5664
5665 if not options['show_plot']:
5666 if options['reuse']:
5667 logFile.write(res_str)
5668 logFile.close()
5669 if output.endswith('text'):
5670 return res_str
5671 else:
5672 return failed_procs
5673
5674 fig_output_file = CMS_save_path('pdf', result, model, options,
5675 output_path=output_path)
5676 base_fig_name = fig_output_file[:-4]
5677 suffix = 1
5678 while os.path.isfile(fig_output_file):
5679 fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
5680 suffix+=1
5681
5682 process_data_plot_dict={}
5683
5684
5685
5686 all_res = [(result, None)]
5687 for i, add_res in enumerate(options['analyze'].split(',')[1:]):
5688 specs =re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
5689 if specs:
5690 filename = specs.group('filename')
5691 title = specs.group('title')
5692 else:
5693 filename = add_res
5694 title = '#%d'%(i+1)
5695
5696 new_result = save_load_object.load_from_file(filename)
5697 if new_result is None:
5698 raise InvalidCmd('The complex mass scheme check result'+
5699 " file below could not be read.\n %s"%filename)
5700 if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
5701 or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
raise InvalidCmd('The complex mass scheme check result'+
5703 " file below does not seem compatible.\n %s"%filename)
5704 all_res.append((new_result,title))
5705
5706
5707 for process, resID in checks:
5708 data1=[]
5709 data2=[]
5710 info ={}
5711 for res in all_res:
5712 proc_res = res[0][process]
5713 cms_res = proc_res['CMS'][resID]
5714 nwa_res = proc_res['NWA'][resID]
5715 resonance = resonance_str(cms_res['resonance'])
5716 if options['resonances']!=1:
5717 info['title'] = format_title(process, resonance)
5718 else:
5719 info['title'] = format_title(process, '')
5720
5721 cms_born=cms_res['born']
5722 nwa_born=nwa_res['born']
5723 if len(cms_born) != len(lambdaCMS_list) or\
5724 len(nwa_born) != len(lambdaCMS_list):
5725 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5726 ' lambdaCMS values specified for process %s'%process)
5727 if pert_orders:
5728 cms_finite=cms_res['finite']
5729 nwa_finite=nwa_res['finite']
5730 if len(cms_finite) != len(lambdaCMS_list) or\
5731 len(nwa_finite) != len(lambdaCMS_list):
5732 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5733 ' lambdaCMS values specified for process %s'%process)
5734
5735 bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
5736 expected=proc_res['born_order'], proc=process, res=resonance)
5737
5738 CMSData = []
5739 NWAData = []
5740 DiffData = []
5741 for idata, lam in enumerate(lambdaCMS_list):
5742 if not pert_orders:
5743 new_cms = cms_born[idata]/lam**bpower
5744 new_nwa = nwa_born[idata]/lam**bpower
5745 else:
5746 new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
5747 new_nwa=nwa_finite[idata]
5748 new_cms /= lam*nwa_born[idata]
5749 new_nwa /= lam*nwa_born[idata]
5750 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5751 CMSData.append(new_cms)
5752 NWAData.append(new_nwa)
5753 DiffData.append(new_diff)
5754 if res[1] is None:
5755 if not pert_orders:
5756 data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
5757 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
5758 else:
5759 data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
5760 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
5761 data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
5762 %('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
5763 ,DiffData])
data2.append([r'Detected asymptote',[differences_target[(process,resID)]
5765 for i in range(len(lambdaCMS_list))]])
5766 else:
5767 data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' ').replace('#','\#'), CMSData])
5768 data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' ').replace('#','\#'), NWAData])
5769 data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' ').replace('#','\#'), DiffData])
5770
5771 process_data_plot_dict[(process,resID)]=(data1,data2, info)
5772
5773
5774 try:
5775 import matplotlib.pyplot as plt
5776 from matplotlib.backends.backend_pdf import PdfPages
5777 logger.info('Rendering plots... (this can take some time because of the latex labels)')
5778
5779 res_str += \
5780 """\n-----------------------------------------------------------------------------------------------
5781 | In the plots, the Complex Mass Scheme check is successful if the normalized difference |
5782 | between the CMS and NWA result (lower inset) tends to a constant when \lambda goes to zero. |
5783 -----------------------------------------------------------------------------------------------\n"""
5784
5785
5786 if lambda_range[1]>0:
5787 min_lambda_index = -1
5788 for i, lam in enumerate(lambdaCMS_list):
5789 if lam<=lambda_range[1]:
5790 min_lambda_index = i
5791 break
5792 else:
5793 min_lambda_index = 0
5794 if lambda_range[0]>0:
5795 max_lambda_index = -1
5796 for i, lam in enumerate(lambdaCMS_list):
5797 if lam<=lambda_range[0]:
5798 max_lambda_index=i-1
5799 break
5800 else:
5801 max_lambda_index=len(lambdaCMS_list)-1
5802
5803 if max_lambda_index==-1 or min_lambda_index==-1 or \
5804 min_lambda_index==max_lambda_index:
5805 raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
5806 (lambda_range[0],lambda_range[1]))
5807
5808 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5809 lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
5810
5811 plt.rc('text', usetex=True)
5812 plt.rc('font', family='serif')
5813 pp=PdfPages(fig_output_file)
5814 if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
5815 colorlist=['b','r','g','k','c','m','y']
5816 else:
import matplotlib.colors as colors
import matplotlib.cm as mplcm
5820
5821
5822 cm = plt.get_cmap('gist_rainbow')
5823 cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
5824 scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
5825
5826 colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
5827
5828
5829
5830
5831
5832
5833 legend_size = 10
5834 for iproc, (process, resID) in enumerate(checks):
5835 data1,data2, info=process_data_plot_dict[(process,resID)]
5836
5837 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5838 for i in range(len(data1)):
5839 data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
5840 for i in range(len(data2)):
5841 data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
5842 plt.figure(iproc+1)
5843 plt.subplot(211)
5844 minvalue=1e+99
5845 maxvalue=-1e+99
5846 for i, d1 in enumerate(data1):
5847
5848 color=colorlist[i//2]
5849 data_plot=d1[1]
5850 minvalue=min(min(data_plot),minvalue)
5851 maxvalue=max(max(data_plot),maxvalue)
5852 plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
5853 linestyle=('-' if i%2==0 else '--'),
5854 label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
5855 ymin = minvalue-(maxvalue-minvalue)/5.
5856 ymax = maxvalue+(maxvalue-minvalue)/5.
5857
5858 plt.yscale('linear')
5859 plt.xscale('log')
5860 plt.title(info['title'],fontsize=12,y=1.08)
5861 plt.ylabel(r'$\displaystyle \mathcal{M}$')
5862
5863 if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
5864 for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
5865 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5866 else:
5867 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5868
5869 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
5870
5871 plt.subplot(212)
5872 minvalue=1e+99
5873 maxvalue=-1e+99
5874
5875 try:
asymptot_index = [d2[0] for d2 in data2].index('Detected asymptote')
5877 plt.plot(lambdaCMS_list, data2[asymptot_index][1],
5878 color='0.75', marker='', linestyle='-', label='')
5879 except ValueError:
5880 pass
5881
5882 color_ID = -1
5883 for d2 in data2:
5884
if d2[0]=='Detected asymptote':
5886 continue
5887 color_ID += 1
5888 color=colorlist[color_ID]
5889 data_plot=d2[1]
5890 minvalue=min(min(data_plot),minvalue)
5891 maxvalue=max(max(data_plot),maxvalue)
5892 plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
5893 linestyle='-', label=d2[0])
5894 ymin = minvalue-(maxvalue-minvalue)/5.
5895 ymax = maxvalue+(maxvalue-minvalue)/5.
5896
5897 plt.yscale('linear')
5898 plt.xscale('log')
5899 plt.ylabel(r'$\displaystyle \Delta$')
5900 plt.xlabel(r'$\displaystyle \lambda$')
5901
5902
5903 sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
5904 left_stability = sum(abs(s[0]-s[-1]) for s in sd)
5905 sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
5906 right_stability = sum(abs(s[0]-s[-1]) for s in sd)
5907 left_stable = False if right_stability==0.0 else \
5908 (left_stability/right_stability)<0.1
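# Legend placement heuristic for the lower panel: compare the spread of each curve in the
# small-lambda half (left of the log-x axis) with the spread in the large-lambda half; if the
# left half is markedly flatter, anchor the legend on the left, otherwise on the right, and in
# both cases pick top or bottom depending on where the curves leave more empty space.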
5909
5910 if left_stable:
5911 if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
5912 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5913 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5914 else:
5915 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5916 else:
5917 if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
5918 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5919 plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
5920 else:
5921 plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
5922
5923 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
5924 minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
5925
5926 plt.savefig(pp,format='pdf')
5927
5928 pp.close()
5929
5930 if len(checks)>0:
5931 logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
5932
5933 if sys.platform.startswith('linux'):
5934 misc.call(["xdg-open", fig_output_file])
5935 elif sys.platform.startswith('darwin'):
5936 misc.call(["open", fig_output_file])
5937
5938 plt.close("all")
5939
5940 except Exception as e:
5941 if isinstance(e, ImportError):
5942 res_str += "\n= Install matplotlib to get a "+\
5943 "graphical display of the results of the cms check."
5944 else:
5945 general_error = "\n= Could not produce the cms check plot because of "+\
5946 "the following error: %s"%str(e)
5947 try:
5948 import six.moves.tkinter
5949 if isinstance(e, six.moves.tkinter.TclError):
5950 res_str += "\n= Plots are not generated because your system"+\
5951 " does not support graphical display."
5952 else:
5953 res_str += general_error
5954 except:
5955 res_str += general_error
5956
5957 if options['reuse']:
5958 logFile.write(res_str)
5959 logFile.close()
5960
5961 if output.endswith('text'):
5962 return res_str
5963 else:
5964 return failed_procs
5965