15 """Several different checks for processes (and hence models):
16 permutation tests, gauge invariance tests, Lorentz invariance
17 tests. Also a class for the evaluation of Python matrix elements,
18 MatrixElementEvaluator."""
19
20 from __future__ import division
21
22 import array
23 import copy
24 import fractions
25 import itertools
26 import logging
27 import math
28 import os
29 import sys
30 import re
31 import shutil
32 import random
33 import glob
35 import subprocess
36 import time
37 import datetime
38 import errno
39 import pickle
40
41
42
43 import aloha
44 import aloha.aloha_writers as aloha_writers
45 import aloha.create_aloha as create_aloha
46
47 import madgraph.iolibs.export_python as export_python
48 import madgraph.iolibs.helas_call_writers as helas_call_writers
49 import models.import_ufo as import_ufo
50 import madgraph.iolibs.save_load_object as save_load_object
51 import madgraph.iolibs.file_writers as writers
52
53 import madgraph.core.base_objects as base_objects
54 import madgraph.core.color_algebra as color
55 import madgraph.core.color_amp as color_amp
56 import madgraph.core.helas_objects as helas_objects
57 import madgraph.core.diagram_generation as diagram_generation
58
59 import madgraph.various.rambo as rambo
60 import madgraph.various.misc as misc
61 import madgraph.various.progressbar as pbar
62 import madgraph.various.banner as bannermod
64
65 import madgraph.loop.loop_diagram_generation as loop_diagram_generation
66 import madgraph.loop.loop_helas_objects as loop_helas_objects
67 import madgraph.loop.loop_base_objects as loop_base_objects
68 import models.check_param_card as check_param_card
69
70 from madgraph.interface.madevent_interface import MadLoopInitializer
71 from madgraph.interface.common_run_interface import AskforEditCard
72 from madgraph import MG5DIR, InvalidCmd, MadGraph5Error
73
74 from madgraph.iolibs.files import cp
75
76 import StringIO
77 import models.model_reader as model_reader
78 import aloha.template_files.wavefunctions as wavefunctions
79 from aloha.template_files.wavefunctions import \
80 ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx
81
82 ADDED_GLOBAL = []
83
84 temp_dir_prefix = "TMP_CHECK"
85
86 pjoin = os.path.join
88 def clean_added_globals(to_clean):
89 for value in list(to_clean):
90 del globals()[value]
91 to_clean.remove(value)
92
96 class FakeInterface(object):
97 """ Just an 'option container' to mimic the interface which is passed to the
98 tests. We include only what the tests currently use from the interface:
99 cmd.options['fortran_compiler']
100 cmd.options['complex_mass_scheme']
101 cmd._mgme_dir"""
102 def __init__(self, mgme_dir = "", complex_mass_scheme = False,
103 fortran_compiler = 'gfortran'):
104 self._mgme_dir = mgme_dir
105 self.options = {}
106 self.options['complex_mass_scheme']=complex_mass_scheme
107 self.options['fortran_compiler']=fortran_compiler
108
109
110
111
112
113 logger = logging.getLogger('madgraph.various.process_checks')
117 def boost_momenta(p, boost_direction=3, beta=0.5):
118 """boost the given set of momenta 'p' along the 'boost_direction' axis by
119 the 'beta' factor"""
120
121 boost_p = []
122 gamma = 1/ math.sqrt(1 - beta**2)
123 for imp in p:
124 boost_comp = imp[boost_direction]
125 E, px, py, pz = imp
126 boost_imp = []
127
128 boost_imp.append(gamma * E - gamma * beta * boost_comp)
129
130 if boost_direction == 1:
131 boost_imp.append(-gamma * beta * E + gamma * px)
132 else:
133 boost_imp.append(px)
134
135 if boost_direction == 2:
136 boost_imp.append(-gamma * beta * E + gamma * py)
137 else:
138 boost_imp.append(py)
139
140 if boost_direction == 3:
141 boost_imp.append(-gamma * beta * E + gamma * pz)
142 else:
143 boost_imp.append(pz)
144
145 boost_p.append(boost_imp)
146
147 return boost_p
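# Illustrative sketch (not part of the original code; assumes the [E, px, py, pz]
# convention used above): for a boost along z, E' = gamma*(E - beta*pz) and
# pz' = gamma*(pz - beta*E), so choosing beta = pz/E brings the particle to rest
# along z:
#   boost_momenta([[100., 0., 0., 50.]], 3, 0.5)
#   # -> [[86.602..., 0.0, 0.0, 0.0]]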
148
152 class MatrixElementEvaluator(object):
153 """Class taking care of matrix element evaluation, storing
154 relevant quantities for speedup."""
155
156 def __init__(self, model, param_card = None,
157 auth_skipping = False, reuse = True, cmd = FakeInterface()):
158 """Initialize object with stored_quantities, helas_writer,
159 model, etc.
160 auth_skipping = True means that any identical matrix element will be
161 evaluated only once
162 reuse = True means that the matrix element corresponding to a
163 given process can be reused (turn off if you are using
164 different models for the same process)"""
165
166 self.cmd = cmd
167
168
169 self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)
170
171
172 self.full_model = model_reader.ModelReader(model)
173 try:
174 self.full_model.set_parameters_and_couplings(param_card)
175 except MadGraph5Error:
176 if isinstance(param_card, (str,file)):
177 raise
178 logger.warning('The param_card present in the event file is not compatible.'+
179 ' We will use the default one.')
180 self.full_model.set_parameters_and_couplings()
181
182 self.auth_skipping = auth_skipping
183 self.reuse = reuse
184 self.cmass_scheme = cmd.options['complex_mass_scheme']
185 self.store_aloha = []
186 self.stored_quantities = {}
187
188
189
190
191 def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
192 gauge_check=False, auth_skipping=None, output='m2',
193 options=None):
194 """Calculate the matrix element and evaluate it for a phase space point.
195 output is either 'm2', 'amp' or 'jamp'
196 """
197
198 if full_model:
199 self.full_model = full_model
200 process = matrix_element.get('processes')[0]
201 model = process.get('model')
202
203 if "matrix_elements" not in self.stored_quantities:
204 self.stored_quantities['matrix_elements'] = []
205 matrix_methods = {}
206
207 if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
208
209 matrix = eval("Matrix_%s()" % process.shell_string())
210 me_value = matrix.smatrix(p, self.full_model)
211 if output == "m2":
212 return matrix.smatrix(p, self.full_model), matrix.amp2
213 else:
214 m2 = matrix.smatrix(p, self.full_model)
215 return {'m2': m2, output:getattr(matrix, output)}
216 if (auth_skipping or self.auth_skipping) and matrix_element in \
217 self.stored_quantities['matrix_elements']:
218
219 logger.info("Skipping %s, " % process.nice_string() + \
220 "identical matrix element already tested" \
221 )
222 return None
223
224 self.stored_quantities['matrix_elements'].append(matrix_element)
225
226
227
228
229 if "list_colorize" not in self.stored_quantities:
230 self.stored_quantities["list_colorize"] = []
231 if "list_color_basis" not in self.stored_quantities:
232 self.stored_quantities["list_color_basis"] = []
233 if "list_color_matrices" not in self.stored_quantities:
234 self.stored_quantities["list_color_matrices"] = []
235
236 col_basis = color_amp.ColorBasis()
237 new_amp = matrix_element.get_base_amplitude()
238 matrix_element.set('base_amplitude', new_amp)
239 colorize_obj = col_basis.create_color_dict_list(new_amp)
240
241 try:
242
243
244
245 col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
246 except ValueError:
247
248
249 self.stored_quantities['list_colorize'].append(colorize_obj)
250 col_basis.build()
251 self.stored_quantities['list_color_basis'].append(col_basis)
252 col_matrix = color_amp.ColorMatrix(col_basis)
253 self.stored_quantities['list_color_matrices'].append(col_matrix)
254 col_index = -1
255
256
257 matrix_element.set('color_basis',
258 self.stored_quantities['list_color_basis'][col_index])
259 matrix_element.set('color_matrix',
260 self.stored_quantities['list_color_matrices'][col_index])
261
262
263 if "used_lorentz" not in self.stored_quantities:
264 self.stored_quantities["used_lorentz"] = []
265
266 me_used_lorentz = set(matrix_element.get_used_lorentz())
267 me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
268 if lorentz not in self.store_aloha]
269
270 aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
271 aloha_model.add_Lorentz_object(model.get('lorentz'))
272 aloha_model.compute_subset(me_used_lorentz)
273
274
275 aloha_routines = []
276 for routine in aloha_model.values():
277 aloha_routines.append(routine.write(output_dir = None,
278 mode='mg5',
279 language = 'Python'))
280 for routine in aloha_model.external_routines:
281 aloha_routines.append(
282 open(aloha_model.locate_external(routine, 'Python')).read())
283
284
285 previous_globals = list(globals().keys())
286 for routine in aloha_routines:
287 exec(routine, globals())
288 for key in globals().keys():
289 if key not in previous_globals:
290 ADDED_GLOBAL.append(key)
291
292
293 self.store_aloha.extend(me_used_lorentz)
294
295 exporter = export_python.ProcessExporterPython(matrix_element,
296 self.helas_writer)
297 try:
298 matrix_methods = exporter.get_python_matrix_methods(\
299 gauge_check=gauge_check)
300
301 except helas_call_writers.HelasWriterError, error:
302 logger.info(error)
303 return None
304
305
306
307 if self.reuse:
308
309 exec(matrix_methods[process.shell_string()], globals())
310 ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
311 else:
312
313 exec(matrix_methods[process.shell_string()])
314
315 if not p:
316 p, w_rambo = self.get_momenta(process, options)
317
318 exec("data = Matrix_%s()" % process.shell_string())
319 if output == "m2":
320 return data.smatrix(p, self.full_model), data.amp2
321 else:
322 m2 = data.smatrix(p,self.full_model)
323 return {'m2': m2, output:getattr(data, output)}
324
325 @staticmethod
326 def pass_isolation_cuts(pmoms, ptcut=50.0, drcut=0.5):
327 """ Check whether the specified kinematic point passes isolation cuts
328 """
329
330 def Pt(pmom):
331 """ Computes the pt of a 4-momentum"""
332 return math.sqrt(pmom[1]**2+pmom[2]**2)
333
334 def DeltaR(p1,p2):
335 """ Computes the DeltaR between two 4-momenta"""
336
337 p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
338 p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
339 eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
340 eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
341
342 phi1=math.atan2(p1[2],p1[1])
343 phi2=math.atan2(p2[2],p2[1])
344 dphi=abs(phi2-phi1)
345
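# fold dphi into the range [0, pi]; the line below is equivalent to
# min(dphi, 2*pi - dphi)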
346 dphi=abs(abs(dphi-math.pi)-math.pi)
347
348 return math.sqrt(dphi**2+(eta2-eta1)**2)
349
350 for i, pmom in enumerate(pmoms[2:]):
351
352 if Pt(pmom)<ptcut:
353 return False
354
355 for pmom2 in pmoms[3+i:]:
356 if DeltaR(pmom,pmom2)<drcut:
357 return False
358 return True
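# Illustrative sketch (hypothetical numbers): the first two entries of 'pmoms'
# are the initial-state momenta and are never cut; every final-state momentum
# must pass the pt cut and be separated by at least drcut from the later ones:
#   MatrixElementEvaluator.pass_isolation_cuts(
#       [[500., 0., 0., 500.], [500., 0., 0., -500.],
#        [500., 100., 0., 489.9], [500., -100., 0., -489.9]], 50.0, 0.5)
#   # -> True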
359
360
361
362
363 def get_momenta(self, process, options=None, special_mass=None):
364 """Get a point in phase space for the external states in the given
365 process, with the CM energy given. The incoming particles are
366 assumed to be oriented along the z axis, with particle 1 along the
367 positive z axis.
368 For the CMS check, one must be able to choose the mass of the special
369 resonance particle with id = -1, and the special_mass option allows
370 one to specify it."""
371
372 if not options:
373 energy=1000
374 events=None
375 else:
376 energy = options['energy']
377 events = options['events']
378 to_skip = 0
379
380 if not (isinstance(process, base_objects.Process) and \
381 isinstance(energy, (float,int))):
382 raise rambo.RAMBOError, "Incorrect argument types passed to get_momenta"
383
384
385 sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
386 l1.get('number') - l2.get('number'))
387
388
389 if events:
390 ids = [l.get('id') for l in sorted_legs]
391 import MadSpin.decay as madspin
392 if not hasattr(self, 'event_file'):
393 fsock = open(events)
394 self.event_file = madspin.Event(fsock)
395
396 skip = 0
397 while self.event_file.get_next_event() != 'no_event':
398 event = self.event_file.particle
399
400 event_ids = [p['pid'] for p in event.values()]
401 if event_ids == ids:
402 skip += 1
403 if skip > to_skip:
404 break
405 else:
406 raise MadGraph5Error, 'No compatible events for %s' % ids
407 p = []
408 for part in event.values():
409 m = part['momentum']
410 p.append([m.E, m.px, m.py, m.pz])
411 return p, 1
412
413 nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
414 nfinal = len(sorted_legs) - nincoming
415
416
417 mass = []
418 for l in sorted_legs:
419 if l.get('id') != 0:
420 mass_string = self.full_model.get_particle(l.get('id')).get('mass')
421 mass.append(self.full_model.get('parameter_dict')[mass_string].real)
422 else:
423 if isinstance(special_mass, float):
424 mass.append(special_mass)
425 else:
426 raise Exception, "A 'special_mass' option must be specified"+\
427 " in get_momenta when a leg with id=-10 is present (for CMS check)"
428
429
430
431
432
433
434
435
436
437 energy = max(energy, sum(mass[:nincoming])*1.2,sum(mass[nincoming:])*1.2)
438
439
440
441
442
443
444
445 if nfinal == 1:
446 p = []
447 energy = mass[-1]
448 p.append([energy/2,0,0,energy/2])
449 p.append([energy/2,0,0,-energy/2])
450 p.append([mass[-1],0,0,0])
451 return p, 1.0
452
453 e2 = energy**2
454 m1 = mass[0]
455 p = []
456
457 masses = rambo.FortranList(nfinal)
458 for i in range(nfinal):
459 masses[i+1] = mass[nincoming + i]
460
461 if nincoming == 1:
462
463 p.append([abs(m1), 0., 0., 0.])
464 p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)
465
466 for i in range(1, nfinal+1):
467 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
468 p_rambo[(2,i)], p_rambo[(3,i)]]
469 p.append(momi)
470
471 return p, w_rambo
472
473 if nincoming != 2:
474 raise rambo.RAMBOError('Need 1 or 2 incoming particles')
475
476 if nfinal == 1:
477 energy = masses[1]
478 if masses[1] == 0.0:
479 raise rambo.RAMBOError('The 2 > 1 kinematics with a massless'+\
480 ' final-state particle is invalid')
481
482 e2 = energy**2
483 m2 = mass[1]
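# Note: the beam momentum computed below is |p| = sqrt(lambda(s, m1^2, m2^2))/(2*sqrt(s)),
# with lambda(x,y,z) = x**2 + y**2 + z**2 - 2*x*y - 2*y*z - 2*z*x and s = e2;
# for massless incoming particles it reduces to |p| = energy/2.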
484
485 mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
486 2*m1**2*m2**2 + m2**4) / (4*e2))
487 e1 = math.sqrt(mom**2+m1**2)
488 e2 = math.sqrt(mom**2+m2**2)
489
490 p.append([e1, 0., 0., mom])
491 p.append([e2, 0., 0., -mom])
492
493 if nfinal == 1:
494 p.append([energy, 0., 0., 0.])
495 return p, 1.
496
497 p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)
498
499
500 for i in range(1, nfinal+1):
501 momi = [p_rambo[(4,i)], p_rambo[(1,i)],
502 p_rambo[(2,i)], p_rambo[(3,i)]]
503 p.append(momi)
504
505 return p, w_rambo
506
511 class LoopMatrixElementEvaluator(MatrixElementEvaluator):
512 """Class taking care of matrix element evaluation for loop processes."""
513
514 def __init__(self, cuttools_dir=None, output_path=None, tir_dir={},
515 cmd=FakeInterface(),*args,**kwargs):
516 """Allow for initializing the MG5 root where the temporary fortran
517 output for checks is placed."""
518
519 super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)
520
521 self.mg_root=self.cmd._mgme_dir
522
523 if output_path is None:
524 self.output_path = self.cmd._mgme_dir
525 else:
526 self.output_path = output_path
527
528 self.cuttools_dir=cuttools_dir
529 self.tir_dir=tir_dir
530 self.loop_optimized_output = cmd.options['loop_optimized_output']
531
532
533 self.proliferate=True
534
535
536
537
538 def evaluate_matrix_element(self, matrix_element, p=None, options=None,
539 gauge_check=False, auth_skipping=None, output='m2',
540 PS_name = None, MLOptions={}):
541 """Calculate the matrix element and evaluate it for a phase space point.
542 Output can only be 'm2'. The 'jamp' and 'amp' returned values are just
543 empty lists at this point.
544 If PS_name is not None the written out PS.input will be saved in
545 the file PS.input_<PS_name> as well."""
546
547 process = matrix_element.get('processes')[0]
548 model = process.get('model')
549
550 if options and 'split_orders' in options.keys():
551 split_orders = options['split_orders']
552 else:
553 split_orders = -1
554
555 if "loop_matrix_elements" not in self.stored_quantities:
556 self.stored_quantities['loop_matrix_elements'] = []
557
558 if (auth_skipping or self.auth_skipping) and matrix_element in \
559 [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
560
561 logger.info("Skipping %s, " % process.nice_string() + \
562 "identical matrix element already tested" )
563 return None
564
565
566 if not p:
567 p, w_rambo = self.get_momenta(process, options=options)
568
569 if matrix_element in [el[0] for el in \
570 self.stored_quantities['loop_matrix_elements']]:
571 export_dir=self.stored_quantities['loop_matrix_elements'][\
572 [el[0] for el in self.stored_quantities['loop_matrix_elements']\
573 ].index(matrix_element)][1]
574 logger.debug("Reusing generated output %s"%str(export_dir))
575 else:
576 export_dir=pjoin(self.output_path,temp_dir_prefix)
577 if os.path.isdir(export_dir):
578 if not self.proliferate:
579 raise InvalidCmd("The directory %s already exists. Please remove it."%str(export_dir))
580 else:
581 id=1
582 while os.path.isdir(pjoin(self.output_path,\
583 '%s_%i'%(temp_dir_prefix,id))):
584 id+=1
585 export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))
586
587 if self.proliferate:
588 self.stored_quantities['loop_matrix_elements'].append(\
589 (matrix_element,export_dir))
590
591
592
593 import madgraph.loop.loop_exporters as loop_exporters
594 if self.loop_optimized_output:
595 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
596 else:
597 exporter_class=loop_exporters.LoopProcessExporterFortranSA
598
599 MLoptions = {'clean': True,
600 'complex_mass': self.cmass_scheme,
601 'export_format':'madloop',
602 'mp':True,
603 'SubProc_prefix':'P',
604 'compute_color_flows': not process.get('has_born'),
605 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
606 'cuttools_dir': self.cuttools_dir,
607 'fortran_compiler': self.cmd.options['fortran_compiler'],
608 'output_dependencies': self.cmd.options['output_dependencies']}
609
610 MLoptions.update(self.tir_dir)
611
612 FortranExporter = exporter_class(\
613 self.mg_root, export_dir, MLoptions)
614 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
615 FortranExporter.copy_v4template(modelname=model.get('name'))
616 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
617 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
618 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
619 for c in l]))
620 FortranExporter.convert_model_to_mg4(model,wanted_lorentz,wanted_couplings)
621 FortranExporter.finalize_v4_directory(None,"",False,False,compiler=
622 {'fortran':self.cmd.options['fortran_compiler'],
623 'f2py':self.cmd.options['fortran_compiler'],
624 'cpp':self.cmd.options['fortran_compiler']})
625
626 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
627 split_orders=split_orders)
628
629 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
630 mp = gauge_check and self.loop_optimized_output, MLOptions=MLOptions)
631
632 if gauge_check:
633 file_path, orig_file_content, new_file_content = \
634 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
635 ['helas_calls_ampb_1.f','loop_matrix.f'])
636 file = open(file_path,'w')
637 file.write(new_file_content)
638 file.close()
639 if self.loop_optimized_output:
640 mp_file_path, mp_orig_file_content, mp_new_file_content = \
641 self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
642 ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
643 mp_file = open(mp_file_path,'w')
644 mp_file.write(mp_new_file_content)
645 mp_file.close()
646
647
648 finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
649 export_dir, p, PS_name = PS_name, verbose=False)[0][0]
650
651
652 if gauge_check:
653 file = open(file_path,'w')
654 file.write(orig_file_content)
655 file.close()
656 if self.loop_optimized_output:
657 mp_file = open(mp_file_path,'w')
658 mp_file.write(mp_orig_file_content)
659 mp_file.close()
660
661
662 if not self.proliferate:
663 shutil.rmtree(export_dir)
664
665 if output == "m2":
666
667
668 return finite_m2, []
669 else:
670 return {'m2': finite_m2, output:[]}
671
672 def fix_MadLoopParamCard(self, dir_name, mp=False, loop_filter=False,
673 DoubleCheckHelicityFilter=False, MLOptions={}):
674 """ Set parameters in MadLoopParams.dat suited for these checks. MP
675 stands for multiple precision and can either be a bool or an integer
676 to specify the mode."""
677
678
679 file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
680 MLCard = bannermod.MadLoopParam(file)
681
682 if isinstance(mp,bool):
683 mode = 4 if mp else 1
684 else:
685 mode = mp
686
687 for key, value in MLOptions.items():
688 if key == "MLReductionLib":
689 if isinstance(value, int):
690 ml_reds = str(value)
691 if isinstance(value,list):
692 if len(value)==0:
693 ml_reds = '1'
694 else:
695 ml_reds="|".join([str(vl) for vl in value])
696 elif isinstance(value, str):
697 ml_reds = value
698 elif isinstance(value, int):
699 ml_reds = str(value)
700 else:
701 raise MadGraph5Error, 'The argument %s '%str(value)+\
702 ' in fix_MadLoopParamCard must be a string, integer'+\
703 ' or a list.'
704 MLCard.set("MLReductionLib",ml_reds)
705 elif key == 'ImprovePS':
706 MLCard.set('ImprovePSPoint',2 if value else -1)
707 elif key == 'ForceMP':
708 mode = 4
709 elif key in MLCard:
710 MLCard.set(key,value)
711 else:
712 raise Exception, 'The MadLoop option %s specified in function'%key+\
713 ' fix_MadLoopParamCard does not correspond to an option defined'+\
714 ' in MadLoop, nor is it specially handled in this function.'
715 if not mode is None:
716 MLCard.set('CTModeRun',mode)
717 MLCard.set('CTModeInit',mode)
718 MLCard.set('UseLoopFilter',loop_filter)
719 MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
720
721 MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))
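# Illustrative sketch (hypothetical values; 'evaluator' stands for a
# LoopMatrixElementEvaluator instance): a typical MLOptions dictionary handled
# by the loop above could look like
#   MLOptions = {'MLReductionLib': [6, 1],  # e.g. Ninja first, then CutTools
#                'ImprovePS': True}
#   evaluator.fix_MadLoopParamCard(pjoin(export_dir, 'Cards'), mp=False,
#                                  MLOptions=MLOptions)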
722
723 @classmethod
724 def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], PS_name = None,
725 verbose=True, format='tuple', skip_compilation=False):
726 """Compile and run ./check, then parse the output and return the result
727 for process with id = proc_id and PSpoint if specified.
728 If PS_name is not None the written out PS.input will be saved in
729 the file PS.input_<PS_name> as well."""
730 if verbose:
731 sys.stdout.write('.')
732 sys.stdout.flush()
733
734 shell_name = None
735 directories = glob.glob(pjoin(working_dir, 'SubProcesses',
736 'P%i_*' % proc_id))
737 if directories and os.path.isdir(directories[0]):
738 shell_name = os.path.basename(directories[0])
739
740
741 if not shell_name:
742 logging.info("Directory hasn't been created for process %s" %proc)
743 return ((0.0, 0.0, 0.0, 0.0, 0), [])
744
745 if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))
746
747 dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
748 if not skip_compilation:
749
750 if os.path.isfile(pjoin(dir_name,'check')):
751 os.remove(pjoin(dir_name,'check'))
752 try:
753 os.remove(pjoin(dir_name,'check_sa.o'))
754 os.remove(pjoin(dir_name,'loop_matrix.o'))
755 except OSError:
756 pass
757
758 devnull = open(os.devnull, 'w')
759 retcode = subprocess.call(['make','check'],
760 cwd=dir_name, stdout=devnull, stderr=devnull)
761 devnull.close()
762
763 if retcode != 0:
764 logging.info("Error while executing make in %s" % shell_name)
765 return ((0.0, 0.0, 0.0, 0.0, 0), [])
766
767
768 if PSpoint:
769 misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
770
771
772 if not PS_name is None:
773 misc.write_PS_input(pjoin(dir_name, \
774 'PS.input_%s'%PS_name),PSpoint)
775
776 try:
777 output = subprocess.Popen('./check',
778 cwd=dir_name,
779 stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
780 output.read()
781 output.close()
782 if os.path.exists(pjoin(dir_name,'result.dat')):
783 return cls.parse_check_output(file(pjoin(dir_name,\
784 'result.dat')),format=format)
785 else:
786 logging.warning("Error while looking for file %s"%str(os.path\
787 .join(dir_name,'result.dat')))
788 return ((0.0, 0.0, 0.0, 0.0, 0), [])
789 except IOError:
790 logging.warning("Error while executing ./check in %s" % shell_name)
791 return ((0.0, 0.0, 0.0, 0.0, 0), [])
792
793 @classmethod
794 def parse_check_output(cls, output, format='tuple'):
795 """Parse the output string and return a pair whose first element is a tuple
796 with the finite, born, single and double pole of the ME and, as fifth entry,
797 the GeV exponent, and whose second element is a list of 4-momenta for all
798 particles involved. Return the answer in two possible formats, 'tuple' or 'dict'."""
799
800 res_dict = {'res_p':[],
801 'born':0.0,
802 'finite':0.0,
803 '1eps':0.0,
804 '2eps':0.0,
805 'gev_pow':0,
806 'export_format':'Default',
807 'accuracy':0.0,
808 'return_code':0,
809 'Split_Orders_Names':[],
810 'Loop_SO_Results':[],
811 'Born_SO_Results':[],
812 'Born_kept':[],
813 'Loop_kept':[]
814 }
815 res_p = []
816
817
818
819 if isinstance(output,file) or isinstance(output,list):
820 text=output
821 elif isinstance(output,str):
822 text=output.split('\n')
823 else:
824 raise MadGraph5Error, 'Type for argument output not supported in'+\
825 ' parse_check_output.'
826 for line in text:
827 splitline=line.split()
828 if len(splitline)==0:
829 continue
830 elif splitline[0]=='PS':
831 res_p.append([float(s) for s in splitline[1:]])
832 elif splitline[0]=='ASO2PI':
833 res_dict['alphaS_over_2pi']=float(splitline[1])
834 elif splitline[0]=='BORN':
835 res_dict['born']=float(splitline[1])
836 elif splitline[0]=='FIN':
837 res_dict['finite']=float(splitline[1])
838 elif splitline[0]=='1EPS':
839 res_dict['1eps']=float(splitline[1])
840 elif splitline[0]=='2EPS':
841 res_dict['2eps']=float(splitline[1])
842 elif splitline[0]=='EXP':
843 res_dict['gev_pow']=int(splitline[1])
844 elif splitline[0]=='Export_Format':
845 res_dict['export_format']=splitline[1]
846 elif splitline[0]=='ACC':
847 res_dict['accuracy']=float(splitline[1])
848 elif splitline[0]=='RETCODE':
849 res_dict['return_code']=int(splitline[1])
850 elif splitline[0]=='Split_Orders_Names':
851 res_dict['Split_Orders_Names']=splitline[1:]
852 elif splitline[0] in ['Born_kept', 'Loop_kept']:
853 res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
854 elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
855
856
857
858
859 res_dict[splitline[0]].append(\
860 ([int(el) for el in splitline[1:]],{}))
861 elif splitline[0]=='SO_Loop':
862 res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
863 float(splitline[2])
864 elif splitline[0]=='SO_Born':
865 res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
866 float(splitline[2])
867
868 res_dict['res_p'] = res_p
869
870 if format=='tuple':
871 return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
872 res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
873 else:
874 return res_dict
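# Illustrative sketch (schematic values): parse_check_output expects the lines
# of a MadLoop result file made of whitespace-separated tokens, e.g.
#   PS   5.000E+02 0.0 0.0 5.000E+02
#   BORN 1.234E-02
#   FIN  6.789E-03
#   1EPS 0.000E+00
#   2EPS 0.000E+00
#   EXP  -4
# With format='tuple' it returns ((finite, born, 1eps, 2eps, gev_pow), momenta).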
875
876 @staticmethod
877 def apply_log_tweak(proc_path, mode):
878 """ Changes the file model_functions.f in the SOURCE of the process output
879 so as to change how logarithms are analytically continued and see how
880 it impacts the CMS check."""
881 valid_modes = ['default','recompile']
882 if not (mode in valid_modes or (isinstance(mode, list) and
883 len(mode)==2 and all(m in ['logp','logm','log'] for m in mode))):
884 raise MadGraph5Error("Mode '%s' not recognized"%mode+
885 " in function apply_log_tweak.")
886
887 model_path = pjoin(proc_path,'Source','MODEL')
888 directories = glob.glob(pjoin(proc_path,'SubProcesses','P0_*'))
889 if directories and os.path.isdir(directories[0]):
890 exe_path = directories[0]
891 else:
892 raise MadGraph5Error, 'Could not find a process executable '+\
893 'directory in %s'%proc_path
894 bu_path = pjoin(model_path, 'model_functions.f__backUp__')
895
896 if mode=='default':
897
898 if not os.path.isfile(bu_path):
899 raise MadGraph5Error, 'Back up file %s could not be found.'%bu_path
900 shutil.move(bu_path, pjoin(model_path, 'model_functions.f'))
901 return
902
903 if mode=='recompile':
904 try:
905 os.remove(pjoin(model_path,'model_functions.o'))
906 os.remove(pjoin(proc_path,'lib','libmodel.a'))
907 except:
908 pass
909 misc.compile(cwd=model_path)
910
911 try:
912 os.remove(pjoin(exe_path,'check'))
913 except:
914 pass
915 misc.compile(arg=['check'], cwd=exe_path)
916 return
917
918 if mode[0]==mode[1]:
919 return
920
921
922 mp_prefix = 'MP_'
923 target_line = 'FUNCTION %%sREG%s(ARG)'%mode[0].lower()
924
925
926 if not os.path.isfile(bu_path):
927 shutil.copy(pjoin(model_path, 'model_functions.f'), bu_path)
928 model_functions = open(pjoin(model_path,'model_functions.f'),'r')
929
930 new_model_functions = []
931 has_replaced = False
932 just_replaced = False
933 find_one_replacement= False
934 mp_mode = None
935 suffix = {'log':'','logp':r'\s*\+\s*TWOPII','logm':r'\s*\-\s*TWOPII'}
936 replace_regex=r'^\s*%%sREG%s\s*=\s*LOG\(ARG\)%s'%(mode[0],suffix[mode[0]])
937 for line in model_functions:
938
939 if just_replaced:
940 if not re.match(r'\s{6}', line):
941 continue
942 else:
943 just_replaced = False
944 if mp_mode is None:
945
946 new_model_functions.append(line)
947 if (target_line%mp_prefix).lower() in line.lower():
948 mp_mode = mp_prefix
949 elif (target_line%'').lower() in line.lower():
950 mp_mode = ''
951 else:
952
953 if not has_replaced and re.match(replace_regex%mp_mode,line,
954 re.IGNORECASE):
955
956 if mode[0]=='log':
957 if mp_mode=='':
958 new_line =\
959 """ if(dble(arg).lt.0.0d0.and.dimag(arg).gt.0.0d0)then
960 reg%s=log(arg) %s TWOPII
961 else
962 reg%s=log(arg)
963 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
964 else:
965 new_line =\
966 """ if(real(arg,kind=16).lt.0.0e0_16.and.imagpart(arg).lt.0.0e0_16)then
967 mp_reg%s=log(arg) %s TWOPII
968 else
969 mp_reg%s=log(arg)
970 endif\n"""%(mode[0],'+' if mode[1]=='logp' else '-',mode[0])
971 else:
972 new_line = ' '*6+"%sreg%s=log(arg) %s\n"%(mp_mode,mode[0],
973 ('' if mode[1]=='log' else ('+TWOPII' if mode[1]=='logp' else '-TWOPII')))
974 new_model_functions.append(new_line)
975 just_replaced = True
976 has_replaced = True
977 find_one_replacement = True
978 else:
979 new_model_functions.append(line)
980 if re.match(r'^\s*END\s*$',line,re.IGNORECASE):
981 mp_mode = None
982 has_replaced = False
983
984 if not find_one_replacement:
985 logger.warning('No replacement was found/performed for token '+
986 "'%s->%s'."%(mode[0],mode[1]))
987 else:
988 open(pjoin(model_path,'model_functions.f'),'w').\
989 write(''.join(new_model_functions))
990 return
991
992 def setup_ward_check(self, working_dir, file_names, mp=False):
993 """ Modify loop_matrix.f so as to have one external massless gauge boson
994 polarization vector turned into its momentum. It is not a pretty or
995 flexible solution, but it works for this particular case."""
996
997 shell_name = None
998 directories = glob.glob(pjoin(working_dir,'P0_*'))
999 if directories and os.path.isdir(directories[0]):
1000 shell_name = os.path.basename(directories[0])
1001
1002 dir_name = pjoin(working_dir, shell_name)
1003
1004
1005 ind=0
1006 while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
1007 file_names[ind])):
1008 ind += 1
1009 if ind==len(file_names):
1010 raise Exception, "No helas calls output file found."
1011
1012 helas_file_name=pjoin(dir_name,file_names[ind])
1013 file = open(pjoin(dir_name,helas_file_name), 'r')
1014
1015 helas_calls_out=""
1016 original_file=""
1017 gaugeVectorRegExp=re.compile(\
1018 r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
1019 r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
1020 foundGauge=False
1021
1022 for line in file:
1023 helas_calls_out+=line
1024 original_file+=line
1025 if line.find("INCLUDE 'coupl.inc'") != -1 or \
1026 line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
1027 helas_calls_out+=" INTEGER WARDINT\n"
1028 if not foundGauge:
1029 res=gaugeVectorRegExp.search(line)
1030 if res!=None:
1031 foundGauge=True
1032 helas_calls_out+=" DO WARDINT=1,4\n"
1033 helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
1034 if not mp:
1035 helas_calls_out+=\
1036 "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
1037 else:
1038 helas_calls_out+="CMPLX(P(WARDINT-1,"+\
1039 res.group('p_id')+"),0.0E0_16,KIND=16)\n"
1040 helas_calls_out+=" ENDDO\n"
1041 file.close()
1042
1043 return pjoin(dir_name,helas_file_name), original_file, helas_calls_out
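# The Fortran block built above overwrites the polarization entries
# W(5:8, wf_id) of one external massless vector with its own momentum,
#   W(WARDINT+4, wf_id) = P(WARDINT-1, p_id)   for WARDINT = 1..4,
# which is the substitution used by the Ward-identity (gauge) check: a
# gauge-invariant amplitude should then vanish up to numerical accuracy.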
1044
1048 class LoopMatrixElementTimer(LoopMatrixElementEvaluator):
1049 """Class taking care of matrix element evaluation and running timing for
1050 loop processes."""
1051
1055
1056 @classmethod
1057 def get_MadLoop_Params(cls, MLCardPath):
1058 """ Return a dictionary of the parameters of the MadLoopParamCard.
1059 The key is the name of the parameter and the value is the corresponding
1060 string read from the card."""
1061
1062 return bannermod.MadLoopParam(MLCardPath)
1063
1064
1065 @classmethod
1066 def set_MadLoop_Params(cls, MLCardPath, params):
1067 """ Set the parameters in MadLoopParamCard to the values specified in
1068 the dictionary params.
1069 The key is the name of the parameter and the value is the corresponding
1070 string to write in the card."""
1071
1072 MLcard = bannermod.MadLoopParam(MLCardPath)
1073 for key,value in params.items():
1074 MLcard.set(key, value, ifnotdefault=False)
1075
1076 MLcard.write(MLCardPath, commentdefault=True)
1077
1078 def skip_loop_evaluation_setup(self, dir_name, skip=True):
1079 """ Edit loop_matrix.f in order to skip the loop evaluation phase.
1080 Notice this only affects the double precision evaluation which is
1081 normally fine as we do not make the timing check on mp."""
1082
1083 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1084 loop_matrix = file.read()
1085 file.close()
1086
1087 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1088 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.'
1089 if skip else '.FALSE.'), loop_matrix)
1090 file.write(loop_matrix)
1091 file.close()
1092
1093 def boot_time_setup(self, dir_name, bootandstop=True):
1094 """ Edit loop_matrix.f in order to set the flag which stops the
1095 execution after booting the program (i.e. reading the color data)."""
1096
1097 file = open(pjoin(dir_name,'loop_matrix.f'), 'r')
1098 loop_matrix = file.read()
1099 file.close()
1100
1101 file = open(pjoin(dir_name,'loop_matrix.f'), 'w')
1102 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.'
1103 if bootandstop else '.FALSE.'), loop_matrix)
1104 file.write(loop_matrix)
1105 file.close()
1106
1107 def setup_process(self, matrix_element, export_dir, reusing = False,
1108 param_card = None, MLOptions={},clean=True):
1109 """ Output the matrix_element in argument and perform the initialization
1110 while providing some details about the output in the dictionary returned.
1111 Returns None if anything fails"""
1112
1113 infos={'Process_output': None,
1114 'HELAS_MODEL_compilation' : None,
1115 'dir_path' : None,
1116 'Initialization' : None,
1117 'Process_compilation' : None}
1118
1119 if not reusing and clean:
1120 if os.path.isdir(export_dir):
1121 clean_up(self.output_path)
1122 if os.path.isdir(export_dir):
1123 raise InvalidCmd(\
1124 "The directory %s already exists. Please remove it."\
1125 %str(export_dir))
1126 else:
1127 if not os.path.isdir(export_dir):
1128 raise InvalidCmd(\
1129 "Could not find the directory %s to reuse."%str(export_dir))
1130
1131
1132 if not reusing and clean:
1133 model = matrix_element['processes'][0].get('model')
1134
1135
1136 import madgraph.loop.loop_exporters as loop_exporters
1137 if self.loop_optimized_output:
1138 exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
1139 else:
1140 exporter_class=loop_exporters.LoopProcessExporterFortranSA
1141
1142 MLoptions = {'clean': True,
1143 'complex_mass': self.cmass_scheme,
1144 'export_format':'madloop',
1145 'mp':True,
1146 'SubProc_prefix':'P',
1147 'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
1148 'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
1149 'cuttools_dir': self.cuttools_dir,
1150 'fortran_compiler':self.cmd.options['fortran_compiler'],
1151 'output_dependencies':self.cmd.options['output_dependencies']}
1152
1153 MLoptions.update(self.tir_dir)
1154
1155 start=time.time()
1156 FortranExporter = exporter_class(self.mg_root, export_dir, MLoptions)
1157 FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
1158 FortranExporter.copy_v4template(modelname=model.get('name'))
1159 FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
1160 wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
1161 wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
1162 for c in l]))
1163 FortranExporter.convert_model_to_mg4(self.full_model,wanted_lorentz,wanted_couplings)
1164 infos['Process_output'] = time.time()-start
1165 start=time.time()
1166 FortranExporter.finalize_v4_directory(None,"",False,False,compiler=
1167 {'fortran':self.cmd.options['fortran_compiler'],
1168 'f2py':self.cmd.options['fortran_compiler'],
1169 'cpp':self.cmd.options['fortran_compiler']})
1170 infos['HELAS_MODEL_compilation'] = time.time()-start
1171
1172
1173 if param_card != None:
1174 if isinstance(param_card, str):
1175 cp(pjoin(param_card),\
1176 pjoin(export_dir,'Cards','param_card.dat'))
1177 else:
1178 param_card.write(pjoin(export_dir,'Cards','param_card.dat'))
1179
1180
1181
1182 MadLoopInitializer.fix_PSPoint_in_check(
1183 pjoin(export_dir,'SubProcesses'), read_ps = False, npoints = 4)
1184
1185 self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
1186 mp = False, loop_filter = True,MLOptions=MLOptions)
1187
1188 shell_name = None
1189 directories = glob.glob(pjoin(export_dir, 'SubProcesses','P0_*'))
1190 if directories and os.path.isdir(directories[0]):
1191 shell_name = os.path.basename(directories[0])
1192 dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
1193 infos['dir_path']=dir_name
1194
1195
1196
1197 if not MadLoopInitializer.need_MadLoopInit(
1198 export_dir, subproc_prefix='P'):
1199 return infos
1200
1201 attempts = [3,15]
1202
1203 try:
1204 os.remove(pjoin(dir_name,'check'))
1205 os.remove(pjoin(dir_name,'check_sa.o'))
1206 except OSError:
1207 pass
1208
1209 nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
1210 pjoin(export_dir,'SubProcesses'),infos,\
1211 req_files = ['HelFilter.dat','LoopFilter.dat'],
1212 attempts = attempts)
1213 if attempts is None:
1214 logger.error("Could not compile the process %s,"%shell_name+\
1215 " try to generate it via the 'generate' command.")
1216 return None
1217 if nPS_necessary is None:
1218 logger.error("Could not initialize the process %s"%shell_name+\
1219 " with %s PS points."%max(attempts))
1220 return None
1221 elif nPS_necessary > min(attempts):
1222 logger.warning("Could not initialize the process %s"%shell_name+\
1223 " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))
1224
1225 return infos
1226
1227 def time_matrix_element(self, matrix_element, reusing = False,
1228 param_card = None, keep_folder = False, options=None,
1229 MLOptions = {}):
1230 """ Output the matrix_element in argument and give detailed information
1231 about the timing for its output and running."""
1232
1233
1234
1235 make_it_quick=False
1236
1237 if options and 'split_orders' in options.keys():
1238 split_orders = options['split_orders']
1239 else:
1240 split_orders = -1
1241
1242 assert ((not reusing and isinstance(matrix_element, \
1243 helas_objects.HelasMatrixElement)) or (reusing and
1244 isinstance(matrix_element, base_objects.Process)))
1245 if not reusing:
1246 proc_name = matrix_element['processes'][0].shell_string()[2:]
1247 else:
1248 proc_name = matrix_element.shell_string()[2:]
1249
1250 export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
1251 temp_dir_prefix+"_%s"%proc_name)
1252
1253 res_timings = self.setup_process(matrix_element,export_dir, \
1254 reusing, param_card,MLOptions = MLOptions,clean=True)
1255
1256 if res_timings == None:
1257 return None
1258 dir_name=res_timings['dir_path']
1259
1260 def check_disk_usage(path):
1261 return subprocess.Popen("du -shc -L "+str(path), \
1262 stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
1263
1264
1265
1266
1267 res_timings['du_source']=check_disk_usage(pjoin(\
1268 export_dir,'Source','*','*.f'))
1269 res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
1270 res_timings['du_color']=check_disk_usage(pjoin(dir_name,
1271 'MadLoop5_resources','*.dat'))
1272 res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))
1273
1274 if not res_timings['Initialization']==None:
1275 time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
1276 elif make_it_quick:
1277 time_per_ps_estimate = -1.0
1278 else:
1279
1280
1281 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1282 read_ps = False, npoints = 3, hel_config = -1,
1283 split_orders=split_orders)
1284 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1285 time_per_ps_estimate = run_time/3.0
1286
1287 self.boot_time_setup(dir_name,bootandstop=True)
1288 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1289 res_timings['Booting_time'] = run_time
1290 self.boot_time_setup(dir_name,bootandstop=False)
1291
1292
1293 contributing_hel=0
1294 n_contrib_hel=0
1295 proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
1296 proc_prefix = proc_prefix_file.read()
1297 proc_prefix_file.close()
1298 helicities = file(pjoin(dir_name,'MadLoop5_resources',
1299 '%sHelFilter.dat'%proc_prefix)).read().split()
1300 for i, hel in enumerate(helicities):
1301 if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
1302 if contributing_hel==0:
1303 contributing_hel=i+1
1304 n_contrib_hel += 1
1305
1306 if contributing_hel==0:
1307 logger.error("Could not find a contributing helicity "+\
1308 "configuration for process %s."%proc_name)
1309 return None
1310
1311 res_timings['n_contrib_hel']=n_contrib_hel
1312 res_timings['n_tot_hel']=len(helicities)
1313
1314
1315 if not make_it_quick:
1316 target_pspoints_number = max(int(30.0/time_per_ps_estimate)+1,50)
1317 else:
1318 target_pspoints_number = 10
1319
1320 logger.info("Checking timing for process %s "%proc_name+\
1321 "with %d PS points."%target_pspoints_number)
1322
1323 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1324 read_ps = False, npoints = target_pspoints_number*2, \
1325 hel_config = contributing_hel, split_orders=split_orders)
1326 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1327
1328 if compile_time == None: return None
1329
1330 res_timings['run_polarized_total']=\
1331 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1332
1333 if make_it_quick:
1334 res_timings['run_unpolarized_total'] = 1.0
1335 res_timings['ram_usage'] = 0.0
1336 else:
1337 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1338 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1339 split_orders=split_orders)
1340 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
1341 checkRam=True)
1342
1343 if compile_time == None: return None
1344 res_timings['run_unpolarized_total']=\
1345 (run_time-res_timings['Booting_time'])/target_pspoints_number
1346 res_timings['ram_usage'] = ram_usage
1347
1348 if not self.loop_optimized_output:
1349 return res_timings
1350
1351
1352
1353
1354
1355 self.skip_loop_evaluation_setup(dir_name,skip=True)
1356
1357 if make_it_quick:
1358 res_timings['run_unpolarized_coefs'] = 1.0
1359 else:
1360 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1361 read_ps = False, npoints = target_pspoints_number, hel_config = -1,
1362 split_orders=split_orders)
1363 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1364 if compile_time == None: return None
1365 res_timings['run_unpolarized_coefs']=\
1366 (run_time-res_timings['Booting_time'])/target_pspoints_number
1367
1368 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1369 read_ps = False, npoints = target_pspoints_number*2, \
1370 hel_config = contributing_hel, split_orders=split_orders)
1371 compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
1372 if compile_time == None: return None
1373 res_timings['run_polarized_coefs']=\
1374 (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)
1375
1376
1377 self.skip_loop_evaluation_setup(dir_name,skip=False)
1378
1379 return res_timings
1380
1381
1382
1383
1384
1385 def check_matrix_element_stability(self, matrix_element,options=None,
1386 infos_IN = None, param_card = None, keep_folder = False,
1387 MLOptions = {}):
1388 """ Output the matrix_element in argument, run it for nPoints and return
1389 a dictionary containing the stability information on each of these points.
1390 If infos are provided, then the matrix element output is skipped and
1391 reused from a previous run, together with the content of infos.
1392 """
1393
1394 if not options:
1395 reusing = False
1396 nPoints = 100
1397 split_orders = -1
1398 else:
1399 reusing = options['reuse']
1400 nPoints = options['npoints']
1401 split_orders = options['split_orders']
1402
1403 assert ((not reusing and isinstance(matrix_element, \
1404 helas_objects.HelasMatrixElement)) or (reusing and
1405 isinstance(matrix_element, base_objects.Process)))
1406
1407
1408 def format_PS_point(ps, rotation=0):
1409 """ Write out the specified PS point to the file dir_path/PS.input
1410 while rotating it if rotation!=0. We consider only a few fixed
1411 rotations, but one could think of rotations of arbitrary angle too.
1412 The first possibilities (1, 2 and 3) are rotations about, and a boost
1413 along, the z-axis so that improve_ps can still work.
1414 rotation=0 => No rotation
1415 rotation=1 => Z-axis pi/2 rotation
1416 rotation=2 => Z-axis pi/4 rotation
1417 rotation=3 => Z-axis boost
1418 rotation=4 => (x'=z,y'=-x,z'=-y)
1419 rotation=5 => (x'=-z,y'=y,z'=x)"""
1420 if rotation==0:
1421 p_out=copy.copy(ps)
1422 elif rotation==1:
1423 p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
1424 elif rotation==2:
1425 sq2 = math.sqrt(2.0)
1426 p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
1427 elif rotation==3:
1428 p_out = boost_momenta(ps, 3)
1429
1430
1431 elif rotation==4:
1432 p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
1433 elif rotation==5:
1434 p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
1435 else:
1436 raise MadGraph5Error("Rotation id %i not implemented"%rotation)
1437
1438 return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])
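# Illustrative sketch: rotation=1 maps (px, py) -> (-py, px), i.e. a pi/2
# rotation about the z axis, so e.g. [E, 1., 2., 3.] becomes [E, -2., 1., 3.].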
1439
1440 def pick_PS_point(proc, options):
1441 """ Randomly generate a PS point and make sure it is eligible. Then
1442 return it. Users can edit the cuts here if they want."""
1443
1444 p, w_rambo = self.get_momenta(proc, options)
1445 if options['events']:
1446 return p
1447
1448 while (not MatrixElementEvaluator.pass_isolation_cuts(p) and len(p)>3):
1449 p, w_rambo = self.get_momenta(proc, options)
1450
1451
1452
1453
1454 if len(p)==3:
1455 p = boost_momenta(p,3,random.uniform(0.0,0.99))
1456 return p
1457
1458
1459
1460
1461 accuracy_threshold=1.0e-1
1462
1463
1464
1465 num_rotations = 1
1466
1467 if "MLReductionLib" not in MLOptions:
1468 tools=[1]
1469 else:
1470 tools=MLOptions["MLReductionLib"]
1471 tools=list(set(tools))
1472
1473
1474 tool_var={'pjfry':2,'golem':4,'samurai':5,'ninja':6}
1475 for tool in ['pjfry','golem','samurai','ninja']:
1476 tool_dir='%s_dir'%tool
1477 if not tool_dir in self.tir_dir:
1478 continue
1479 tool_libpath=self.tir_dir[tool_dir]
1480 tool_libname="lib%s.a"%tool
1481 if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
1482 or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
1483 if tool_var[tool] in tools:
1484 tools.remove(tool_var[tool])
1485 if not tools:
1486 return None
1487
1488
1489 if not reusing:
1490 process = matrix_element['processes'][0]
1491 else:
1492 process = matrix_element
1493 proc_name = process.shell_string()[2:]
1494 export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
1495 temp_dir_prefix+"_%s"%proc_name)
1496
1497 tools_name={1:'CutTools',2:'PJFry++',3:'IREGI',4:'Golem95',5:'Samurai',
1498 6:'Ninja'}
1499 return_dict={}
1500 return_dict['Stability']={}
1501 infos_save={'Process_output': None,
1502 'HELAS_MODEL_compilation' : None,
1503 'dir_path' : None,
1504 'Initialization' : None,
1505 'Process_compilation' : None}
1506
1507 for tool in tools:
1508 tool_name=tools_name[tool]
1509
1510
1511
1512
1513
1514 DP_stability = []
1515 QP_stability = []
1516
1517 Unstable_PS_points = []
1518
1519 Exceptional_PS_points = []
1520
1521 MLoptions={}
1522 MLoptions["MLReductionLib"]=tool
1523 clean = (tool==tools[0]) and not nPoints==0
1524 if infos_IN==None or (tool_name not in infos_IN):
1525 infos=infos_IN
1526 else:
1527 infos=infos_IN[tool_name]
1528
1529 if not infos:
1530 infos = self.setup_process(matrix_element,export_dir, \
1531 reusing, param_card,MLoptions,clean)
1532 if not infos:
1533 return None
1534
1535 if clean:
1536 infos_save['Process_output']=infos['Process_output']
1537 infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
1538 infos_save['dir_path']=infos['dir_path']
1539 infos_save['Process_compilation']=infos['Process_compilation']
1540 else:
1541 if not infos['Process_output']:
1542 infos['Process_output']=infos_save['Process_output']
1543 if not infos['HELAS_MODEL_compilation']:
1544 infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
1545 if not infos['dir_path']:
1546 infos['dir_path']=infos_save['dir_path']
1547 if not infos['Process_compilation']:
1548 infos['Process_compilation']=infos_save['Process_compilation']
1549
1550 dir_path=infos['dir_path']
1551
1552
1553 savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
1554 data_i = 0
1555
1556 if reusing:
1557
1558 data_i=0
1559 while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
1560 pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
1561 saved_run = save_load_object.load_from_file(pickle_path)
1562 if data_i>0:
1563 logger.info("Loading additional data stored in %s."%
1564 str(pickle_path))
1565 logger.info("Loaded data moved to %s."%str(pjoin(
1566 dir_path,'LOADED_'+savefile%('_%d'%data_i))))
1567 shutil.move(pickle_path,
1568 pjoin(dir_path,'LOADED_'+savefile%('_%d'%data_i)))
1569 DP_stability.extend(saved_run['DP_stability'])
1570 QP_stability.extend(saved_run['QP_stability'])
1571 Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
1572 Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
1573 data_i += 1
1574
1575 return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
1576 'QP_stability':QP_stability,
1577 'Unstable_PS_points':Unstable_PS_points,
1578 'Exceptional_PS_points':Exceptional_PS_points}
1579
1580 if nPoints==0:
1581 if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
1582
1583 if data_i>1:
1584 save_load_object.save_to_file(pjoin(dir_path,
1585 savefile%'_0'),return_dict['Stability'][tool_name])
1586 continue
1587 else:
1588 logger.info("ERROR: Not reusing a directory or any pickled"+
1589 " result for tool %s and the number"%tool_name+\
1590 " of points for the check is zero.")
1591 return None
1592
1593 logger.info("Checking stability of process %s "%proc_name+\
1594 "with %d PS points by %s."%(nPoints,tool_name))
1595 if infos['Initialization'] != None:
1596 time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
1597 sec_needed = int(time_per_ps_estimate*nPoints*4)
1598 else:
1599 sec_needed = 0
1600
1601 progress_bar = None
1602 time_info = False
1603 if sec_needed>5:
1604 time_info = True
1605 logger.info("This check should take about "+\
1606 "%s to run. Started on %s."%(\
1607 str(datetime.timedelta(seconds=sec_needed)),\
1608 datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
1609 if logger.getEffectiveLevel()<logging.WARNING and \
1610 (sec_needed>5 or (reusing and infos['Initialization'] == None)):
1611 widgets = ['Stability check:', pbar.Percentage(), ' ',
1612 pbar.Bar(),' ', pbar.ETA(), ' ']
1613 progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
1614 fd=sys.stdout)
1615 MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
1616 read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
1617
1618
1619
1620 try:
1621 os.remove(pjoin(dir_path,'check'))
1622 os.remove(pjoin(dir_path,'check_sa.o'))
1623 except OSError:
1624 pass
1625
1626 devnull = open(os.devnull, 'w')
1627 retcode = subprocess.call(['make','check'],
1628 cwd=dir_path, stdout=devnull, stderr=devnull)
1629 devnull.close()
1630 if retcode != 0:
1631 logging.info("Error while executing make in %s" % dir_path)
1632 return None
1633
1634
1635
1636
1637 if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
1638
1639
1640 if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
1641 checkerName = 'StabilityCheckDriver.f'
1642 else:
1643 checkerName = 'StabilityCheckDriver_loop_induced.f'
1644
1645 with open(pjoin(self.mg_root,'Template','loop_material','Checks',
1646 checkerName),'r') as checkerFile:
1647 with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
1648 checkerToWrite = checkerFile.read()%{'proc_prefix':
1649 proc_prefix.read()}
1650 checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
1651 checkerFile.write(checkerToWrite)
1652 checkerFile.close()
1653
1654
1655
1656
1657
1658 if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
1659 os.remove(pjoin(dir_path,'StabilityCheckDriver'))
1660 if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
1661 os.remove(pjoin(dir_path,'loop_matrix.o'))
1662 misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
1663 mode='fortran', job_specs = False)
1664
1665
1666
1667
1668 if len(process['legs'])==3:
1669 self.fix_MadLoopParamCard(dir_path, mp=False,
1670 loop_filter=False, DoubleCheckHelicityFilter=True)
1671
1672 StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
1673 stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
1674 cwd=dir_path)
1675 start_index = len(DP_stability)
1676 if progress_bar!=None:
1677 progress_bar.start()
1678
1679
1680 interrupted = False
1681
1682
1683 retry = 0
1684
1685 i=start_index
1686 if options and 'events' in options and options['events']:
1687
1688 import MadSpin.decay as madspin
1689 fsock = open(options['events'])
1690 self.event_file = madspin.Event(fsock)
1691 while i<(start_index+nPoints):
1692
1693 qp_dict={}
1694 dp_dict={}
1695 UPS = None
1696 EPS = None
1697
1698 if retry==0:
1699 p = pick_PS_point(process, options)
1700
1701 try:
1702 if progress_bar!=None:
1703 progress_bar.update(i+1-start_index)
1704
1705 PSPoint = format_PS_point(p,0)
1706 dp_res=[]
1707 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1708 split_orders=split_orders))
1709 dp_dict['CTModeA']=dp_res[-1]
1710 dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
1711 split_orders=split_orders))
1712 dp_dict['CTModeB']=dp_res[-1]
1713 for rotation in range(1,num_rotations+1):
1714 PSPoint = format_PS_point(p,rotation)
1715 dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
1716 split_orders=split_orders))
1717 dp_dict['Rotation%i'%rotation]=dp_res[-1]
1718
1719 if any([not res for res in dp_res]):
1720 return None
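# Stability estimate: relative spread (max - min)/|mean| of the finite part
# over the different CT modes and the rotated/boosted copies of the same PS point.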
1721 dp_accuracy =((max(dp_res)-min(dp_res))/
1722 abs(sum(dp_res)/len(dp_res)))
1723 dp_dict['Accuracy'] = dp_accuracy
1724 if dp_accuracy>accuracy_threshold:
1725 if tool in [1,6]:
1726
1727 UPS = [i,p]
1728 qp_res=[]
1729 PSPoint = format_PS_point(p,0)
1730 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1731 split_orders=split_orders))
1732 qp_dict['CTModeA']=qp_res[-1]
1733 qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
1734 split_orders=split_orders))
1735 qp_dict['CTModeB']=qp_res[-1]
1736 for rotation in range(1,num_rotations+1):
1737 PSPoint = format_PS_point(p,rotation)
1738 qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
1739 split_orders=split_orders))
1740 qp_dict['Rotation%i'%rotation]=qp_res[-1]
1741
1742 if any([not res for res in qp_res]):
1743 return None
1744
1745 qp_accuracy = ((max(qp_res)-min(qp_res))/
1746 abs(sum(qp_res)/len(qp_res)))
1747 qp_dict['Accuracy']=qp_accuracy
1748 if qp_accuracy>accuracy_threshold:
1749 EPS = [i,p]
1750 else:
1751
1752
1753 UPS = [i,p]
1754
1755 except KeyboardInterrupt:
1756 interrupted = True
1757 break
1758 except IOError, e:
1759 if e.errno == errno.EINTR:
1760 if retry==100:
1761 logger.error("Failed a hundred times consecutively because"+
1762 " of system call interruptions.")
1763 raise
1764 else:
1765 logger.debug("Recovered from a system call interruption."+\
1766 " PSpoint #%i, Attempt #%i."%(i,retry+1))
1767
1768 time.sleep(0.5)
1769
1770 retry = retry+1
1771
1772 try:
1773 StabChecker.kill()
1774 except Exception:
1775 pass
1776 StabChecker = subprocess.Popen(\
1777 [pjoin(dir_path,'StabilityCheckDriver')],
1778 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1779 stderr=subprocess.PIPE, cwd=dir_path)
1780 continue
1781 else:
1782 raise
1783
1784
1785
1786 retry = 0
1787
1788 i=i+1
1789
1790
1791 DP_stability.append(dp_dict)
1792 QP_stability.append(qp_dict)
1793 if not EPS is None:
1794 Exceptional_PS_points.append(EPS)
1795 if not UPS is None:
1796 Unstable_PS_points.append(UPS)
1797
1798 if progress_bar!=None:
1799 progress_bar.finish()
1800 if time_info:
1801 logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
1802 "%d-%m-%Y %H:%M"))
1803
1804
1805 if not interrupted:
1806 StabChecker.stdin.write('y\n')
1807 else:
1808 StabChecker.kill()
1809
1810
1811
1812
1813
1814
1815
1816 save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
1817 return_dict['Stability'][tool_name])
1818
1819 if interrupted:
1820 break
1821
1822 return_dict['Process'] = matrix_element.get('processes')[0] if not \
1823 reusing else matrix_element
1824 return return_dict
1825
1826 @classmethod
1827 def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
1828 split_orders=-1):
1829 """ This version of get_me_value is simplified for the purpose of this
1830 class. No compilation is necessary. The CT mode can be specified."""
1831
1832
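# A hedged reading of the driver protocol used below: the '\x1a' control
# character presumably terminates any pending read in the Fortran driver,
# after which it is sent, line by line, a command id ('1'), the CT mode,
# the formatted PS point, mu_r, the helicity index and the split_orders index.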
1833 StabChecker.stdin.write('\x1a')
1834 StabChecker.stdin.write('1\n')
1835 StabChecker.stdin.write('%d\n'%mode)
1836 StabChecker.stdin.write('%s\n'%PSpoint)
1837 StabChecker.stdin.write('%.16E\n'%mu_r)
1838 StabChecker.stdin.write('%d\n'%hel)
1839 StabChecker.stdin.write('%d\n'%split_orders)
1840
1841 try:
1842 while True:
1843 output = StabChecker.stdout.readline()
1844 if output != '':
1845 last_non_empty = output
1846 if output==' ##TAG#RESULT_START#TAG##\n':
1847 break
1848
1849 ret_code = StabChecker.poll()
1850 if not ret_code is None:
1851 output = StabChecker.stdout.readline()
1852 if output != '':
1853 last_non_empty = output
1854 error = StabChecker.stderr.readline()
1855 raise MadGraph5Error, \
1856 "The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1857 (ret_code, last_non_empty, error)
1858
1859 res = ""
1860 while True:
1861 output = StabChecker.stdout.readline()
1862 if output != '':
1863 last_non_empty = output
1864 if output==' ##TAG#RESULT_STOP#TAG##\n':
1865 break
1866 else:
1867 res += output
1868 ret_code = StabChecker.poll()
1869 if not ret_code is None:
1870 output = StabChecker.stdout.readline()
1871 if output != '':
1872 last_non_empty = output
1873 error = StabChecker.stderr.readline()
1874 raise MadGraph5Error, \
1875 "The MadLoop stability checker crashed with return code = %d, and last output:\n\nstdout: %s\nstderr: %s\n"%\
1876 (ret_code, last_non_empty, error)
1877
1878 return cls.parse_check_output(res,format='tuple')[0][0]
1879 except IOError as e:
1880 logging.warning("Error while running MadLoop. Exception = %s"%str(e))
1881 raise e
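# Usage sketch (as called from the stability loop above, where StabChecker is
# the running 'StabilityCheckDriver' subprocess): modes 1 and 2 are the
# double-precision CTModeA/CTModeB evaluations, 4 and 5 their quadruple-precision
# counterparts, e.g.
#   dp_val = self.get_me_value(StabChecker, PSPoint, 1, split_orders=split_orders)
#   qp_val = self.get_me_value(StabChecker, PSPoint, 4, split_orders=split_orders)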
1882
1885 """ Perform a python evaluation of the matrix element independently for
1886 all possible helicity configurations for a fixed number of points N and
1887 returns the average for each in the format [[hel_config, eval],...].
1888 This is used to determine, at generation time, which helicity
1889 configurations vanish or are dependent, so that the output can be set up accordingly.
1890 This is not yet implemented at LO."""
1891
1892
1893 assert isinstance(process,base_objects.Process)
1894 assert process.get('perturbation_couplings')==[]
1895
1896 N_eval=50
1897
1898 evaluator = MatrixElementEvaluator(process.get('model'), param_card,
1899 auth_skipping = False, reuse = True)
1900
1901 amplitude = diagram_generation.Amplitude(process)
1902 matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)
1903
1904 cumulative_helEvals = []
1905
1906 for i in range(N_eval):
1907 p, w_rambo = evaluator.get_momenta(process)
1908 helEvals = evaluator.evaluate_matrix_element(\
1909 matrix_element, p = p, output = 'helEvals')['helEvals']
1910 if cumulative_helEvals==[]:
1911 cumulative_helEvals=copy.copy(helEvals)
1912 else:
1913 cumulative_helEvals = [[h[0],h[1]+helEvals[i][1]] for i, h in \
1914 enumerate(cumulative_helEvals)]
1915
1916
1917 cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]
1918
1919
1920
1921 clean_added_globals(ADDED_GLOBAL)
1922
1923 return cumulative_helEvals
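# The returned structure holds one entry per helicity combination, e.g. for a
# 2 -> 2 process (illustrative numbers only):
#   [[(-1,-1,-1,-1), 0.0], [(-1,-1,-1,1), 1.3e-3], ...]
# where the second element is the matrix-element value averaged over the
# N_eval sampled phase-space points.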
1924
1927 """A wrapper function for running an iteration of a function over
1928 a multiprocess, without having to first create a process list
1929 (which makes a big difference for very large multiprocesses).
1930 stored_quantities is a dictionary for any quantities that we want
1931 to reuse between runs."""
1932
1933 model = multiprocess.get('model')
1934 isids = [leg.get('ids') for leg in multiprocess.get('legs') \
1935 if not leg.get('state')]
1936 fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
1937 if leg.get('state')]
1938
1939 id_anti_id_dict = {}
1940 for id in set(tuple(sum(isids+fsids, []))):
1941 id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
1942 id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
1943 sorted_ids = []
1944 results = []
1945 for is_prod in itertools.product(*isids):
1946 for fs_prod in itertools.product(*fsids):
1947
1948
1949 if check_already_checked(is_prod, fs_prod, sorted_ids,
1950 multiprocess, model, id_anti_id_dict):
1951 continue
1952
1953 process = multiprocess.get_process_with_legs(base_objects.LegList(\
1954 [base_objects.Leg({'id': id, 'state':False}) for \
1955 id in is_prod] + \
1956 [base_objects.Leg({'id': id, 'state':True}) for \
1957 id in fs_prod]))
1958
1959 if opt is not None:
1960 if isinstance(opt, dict):
1961 try:
1962 value = opt[process.base_string()]
1963 except Exception:
1964 continue
1965 result = function(process, stored_quantities, value, options=options)
1966 else:
1967 result = function(process, stored_quantities, opt, options=options)
1968 else:
1969 result = function(process, stored_quantities, options=options)
1970
1971 if result:
1972 results.append(result)
1973
1974 return results
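# Usage sketch (as in check_gauge further down, where 'evaluator' plays the
# role of stored_quantities):
#   results = run_multiprocs_no_crossings(check_gauge_process, multiprocess,
#                                         evaluator, options=options)
# Each distinct initial/final-state id combination is evaluated once; crossings
# that reduce to an already-checked combination are skipped via
# check_already_checked.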
1975
1976
1977
1978
1979
1980 def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
1981 id_anti_id_dict = {}):
1982 """Check if process already checked, if so return True, otherwise add
1983 process and antiprocess to sorted_ids."""
1984
1985
1986 if id_anti_id_dict:
1987 is_ids = [id_anti_id_dict[id] for id in \
1988 is_ids]
1989 else:
1990 is_ids = [model.get_particle(id).get_anti_pdg_code() for id in \
1991 is_ids]
1992
1993 ids = array.array('i', sorted(is_ids + list(fs_ids)) + \
1994 [process.get('id')])
1995
1996 if ids in sorted_ids:
1997
1998 return True
1999
2000
2001 sorted_ids.append(ids)
2002
2003
2004 return False
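# For example, u u~ > d d~ and d d~ > u u~ (with the same process id) map to
# the same key: the initial-state ids are conjugated, all ids sorted to
# [-2, -1, 1, 2] and the process id appended, so the second call returns True.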
2005
2011 """ Generate a loop matrix element from the process definition and return
2012 it along with the timing information dictionary.
2013 If reuse is True, an already existing output directory is reused if found.
2014 The output directory name can be forced with proc_name."""
2015
2016 assert isinstance(process_definition,
2017 (base_objects.ProcessDefinition,base_objects.Process))
2018 assert process_definition.get('perturbation_couplings')!=[]
2019
2020 if isinstance(process_definition,base_objects.ProcessDefinition):
2021 if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
2022 raise InvalidCmd("This check can only be performed on single "+
2023 "processes (i.e. without multiparticle labels).")
2024
2025 isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2026 if not leg.get('state')]
2027 fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
2028 if leg.get('state')]
2029
2030
2031 process = process_definition.get_process(isids,fsids)
2032 else:
2033 process = process_definition
2034
2035 if not output_path is None:
2036 root_path = output_path
2037 else:
2038 root_path = cmd._mgme_dir
2039
2040 timing = {'Diagrams_generation': None,
2041 'n_loops': None,
2042 'HelasDiagrams_generation': None,
2043 'n_loop_groups': None,
2044 'n_loop_wfs': None,
2045 'loop_wfs_ranks': None}
2046
2047 if proc_name:
2048 proc_dir = pjoin(root_path,proc_name)
2049 else:
2050 proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
2051 '_'.join(process.shell_string().split('_')[1:])))
2052 if reuse and os.path.isdir(proc_dir):
2053 logger.info("Reusing directory %s"%str(proc_dir))
2054
2055 return timing, process
2056
2057 logger.info("Generating p%s"%process_definition.nice_string()[1:])
2058
2059 start=time.time()
2060 try:
2061 amplitude = loop_diagram_generation.LoopAmplitude(process,
2062 loop_filter=loop_filter)
2063 except InvalidCmd:
2064
2065
2066 return time.time()-start, None
2067 if not amplitude.get('diagrams'):
2068
2069 return time.time()-start, None
2070
2071
2072
2073 loop_optimized_output = cmd.options['loop_optimized_output']
2074 timing['Diagrams_generation']=time.time()-start
2075 timing['n_loops']=len(amplitude.get('loop_diagrams'))
2076 start=time.time()
2077
2078 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2079 optimized_output = loop_optimized_output,gen_color=True)
2080
2081
2082
2083 matrix_element.compute_all_analytic_information()
2084 timing['HelasDiagrams_generation']=time.time()-start
2085
2086 if loop_optimized_output:
2087 timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
2088 lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
2089 ldiag.get('loop_wavefunctions')]
2090 timing['n_loop_wfs']=len(lwfs)
2091 timing['loop_wfs_ranks']=[]
2092 for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
2093 for l in lwfs])+1):
2094 timing['loop_wfs_ranks'].append(\
2095 len([1 for l in lwfs if \
2096 l.get_analytic_info('wavefunction_rank')==rank]))
2097
2098 return timing, matrix_element
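# Usage sketch (as done in check_profile/check_timing below):
#   timing, matrix_element = generate_loop_matrix_element(process_definition,
#                                   reuse, output_path=output_path, cmd=cmd)
# Note that when reuse is True and a saved TMP_CHECK directory already exists,
# the bare Process is returned in place of a LoopHelasMatrixElement, which is
# what the 'reusing = isinstance(matrix_element, base_objects.Process)' tests
# below rely on.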
2099
2100
2101
2102
2103 def check_profile(process_definition, param_card = None,cuttools="",tir={},
2104 options = {}, cmd = FakeInterface(),output_path=None,MLOptions={}):
2105 """For a single loop process, check both its timings and then its stability
2106 in one go without regenerating it."""
2107
2108 if 'reuse' not in options:
2109 keep_folder=False
2110 else:
2111 keep_folder = options['reuse']
2112
2113 model=process_definition.get('model')
2114
2115 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2116 keep_folder,output_path=output_path,cmd=cmd)
2117 reusing = isinstance(matrix_element, base_objects.Process)
2118 options['reuse'] = reusing
2119 myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2120 model=model, output_path=output_path, cmd=cmd)
2121
2122 if not myProfiler.loop_optimized_output:
2123 MLoptions={}
2124 else:
2125 MLoptions=MLOptions
2126
2127 timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
2128 param_card, keep_folder=keep_folder,options=options,
2129 MLOptions = MLoptions)
2130
2131 if timing2 == None:
2132 return None, None
2133
2134
2135 timing = dict(timing1.items()+timing2.items())
2136 stability = myProfiler.check_matrix_element_stability(matrix_element,
2137 options=options, infos_IN=timing,param_card=param_card,
2138 keep_folder = keep_folder,
2139 MLOptions = MLoptions)
2140 if stability == None:
2141 return None, None
2142 else:
2143 timing['loop_optimized_output']=myProfiler.loop_optimized_output
2144 stability['loop_optimized_output']=myProfiler.loop_optimized_output
2145 return timing, stability
2146
2147
2148
2149
2150 def check_stability(process_definition, param_card = None,cuttools="",tir={},
2151 options=None,nPoints=100, output_path=None,
2152 cmd = FakeInterface(), MLOptions = {}):
2153 """For a single loop process, check the numerical stability of its evaluation
2154 over a sample of phase-space points."""
2155
2156 if "reuse" in options:
2157 reuse=options['reuse']
2158 else:
2159 reuse=False
2160
2162 keep_folder = reuse
2163 model=process_definition.get('model')
2164
2165 timing, matrix_element = generate_loop_matrix_element(process_definition,
2166 reuse, output_path=output_path, cmd=cmd)
2167 reusing = isinstance(matrix_element, base_objects.Process)
2168 options['reuse'] = reusing
2169 myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
2170 output_path=output_path,model=model,cmd=cmd)
2171
2172 if not myStabilityChecker.loop_optimized_output:
2173 MLoptions = {}
2174 else:
2175 MLoptions = MLOptions
2176 if "MLReductionLib" not in MLOptions:
2177 MLoptions["MLReductionLib"] = []
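# The integers appended below select the available reduction backends:
# 1=CutTools, 2=PJFry++, 3=IREGI, 4=Golem95, 5=Samurai, 6=Ninja.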
2178 if cuttools:
2179 MLoptions["MLReductionLib"].extend([1])
2180 if "iregi_dir" in tir:
2181 MLoptions["MLReductionLib"].extend([3])
2182 if "pjfry_dir" in tir:
2183 MLoptions["MLReductionLib"].extend([2])
2184 if "golem_dir" in tir:
2185 MLoptions["MLReductionLib"].extend([4])
2186 if "samurai_dir" in tir:
2187 MLoptions["MLReductionLib"].extend([5])
2188 if "ninja_dir" in tir:
2189 MLoptions["MLReductionLib"].extend([6])
2190
2191 stability = myStabilityChecker.check_matrix_element_stability(matrix_element,
2192 options=options,param_card=param_card,
2193 keep_folder=keep_folder,
2194 MLOptions=MLoptions)
2195
2196 if stability == None:
2197 return None
2198 else:
2199 stability['loop_optimized_output']=myStabilityChecker.loop_optimized_output
2200 return stability
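# Usage sketch (names and paths are illustrative; 'cmd' must expose the usual
# interface options, in particular 'loop_optimized_output'):
#   stab = check_stability(my_loop_procdef, cuttools=cuttools_dir, tir=tir_dirs,
#                          options={'reuse': False, 'events': None},
#                          output_path=out_dir, cmd=interface)
#   report = output_stability(stab, out_dir)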
2201
2202
2203
2204
2205 def check_timing(process_definition, param_card= None, cuttools="",tir={},
2206 output_path=None, options={}, cmd = FakeInterface(),
2207 MLOptions = {}):
2208 """For a single loop process, give a detailed summary of the generation and
2209 execution timing."""
2210
2211 if 'reuse' not in options:
2212 keep_folder = False
2213 else:
2214 keep_folder = options['reuse']
2215 model=process_definition.get('model')
2216 timing1, matrix_element = generate_loop_matrix_element(process_definition,
2217 keep_folder, output_path=output_path, cmd=cmd)
2218 reusing = isinstance(matrix_element, base_objects.Process)
2219 options['reuse'] = reusing
2220 myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools,model=model,tir_dir=tir,
2221 output_path=output_path, cmd=cmd)
2222
2223 if not myTimer.loop_optimized_output:
2224 MLoptions = {}
2225 else:
2226 MLoptions = MLOptions
2227 timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
2228 keep_folder = keep_folder, options=options,
2229 MLOptions = MLoptions)
2230
2231 if timing2 == None:
2232 return None
2233 else:
2234
2235 res = dict(timing1.items()+timing2.items())
2236 res['loop_optimized_output']=myTimer.loop_optimized_output
2237 return res
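# The merged dictionary returned here is what output_timings() below consumes:
# the generation timings from generate_loop_matrix_element plus the run-time,
# memory and disk-usage entries filled in by time_matrix_element.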
2238
2239
2240
2241
2242 def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
2243 options=None, reuse = False, output_path=None, cmd = FakeInterface()):
2244 """Check processes by generating them with all possible orderings
2245 of particles (which means different diagram building and Helas
2246 calls), and comparing the resulting matrix element values."""
2247
2248 cmass_scheme = cmd.options['complex_mass_scheme']
2249 if isinstance(processes, base_objects.ProcessDefinition):
2250
2251
2252 multiprocess = processes
2253 model = multiprocess.get('model')
2254
2255
2256 if multiprocess.get('perturbation_couplings')==[]:
2257 evaluator = MatrixElementEvaluator(model,
2258 auth_skipping = True, reuse = False, cmd = cmd)
2259 else:
2260 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
2261 model=model, auth_skipping = True,
2262 reuse = False, output_path=output_path, cmd = cmd)
2263
2264 results = run_multiprocs_no_crossings(check_process,
2265 multiprocess,
2266 evaluator,
2267 quick,
2268 options)
2269
2270 if "used_lorentz" not in evaluator.stored_quantities:
2271 evaluator.stored_quantities["used_lorentz"] = []
2272
2273 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
2274
2275 clean_up(output_path)
2276
2277 return results, evaluator.stored_quantities["used_lorentz"]
2278
2279 elif isinstance(processes, base_objects.Process):
2280 processes = base_objects.ProcessList([processes])
2281 elif isinstance(processes, base_objects.ProcessList):
2282 pass
2283 else:
2284 raise InvalidCmd("processes is of an unsupported format")
2285
2286 if not processes:
2287 raise InvalidCmd("No processes given")
2288
2289 model = processes[0].get('model')
2290
2291
2292 if processes[0].get('perturbation_couplings')==[]:
2293 evaluator = MatrixElementEvaluator(model, param_card,
2294 auth_skipping = True, reuse = False, cmd = cmd)
2295 else:
2296 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
2297 model=model,param_card=param_card,
2298 auth_skipping = True, reuse = False,
2299 output_path=output_path, cmd = cmd)
2300
2301
2302
2303 sorted_ids = []
2304 comparison_results = []
2305
2306
2307 for process in processes:
2308
2309
2310 if check_already_checked([l.get('id') for l in process.get('legs') if \
2311 not l.get('state')],
2312 [l.get('id') for l in process.get('legs') if \
2313 l.get('state')],
2314 sorted_ids, process, model):
2315 continue
2316
2317 res = check_process(process, evaluator, quick, options)
2318 if res:
2319 comparison_results.append(res)
2320
2321 if "used_lorentz" not in evaluator.stored_quantities:
2322 evaluator.stored_quantities["used_lorentz"] = []
2323
2324 if processes[0].get('perturbation_couplings')!=[] and not reuse:
2325
2326 clean_up(output_path)
2327
2328 return comparison_results, evaluator.stored_quantities["used_lorentz"]
2329
2331 """Check the helas calls for a process by generating the process
2332 using all different permutations of the process legs (or, if
2333 quick, only a subset of permutations), and checking that the matrix
2334 element is invariant under these reorderings."""
2335
2336 model = process.get('model')
2337
2338
2339 for i, leg in enumerate(process.get('legs')):
2340 leg.set('number', i+1)
2341
2342 logger.info("Checking crossings of %s" % \
2343 process.nice_string().replace('Process:', 'process'))
2344
2345 process_matrix_elements = []
2346
2347
2348
2349 if quick:
2350 leg_positions = [[] for leg in process.get('legs')]
2351 quick = range(1,len(process.get('legs')) + 1)
2352
2353 values = []
2354
2355
2356 number_checked=0
2357 for legs in itertools.permutations(process.get('legs')):
2358
2359 order = [l.get('number') for l in legs]
2360 if quick:
2361 found_leg = True
2362 for num in quick:
2363
2364
2365 leg_position = legs.index([l for l in legs if \
2366 l.get('number') == num][0])
2367
2368 if not leg_position in leg_positions[num-1]:
2369 found_leg = False
2370 leg_positions[num-1].append(leg_position)
2371
2372 if found_leg:
2373 continue
2374
2375
2376
2377 if quick and process.get('perturbation_couplings') and number_checked >3:
2378 continue
2379
2380 legs = base_objects.LegList(legs)
2381
2382 if order != range(1,len(legs) + 1):
2383 logger.info("Testing permutation: %s" % \
2384 order)
2385
2386 newproc = copy.copy(process)
2387 newproc.set('legs',legs)
2388
2389
2390 try:
2391 if newproc.get('perturbation_couplings')==[]:
2392 amplitude = diagram_generation.Amplitude(newproc)
2393 else:
2394
2395 loop_base_objects.cutting_method = 'optimal' if \
2396 number_checked%2 == 0 else 'default'
2397 amplitude = loop_diagram_generation.LoopAmplitude(newproc)
2398 except InvalidCmd:
2399 result=False
2400 else:
2401 result = amplitude.get('diagrams')
2402
2403 loop_base_objects.cutting_method = 'optimal'
2404
2405 if not result:
2406
2407 logging.info("No diagrams for %s" % \
2408 process.nice_string().replace('Process', 'process'))
2409 break
2410
2411 if order == range(1,len(legs) + 1):
2412
2413 p, w_rambo = evaluator.get_momenta(process, options)
2414
2415
2416 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
2417 matrix_element = helas_objects.HelasMatrixElement(amplitude,
2418 gen_color=False)
2419 else:
2420 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
2421 optimized_output=evaluator.loop_optimized_output)
2422
2423
2424
2425
2426 if amplitude.get('process').get('has_born'):
2427
2428
2429 if matrix_element in process_matrix_elements:
2430
2431
2432 continue
2433
2434 process_matrix_elements.append(matrix_element)
2435
2436 res = evaluator.evaluate_matrix_element(matrix_element, p = p,
2437 options=options)
2438 if res == None:
2439 break
2440
2441 values.append(res[0])
2442 number_checked += 1
2443
2444
2445
2446 if abs(max(values)) + abs(min(values)) > 0 and \
2447 2 * abs(max(values) - min(values)) / \
2448 (abs(max(values)) + abs(min(values))) > 0.01:
2449 break
2450
2451
2452 if not values:
2453 return None
2454
2455
2456
2457 diff = 0
2458 if abs(max(values)) + abs(min(values)) > 0:
2459 diff = 2* abs(max(values) - min(values)) / \
2460 (abs(max(values)) + abs(min(values)))
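# Worked example of the relative-difference measure above: values of 2.0 and
# 2.0000002 give diff = 2*2e-7/4.0000002 ~ 1e-7, which would pass the 1e-5
# loop-level threshold below but fail the stricter 1e-8 tree-level one.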
2461
2462
2463 if process.get('perturbation_couplings'):
2464 passed = diff < 1.e-5
2465 else:
2466 passed = diff < 1.e-8
2467
2468 return {"process": process,
2469 "momenta": p,
2470 "values": values,
2471 "difference": diff,
2472 "passed": passed}
2473
2475 """Clean up the possible left-over outputs from 'evaluate_matrix_element' of
2476 the LoopMatrixElementEvaluator (when its argument proliferate is set to True). """
2477
2478 if mg_root is None:
2479 return
2480
2481 directories = glob.glob(pjoin(mg_root, '%s*'%temp_dir_prefix))
2482 if directories != []:
2483 logger.debug("Cleaning temporary %s* check runs."%temp_dir_prefix)
2484 for dir in directories:
2485
2486 if os.path.isdir(pjoin(dir,'SubProcesses')):
2487 shutil.rmtree(dir)
2488
2497
2498 def output_profile(myprocdef, stability, timing, output_path, reusing=False):
2499 """Present the results from a timing and stability consecutive check"""
2500
2501 opt = timing['loop_optimized_output']
2502
2503 text = 'Timing result for the '+('optimized' if opt else 'default')+\
2504 ' output:\n'
2505 text += output_timings(myprocdef,timing)
2506
2507 text += '\nStability result for the '+('optimized' if opt else 'default')+\
2508 ' output:\n'
2509 text += output_stability(stability,output_path, reusing=reusing)
2510
2511 mode = 'optimized' if opt else 'default'
2512 logFilePath = pjoin(output_path, 'profile_%s_%s.log'\
2513 %(mode,stability['Process'].shell_string()))
2514 logFile = open(logFilePath, 'w')
2515 logFile.write(text)
2516 logFile.close()
2517 logger.info('Log of this profile check was output to file %s'\
2518 %str(logFilePath))
2519 return text
2520
2522 """Present the result of a stability check in a nice format.
2523 The full info is written to 'stability_<mode>_<proc_shell_string>.log'
2524 under output_path."""
2525
2526 def accuracy(eval_list):
2527 """ Compute the accuracy from different evaluations."""
2528 return (2.0*(max(eval_list)-min(eval_list))/
2529 abs(max(eval_list)+min(eval_list)))
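# e.g. accuracy([1.0, 1.0001]) = 2*1e-4/2.0001 ~ 1.0e-4 (a smaller value
# therefore means a more stable evaluation).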
2530
2531 def best_estimate(eval_list):
2532 """ Returns the best estimate from different evaluations."""
2533 return (max(eval_list)+min(eval_list))/2.0
2534
2535 def loop_direction_test_power(eval_list):
2536 """ Compute the loop direction test power P, defined as
2537 P = accuracy(loop_dir_test) / accuracy(all_test)
2538 so that P is large if the loop direction test is effective.
2539 The tuple returned is (log(median(P)),log(min(P)),frac)
2540 where frac is the fraction of events with log10(P) smaller than -3,
2541 i.e. events for which the loop direction test claims an accuracy
2542 three digits better than what the other tests indicate."""
2544 powers=[]
2545 for eval in eval_list:
2546 loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
2547
2548 other_evals = [eval[key] for key in eval.keys() if key not in \
2549 ['CTModeB','Accuracy']]
2550 if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
2551 powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))
2552
2553 n_fail=0
2554 for p in powers:
2555 if (math.log(p)/math.log(10))<-3:
2556 n_fail+=1
2557
2558 if len(powers)==0:
2559 return (None,None,None)
2560
2561 return (math.log(median(powers))/math.log(10),
2562 math.log(min(powers))/math.log(10),
2563 n_fail/len(powers))
2564
2565 def test_consistency(dp_eval_list, qp_eval_list):
2566 """ Computes the consistency test C from the DP and QP evaluations.
2567 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2568 So a consistent test would have C as close to one as possible.
2569 The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
2570 consistencies = []
2571 for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
2572 dp_evals = [dp_eval[key] for key in dp_eval.keys() \
2573 if key!='Accuracy']
2574 qp_evals = [qp_eval[key] for key in qp_eval.keys() \
2575 if key!='Accuracy']
2576 if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
2577 accuracy(dp_evals)!=0.0:
2578 consistencies.append(accuracy(dp_evals)/(abs(\
2579 best_estimate(qp_evals)-best_estimate(dp_evals))))
2580
2581 if len(consistencies)==0:
2582 return (None,None,None)
2583
2584 return (math.log(median(consistencies))/math.log(10),
2585 math.log(min(consistencies))/math.log(10),
2586 math.log(max(consistencies))/math.log(10))
2587
2588 def median(orig_list):
2589 """ Return the median of a float list (a sorted copy is made internally). """
2590 list=copy.copy(orig_list)
2591 list.sort()
2592 if len(list)%2==0:
2593 return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
2594 else:
2595 return list[int((len(list)-1)/2)]
2596
2597
2598 f = format_output
2599
2600 opt = stability['loop_optimized_output']
2601
2602 mode = 'optimized' if opt else 'default'
2603 process = stability['Process']
2604 res_str = "Stability checking for %s (%s mode)\n"\
2605 %(process.nice_string()[9:],mode)
2606
2607 logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
2608 %(mode,process.shell_string())), 'w')
2609
2610 logFile.write('Stability check results\n\n')
2611 logFile.write(res_str)
2612 data_plot_dict={}
2613 accuracy_dict={}
2614 nPSmax=0
2615 max_acc=0.0
2616 min_acc=1.0
2617 if stability['Stability']:
2618 toolnames= stability['Stability'].keys()
2619 toolnamestr=" | ".join(tn+
2620 ''.join([' ']*(10-len(tn))) for tn in toolnames)
2621 DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
2622 for key,stab in stability['Stability'].items()]
2623 med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab in DP_stability])
2624 min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab in DP_stability])
2625 max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab in DP_stability])
2626 UPS = [stab['Unstable_PS_points'] for key,stab in stability['Stability'].items()]
2627 res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
2628 len_PS=["%i"%len(evals)+\
2629 ''.join([' ']*(10-len("%i"%len(evals)))) for evals in DP_stability]
2630 len_PS_str=" | ".join(len_PS)
2631 res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
2632 res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
2633 res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
2634 res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
2635 pmedminlist=[]
2636 pfraclist=[]
2637 for key,stab in stability['Stability'].items():
2638 (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
2639 ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
2640 pfrac_str = f(pfrac,'%.2e')
2641 pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
2642 pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
2643 pmedminlist_str=" | ".join(pmedminlist)
2644 pfraclist_str=" | ".join(pfraclist)
2645 res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
2646 res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
2647 len_UPS=["%i"%len(upup)+\
2648 ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
2649 len_UPS_str=" | ".join(len_UPS)
2650 res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
2651 res_str_i += \
2652 """
2653 = Legend for the statistics of the stability tests. (all logs below are log_10)
2654 The loop direction test power P is computed as follows:
2655 P = accuracy(loop_dir_test) / accuracy(all_other_test)
2656 So that log(P) is positive if the loop direction test is effective.
2657 The tuple printed out is (log(median(P)),log(min(P)))
2658 The consistency test C is computed when QP evaluations are available:
2659 C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
2660 So a consistent test would have log(C) as close to zero as possible.
2661 The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
2662 res_str+=res_str_i
2663 for key in stability['Stability'].keys():
2664 toolname=key
2665 stab=stability['Stability'][key]
2666 DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
2667
2668 QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
2669 stab['QP_stability']]
2670 nPS = len(DP_stability)
2671 if nPS>nPSmax:nPSmax=nPS
2672 UPS = stab['Unstable_PS_points']
2673 UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
2674 UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
2675 EPS = stab['Exceptional_PS_points']
2676 EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
2677 EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
2678 res_str_i = ""
2679
2680 xml_toolname = {'GOLEM95':'GOLEM','IREGI':'IREGI',
2681 'CUTTOOLS':'CUTTOOLS','PJFRY++':'PJFRY',
2682 'NINJA':'NINJA','SAMURAI':'SAMURAI'}[toolname.upper()]
2683 if len(UPS)>0:
2684 res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
2685 %(len(UPS),nPS,toolname)
2686 prefix = 'DP' if toolname=='CutTools' else ''
2687 res_str_i += "|= %s Median inaccuracy.......... %s\n"\
2688 %(prefix,f(median(UPS_stability_DP),'%.2e'))
2689 res_str_i += "|= %s Max accuracy............... %s\n"\
2690 %(prefix,f(min(UPS_stability_DP),'%.2e'))
2691 res_str_i += "|= %s Min accuracy............... %s\n"\
2692 %(prefix,f(max(UPS_stability_DP),'%.2e'))
2693 (pmed,pmin,pfrac)=loop_direction_test_power(\
2694 [stab['DP_stability'][U[0]] for U in UPS])
2695 if toolname=='CutTools':
2696 res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
2697 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2698 res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
2699 %f(pfrac,'%.2e')
2700 res_str_i += "|= QP Median accuracy............ %s\n"\
2701 %f(median(UPS_stability_QP),'%.2e')
2702 res_str_i += "|= QP Max accuracy............... %s\n"\
2703 %f(min(UPS_stability_QP),'%.2e')
2704 res_str_i += "|= QP Min accuracy............... %s\n"\
2705 %f(max(UPS_stability_QP),'%.2e')
2706 (pmed,pmin,pfrac)=loop_direction_test_power(\
2707 [stab['QP_stability'][U[0]] for U in UPS])
2708 res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
2709 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2710 res_str_i += "|= UPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2711 (pmed,pmin,pmax)=test_consistency(\
2712 [stab['DP_stability'][U[0]] for U in UPS],
2713 [stab['QP_stability'][U[0]] for U in UPS])
2714 res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
2715 %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
2716 if len(EPS)==0:
2717 res_str_i += "= Number of Exceptional PS points : 0\n"
2718 if len(EPS)>0:
2719 res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
2720 %(len(EPS),nPS,toolname)
2721 res_str_i += "|= DP Median accuracy............ %s\n"\
2722 %f(median(EPS_stability_DP),'%.2e')
2723 res_str_i += "|= DP Max accuracy............... %s\n"\
2724 %f(min(EPS_stability_DP),'%.2e')
2725 res_str_i += "|= DP Min accuracy............... %s\n"\
2726 %f(max(EPS_stability_DP),'%.2e')
2727 pmed,pmin,pfrac=loop_direction_test_power(\
2728 [stab['DP_stability'][E[0]] for E in EPS])
2729 res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
2730 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2731 res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
2732 %f(pfrac,'%.2e')
2733 res_str_i += "|= QP Median accuracy............ %s\n"\
2734 %f(median(EPS_stability_QP),'%.2e')
2735 res_str_i += "|= QP Max accuracy............... %s\n"\
2736 %f(min(EPS_stability_QP),'%.2e')
2737 res_str_i += "|= QP Min accuracy............... %s\n"\
2738 %f(max(EPS_stability_QP),'%.2e')
2739 pmed,pmin,pfrac=loop_direction_test_power(\
2740 [stab['QP_stability'][E[0]] for E in EPS])
2741 res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
2742 %(f(pmed,'%.1f'),f(pmin,'%.1f'))
2743 res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')
2744
2745 logFile.write(res_str_i)
2746
2747 if len(EPS)>0:
2748 logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
2749 %(len(EPS),toolname))
2750 logFile.write('<EPS_data reduction=%s>\n'%xml_toolname.upper())
2751 for i, eps in enumerate(EPS):
2752 logFile.write('\nEPS #%i\n'%(i+1))
2753 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2754 for p in eps[1]]))
2755 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[eps[0]])
2756 logFile.write(' QP accuracy : %.4e\n'%QP_stability[eps[0]])
2757 logFile.write('</EPS_data>\n')
2758 if len(UPS)>0:
2759 logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
2760 %(len(UPS),toolname))
2761 logFile.write('<UPS_data reduction=%s>\n'%xml_toolname.upper())
2762 for i, ups in enumerate(UPS):
2763 logFile.write('\nUPS #%i\n'%(i+1))
2764 logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
2765 for p in ups[1]]))
2766 logFile.write('\n DP accuracy : %.4e\n'%DP_stability[ups[0]])
2767 logFile.write(' QP accuracy : %.4e\n'%QP_stability[ups[0]])
2768 logFile.write('</UPS_data>\n')
2769
2770 logFile.write('\nData entries for the stability plot.\n')
2771 logFile.write('First column is a maximal accuracy delta, second is the '+\
2772 'fraction of events with DP accuracy worse than delta.\n')
2773 logFile.write('<plot_data reduction=%s>\n'%xml_toolname.upper())
2774
2775 if max(DP_stability)>0.0:
2776 min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
2777 if min_digit_acc>=0:
2778 min_digit_acc = min_digit_acc+1
2779 accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
2780 else:
2781 logFile.writelines('%.4e %.4e\n'%(accuracies[i], 0.0) for i in \
2782 range(len(accuracies)))
2783 logFile.write('</plot_data>\n')
2784 res_str_i += '\nPerfect accuracy over all the trial PS points, so no plot'+\
2785 ' is output.'
2786 logFile.write('Perfect accuracy over all the trial PS points.')
2787 res_str +=res_str_i
2788 continue
2789
2790 accuracy_dict[toolname]=accuracies
2791 if max(accuracies) > max_acc: max_acc=max(accuracies)
2792 if min(accuracies) < min_acc: min_acc=min(accuracies)
2793 data_plot=[]
2794 for acc in accuracies:
2795 data_plot.append(float(len([d for d in DP_stability if d>acc]))\
2796 /float(len(DP_stability)))
2797 data_plot_dict[toolname]=data_plot
2798
2799 logFile.writelines('%.4e %.4e\n'%(accuracies[i], data_plot[i]) for i in \
2800 range(len(accuracies)))
2801 logFile.write('</plot_data>\n')
2802 logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
2803 %(nPS,toolname))
2804 logFile.write('First column is DP accuracy, second is QP (if available).\n\n')
2805 logFile.write('<accuracies reduction=%s>\n'%xml_toolname.upper())
2806 logFile.writelines('%.4e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
2807 else '%.4e\n'%QP_stability[i]) for i in range(nPS))
2808 logFile.write('</accuracies>\n')
2809 res_str+=res_str_i
2810 logFile.close()
2811 res_str += "\n= Stability details of the run are output to the file"+\
2812 " stability_%s_%s.log\n"%(mode,process.shell_string())
2813
2814
2815
2816
2817 if any(isinstance(handler,logging.FileHandler) for handler in \
2818 logging.getLogger('madgraph').handlers):
2819 return res_str
2820
2821 try:
2822 import matplotlib.pyplot as plt
2823 colorlist=['b','r','g','y','m','c']
2824 for i,key in enumerate(data_plot_dict.keys()):
2825 color=colorlist[i]
2826 data_plot=data_plot_dict[key]
2827 accuracies=accuracy_dict[key]
2828 plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
2829 label=key)
2830 plt.axis([min_acc,max_acc,\
2831 10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
2832 plt.yscale('log')
2833 plt.xscale('log')
2834 plt.title('Stability plot for %s (%s mode, %d points)'%\
2835 (process.nice_string()[9:],mode,nPSmax))
2836 plt.ylabel('Fraction of events')
2837 plt.xlabel('Maximal precision')
2838 plt.legend()
2839 if not reusing:
2840 logger.info('Some stability statistics will be displayed once you '+\
2841 'close the plot window')
2842 plt.show()
2843 else:
2844 fig_output_file = str(pjoin(output_path,
2845 'stability_plot_%s_%s.png'%(mode,process.shell_string())))
2846 logger.info('Stability plot output to file %s. '%fig_output_file)
2847 plt.savefig(fig_output_file)
2848 return res_str
2849 except Exception as e:
2850 if isinstance(e, ImportError):
2851 res_str += "\n= Install matplotlib to get a "+\
2852 "graphical display of the results of this check."
2853 else:
2854 res_str += "\n= Could not produce the stability plot because of "+\
2855 "the following error: %s"%str(e)
2856 return res_str
2857
2859 """Present the result of a timings check in a nice format """
2860
2861
2862 f = format_output
2863 loop_optimized_output = timings['loop_optimized_output']
2864
2865 res_str = "%s \n"%process.nice_string()
2866 try:
2867 gen_total = timings['HELAS_MODEL_compilation']+\
2868 timings['HelasDiagrams_generation']+\
2869 timings['Process_output']+\
2870 timings['Diagrams_generation']+\
2871 timings['Process_compilation']+\
2872 timings['Initialization']
2873 except TypeError:
2874 gen_total = None
2875 res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
2876 res_str += "|= Diagrams generation....... %s\n"\
2877 %f(timings['Diagrams_generation'],'%.3gs')
2878 res_str += "|= Helas Diagrams generation. %s\n"\
2879 %f(timings['HelasDiagrams_generation'],'%.3gs')
2880 res_str += "|= Process output............ %s\n"\
2881 %f(timings['Process_output'],'%.3gs')
2882 res_str += "|= HELAS+model compilation... %s\n"\
2883 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
2884 res_str += "|= Process compilation....... %s\n"\
2885 %f(timings['Process_compilation'],'%.3gs')
2886 res_str += "|= Initialization............ %s\n"\
2887 %f(timings['Initialization'],'%.3gs')
2888
2889 res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
2890 %(timings['run_unpolarized_total']*1000.0)
2891 if loop_optimized_output:
2892 coef_time=timings['run_unpolarized_coefs']*1000.0
2893 loop_time=(timings['run_unpolarized_total']-\
2894 timings['run_unpolarized_coefs'])*1000.0
2895 total=coef_time+loop_time
2896 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2897 %(coef_time,int(round(100.0*coef_time/total)))
2898 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2899 %(loop_time,int(round(100.0*loop_time/total)))
2900 res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
2901 %(timings['run_polarized_total']*1000.0)
2902 if loop_optimized_output:
2903 coef_time=timings['run_polarized_coefs']*1000.0
2904 loop_time=(timings['run_polarized_total']-\
2905 timings['run_polarized_coefs'])*1000.0
2906 total=coef_time+loop_time
2907 res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
2908 %(coef_time,int(round(100.0*coef_time/total)))
2909 res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
2910 %(loop_time,int(round(100.0*loop_time/total)))
2911 res_str += "\n= Miscellaneous ========================\n"
2912 res_str += "|= Number of hel. computed... %s/%s\n"\
2913 %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
2914 res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
2915 if loop_optimized_output:
2916 res_str += "|= Number of loop groups..... %s\n"\
2917 %f(timings['n_loop_groups'],'%d')
2918 res_str += "|= Number of loop wfs........ %s\n"\
2919 %f(timings['n_loop_wfs'],'%d')
2920 if timings['loop_wfs_ranks']!=None:
2921 for i, r in enumerate(timings['loop_wfs_ranks']):
2922 res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
2923 res_str += "|= Loading time (Color data). ~%.3gms\n"\
2924 %(timings['Booting_time']*1000.0)
2925 res_str += "|= Maximum RAM usage (rss)... %s\n"\
2926 %f(float(timings['ram_usage']/1000.0),'%.3gMb')
2927 res_str += "\n= Output disk size =====================\n"
2928 res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
2929 res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
2930 res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
2931 res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')
2932
2933 return res_str
2934
2936 """Present the results of a comparison in a nice list format.
2937 In 'short' mode, return the number of failed processes.
2938 """
2939 proc_col_size = 17
2940 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
2941 if pert_coupl:
2942 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
2943 else:
2944 process_header = "Process"
2945
2946 if len(process_header) + 1 > proc_col_size:
2947 proc_col_size = len(process_header) + 1
2948
2949 for proc in comparison_results:
2950 if len(proc['process'].base_string()) + 1 > proc_col_size:
2951 proc_col_size = len(proc['process'].base_string()) + 1
2952
2953 col_size = 18
2954
2955 pass_proc = 0
2956 fail_proc = 0
2957 no_check_proc = 0
2958
2959 failed_proc_list = []
2960 no_check_proc_list = []
2961
2962 res_str = fixed_string_length(process_header, proc_col_size) + \
2963 fixed_string_length("Min element", col_size) + \
2964 fixed_string_length("Max element", col_size) + \
2965 fixed_string_length("Relative diff.", col_size) + \
2966 "Result"
2967
2968 for result in comparison_results:
2969 proc = result['process'].base_string()
2970 values = result['values']
2971
2972 if len(values) <= 1:
2973 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
2974 " * No permutations, process not checked *"
2975 no_check_proc += 1
2976 no_check_proc_list.append(result['process'].nice_string())
2977 continue
2978
2979 passed = result['passed']
2980
2981 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
2982 fixed_string_length("%1.10e" % min(values), col_size) + \
2983 fixed_string_length("%1.10e" % max(values), col_size) + \
2984 fixed_string_length("%1.10e" % result['difference'],
2985 col_size)
2986 if passed:
2987 pass_proc += 1
2988 res_str += "Passed"
2989 else:
2990 fail_proc += 1
2991 failed_proc_list.append(result['process'].nice_string())
2992 res_str += "Failed"
2993
2994 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
2995 (pass_proc, pass_proc + fail_proc,
2996 fail_proc, pass_proc + fail_proc)
2997
2998 if fail_proc != 0:
2999 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3000 if no_check_proc != 0:
3001 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
3002
3003 return res_str
3004
3006 """Helper function to fix the length of a string by cutting it
3007 or adding extra space."""
3008
3009 if len(mystr) > length:
3010 return mystr[0:length]
3011 else:
3012 return mystr + " " * (length - len(mystr))
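# e.g. fixed_string_length("Passed", 10) returns "Passed    " (padded to 10
# characters) while fixed_string_length("1.2345678901e-05", 10) returns
# "1.23456789" (truncated).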
3013
3014
3015
3016
3017
3018 def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
3019 options=None, output_path=None, cmd = FakeInterface()):
3020 """Check gauge invariance of the processes by using the BRS check.
3021 For one of the massless external bosons (e.g. gluon or photon),
3022 replace the polarization vector (epsilon_mu) with its momentum (p_mu)
3023 """
3024 cmass_scheme = cmd.options['complex_mass_scheme']
3025 if isinstance(processes, base_objects.ProcessDefinition):
3026
3027
3028 multiprocess = processes
3029
3030 model = multiprocess.get('model')
3031
3032 if multiprocess.get('perturbation_couplings')==[]:
3033 evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
3034 auth_skipping = True, reuse = False)
3035 else:
3036 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3037 cmd=cmd,model=model, param_card=param_card,
3038 auth_skipping = False, reuse = False,
3039 output_path=output_path)
3040
3041 if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
3042
3043 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3044 for particle in evaluator.full_model.get('particles'):
3045 if particle.get('width') != 'ZERO':
3046 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3047 results = run_multiprocs_no_crossings(check_gauge_process,
3048 multiprocess,
3049 evaluator,
3050 options=options
3051 )
3052
3053 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3054
3055 clean_up(output_path)
3056
3057 return results
3058
3059 elif isinstance(processes, base_objects.Process):
3060 processes = base_objects.ProcessList([processes])
3061 elif isinstance(processes, base_objects.ProcessList):
3062 pass
3063 else:
3064 raise InvalidCmd("processes is of an unsupported format")
3065
3066 assert processes, "No processes given"
3067
3068 model = processes[0].get('model')
3069
3070
3071 if processes[0].get('perturbation_couplings')==[]:
3072 evaluator = MatrixElementEvaluator(model, param_card,
3073 auth_skipping = True, reuse = False,
3074 cmd = cmd)
3075 else:
3076 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3077 model=model, param_card=param_card,
3078 auth_skipping = False, reuse = False,
3079 output_path=output_path, cmd = cmd)
3080 comparison_results = []
3081 comparison_explicit_flip = []
3082
3083
3084 for process in processes:
3085
3086
3087
3088
3089
3090
3091
3092
3093
3094 result = check_gauge_process(process, evaluator,options=options)
3095 if result:
3096 comparison_results.append(result)
3097
3098 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3099
3100 clean_up(output_path)
3101
3102 return comparison_results
3103
3106 """Check gauge invariance for the process, unless it is already done."""
3107
3108 model = process.get('model')
3109
3110
3111 found_gauge = False
3112 for i, leg in enumerate(process.get('legs')):
3113 part = model.get_particle(leg.get('id'))
3114 if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
3115 found_gauge = True
3116 break
3117 if not found_gauge:
3118 logger.info("No Ward identity for %s" % \
3119 process.nice_string().replace('Process', 'process'))
3120
3121 return None
3122
3123 for i, leg in enumerate(process.get('legs')):
3124 leg.set('number', i+1)
3125
3126 logger.info("Checking Ward identities for %s" % \
3127 process.nice_string().replace('Process', 'process'))
3128
3129 legs = process.get('legs')
3130
3131
3132 try:
3133 if process.get('perturbation_couplings')==[]:
3134 amplitude = diagram_generation.Amplitude(process)
3135 else:
3136 amplitude = loop_diagram_generation.LoopAmplitude(process)
3137 except InvalidCmd:
3138 logging.info("No diagrams for %s" % \
3139 process.nice_string().replace('Process', 'process'))
3140 return None
3141 if not amplitude.get('diagrams'):
3142
3143 logging.info("No diagrams for %s" % \
3144 process.nice_string().replace('Process', 'process'))
3145 return None
3146
3147 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3148 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3149 gen_color = False)
3150 else:
3151 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3152 optimized_output=evaluator.loop_optimized_output)
3153
3154
3155
3156
3157
3158
3159
3160
3161 brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
3162 output='jamp', options=options)
3163
3164 if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
3165 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3166 gen_color = False)
3167
3168 mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
3169 output='jamp', options=options)
3170
3171 if mvalue and mvalue['m2']:
3172 return {'process':process,'value':mvalue,'brs':brsvalue}
3173
3175 """Present the results of a comparison in a nice list format"""
3176
3177 proc_col_size = 17
3178
3179 pert_coupl = comparison_results[0]['process']['perturbation_couplings']
3180
3181
3182 if pert_coupl:
3183 threshold=1e-5
3184 else:
3185 threshold=1e-10
3186
3187 if pert_coupl:
3188 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
3189 else:
3190 process_header = "Process"
3191
3192 if len(process_header) + 1 > proc_col_size:
3193 proc_col_size = len(process_header) + 1
3194
3195 for one_comp in comparison_results:
3196 proc = one_comp['process'].base_string()
3197 mvalue = one_comp['value']
3198 brsvalue = one_comp['brs']
3199 if len(proc) + 1 > proc_col_size:
3200 proc_col_size = len(proc) + 1
3201
3202 col_size = 18
3203
3204 pass_proc = 0
3205 fail_proc = 0
3206
3207 failed_proc_list = []
3208 no_check_proc_list = []
3209
3210 res_str = fixed_string_length(process_header, proc_col_size) + \
3211 fixed_string_length("matrix", col_size) + \
3212 fixed_string_length("BRS", col_size) + \
3213 fixed_string_length("ratio", col_size) + \
3214 "Result"
3215
3216 for one_comp in comparison_results:
3217 proc = one_comp['process'].base_string()
3218 mvalue = one_comp['value']
3219 brsvalue = one_comp['brs']
3220 ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
3221 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
3222 fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
3223 fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
3224 fixed_string_length("%1.10e" % ratio, col_size)
3225
3226 if ratio > threshold:
3227 fail_proc += 1
3228 proc_succeed = False
3229 failed_proc_list.append(proc)
3230 res_str += "Failed"
3231 else:
3232 pass_proc += 1
3233 proc_succeed = True
3234 res_str += "Passed"
3235
3236
3237
3238
3239
3240 if len(mvalue['jamp'])!=0:
3241 for k in range(len(mvalue['jamp'][0])):
3242 m_sum = 0
3243 brs_sum = 0
3244
3245 for j in range(len(mvalue['jamp'])):
3246
3247 m_sum += abs(mvalue['jamp'][j][k])**2
3248 brs_sum += abs(brsvalue['jamp'][j][k])**2
3249
3250
3251 if not m_sum:
3252 continue
3253 ratio = abs(brs_sum) / abs(m_sum)
3254
3255 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
3256 fixed_string_length("%1.10e" % m_sum, col_size) + \
3257 fixed_string_length("%1.10e" % brs_sum, col_size) + \
3258 fixed_string_length("%1.10e" % ratio, col_size)
3259
3260 if ratio > 1e-15:
3261 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
3262 fail_proc += 1
3263 pass_proc -= 1
3264 failed_proc_list.append(proc)
3265 res_str += tmp_str + "Failed"
3266 elif not proc_succeed:
3267 res_str += tmp_str + "Passed"
3268
3269
3270 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
3271 (pass_proc, pass_proc + fail_proc,
3272 fail_proc, pass_proc + fail_proc)
3273
3274 if fail_proc != 0:
3275 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
3276
3277 if output=='text':
3278 return res_str
3279 else:
3280 return fail_proc
3281
3282
3283
3284 def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
3285 reuse = False, output_path=None, cmd = FakeInterface()):
3286 """ Check that the squared matrix element (summed over helicities) is Lorentz
3287 invariant by boosting the momenta along different directions."""
3288
3289 cmass_scheme = cmd.options['complex_mass_scheme']
3290 if isinstance(processes, base_objects.ProcessDefinition):
3291
3292
3293 multiprocess = processes
3294 model = multiprocess.get('model')
3295
3296 if multiprocess.get('perturbation_couplings')==[]:
3297 evaluator = MatrixElementEvaluator(model,
3298 cmd= cmd, auth_skipping = False, reuse = True)
3299 else:
3300 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3301 model=model, auth_skipping = False, reuse = True,
3302 output_path=output_path, cmd = cmd)
3303
3304 if not cmass_scheme and processes.get('perturbation_couplings')==[]:
3305
3306 logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3307 for particle in evaluator.full_model.get('particles'):
3308 if particle.get('width') != 'ZERO':
3309 evaluator.full_model.get('parameter_dict')[\
3310 particle.get('width')] = 0.
3311
3312 results = run_multiprocs_no_crossings(check_lorentz_process,
3313 multiprocess,
3314 evaluator,
3315 options=options)
3316
3317 if multiprocess.get('perturbation_couplings')!=[] and not reuse:
3318
3319 clean_up(output_path)
3320
3321 return results
3322
3323 elif isinstance(processes, base_objects.Process):
3324 processes = base_objects.ProcessList([processes])
3325 elif isinstance(processes, base_objects.ProcessList):
3326 pass
3327 else:
3328 raise InvalidCmd("processes is of an unsupported format")
3329
3330 assert processes, "No processes given"
3331
3332 model = processes[0].get('model')
3333
3334
3335 if processes[0].get('perturbation_couplings')==[]:
3336 evaluator = MatrixElementEvaluator(model, param_card,
3337 auth_skipping = False, reuse = True,
3338 cmd=cmd)
3339 else:
3340 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
3341 model=model,param_card=param_card,
3342 auth_skipping = False, reuse = True,
3343 output_path=output_path, cmd = cmd)
3344
3345 comparison_results = []
3346
3347
3348 for process in processes:
3349
3350
3351
3352
3353
3354
3355
3356
3357
3358 result = check_lorentz_process(process, evaluator,options=options)
3359 if result:
3360 comparison_results.append(result)
3361
3362 if processes[0].get('perturbation_couplings')!=[] and not reuse:
3363
3364 clean_up(output_path)
3365
3366 return comparison_results
3367
3370 """Check Lorentz invariance for the process, unless it is already done."""
3371
3372 amp_results = []
3373 model = process.get('model')
3374
3375 for i, leg in enumerate(process.get('legs')):
3376 leg.set('number', i+1)
3377
3378 logger.info("Checking Lorentz transformations for %s" % \
3379 process.nice_string().replace('Process:', 'process'))
3380
3381 legs = process.get('legs')
3382
3383
3384 try:
3385 if process.get('perturbation_couplings')==[]:
3386 amplitude = diagram_generation.Amplitude(process)
3387 else:
3388 amplitude = loop_diagram_generation.LoopAmplitude(process)
3389 except InvalidCmd:
3390 logging.info("No diagrams for %s" % \
3391 process.nice_string().replace('Process', 'process'))
3392 return None
3393
3394 if not amplitude.get('diagrams'):
3395
3396 logging.info("No diagrams for %s" % \
3397 process.nice_string().replace('Process', 'process'))
3398 return None
3399
3400
3401 p, w_rambo = evaluator.get_momenta(process, options)
3402
3403
3404 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3405 matrix_element = helas_objects.HelasMatrixElement(amplitude,
3406 gen_color = True)
3407 else:
3408 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
3409 optimized_output = evaluator.loop_optimized_output)
3410
3411 MLOptions = {'ImprovePS':True,'ForceMP':True}
3412 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3413 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3414 auth_skipping = True, options=options)
3415 else:
3416 data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
3417 auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
3418 options = options)
3419
3420 if data and data['m2']:
3421 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3422 results = [data]
3423 else:
3424 results = [('Original evaluation',data)]
3425 else:
3426 return {'process':process, 'results':'pass'}
3427
3428
3429
3430
3431 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
3432 for boost in range(1,4):
3433 boost_p = boost_momenta(p, boost)
3434 results.append(evaluator.evaluate_matrix_element(matrix_element,
3435 p=boost_p,output='jamp'))
3436 else:
3437
3438 boost_p = boost_momenta(p, 3)
3439 results.append(('Z-axis boost',
3440 evaluator.evaluate_matrix_element(matrix_element, options=options,
3441 p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
3442
3443
3444
3445
3446 if not options['events']:
3447 boost_p = boost_momenta(p, 1)
3448 results.append(('X-axis boost',
3449 evaluator.evaluate_matrix_element(matrix_element, options=options,
3450 p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
3451 boost_p = boost_momenta(p, 2)
3452 results.append(('Y-axis boost',
3453 evaluator.evaluate_matrix_element(matrix_element,options=options,
3454 p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
3455
3456
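# The two checks below rotate all momenta about the z axis: (px,py) -> (-py,px)
# is a rotation by pi/2 and (px,py) -> ((px-py)/sqrt(2),(px+py)/sqrt(2)) one by
# pi/4; the helicity-summed |M|^2 must be unchanged in both cases.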
3457 rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
3458 results.append(('Z-axis pi/2 rotation',
3459 evaluator.evaluate_matrix_element(matrix_element,options=options,
3460 p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
3461
3462 sq2 = math.sqrt(2.0)
3463 rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
3464 results.append(('Z-axis pi/4 rotation',
3465 evaluator.evaluate_matrix_element(matrix_element,options=options,
3466 p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))
3467
3468
3469 return {'process': process, 'results': results}
3470
3471
3472
3473
3474 def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
3475 options=None, tir={}, output_path=None,
3476 cuttools="", reuse=False, cmd = FakeInterface()):
3477 """Check gauge invariance of the processes by comparing their evaluation
3478 in unitary and in Feynman gauge.
3479 """
3480
3481 mg_root = cmd._mgme_dir
3482
3483 cmass_scheme = cmd.options['complex_mass_scheme']
3484
3485 if isinstance(processes_unit, base_objects.ProcessDefinition):
3486
3487
3488 multiprocess_unit = processes_unit
3489 model = multiprocess_unit.get('model')
3490
3491
3492
3493 loop_optimized_bu = cmd.options['loop_optimized_output']
3494 if processes_unit.get('squared_orders'):
3495 if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
3496 cmd.options['loop_optimized_output'] = True
3497 else:
                raise InvalidCmd("The gauge test cannot be performed for a "+
                  "process with corrections beyond QCD which also "+
                  "specifies squared order constraints.")
3501 else:
3502 cmd.options['loop_optimized_output'] = False
3503
3504 aloha.unitary_gauge = True
3505 if processes_unit.get('perturbation_couplings')==[]:
3506 evaluator = MatrixElementEvaluator(model, param_card,
3507 cmd=cmd,auth_skipping = False, reuse = True)
3508 else:
3509 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3510 cmd=cmd, model=model,
3511 param_card=param_card,
3512 auth_skipping = False,
3513 output_path=output_path,
3514 reuse = False)
3515 if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
            logger.info('Setting all widths to zero for non-complex-mass-scheme checks')
3517 for particle in evaluator.full_model.get('particles'):
3518 if particle.get('width') != 'ZERO':
3519 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3520
3521 output_u = run_multiprocs_no_crossings(get_value,
3522 multiprocess_unit,
3523 evaluator,
3524 options=options)
3525
3526 clean_added_globals(ADDED_GLOBAL)
3527
3528 if processes_unit.get('perturbation_couplings')!=[]:
3529 clean_up(output_path)
3530
3531 momentum = {}
3532 for data in output_u:
3533 momentum[data['process']] = data['p']
3534
3535 multiprocess_feynm = processes_feynm
3536 model = multiprocess_feynm.get('model')
3537
3538
3539 aloha.unitary_gauge = False
3540
3541
3542 cmd.options['loop_optimized_output'] = True
3543 if processes_feynm.get('perturbation_couplings')==[]:
3544 evaluator = MatrixElementEvaluator(model, param_card,
3545 cmd= cmd, auth_skipping = False, reuse = False)
3546 else:
3547 evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
3548 cmd= cmd, model=model,
3549 param_card=param_card,
3550 auth_skipping = False,
3551 output_path=output_path,
3552 reuse = False)
3553
3554 if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
3555
3556 for particle in evaluator.full_model.get('particles'):
3557 if particle.get('width') != 'ZERO':
3558 evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.
3559
3560 output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
3561 evaluator, momentum,
3562 options=options)
3563 output = [processes_unit]
3564 for data in output_f:
3565 local_dico = {}
3566 local_dico['process'] = data['process']
3567 local_dico['value_feynm'] = data['value']
3568 local_dico['value_unit'] = [d['value'] for d in output_u
3569 if d['process'] == data['process']][0]
3570 output.append(local_dico)
3571
3572 if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
3573
3574 clean_up(output_path)
3575
3576
3577 cmd.options['loop_optimized_output'] = loop_optimized_bu
3578
3579 return output
3580
3581
3582
3583
3584 else:
        raise InvalidCmd("The processes are not given in a supported format")
3586
3592 """Check complex mass scheme consistency in the offshell region of s-channels
3593 detected for this process, by varying the expansion paramer consistently
3594 with the corresponding width and making sure that the difference between
3595 the complex mass-scheme and the narrow-width approximation is higher order.
3596 """
3597
3598 if not isinstance(process_line, str):
        raise InvalidCmd("Process definition must be given as a string for this check")
3600
3601
3602 cmd.do_set('complex_mass_scheme False', log=False)
3603
3604 multiprocess_nwa = cmd.extract_process(process_line)
3605
3606
3607 has_FRdecay = os.path.isfile(pjoin(cmd._curr_model.get('modelpath'),
3608 'decays.py'))
3609
3610
3611 missing_perturbations = cmd._curr_model.get_coupling_orders()-\
3612 set(multiprocess_nwa.get('perturbation_couplings'))
3613
3614 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3615 len(missing_perturbations)>0:
3616 logger.warning("------------------------------------------------------")
3617 logger.warning("The process considered does not specify the following "+
3618 "type of loops to be included : %s"%str(list(missing_perturbations)))
        logger.warning("Consequently, the CMS check will be unsuccessful if the"+
            " process involves any resonant particle whose LO decay is "+
            "mediated by one of these orders.")
3622 logger.warning("You can use the syntax '[virt=all]' to automatically"+
3623 " include all loops supported by the model.")
3624 logger.warning("------------------------------------------------------")
3625
3626 if len(multiprocess_nwa.get('perturbation_couplings'))>0 and \
3627 len(multiprocess_nwa.get('legs'))<=4:
3628 logger.warning("------------------------------------------------------")
3629 logger.warning("Processes with four or less external states are typically not"+\
3630 " sensitive to incorrect Complex Mass Scheme implementations.")
3631 logger.warning("You can test this sensitivity by making sure that the"+
3632 " same check on the leading-order counterpart of this process *fails*"+
3633 " when using the option '--diff_lambda_power=2'.")
3634 logger.warning("If it does not, then consider adding a massless "+
3635 "gauge vector to the external states.")
3636 logger.warning("------------------------------------------------------")
3637
3638 if options['recompute_width']=='auto':
3639 if multiprocess_nwa.get('perturbation_couplings')!=[]:
3640
3641 options['recompute_width'] = 'first_time'
3642 else:
3643 options['recompute_width'] = 'never'
3644
3645
3646 if options['recompute_width'] in ['first_time', 'always'] and \
3647 not has_FRdecay and not 'cached_widths' in options:
        logger.info('The LO widths will need to be recomputed but the '+
          'model considered does not appear to have a decay module.\nThe widths'+
          ' will have to be computed numerically, which will slow down the test.\n'+
          'Consider using a param_card that already specifies correct LO widths and'+
          " adding the option --recompute_width=never when doing this check.")
3653
3654 if options['recompute_width']=='never' and \
3655 any(order in multiprocess_nwa.get('perturbation_couplings') for order in
3656 options['expansion_orders']):
        logger.warning('You chose not to recompute the widths while including'+
          ' loop corrections. The check will be successful only if the widths'+
          ' specified in the default param_card are LO-accurate (remember that'+
          ' the default values of alpha_s and aEWM1 are 0.1 and 10.0 respectively).')
3662
3663
3664
3665
3666
3667 if options['recompute_width'] in ['first_time', 'always'] and has_FRdecay:
3668 modelname = cmd._curr_model.get('modelpath+restriction')
3669 with misc.MuteLogger(['madgraph'], ['INFO']):
3670 model = import_ufo.import_model(modelname, decay=True,
3671 complex_mass_scheme=False)
3672 multiprocess_nwa.set('model', model)
3673
3674 run_options = copy.deepcopy(options)
3675
3676
3677 if options['seed'] > 0:
3678 random.seed(options['seed'])
3679
3680
3681 run_options['param_card'] = param_card
3682 if isinstance(cmd, FakeInterface):
3683 raise MadGraph5Error, "Check CMS cannot be run with a FakeInterface."
3684 run_options['cmd'] = cmd
3685 run_options['MLOptions'] = MLOptions
3686 if output_path:
3687 run_options['output_path'] = output_path
3688 else:
3689 run_options['output_path'] = cmd._mgme_dir
3690
3691
3692 run_options['has_FRdecay'] = has_FRdecay
3693
3694
3695 if 'cached_widths' not in run_options:
3696 run_options['cached_widths'] = {}
3697
3698
3699 run_options['cached_param_card'] = {'NWA':[None,None],'CMS':[None,None]}
3700
3701 if options['tweak']['name']:
3702 logger.info("Now running the CMS check for tweak '%s'"\
3703 %options['tweak']['name'])
3704
3705 model = multiprocess_nwa.get('model')
3706
3707 for particle in model.get('particles'):
3708 mass_param = model.get_parameter(particle.get('mass'))
3709 if particle.get('mass')!='ZERO' and 'external' not in mass_param.depend:
3710 if model.get('name') not in ['sm','loop_sm']:
3711 logger.warning("The mass '%s' of particle '%s' is not an external"%\
3712 (model.get_parameter(particle.get('mass')).name,particle.get('name'))+\
3713 " parameter as required by this check. \nMG5_aMC will try to"+\
3714 " modify the model to remedy the situation. No guarantee.")
3715 status = model.change_electroweak_mode(set(['mz','mw','alpha']))
3716 if not status:
3717 raise InvalidCmd('The EW scheme could apparently not be changed'+\
3718 ' so as to have the W-boson mass external. The check cannot'+\
3719 ' proceed.')
3720 break
3721
3722 veto_orders = [order for order in model.get('coupling_orders') if \
3723 order not in options['expansion_orders']]
3724 if len(veto_orders)>0:
        logger.warning('You did not define any parameter scaling rule for the'+
          " coupling orders %s. They will be "%','.join(veto_orders)+
          "forced to zero in the tests. Consider adding the scaling rule to "+
          "avoid this (see option '--cms' in 'help check').")
3729 for order in veto_orders:
            multiprocess_nwa.get('orders')[order] = 0
3731 multiprocess_nwa.set('perturbation_couplings', [order for order in
3732 multiprocess_nwa['perturbation_couplings'] if order not in veto_orders])
3733
3734 if multiprocess_nwa.get('perturbation_couplings')==[]:
3735 evaluator = MatrixElementEvaluator(model, param_card,
3736 cmd=cmd,auth_skipping = False, reuse = True)
3737 else:
3738 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3739 cmd=cmd, model=model,
3740 param_card=param_card,
3741 auth_skipping = False,
3742 output_path=output_path,
3743 reuse = False)
3744
3745 cached_information = []
3746 output_nwa = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3747 multiprocess_nwa,
3748 evaluator,
3749
3750
3751
3752
3753
3754 opt = cached_information,
3755 options=run_options)
3756
3757
3758 clean_added_globals(ADDED_GLOBAL)
3759
3760
3761 cmd.do_set('complex_mass_scheme True', log=False)
3762
3763
3764 multiprocess_cms = cmd.extract_process(process_line)
3765 model = multiprocess_cms.get('model')
3766
3767 if len(veto_orders)>0:
3768 for order in veto_orders:
            multiprocess_cms.get('orders')[order] = 0
3770 multiprocess_cms.set('perturbation_couplings', [order for order in
3771 multiprocess_cms['perturbation_couplings'] if order not in veto_orders])
3772
3773 if multiprocess_cms.get('perturbation_couplings')==[]:
3774 evaluator = MatrixElementEvaluator(model, param_card,
3775 cmd=cmd,auth_skipping = False, reuse = True)
3776 else:
3777 evaluator = LoopMatrixElementTimer(cuttools_dir=cuttools,tir_dir=tir,
3778 cmd=cmd, model=model,
3779 param_card=param_card,
3780 auth_skipping = False,
3781 output_path=output_path,
3782 reuse = False)
3783
3784 output_cms = run_multiprocs_no_crossings(check_complex_mass_scheme_process,
3785 multiprocess_cms,
3786 evaluator,
3787
3788 opt = dict(cached_information),
3789 options=run_options)
3790
3791 if multiprocess_cms.get('perturbation_couplings')!=[] and not options['reuse']:
3792
3793 clean_up(output_path)
3794
3795
3796
3797
3798 result = {'ordered_processes':[],'lambdaCMS':options['lambdaCMS']}
3799
3800 result['perturbation_orders']=multiprocess_nwa.get('perturbation_couplings')
3801 for i, proc_res in enumerate(output_nwa):
3802 result['ordered_processes'].append(proc_res[0])
3803 result[proc_res[0]] = {
3804 'NWA':proc_res[1]['resonances_result'],
3805 'CMS':output_cms[i][1]['resonances_result'],
3806 'born_order':proc_res[1]['born_order'],
3807 'loop_order':proc_res[1]['loop_order']}
3808
3809
3810
3811 options['cached_widths'] = run_options['cached_widths']
3812
3813
3814 result['recompute_width'] = options['recompute_width']
3815 result['has_FRdecay'] = has_FRdecay
3816 result['widths_computed'] = []
3817 cached_widths = sorted(options['cached_widths'].items(), key=lambda el: \
3818 abs(el[0][0]))
3819 for (pdg, lambda_value), width in cached_widths:
3820 if lambda_value != 1.0:
3821 continue
3822 result['widths_computed'].append((model.get_particle(pdg).get_name(),
3823 width))
3824
3825
3826 clean_added_globals(ADDED_GLOBAL)
3827
3828 return result
3829
3834 """Check CMS for the process in argument. The options 'opt' is quite important.
3835 When opt is a list, it means that we are doing NWA and we are filling the
3836 list with the following tuple
3837 ('proc_name',({'ParticlePDG':ParticlePDG,
3838 'FinalStateMothersNumbers':set([]),
3839 'PS_point_used':[]},...))
3840 When opt is a dictionary, we are in the CMS mode and it will be reused then.
3841 """
3842
3843
3844
3845 NLO = process.get('perturbation_couplings') != []
3846
3847 def glue_momenta(production, decay):
3848 """ Merge together the kinematics for the production of particle
3849 positioned last in the 'production' array with the 1>N 'decay' kinematic'
3850 provided where the decay particle is first."""
3851
3852 from MadSpin.decay import momentum
3853
3854 full = production[:-1]
3855
3856
3857
3858
3859
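        # Express the decay products (the first entry of 'decay' is the decaying
        # particle itself) in the production frame by boosting them with the
        # momentum of the resonance, i.e. the last entry of 'production'.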
3860 for p in decay[1:]:
3861 bp = momentum(*p).boost(momentum(*production[-1]))
3862 full.append([bp.E,bp.px,bp.py,bp.pz])
3863
3864 return full
3865
3866 def find_resonances(diagrams):
3867 """ Find all the resonances in the matrix element in argument """
3868
3869 model = process['model']
3870 resonances_found = []
3871
3872 for ll, diag in enumerate(diagrams):
3873 for amp in diag.get('amplitudes'):
3874
3875
3876 s_channels, t_channels = amp.\
3877 get_s_and_t_channels(process.get_ninitial(), model, 0)
3878
3879
3880 replacement_dict = {}
3881 for s_channel in s_channels:
3882 new_resonance = {
3883 'ParticlePDG':s_channel.get('legs')[-1].get('id'),
3884 'FSMothersNumbers':[],
3885 'PS_point_used':[]}
3886 for leg in s_channel.get('legs')[:-1]:
3887 if leg.get('number')>0:
3888 new_resonance['FSMothersNumbers'].append(
3889 leg.get('number'))
3890 else:
3891 try:
3892 new_resonance['FSMothersNumbers'].extend(
3893 replacement_dict[leg.get('number')])
3894 except KeyError:
3895 raise Exception, 'The following diagram '+\
3896 'is malformed:'+diag.nice_string()
3897
3898 replacement_dict[s_channel.get('legs')[-1].get('number')] = \
3899 new_resonance['FSMothersNumbers']
3900 new_resonance['FSMothersNumbers'] = set(
3901 new_resonance['FSMothersNumbers'])
3902 if new_resonance not in resonances_found:
3903 resonances_found.append(new_resonance)
3904
3905
3906 kept_resonances = []
3907 for resonance in resonances_found:
3908
3909 if resonance['ParticlePDG'] == 0:
3910 continue
3911
3912
3913 if abs(resonance['ParticlePDG']) in \
3914 [abs(l.get('id')) for l in process.get('legs')]:
3915 continue
3916
3917 mass_string = evaluator.full_model.get_particle(
3918 resonance['ParticlePDG']).get('mass')
3919 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
3920
3921 if mass==0.0:
3922 continue
3923
3924 width_string = evaluator.full_model.get_particle(
3925 resonance['ParticlePDG']).get('width')
3926 width = evaluator.full_model.get('parameter_dict')[width_string].real
3927
3928
3929 if width==0.0:
3930 continue
3931
3932 final_state_energy = sum(
3933 evaluator.full_model.get('parameter_dict')[
3934 evaluator.full_model.get_particle(l.get('id')).get('mass')].real
3935 for l in process.get('legs') if l.get('number') in
3936 resonance['FSMothersNumbers'])
3937
3938
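            # Invariant mass targeted for the resonance, shifted by the requested offshellness.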
3939 special_mass = (1.0 + options['offshellness'])*mass
3940
3941
3942 if special_mass<final_state_energy:
                raise InvalidCmd('The offshellness specified (%s) is such'\
                  %options['offshellness']+' that the resulting kinematic '+\
                  'configuration is impossible for resonance %s %s.'%(evaluator.full_model.
                  get_particle(resonance['ParticlePDG']).get_name(),
                  str(list(resonance['FSMothersNumbers']))))
3949
3950
3951 kept_resonances.append(resonance)
3952
3953 for resonance in kept_resonances:
3954
3955 set_PSpoint(resonance, force_other_res_offshell=kept_resonances)
3956
3957
3958
3959 return tuple(kept_resonances)
3960
3961 def set_PSpoint(resonance, force_other_res_offshell=[],
3962 allow_energy_increase=1.5, isolation_cuts=True):
3963 """ Starting from the specified resonance, construct a phase space point
3964 for it and possibly also enforce other resonances to be onshell. Possibly
3965 allow to progressively increase enregy by steps of the integer specified
3966 (negative float to forbid it) and possible enforce default isolation cuts
3967 as well."""
3968
3969 def invmass(momenta):
3970 """ Computes the invariant mass of a list of momenta."""
3971 ptot = [sum(p[i] for p in momenta) for i in range(4)]
3972 return math.sqrt(ptot[0]**2-ptot[1]**2-ptot[2]**2-ptot[3]**2)
3973
3974 model = evaluator.full_model
3975 def getmass(pdg):
3976 """ Returns the mass of a particle given the current model and its
3977 pdg given in argument."""
3978 return model.get('parameter_dict')[
3979 model.get_particle(pdg).get('mass')].real
3980
3981 N_trials = 0
3982 max_trial = 1e4
3983 nstep_for_energy_increase = 1e3
3984 PS_point_found = None
3985 if options['offshellness'] > 0.0:
3986 offshellness = options['offshellness']
3987 else:
3988
3989
3990
3991
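            # For a negative (below-resonance) offshellness, start from a more negative
            # value, (1+off) -> 0.25*(1+off), presumably to leave some margin with respect
            # to the offshellness requirement checked on the generated kinematics below.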
3992 offshellness = (0.25*(options['offshellness']+1.0))-1.0
3993
3994
3995
3996
3997 if options['offshellness'] < 0.0:
3998 energy_increase = math.sqrt(allow_energy_increase)
3999 else:
4000 energy_increase = allow_energy_increase
4001
4002 other_res_offshell = [res for res in force_other_res_offshell if
4003 res!=resonance]
4004
4005
4006
4007 all_other_res_masses = [getmass(res['ParticlePDG'])
4008 for res in other_res_offshell]
4009 resonance_mass = getmass(resonance['ParticlePDG'])
4010
4011 str_res = '%s %s'%(model.get_particle(
4012 resonance['ParticlePDG']).get_name(),
4013 str(list(resonance['FSMothersNumbers'])))
4014 leg_number_to_leg = dict((l.get('number'),l) for l in process.get('legs'))
4015
4016
4017
4018 daughter_masses = sum(getmass(leg_number_to_leg[\
4019 number].get('id')) for number in resonance['FSMothersNumbers'])
4020 min_offshellnes = 4.0*((daughter_masses*1.2)/resonance_mass)-1.0
4021
4022
4023
4024 min_energy = max(sum(getmass(l.get('id')) for l in \
4025 process.get('legs') if l.get('state')==True),
4026 sum(getmass(l.get('id')) for l in \
4027 process.get('legs') if l.get('state')==False))
4028
4029
4030
4031 daughter_offshellnesses = [(1.0+options['offshellness'])*mass
4032 for i, mass in enumerate(all_other_res_masses) if
4033 other_res_offshell[i]['FSMothersNumbers'].issubset(
4034 resonance['FSMothersNumbers'])]
4035
4036 if options['offshellness'] >= 0.0:
4037
4038 if len(daughter_offshellnesses)>0:
4039 max_mass = max(daughter_offshellnesses)
4040
4041 offshellness = max(2.0*(max_mass/resonance_mass)-1.0,
4042 options['offshellness'])
4043
4044 max_mass = max([(1.0+options['offshellness'])*mass for mass in \
4045 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4046
4047
4048
4049 target = max(min_energy*1.2,max_mass*2.0)
4050 if target > options['energy']:
4051 logger.warning("The user-defined energy %f seems "%options['energy']+
4052 " insufficient to reach the minimum propagator invariant mass "+
4053 "%f required for the chosen offshellness %f."%(max_mass,
4054 options['offshellness']) + " Energy reset to %f."%target)
4055 options['energy'] = target
4056
4057 else:
4058 if len(daughter_offshellnesses) > 0:
4059 min_mass = min(daughter_offshellnesses)
4060
4061 offshellness = min(0.25*(min_mass/resonance_mass)-1.0,
4062 options['offshellness'])
4063
4064
4065
4066 if (1.0+offshellness)*resonance_mass < daughter_masses*1.2:
                msg = 'The resonance %s cannot accommodate'%str_res+\
                  ' an offshellness of %f because the daughter'%options['offshellness']+\
                  ' masses sum to %f.'%daughter_masses
                if options['offshellness']<min_offshellnes:
                    msg += ' Try again with an offshellness of at least %f'%min_offshellnes+\
                           ' (i.e. smaller in absolute value).'
                else:
                    msg += ' Try again with a smaller offshellness (in absolute value).'
                raise InvalidCmd(msg)
4076
4077 min_mass = min([(1.0+options['offshellness'])*mass for mass in \
4078 all_other_res_masses]+[(1.0+offshellness)*resonance_mass])
4079
4080
4081 if 2.0*min_mass < options['energy']:
4082 new_energy = max(min_energy*1.2, 2.0*min_mass)
                logger.warning("The user-defined energy %f seems"%options['energy']+
                  " too large; it would overshoot the maximum propagator invariant mass "+
                  "%f required for the chosen offshellness %f."%(min_mass,
                  options['offshellness']) + " Energy reset to %f."%new_energy)
4087 options['energy'] = new_energy
4088
4089 if options['offshellness'] < 0.0 and options['energy'] >= min_mass:
4090 logger.debug("The target energy is not compatible with the mass"+
4091 " of the external states for this process (%f). It is "%min_mass+
4092 "unlikely that a valid kinematic configuration will be found.")
4093
4094 if options['offshellness']<0.0 and offshellness<options['offshellness'] or \
4095 options['offshellness']>0.0 and offshellness>options['offshellness']:
4096 logger.debug("Offshellness increased to %f"%offshellness+
4097 " so as to try to find a kinematical configuration with"+
4098 " offshellness at least equal to %f"%options['offshellness']+
4099 " for all resonances.")
4100
4101 start_energy = options['energy']
4102 while N_trials<max_trial:
4103 N_trials += 1
4104 if N_trials%nstep_for_energy_increase==0:
4105 if allow_energy_increase > 0.0:
4106 old_offshellness = offshellness
4107 if offshellness > 0.0:
4108 options['energy'] *= energy_increase
4109 offshellness *= energy_increase
4110 else:
4111 options['energy'] = max(options['energy']/energy_increase,
4112 min_energy*1.2)
4113 offshellness = max(min_offshellnes,
4114 ((offshellness+1.0)/energy_increase)-1.0)
4115 if old_offshellness!=offshellness:
4116 logger.debug('Trying to find a valid kinematic'+\
4117 " configuration for resonance '%s'"%str_res+\
4118 ' with increased offshellness %f'%offshellness)
4119
4120 candidate = get_PSpoint_for_resonance(resonance, offshellness)
4121 pass_offshell_test = True
4122 for i, res in enumerate(other_res_offshell):
4123
4124 if offshellness > 0.0:
4125 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) <\
4126 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4127 pass_offshell_test = False
4128 break
4129 else:
4130 if invmass([candidate[j-1] for j in res['FSMothersNumbers']]) >\
4131 ((1.0+options['offshellness'])*all_other_res_masses[i]):
4132 pass_offshell_test = False
4133 break
4134 if not pass_offshell_test:
4135 continue
4136
4137 if isolation_cuts:
4138
4139 if not evaluator.pass_isolation_cuts(candidate,
4140 ptcut=0.05*invmass([candidate[0],candidate[1]]), drcut=0.4):
4141 continue
4142 PS_point_found = candidate
4143 break
4144
4145
4146 options['energy'] = start_energy
4147
4148 if PS_point_found is None:
4149 err_msg = 'Could not find a valid PS point in %d'%max_trial+\
4150 ' trials. Try increasing the energy, modify the offshellness '+\
4151 'or relax some constraints.'
            if options['offshellness']<0.0:
                err_msg += ' Try with a positive offshellness instead (or a '+\
                           'negative one of smaller absolute value).'
4155 raise InvalidCmd, err_msg
4156 else:
4157
4158
4159 resonance['offshellnesses'] = []
4160 all_other_res_masses = [resonance_mass] + all_other_res_masses
4161 other_res_offshell = [resonance] + other_res_offshell
4162 for i, res in enumerate(other_res_offshell):
4163 if i==0:
4164 res_str = 'self'
4165 else:
4166 res_str = '%s %s'%(model.get_particle(
4167 res['ParticlePDG']).get_name(),
4168 str(list(res['FSMothersNumbers'])))
4169 resonance['offshellnesses'].append((res_str,(
4170 (invmass([PS_point_found[j-1] for j in
4171 res['FSMothersNumbers']])/all_other_res_masses[i])-1.0)))
4172
4173 resonance['PS_point_used'] = PS_point_found
4174
4175 def get_PSpoint_for_resonance(resonance, offshellness = options['offshellness']):
4176 """ Assigns a kinematic configuration to the resonance dictionary
4177 given in argument."""
4178
4179
4180 mass_string = evaluator.full_model.get_particle(
4181 resonance['ParticlePDG']).get('mass')
4182 mass = evaluator.full_model.get('parameter_dict')[mass_string].real
4183
4184
4185 special_mass = (1.0 + offshellness)*mass
4186
4187
4188 prod_proc = base_objects.Process({'legs':base_objects.LegList(
4189 copy.copy(leg) for leg in process.get('legs') if
4190 leg.get('number') not in resonance['FSMothersNumbers'])})
4191
4192
4193
4194 prod_proc.get('legs').append(base_objects.Leg({
4195 'number':max(l.get('number') for l in process.get('legs'))+1,
4196 'state':True,
4197 'id':0}))
4198
4199 decay_proc = base_objects.Process({'legs':base_objects.LegList(
4200 copy.copy(leg) for leg in process.get('legs') if leg.get('number')
4201 in resonance['FSMothersNumbers'] and not leg.get('state')==False)})
4202
4203
4204
4205
4206 decay_proc.get('legs').insert(0,base_objects.Leg({
4207 'number':-1,
4208 'state':False,
4209 'id':0}))
4210 prod_kinematic = evaluator.get_momenta(prod_proc, options=options,
4211 special_mass=special_mass)[0]
4212 decay_kinematic = evaluator.get_momenta(decay_proc, options=options,
4213 special_mass=special_mass)[0]
4214 momenta = glue_momenta(prod_kinematic,decay_kinematic)
4215
4216
4217
4218 ordered_momenta = [(prod_proc.get('legs')[i].get('number'),momenta[i])
4219 for i in range(len(prod_proc.get('legs'))-1)]
4220
4221 ordered_momenta += [(decay_proc.get('legs')[-i].get('number'),
4222 momenta[-i]) for i in range(1,len(decay_proc.get('legs')))]
4223
4224
4225 return [m[1] for m in sorted(ordered_momenta, key = lambda el: el[0])]
4226
4227
4228
4229 @misc.mute_logger()
4230 def get_width(PDG, lambdaCMS, param_card):
4231 """ Returns the width to use for particle with absolute PDG 'PDG' and
4232 for the the lambdaCMS value 'lambdaCMS' using the cache if possible."""
4233
4234
4235
4236 if abs(PDG) in [abs(leg.get('id')) for leg in process.get('legs')]:
4237 return 0.0
4238
4239 particle = evaluator.full_model.get_particle(PDG)
4240
4241
4242
4243 if particle.get('ghost') or particle.get('goldstone'):
4244 return 0.0
4245
4246
4247 if particle.get('width')=='ZERO':
4248 return 0.0
4249
4250 if (PDG,lambdaCMS) in options['cached_widths']:
4251 return options['cached_widths'][(PDG,lambdaCMS)]
4252
4253 if options['recompute_width'] == 'never':
4254 width = evaluator.full_model.\
4255 get('parameter_dict')[particle.get('width')].real
4256 else:
4257
4258 if aloha.complex_mass:
4259 raise MadGraph5Error, "The width for particle with PDG %d and"%PDG+\
4260 " lambdaCMS=%f should have already been "%lambdaCMS+\
4261 "computed during the NWA run."
4262
4263
4264 if options['recompute_width'] in ['always','first_time']:
4265 particle_name = particle.get_name()
4266 with misc.TMP_directory(dir=options['output_path']) as path:
4267 param_card.write(pjoin(path,'tmp.dat'))
4268
4269
4270
4271 command = '%s --output=%s'%(particle_name,pjoin(path,'tmp.dat'))+\
4272 ' --path=%s --body_decay=2'%pjoin(path,'tmp.dat')+\
4273 ' --precision_channel=0.001'
4274
4275 param_card.write(pjoin(options['output_path'],'tmp.dat'))
4276
4277
4278
4279 orig_model = options['cmd']._curr_model
4280 orig_fortran_model = options['cmd']._curr_fortran_model
4281 options['cmd'].do_compute_widths(command, evaluator.full_model)
4282
4283 options['cmd']._curr_model = orig_model
4284 options['cmd']._curr_fortran_model = orig_fortran_model
4285
4286
4287 evaluator.full_model.set_parameters_and_couplings(
4288 param_card=param_card)
4289 try:
4290 tmp_param_card = check_param_card.ParamCard(pjoin(path,'tmp.dat'))
4291 except:
4292 raise MadGraph5Error, 'Error occured during width '+\
4293 'computation with command:\n compute_widths %s'%command
4294 width = tmp_param_card['decay'].get(PDG).value
4295
4296
4297
4298
4299
4300
4301
4302 if options['recompute_width'] in ['never','first_time']:
4303
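            # The width computed at this lambdaCMS is propagated to all the other lambda
            # values of the scan, assuming (as done here) that it scales linearly with lambda.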
4304 for lam in options['lambdaCMS']:
4305 options['cached_widths'][(PDG,lam)]=width*(lam/lambdaCMS)
4306 else:
4307 options['cached_widths'][(PDG,lambdaCMS)] = width
4308
4309 return options['cached_widths'][(PDG,lambdaCMS)]
4310
4311 def get_order(diagrams, diagsName):
4312 """Compute the common summed of coupling orders used for this cms check
4313 in the diagrams specified. When inconsistency occurs, use orderName
4314 in the warning message if throwm."""
4315
4316 orders = set([])
4317 for diag in diagrams:
4318 diag_orders = diag.calculate_orders()
4319 orders.add(sum((diag_orders[order] if order in diag_orders else 0)
4320 for order in options['expansion_orders']))
4321 if len(orders)>1:
            logger.warning(warning_msg%('%s '%diagsName,str(orders)))
4323 return min(list(orders))
4324 else:
4325 return list(orders)[0]
4326
4327 MLoptions = copy.copy(options['MLOptions'])
4328
4329 MLoptions['DoubleCheckHelicityFilter'] = False
4330
4331
4332 for tweak in options['tweak']['custom']:
4333 if tweak.startswith('seed'):
4334 try:
4335 new_seed = int(tweak[4:])
4336 except ValueError:
4337 raise MadGraph5Error, "Seed '%s' is not of the right format 'seed<int>'."%tweak
4338 random.seed(new_seed)
4339
4340 mode = 'CMS' if aloha.complex_mass else 'NWA'
4341 for i, leg in enumerate(process.get('legs')):
4342 leg.set('number', i+1)
4343
4344 logger.info("Running CMS check for process %s (now doing %s scheme)" % \
4345 ( process.nice_string().replace('Process:', 'process'), mode))
4346
4347 proc_dir = None
4348 resonances = None
    warning_msg = "All %sdiagrams do not share the same sum of orders "+\
      "%s; found %%s."%(','.join(options['expansion_orders']))+\
      " This is potentially problematic for the CMS check."
4352 if NLO:
4353
4354
4355
4356 if options['name']=='auto':
4357 proc_name = "%s%s_%s%s__%s__"%(('SAVED' if options['reuse'] else ''),
4358 temp_dir_prefix, '_'.join(process.shell_string().split('_')[1:]),
4359 ('_' if process.get('perturbation_couplings') else '')+
4360 '_'.join(process.get('perturbation_couplings')),mode)
4361 else:
4362 proc_name = "%s%s_%s__%s__"%(('SAVED' if options['reuse'] else ''),
4363 temp_dir_prefix,options['name'], mode)
4364
4365 timing, matrix_element = generate_loop_matrix_element(process,
4366 options['reuse'], output_path=options['output_path'],
4367 cmd = options['cmd'], proc_name=proc_name,
4368 loop_filter=options['loop_filter'])
4369 if matrix_element is None:
4370
4371 return None
4372
4373 reusing = isinstance(matrix_element, base_objects.Process)
4374 proc_dir = pjoin(options['output_path'],proc_name)
4375
4376
4377 infos = evaluator.setup_process(matrix_element, proc_dir,
4378 reusing = reusing, param_card = options['param_card'],
4379 MLOptions=MLoptions)
4380
4381 evaluator.fix_MadLoopParamCard(pjoin(proc_dir,'Cards'),
4382 mp = None, loop_filter = True,MLOptions=MLoptions)
4383
4384
4385 tmp_card_backup = pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__')
4386 if os.path.isfile(tmp_card_backup):
4387
4388 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4389 " Now reverting 'param_card.dat' to its original value.")
4390 shutil.copy(tmp_card_backup, pjoin(proc_dir, 'Cards','param_card.dat'))
4391 else:
4392
4393 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'), tmp_card_backup)
4394
4395 tmp_modelfunc_backup = pjoin(proc_dir,'Source','MODEL',
4396 'model_functions.f__TemporaryBackup__')
4397 if os.path.isfile(tmp_modelfunc_backup):
4398
4399 logger.info("Last run in process '%s' apparently aborted."%proc_dir+\
4400 " Now reverting 'model_functions.f' to its original value.")
4401 shutil.copy(tmp_modelfunc_backup, pjoin(proc_dir,'Source','MODEL',
4402 'model_functions.f'))
4403 evaluator.apply_log_tweak(proc_dir, 'recompile')
4404 else:
4405
4406 shutil.copy(pjoin(proc_dir,'Source','MODEL','model_functions.f'),
4407 tmp_modelfunc_backup)
4408
4409
4410 MadLoopInitializer.fix_PSPoint_in_check(pjoin(proc_dir,'SubProcesses'),
4411 read_ps = True, npoints = 1, hel_config = options['helicity'],
4412 split_orders=options['split_orders'])
4413
4414
4415
4416 for dir in glob.glob(pjoin(proc_dir,'SubProcesses','P*_*')):
4417 if not (re.search(r'.*P\d+_\w*$', dir) or not os.path.isdir(dir)):
4418 continue
4419 try:
4420 os.remove(pjoin(dir,'check'))
4421 os.remove(pjoin(dir,'check_sa.o'))
4422 except OSError:
4423 pass
4424
4425 with open(os.devnull, 'w') as devnull:
4426 retcode = subprocess.call(['make','check'],
4427 cwd=dir, stdout=devnull, stderr=devnull)
4428 if retcode != 0:
4429 raise MadGraph5Error, "Compilation error with "+\
4430 "'make check' in %s"%dir
4431
4432
4433 pkl_path = pjoin(proc_dir,'resonance_specs.pkl')
4434 if reusing:
4435
4436
4437 if not os.path.isfile(pkl_path):
4438 raise InvalidCmd('The folder %s could'%proc_dir+\
4439 " not be reused because the resonance specification file "+
4440 "'resonance_specs.pkl' is missing.")
4441 else:
4442 proc_name, born_order, loop_order, resonances = \
4443 save_load_object.load_from_file(pkl_path)
4444
4445
4446 for res in resonances:
4447 set_PSpoint(res, force_other_res_offshell=resonances)
4448
4449
4450 if isinstance(opt, list):
4451 opt.append((proc_name, resonances))
4452 else:
4453 resonances = opt
4454 else:
4455 helas_born_diagrams = matrix_element.get_born_diagrams()
4456 if len(helas_born_diagrams)==0:
4457 logger.warning('The CMS check for loop-induced process is '+\
4458 'not yet available (nor is it very interesting).')
4459 return None
4460 born_order = get_order(helas_born_diagrams,'Born')
4461 loop_order = get_order(matrix_element.get_loop_diagrams(),'loop')
4462
4463
4464 if isinstance(opt, list):
4465 opt.append((process.base_string(),find_resonances(helas_born_diagrams)))
4466 resonances = opt[-1][1]
4467 else:
4468 resonances = opt
4469
4470
4471 save_load_object.save_to_file(pkl_path, (process.base_string(),
4472 born_order, loop_order,resonances))
4473
4474 else:
4475
4476 try:
4477 amplitude = diagram_generation.Amplitude(process)
4478 except InvalidCmd:
4479 logging.info("No diagrams for %s" % \
4480 process.nice_string().replace('Process', 'process'))
4481 return None
4482 if not amplitude.get('diagrams'):
4483
4484 logging.info("No diagrams for %s" % \
4485 process.nice_string().replace('Process', 'process'))
4486 return None
4487
4488 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4489 gen_color=True)
4490 diagrams = matrix_element.get('diagrams')
4491 born_order = get_order(diagrams,'Born')
4492
4493 loop_order = -1
4494
4495 if isinstance(opt, list):
4496 opt.append((process.base_string(),find_resonances(diagrams)))
4497 resonances = opt[-1][1]
4498 else:
4499 resonances= opt
4500
4501 if len(resonances)==0:
4502 logger.info("No resonance found for process %s."\
4503 %process.base_string())
4504 return None
4505
4506
4507 if not options['cached_param_card'][mode][0]:
4508 if NLO:
4509 param_card = check_param_card.ParamCard(
4510 pjoin(proc_dir,'Cards','param_card.dat'))
4511 else:
4512 param_card = check_param_card.ParamCard(
4513 StringIO.StringIO(evaluator.full_model.write_param_card()))
4514 options['cached_param_card'][mode][0] = param_card
4515 name2block, _ = param_card.analyze_param_card()
4516 options['cached_param_card'][mode][1] = name2block
4517
4518 else:
4519 param_card = options['cached_param_card'][mode][0]
4520 name2block = options['cached_param_card'][mode][1]
4521
4522
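    # The Born*loop interference carries coupling order born_order+loop_order, which must
    # be even so that the amplitude-level order stored below (half of it) is an integer.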
4523 if loop_order != -1 and (loop_order+born_order)%2 != 0:
4524 raise MadGraph5Error, 'The summed squared matrix element '+\
4525 " order '%d' is not even."%(loop_order+born_order)
4526 result = {'born_order':born_order,
              'loop_order': (-1 if loop_order==-1 else (loop_order+born_order)//2),
4528 'resonances_result':[]}
4529
4530
4531 if NLO:
4532 try:
4533 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat'),
4534 pjoin(proc_dir,'Cards','param_card.dat__backUp__'))
4535 except:
4536 pass
4537
4538
4539 had_log_tweaks=False
4540 if NLO:
4541 for tweak in options['tweak']['custom']:
4542 if tweak.startswith('seed'):
4543 continue
4544 try:
4545 logstart, logend = tweak.split('->')
4546 except:
                raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4548 if logstart in ['logp','logm', 'log'] and \
4549 logend in ['logp','logm', 'log']:
4550 if NLO:
4551 evaluator.apply_log_tweak(proc_dir, [logstart, logend])
4552 had_log_tweaks = True
4553 else:
                raise MadGraph5Error, "Tweak '%s' not recognized."%tweak
4555 if had_log_tweaks:
4556 evaluator.apply_log_tweak(proc_dir, 'recompile')
4557
4558
4559 if options['resonances']=='all':
4560 resonances_to_run = resonances
4561 elif isinstance(options['resonances'],int):
4562 resonances_to_run = resonances[:options['resonances']]
4563 elif isinstance(options['resonances'],list):
4564 resonances_to_run = []
4565 for res in resonances:
4566 for res_selection in options['resonances']:
4567 if abs(res['ParticlePDG'])==res_selection[0] and \
4568 res['FSMothersNumbers']==set(res_selection[1]):
4569 resonances_to_run.append(res)
4570 break
4571 else:
        raise InvalidCmd("Resonance selection '%s' not recognized"%\
                                                       str(options['resonances']))
4574
4575
4576
4577 if NLO and options['show_plot']:
4578 widgets = ['ME evaluations:', pbar.Percentage(), ' ',
4579 pbar.Bar(),' ', pbar.ETA(), ' ']
4580 progress_bar = pbar.ProgressBar(widgets=widgets,
4581 maxval=len(options['lambdaCMS'])*len(resonances_to_run), fd=sys.stdout)
4582 progress_bar.update(0)
4583
4584 sys.stdout.flush()
4585 else:
4586 progress_bar = None
4587
4588 for resNumber, res in enumerate(resonances_to_run):
4589
4590
4591 result['resonances_result'].append({'resonance':res,'born':[]})
4592 if NLO:
4593 result['resonances_result'][-1]['finite'] = []
4594
4595 for lambdaNumber, lambdaCMS in enumerate(options['lambdaCMS']):
4596
4597
4598 new_param_card = check_param_card.ParamCard(param_card)
4599
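            # Rescale the expansion parameters: each 'replacement' is a python expression
            # evaluated with the original parameter value and the current lambdaCMS.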
4600 for param, replacement in options['expansion_parameters'].items():
4601
4602
4603 orig_param = param.replace('__tmpprefix__','')
4604 if orig_param not in name2block:
4605
4606
4607
4608 continue
4609 for block, lhaid in name2block[orig_param]:
4610 orig_value = float(param_card[block].get(lhaid).value)
4611 new_value = eval(replacement,
4612 {param:orig_value,'lambdacms':lambdaCMS})
4613 new_param_card[block].get(lhaid).value=new_value
4614
4615
4616
4617
4618
4619
4620
4621
4622 evaluator.full_model.set_parameters_and_couplings(
4623 param_card=new_param_card)
4624
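            # In the CMS run the widths follow the lambda rescaling, while in the NWA run
            # all widths are set to zero.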
4625 for decay in new_param_card['decay'].keys():
4626 if mode=='CMS':
4627 new_width = get_width(abs(decay[0]), lambdaCMS,
4628 new_param_card)
4629 else:
4630 new_width = 0.0
4631 new_param_card['decay'].get(decay).value= new_width
4632
4633
4634 evaluator.full_model.set_parameters_and_couplings(
4635 param_card=new_param_card)
4636 if NLO:
4637 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4638
4639
4640 if lambdaCMS==1.0 and mode=='CMS' and \
4641 options['recompute_width'] in ['always','first_time']:
4642 new_param_card.write(pjoin(proc_dir,
4643 'Cards','param_card.dat_recomputed_widths'))
4644
4645
4646
4647 if mode=='NWA' and (options['recompute_width']=='always' or (
4648 options['recompute_width']=='first_time' and lambdaCMS==1.0)):
4649
4650 tmp_param_card = check_param_card.ParamCard(new_param_card)
4651
4652
4653 for decay in new_param_card['decay'].keys():
4654 particle_name = evaluator.full_model.get_particle(\
4655 abs(decay[0])).get_name()
4656 new_width = get_width(abs(decay[0]),lambdaCMS,new_param_card)
4657 tmp_param_card['decay'].get(decay).value = new_width
4658 if not options['has_FRdecay'] and new_width != 0.0 and \
4659 (abs(decay[0]),lambdaCMS) not in options['cached_widths']:
4660 logger.info('Numerically computed width of particle'+\
4661 ' %s for lambda=%.4g : %-9.6gGeV'%
4662 (particle_name,lambdaCMS,new_width))
4663
4664
4665
4666 if lambdaCMS==1.0 and NLO:
4667 tmp_param_card.write(pjoin(proc_dir,
4668 'Cards','param_card.dat_recomputed_widths'))
4669
4670
4671 for param, replacement in options['tweak']['params'].items():
4672
4673
4674 orig_param = param.replace('__tmpprefix__','')
4675
4676 if orig_param.lower() == 'allwidths':
4677
4678 for decay in new_param_card['decay'].keys():
4679 orig_value = float(new_param_card['decay'].get(decay).value)
4680 new_value = eval(replacement,
4681 {param:orig_value,'lambdacms':lambdaCMS})
4682 new_param_card['decay'].get(decay).value = new_value
4683 continue
4684 if orig_param not in name2block:
4685
4686
4687 continue
4688 for block, lhaid in name2block[orig_param]:
4689 orig_value = float(new_param_card[block].get(lhaid).value)
4690 new_value = eval(replacement,
4691 {param:orig_value,'lambdacms':lambdaCMS})
4692 new_param_card[block].get(lhaid).value=new_value
4693
4694 if options['tweak']['params']:
4695
4696 evaluator.full_model.set_parameters_and_couplings(
4697 param_card=new_param_card)
4698 if NLO:
4699 new_param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4700
4701
4702 if NLO:
4703 ME_res = LoopMatrixElementEvaluator.get_me_value(process, 0,
4704 proc_dir, PSpoint=res['PS_point_used'], verbose=False,
4705 format='dict', skip_compilation=True)
4706
4707
4708
4709
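                # Store the Born and the absolute finite part of the virtual; 'finite' as
                # returned here is normalised to the Born and to alphaS/(2*pi), hence the
                # product below.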
4710 result['resonances_result'][-1]['born'].append(ME_res['born'])
4711 result['resonances_result'][-1]['finite'].append(
4712 ME_res['finite']*ME_res['born']*ME_res['alphaS_over_2pi'])
4713 else:
4714 ME_res = evaluator.evaluate_matrix_element(matrix_element,
4715 p=res['PS_point_used'], auth_skipping=False, output='m2')[0]
4716 result['resonances_result'][-1]['born'].append(ME_res)
                if progress_bar is not None:
4718 progress_bar.update(resNumber*len(options['lambdaCMS'])+\
4719 (lambdaNumber+1))
4720
4721 sys.stdout.flush()
4722
4723
4724 log_reversed = False
4725 for tweak in options['tweak']['custom']:
4726 if tweak.startswith('log') and had_log_tweaks:
4727 if log_reversed:
4728 continue
4729 if NLO:
4730 evaluator.apply_log_tweak(proc_dir, 'default')
4731 evaluator.apply_log_tweak(proc_dir, 'recompile')
4732 log_reversed = True
4733
4734
4735 evaluator.full_model.set_parameters_and_couplings(param_card=param_card)
4736 if NLO:
4737 try:
4738 shutil.copy(pjoin(proc_dir,'Cards','param_card.dat__backUp__'),
4739 pjoin(proc_dir,'Cards','param_card.dat'))
4740 except:
4741 param_card.write(pjoin(proc_dir,'Cards','param_card.dat'))
4742
4743
4744
4745 try:
4746 os.remove(pjoin(proc_dir,'Cards','param_card.dat__TemporaryBackup__'))
4747 os.remove(pjoin(proc_dir,'Source','MODEL',
4748 'model_functions.f__TemporaryBackup__'))
4749 except:
4750 pass
4751
4752 return (process.nice_string().replace('Process:', '').strip(),result)
4753
def get_value(process, evaluator, p=None, options=None):
4755 """Return the value/momentum for a phase space point"""
4756
4757 for i, leg in enumerate(process.get('legs')):
4758 leg.set('number', i+1)
4759
4760 logger.info("Checking %s in %s gauge" % \
4761 ( process.nice_string().replace('Process:', 'process'),
4762 'unitary' if aloha.unitary_gauge else 'feynman'))
4763
4764 legs = process.get('legs')
4765
4766
4767 try:
4768 if process.get('perturbation_couplings')==[]:
4769 amplitude = diagram_generation.Amplitude(process)
4770 else:
4771 amplitude = loop_diagram_generation.LoopAmplitude(process)
4772 except InvalidCmd:
4773 logging.info("No diagrams for %s" % \
4774 process.nice_string().replace('Process', 'process'))
4775 return None
4776
4777 if not amplitude.get('diagrams'):
4778
4779 logging.info("No diagrams for %s" % \
4780 process.nice_string().replace('Process', 'process'))
4781 return None
4782
4783 if not p:
4784
4785 p, w_rambo = evaluator.get_momenta(process, options)
4786
4787
4788 if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
4789 matrix_element = helas_objects.HelasMatrixElement(amplitude,
4790 gen_color = True)
4791 else:
4792 matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
4793 gen_color = True, optimized_output = evaluator.loop_optimized_output)
4794
4795 mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
4796 output='jamp',options=options)
4797
4798 if mvalue and mvalue['m2']:
4799 return {'process':process.base_string(),'value':mvalue,'p':p}
4800
4802 """Present the results of a comparison in a nice list format for loop
4803 processes. It detail the results from each lorentz transformation performed.
4804 """
4805
4806 process = comparison_results[0]['process']
4807 results = comparison_results[0]['results']
4808
4809
4810 threshold_rotations = 1e-6
4811
4812
4813
4814 threshold_boosts = 1e-3
4815 res_str = "%s" % process.base_string()
4816
4817 transfo_col_size = 17
4818 col_size = 18
4819 transfo_name_header = 'Transformation name'
4820
4821 if len(transfo_name_header) + 1 > transfo_col_size:
4822 transfo_col_size = len(transfo_name_header) + 1
4823
4825 for transfo_name, value in results:
4826 if len(transfo_name) + 1 > transfo_col_size:
4827 transfo_col_size = len(transfo_name) + 1
4828
4829 res_str += '\n' + fixed_string_length(transfo_name_header, transfo_col_size) + \
4830 fixed_string_length("Value", col_size) + \
4831 fixed_string_length("Relative diff.", col_size) + "Result"
4832
4833 ref_value = results[0]
4834 res_str += '\n' + fixed_string_length(ref_value[0], transfo_col_size) + \
4835 fixed_string_length("%1.10e" % ref_value[1]['m2'], col_size)
4836
4837
4838 all_pass = True
4839 for res in results[1:]:
4840 threshold = threshold_boosts if 'BOOST' in res[0].upper() else \
4841 threshold_rotations
4842 rel_diff = abs((ref_value[1]['m2']-res[1]['m2'])\
4843 /((ref_value[1]['m2']+res[1]['m2'])/2.0))
4844 this_pass = rel_diff <= threshold
4845 if not this_pass:
4846 all_pass = False
4847 res_str += '\n' + fixed_string_length(res[0], transfo_col_size) + \
4848 fixed_string_length("%1.10e" % res[1]['m2'], col_size) + \
4849 fixed_string_length("%1.10e" % rel_diff, col_size) + \
4850 ("Passed" if this_pass else "Failed")
4851 if all_pass:
4852 res_str += '\n' + 'Summary: passed'
4853 else:
4854 res_str += '\n' + 'Summary: failed'
4855
4856 return res_str
4857
4859 """Present the results of a comparison in a nice list format
4860 if output='fail' return the number of failed process -- for test--
4861 """
4862
4863
4864 if comparison_results[0]['process']['perturbation_couplings']!=[]:
4865 return output_lorentz_inv_loop(comparison_results, output)
4866
4867 proc_col_size = 17
4868
4869 threshold=1e-10
4870 process_header = "Process"
4871
4872 if len(process_header) + 1 > proc_col_size:
4873 proc_col_size = len(process_header) + 1
4874
    for one_comp in comparison_results:
        proc = one_comp['process'].base_string()
        if len(proc) + 1 > proc_col_size:
            proc_col_size = len(proc) + 1
4878
4879 col_size = 18
4880
4881 pass_proc = 0
4882 fail_proc = 0
4883 no_check_proc = 0
4884
4885 failed_proc_list = []
4886 no_check_proc_list = []
4887
4888 res_str = fixed_string_length(process_header, proc_col_size) + \
4889 fixed_string_length("Min element", col_size) + \
4890 fixed_string_length("Max element", col_size) + \
4891 fixed_string_length("Relative diff.", col_size) + \
4892 "Result"
4893
4894 for one_comp in comparison_results:
4895 proc = one_comp['process'].base_string()
4896 data = one_comp['results']
4897
4898 if data == 'pass':
4899 no_check_proc += 1
4900 no_check_proc_list.append(proc)
4901 continue
4902
4903 values = [data[i]['m2'] for i in range(len(data))]
4904
4905 min_val = min(values)
4906 max_val = max(values)
4907 diff = (max_val - min_val) / abs(max_val)
4908
4909 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
4910 fixed_string_length("%1.10e" % min_val, col_size) + \
4911 fixed_string_length("%1.10e" % max_val, col_size) + \
4912 fixed_string_length("%1.10e" % diff, col_size)
4913
4914 if diff < threshold:
4915 pass_proc += 1
4916 proc_succeed = True
4917 res_str += "Passed"
4918 else:
4919 fail_proc += 1
4920 proc_succeed = False
4921 failed_proc_list.append(proc)
4922 res_str += "Failed"
4923
4924
4925
4926
4927
        if len(data[0]['jamp'])!=0:
            for k in range(len(data[0]['jamp'][0])):
                # Sum of the squared color-flow amplitudes (JAMPs) for each evaluation
                jamp_sum = [0] * len(data)

                for j in range(len(data[0]['jamp'])):
                    values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
                    jamp_sum = [jamp_sum[i] + values[i] for i in range(len(values))]

                min_val = min(jamp_sum)
                max_val = max(jamp_sum)
                if not max_val:
                    continue
                diff = (max_val - min_val) / max_val
4943
4944 tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
4945 fixed_string_length("%1.10e" % min_val, col_size) + \
4946 fixed_string_length("%1.10e" % max_val, col_size) + \
4947 fixed_string_length("%1.10e" % diff, col_size)
4948
4949 if diff > 1e-10:
4950 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
4951 fail_proc += 1
4952 pass_proc -= 1
4953 failed_proc_list.append(proc)
4954 res_str += tmp_str + "Failed"
4955 elif not proc_succeed:
4956 res_str += tmp_str + "Passed"
4957
4958
4959
4960 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
4961 (pass_proc, pass_proc + fail_proc,
4962 fail_proc, pass_proc + fail_proc)
4963
4964 if fail_proc != 0:
4965 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
4966 if no_check_proc:
4967 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
4968
4969 if output == 'text':
4970 return res_str
4971 else:
4972 return fail_proc
4973
4975 """Present the results of a comparison in a nice list format
4976 if output='fail' return the number of failed process -- for test--
4977 """
4978
4979 proc_col_size = 17
4980
4981
4982
4983 pert_coupl = comparison_results[0]['perturbation_couplings']
4984 comparison_results = comparison_results[1:]
4985
4986 if pert_coupl:
4987 process_header = "Process [virt="+" ".join(pert_coupl)+"]"
4988 else:
4989 process_header = "Process"
4990
4991 if len(process_header) + 1 > proc_col_size:
4992 proc_col_size = len(process_header) + 1
4993
4994 for data in comparison_results:
4995 proc = data['process']
4996 if len(proc) + 1 > proc_col_size:
4997 proc_col_size = len(proc) + 1
4998
4999 pass_proc = 0
5000 fail_proc = 0
5001 no_check_proc = 0
5002
5003 failed_proc_list = []
5004 no_check_proc_list = []
5005
5006 col_size = 18
5007
5008 res_str = fixed_string_length(process_header, proc_col_size) + \
5009 fixed_string_length("Unitary", col_size) + \
5010 fixed_string_length("Feynman", col_size) + \
5011 fixed_string_length("Relative diff.", col_size) + \
5012 "Result"
5013
5014 for one_comp in comparison_results:
5015 proc = one_comp['process']
5016 data = [one_comp['value_unit'], one_comp['value_feynm']]
5017
5018
5019 if data[0] == 'pass':
5020 no_check_proc += 1
5021 no_check_proc_list.append(proc)
5022 continue
5023
5024 values = [data[i]['m2'] for i in range(len(data))]
5025
5026 min_val = min(values)
5027 max_val = max(values)
5028
5029
5030 diff = (max_val - min_val) / abs(max_val)
5031
5032 res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
5033 fixed_string_length("%1.10e" % values[0], col_size) + \
5034 fixed_string_length("%1.10e" % values[1], col_size) + \
5035 fixed_string_length("%1.10e" % diff, col_size)
5036
5037 if diff < 1e-8:
5038 pass_proc += 1
5039 proc_succeed = True
5040 res_str += "Passed"
5041 else:
5042 fail_proc += 1
5043 proc_succeed = False
5044 failed_proc_list.append(proc)
5045 res_str += "Failed"
5046
5047
5048
5049
5050
        if len(data[0]['jamp'])>0:
            for k in range(len(data[0]['jamp'][0])):
                # Sum of the squared color-flow amplitudes (JAMPs) in each gauge
                jamp_sum = [0, 0]

                for j in range(len(data[0]['jamp'])):
                    values = [abs(data[i]['jamp'][j][k])**2 for i in range(len(data))]
                    jamp_sum = [jamp_sum[i] + values[i] for i in range(len(values))]

                min_val = min(jamp_sum)
                max_val = max(jamp_sum)
                if not max_val:
                    continue
                diff = (max_val - min_val) / max_val

                tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , col_size) + \
                       fixed_string_length("%1.10e" % jamp_sum[0], col_size) + \
                       fixed_string_length("%1.10e" % jamp_sum[1], col_size) + \
                       fixed_string_length("%1.10e" % diff, col_size)
5071
5072 if diff > 1e-10:
5073 if not len(failed_proc_list) or failed_proc_list[-1] != proc:
5074 fail_proc += 1
5075 pass_proc -= 1
5076 failed_proc_list.append(proc)
5077 res_str += tmp_str + "Failed"
5078 elif not proc_succeed:
5079 res_str += tmp_str + "Passed"
5080
5081
5082
5083 res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
5084 (pass_proc, pass_proc + fail_proc,
5085 fail_proc, pass_proc + fail_proc)
5086
5087 if fail_proc != 0:
5088 res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
5089 if no_check_proc:
5090 res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)
5091
5092
5093 if output == 'text':
5094 return res_str
5095 else:
5096 return fail_proc
5097
def CMS_save_path(extension, cms_res, used_model, opts, output_path=None):
5099 """Creates a suitable filename for saving these results."""
5100
5101 if opts['name']=='auto' and opts['analyze']!='None':
5102
5103 return '%s.%s'%(os.path.splitext(opts['analyze'].split(',')[0])\
5104 [0],extension)
5105
5106 if opts['name']!='auto':
5107 basename = opts['name']
5108 else:
5109 prefix = 'cms_check_'
5110
5111 if len(cms_res['ordered_processes'])==1:
5112 proc = cms_res['ordered_processes'][0]
5113 replacements = {' ':'','+':'p','-':'m','~':'x', '>':'_','=':'eq'}
5114
5115 try:
5116 proc=proc[:proc.index('[')]
5117 except ValueError:
5118 pass
5119
5120 for key, value in replacements.items():
5121 proc = proc.replace(key,value)
5122
5123 basename =prefix+proc+'_%s_'%used_model.get('name')+\
5124 ( ('_'+'_'.join(cms_res['perturbation_orders'])) if \
5125 cms_res['perturbation_orders']!=[] else '')
5126
5127 else:
5128 basename = prefix+datetime.datetime.now().strftime("%Y_%m_%d_%Hh%Mm%Ss")
5129
5130 suffix = '_%s'%opts['tweak']['name'] if opts['tweak']['name']!='' else ''
5131 if output_path:
5132 return pjoin(output_path,'%s%s.%s'%(basename,suffix,extension))
5133 else:
5134 return '%s%s.%s'%(basename,suffix,extension)
5135
5137 """ Outputs nicely the outcome of the complex mass scheme check performed
5138 by varying the width in the offshell region of resonances found for eahc process.
5139 Output just specifies whether text should be returned or a list of failed
5140 processes. Use 'concise_text' for a consise report of the results."""
5141
5142 pert_orders=result['perturbation_orders']
5143
5144
5145
5146
5147
5148
5149
5150
5151
5152 diff_lambda_power = options['diff_lambda_power']
5153
5154
5155
5156
5157
5158
5159
5160 if 'has_FRdecay' in result:
5161 has_FRdecay = result['has_FRdecay']
5162 else:
5163 has_FRdecay = False
5164
5165 if not pert_orders:
5166 CMS_test_threshold = 1e-3
5167 else:
5168
5169
5170
5171
5172
5173
5174 if not has_FRdecay and ('recomputed_with' not in result or \
5175 result['recompute_width'] in ['always','first_time']):
5176 CMS_test_threshold = 2e-2*(1.0e-4/min(result['lambdaCMS']))
5177 else:
5178
5179
5180 CMS_test_threshold = 2e-2*(1.0e-5/min(result['lambdaCMS']))
5181
5182
5183
5184
5185 consideration_threshold = min(CMS_test_threshold/10.0, 0.05)
5186
5187
5188 group_val = 3
5189
5190
5191
5192
5193 diff_zero_threshold = 1e-3
5194
5195
5196 lambda_range = options['lambda_plot_range']
5197
5198
5199
5200
5201
5202
5203
5204
5205
5206
5207 res_str = ''
5208
5209 concise_str = ''
5210 concise_data = '%%(process)-%ds%%(asymptot)-15s%%(cms_check)-25s%%(status)-25s\n'
    concise_repl_dict = {'Header':{'process':'Process',
                                   'asymptot':'Asymptote',
                                   'cms_check':'Deviation from asymptote',
                                   'status':'Result'}}
5215
5216
5217
5218
5219
5220 useLatexParticleName = 'built-in'
5221 name2tex = {'e+':r'e^+','w+':r'W^+','a':r'\gamma','g':'g',
5222 'e-':r'e^-','w-':r'W^-','z':'Z','h':'H',
5223 'mu+':r'\mu^+',
5224 'mu-':r'\mu^-',
5225 'ta+':r'\tau^+',
5226 'ta-':r'\tau^-'}
5227 for p in ['e','m','t']:
5228 d = {'e':'e','m':r'\mu','t':r'\tau'}
5229 name2tex['v%s'%p]=r'\nu_{%s}'%d[p]
5230 name2tex['v%s~'%p]=r'\bar{\nu_{%s}}'%d[p]
5231
5232 for p in ['u','d','c','s','b','t']:
5233 name2tex[p]=p
5234 name2tex['%s~'%p]=r'\bar{%s}'%p
5235
5236 def format_particle_name(particle, latex=useLatexParticleName):
5237 p_name = particle
5238 if latex=='model':
5239 try:
5240 texname = model.get_particle(particle).get('texname')
5241 if texname and texname!='none':
5242 p_name = r'$\displaystyle %s$'%texname
5243 except:
5244 pass
5245 elif latex=='built-in':
5246 try:
5247 p_name = r'$\displaystyle %s$'%name2tex[particle]
5248 except:
5249 pass
5250 return p_name
5251
5252 def resonance_str(resonance, latex=useLatexParticleName):
5253 """ Provides a concise string to characterize the resonance """
5254 particle_name = model.get_particle(resonance['ParticlePDG']).get_name()
5255 mothersID=['%d'%n for n in sorted(resonance['FSMothersNumbers'])]
5256 return r"%s [%s]"%(format_particle_name(particle_name,latex=latex),
5257 ','.join(mothersID))
5258
5259 def format_title(process, resonance):
5260 """ Format the plot title given the process and resonance """
5261
5262 process_string = []
5263 for particle in process.split():
5264 if particle=='$$':
5265 process_string.append(r'\$\$')
5266 continue
5267 if particle=='>':
5268 process_string.append(r'$\displaystyle \rightarrow$')
5269 continue
5270 process_string.append(format_particle_name(particle))
5271
5272 if resonance=='':
5273 return r'CMS check for %s' %(' '.join(process_string))
5274 else:
5275 return r'CMS check for %s ( resonance %s )'\
5276 %(' '.join(process_string),resonance)
5277
5278 def guess_lambdaorder(ME_values_list, lambda_values, expected=None,
5279 proc=None, res=None):
5280 """ Guess the lambda scaling from a list of ME values and return it.
5281 Also compare with the expected result if specified and trigger a
5282 warning if not in agreement."""
5283
5284 bpowers = []
5285 for i, lambdaCMS in enumerate(lambda_values[1:]):
5286 bpowers.append(round(math.log(ME_values_list[0]/ME_values_list[i+1],\
5287 lambda_values[0]/lambdaCMS)))
5288
5289
5290 bpower = sorted([(el, bpowers.count(el)) for el in set(bpowers)],
5291 key = lambda elem: elem[1], reverse=True)[0][0]
5292 if not expected:
5293 return bpower
5294 if bpower != expected:
5295 logger.warning('The apparent scaling of the squared amplitude '+
5296 'is inconsistent with the expected value '+
5297 '(expected %i vs detected %i); %i will be used.'%(expected,bpower,bpower)+
5298 ' This happened for process %s and resonance %s.'%(proc, res))
5299 return bpower
5300
5301 def check_stability(ME_values, lambda_values, lambda_scaling, values_name):
5302 """ Checks if the values passed in argument are stable and return the
5303 stability check outcome warning if it is not precise enough. """
5304
5305 values = sorted([
5306 abs(val*(lambda_values[0]/lambda_values[i])**lambda_scaling) for \
5307 i, val in enumerate(ME_values)])
5308 median = values[len(values)//2]
5309 max_diff = max(abs(values[0]-median),abs(values[-1]-median))
5310 stability = max_diff/median
5311 stab_threshold = 1e-2
5312 if stability >= stab_threshold:
5313 return "== WARNING: Stability check failed for '%s' with stability %.2e.\n"\
5314 %(values_name, stability)
5315 else:
5316 return None
5317
5318 if options['analyze']=='None':
5319 if options['reuse']:
5320 save_path = CMS_save_path('pkl', result, model, options,
5321 output_path=output_path)
5322 buff = "\nThe results of this check have been stored on disk and its "+\
5323 "analysis can be rerun at anytime with the MG5aMC command:\n "+\
5324 " check cms --analyze=%s\n"%save_path
5325 res_str += buff
5326 concise_str += buff
5327 save_load_object.save_to_file(save_path, result)
5328 elif len(result['ordered_processes'])>0:
5329 buff = "\nUse the following synthax if you want to store "+\
5330 "the raw results on disk.\n"+\
5331 " check cms -reuse <proc_def> <options>\n"
5332 res_str += buff
5333 concise_str += buff
5334
5335
5336
5337
5338
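# Build the list of (process, resonance index) pairs for which a check was performed.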
5339 checks = []
5340 for process in result['ordered_processes']:
5341 checks.extend([(process,resID) for resID in \
5342 range(len(result[process]['CMS']))])
5343
5344 if options['reuse']:
5345 logFile = open(CMS_save_path(
5346 'log', result, model, options, output_path=output_path),'w')
5347
5348 lambdaCMS_list=result['lambdaCMS']
5349
5350
5351 failed_procs = []
5352
5353
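# Helper returning a 47-character horizontal rule made of the given character.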
5354 bar = lambda char: char*47
5355
5356
5357 if 'widths_computed' in result:
5358 res_str += '\n%s%s%s\n'%(bar('='),' Widths ',bar('='))
5359 if result['recompute_width'] == 'never':
5360 res_str += '| Widths extracted from the param_card.dat'
5361 else:
5362 res_str += '| Widths computed %s'%('analytically' if has_FRdecay
5363 else 'numerically')
5364 if result['recompute_width'] == 'first_time':
5365 res_str += ' for \lambda = 1'
5366 elif result['recompute_width'] == 'always':
5367 res_str += ' for all \lambda values'
5368 res_str += " using mode '--recompute_width=%s'.\n"%result['recompute_width']
5369 for particle_name, width in result['widths_computed']:
5370 res_str += '| %-10s = %-11.6gGeV\n'%('Width(%s)'%particle_name,width)
5371 res_str += '%s%s%s\n'%(bar('='),'='*8,bar('='))
5372
5373
5374
5375
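# Number of last (smallest-lambda) points used for the numerical stability checks below.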
5376 nstab_points=group_val
5377
5378 differences_target = {}
5379 for process, resID in checks:
5380
5381
5382 concise_repl_dict[process] = {'process':process,
5383 'asymptot':'N/A',
5384 'cms_check':'N/A',
5385 'status':'N/A'}
5386 proc_res = result[process]
5387 cms_res = proc_res['CMS'][resID]
5388 nwa_res = proc_res['NWA'][resID]
5389 resonance = resonance_str(cms_res['resonance'], latex='none')
5390 cms_born=cms_res['born']
5391 nwa_born=nwa_res['born']
5392
5393 res_str += '\n%s%s%s\n'%(bar('='),'='*8,bar('='))
5394
5395 proc_title = "%s (resonance %s)"%(process,resonance)
5396 centering = (len(bar('='))*2+8-len(proc_title))//2
5397 res_str += "%s%s\n"%(' '*centering,proc_title)
5398
5399 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5400
5401
5402 if diff_lambda_power!=1:
5403 res_str += "== WARNING diff_lambda_power is not 1 but = %g\n"%diff_lambda_power
5404 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5405
5406 born_power = guess_lambdaorder(nwa_born,lambdaCMS_list,
5407 expected=proc_res['born_order'], proc=process, res=resonance)
5408 stab_cms_born = check_stability(cms_born[-nstab_points:],
5409 lambdaCMS_list[-nstab_points:], born_power, 'CMS Born')
5410 if stab_cms_born:
5411 res_str += stab_cms_born
5412 stab_nwa_born = check_stability(nwa_born[-nstab_points:],
5413 lambdaCMS_list[-nstab_points:], born_power, 'NWA Born')
5414 if stab_nwa_born:
5415 res_str += stab_nwa_born
5416
5417 res_str += "== Kinematic configuration in GeV (E,px,pypz)\n"
5418 for i, p in enumerate(cms_res['resonance']['PS_point_used']):
5419 res_str += " | p%-2.d = "%(i+1)
5420 for pi in p:
5421 res_str += '%-24.17g'%pi if pi<0.0 else ' %-23.17g'%pi
5422 res_str += "\n"
5423
5424 res_str += "== Offshellnesses of all detected resonances\n"
5425 for res_name, offshellness in cms_res['resonance']['offshellnesses']:
5426 res_str += " | %-15s = %f\n"%(res_name, offshellness)
5427 res_str += '%s%s%s\n'%(bar('-'),'-'*8,bar('-'))
5428
5429 if not pert_orders:
5430 res_str += "== Born scaling lambda^n_born. nborn = %d\n"%born_power
5431 else:
5432 cms_finite=cms_res['finite']
5433 nwa_finite=nwa_res['finite']
5434 loop_power = guess_lambdaorder(nwa_finite,lambdaCMS_list,
5435 expected=proc_res['loop_order'], proc=process, res=resonance)
5436 res_str += "== Scaling lambda^n. nborn, nloop = %d, %d\n"\
5437 %(born_power,loop_power)
5438 stab_cms_finite = check_stability(cms_finite[-nstab_points:],
5439 lambdaCMS_list[-nstab_points:], loop_power, 'CMS finite')
5440 if stab_cms_finite:
5441 res_str += stab_cms_finite
5442 stab_nwa_finite = check_stability(nwa_finite[-nstab_points:],
5443 lambdaCMS_list[-nstab_points:], loop_power, 'NWA finite')
5444 if stab_nwa_finite:
5445 res_str += stab_nwa_finite
5446
5447 CMSData = []
5448 NWAData = []
5449 DiffData = []
5450 for idata, lam in enumerate(lambdaCMS_list):
5451 if not pert_orders:
5452 new_cms=cms_born[idata]/(lam**born_power)
5453 new_nwa=nwa_born[idata]/(lam**born_power)
5454 else:
5455 new_cms=(cms_finite[idata]+cms_born[idata]-nwa_born[idata])/(lam*nwa_born[idata])
5456 new_nwa=nwa_finite[idata]/(lam*nwa_born[idata])
5457 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5458 CMSData.append(new_cms)
5459 NWAData.append(new_nwa)
5460 DiffData.append(new_diff)
5461
5462
5463
5464
5465
5466
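# Estimate the asymptotic CMS-NWA difference as the median of the central 60% of
# the difference data (the 20% largest- and smallest-lambda points are trimmed).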
5467 trim_range=int(((1.0-0.6)/2.0)*len(DiffData))
5468 low_diff_median = sorted(DiffData[trim_range:-trim_range])\
5469 [(len(DiffData)-2*trim_range)//2]
5470
5471
5472
5473
5474
5475
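# Starting from the largest lambda values, find the first window of group_val points
# whose median agrees with the asymptotic estimate within consideration_threshold
# (relative to the reference); the CMS check is then performed on the remaining,
# smaller-lambda, part of the curve.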
5476 current_median = 0
5477
5478 scan_index = 0
5479 reference = abs(sorted(NWAData)[len(NWAData)//2])
5480 if low_diff_median!= 0.0:
5481 if abs(reference/low_diff_median)<diff_zero_threshold:
5482 reference = abs(low_diff_median)
5483 while True:
5484 scanner = DiffData[scan_index:group_val+scan_index]
5485 current_median = sorted(scanner)[len(scanner)//2]
5486
5487
5488 if abs(current_median-low_diff_median)/reference<\
5489 consideration_threshold:
5490 break
5491 scan_index += 1
5492 if (group_val+scan_index)>=len(DiffData):
5493
5494
5495 logger.warning('The median scanning failed during the CMS check '+
5496 'for process %s. '%proc_title+\
5497 'This means that the difference plot has no stable '+\
5498 'intermediate region and MG5_aMC will arbitrarily consider the '+\
5499 'left half of the values.')
5500 scan_index = -1
5501 break
5502
5503 if scan_index == -1:
5504 cms_check_data_range = len(DiffData)//2
5505 else:
5506 cms_check_data_range = scan_index + group_val
5507
5508 res_str += "== Data range considered (min, max, n_val) = (%.1e, %.1e, %d)\n"\
5509 %(lambdaCMS_list[-1],lambdaCMS_list[scan_index],
5510 len(lambdaCMS_list)-scan_index)
5511
5512 CMScheck_values = DiffData[cms_check_data_range:]
5513
5514
5515
5516
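# If the plateau was found, scan backwards from the smallest lambda values for a
# possibly unstable numerical tail; a warning is issued only when the deviating
# points scatter on both sides of the asymptote (a one-sided drift is not flagged).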
5517 if scan_index >= 0:
5518
5519 scan_index = len(CMScheck_values)
5520 used_group_val = max(3,group_val)
5521 unstability_found = True
5522 while True:
5523 scanner = CMScheck_values[scan_index-used_group_val:scan_index]
5524 maxdiff = max(abs(scan-low_diff_median) for scan in scanner)
5525 if maxdiff/reference<consideration_threshold:
5526 break
5527 if (scan_index-used_group_val)==0:
5528
5529
5530 unstability_found = False
5531 break
5532
5533 scan_index -= 1
5534
5535
5536 if unstability_found:
5537 unstab_check=CMScheck_values[scan_index:]
5538 relative_array = [val > CMScheck_values[scan_index-1] for
5539 val in unstab_check]
5540 upper = relative_array.count(True)
5541 lower = relative_array.count(False)
5542 if not ((lower==0 and upper>=0) or (lower>=0 and upper==0)):
5543 logger.warning(
5544 """For process %s, a numerically unstable region was detected starting from lambda < %.1e.
5545 Look at the plot in this region (and possibly add more points using the option --lambdaCMS).
5546 If this is indeed a stability issue, then either decrease MLStabThreshold in MadLoop or increase the
5547 minimum value of lambda to be considered in the CMS check."""\
5548 %(proc_title, lambdaCMS_list[cms_check_data_range+scan_index-1]))
5549
5550
5551
5552
5553 scan_index = 0
5554 max_diff = 0.0
5555 res_str += "== Ref. value used in the ratios (Born NWA) = %s\n"\
5556 %('%.3g'%reference)
5557 res_str += "== Asymptotic difference value detected = %s\n"\
5558 %('%.3g'%low_diff_median)
5559 concise_repl_dict[process]['asymptot'] = '%.3e'%low_diff_median
5560
5561
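# Remember the detected asymptote so that it can be drawn on the difference plot.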
5562 differences_target[(process,resID)]= low_diff_median
5563
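# The actual CMS check: the largest deviation of the running median of the
# difference from the detected asymptote, relative to the reference, must stay
# below CMS_test_threshold over the retained lambda range.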
5564 while True:
5565 current_vals = CMScheck_values[scan_index:scan_index+group_val]
5566 max_diff = max(max_diff, abs(low_diff_median-
5567 sorted(current_vals)[len(current_vals)//2])/reference)
5568 if (scan_index+group_val)>=len(CMScheck_values):
5569 break
5570 scan_index += 1
5571
5572
5573 cms_check = (max_diff*100.0, '>' if max_diff>CMS_test_threshold else '<',
5574 CMS_test_threshold*100.0)
5575 res_str += "== CMS check result (threshold) = %.3g%% (%s%.3g%%)\n"%cms_check
5576 concise_repl_dict[process]['cms_check'] = \
5577 "%-10s (%s%.3g%%)"%('%.3g%%'%cms_check[0],cms_check[1],cms_check[2])
5578
5579 if max_diff>CMS_test_threshold:
5580 failed_procs.append((process,resonance))
5581 res_str += "%s %s %s\n"%(bar('='),
5582 'FAILED' if max_diff>CMS_test_threshold else 'PASSED',bar('='))
5583 concise_repl_dict[process]['status'] = 'Failed' if max_diff>CMS_test_threshold \
5584 else 'Passed'
5585
5586 if output=='concise_text':
5587
5588 max_proc_size = max(
5589 [len(process) for process in result['ordered_processes']]+[10])
5590
5591 res_str = concise_str
5592 res_str += '\n'+concise_data%(max_proc_size+4)%concise_repl_dict['Header']
5593 for process in result['ordered_processes']:
5594 res_str += (concise_data%(max_proc_size+4)%concise_repl_dict[process])
5595
5596 if len(checks):
5597 res_str += "Summary: %i/%i passed"%(len(checks)-len(failed_procs),len(checks))+\
5598 ('.\n' if not failed_procs else ', failed checks are for:\n')
5599 else:
5600 return "\nNo CMS check to perform, the process either has no diagram or does not "+\
5601 "not feature any massive s-channel resonance."
5602
5603 for process, resonance in failed_procs:
5604 res_str += "> %s, %s\n"%(process, resonance)
5605
5606 if output=='concise_text':
5607 res_str += '\nMore detailed information on this check available with the command:\n'
5608 res_str += ' MG5_aMC>display checks\n'
5609
5610
5611
5612
5613 if not options['show_plot']:
5614 if options['reuse']:
5615 logFile.write(res_str)
5616 logFile.close()
5617 if output.endswith('text'):
5618 return res_str
5619 else:
5620 return failed_procs
5621
5622 fig_output_file = CMS_save_path('pdf', result, model, options,
5623 output_path=output_path)
5624 base_fig_name = fig_output_file[:-4]
5625 suffix = 1
5626 while os.path.isfile(fig_output_file):
5627 fig_output_file = '%s__%d__.pdf'%(base_fig_name,suffix)
5628 suffix+=1
5629
5630 process_data_plot_dict={}
5631
5632
5633
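# Collect any additional result files passed via --analyze (comma-separated list,
# optionally with a title in parentheses) so they can be overlaid on the plots.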
5634 all_res = [(result, None)]
5635 for i, add_res in enumerate(options['analyze'].split(',')[1:]):
5636 specs =re.match(r'^(?P<filename>.*)\((?P<title>.*)\)$', add_res)
5637 if specs:
5638 filename = specs.group('filename')
5639 title = specs.group('title')
5640 else:
5641 filename = add_res
5642 title = '#%d'%(i+1)
5643
5644 new_result = save_load_object.load_from_file(filename)
5645 if new_result is None:
5646 raise InvalidCmd('The complex mass scheme check result'+
5647 " file below could not be read.\n %s"%filename)
5648 if len(new_result['ordered_processes'])!=len(result['ordered_processes']) \
5649 or len(new_result['lambdaCMS'])!=len(result['lambdaCMS']):
5650 raise InvalidCmd('The complex mass scheme check result'+
5651 " file below does not seem compatible.\n %s"%filename)
5652 all_res.append((new_result,title))
5653
5654
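# For each process/resonance, build the data of the two insets: the rescaled CMS
# and NWA results (upper inset) and their normalized difference (lower inset).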
5655 for process, resID in checks:
5656 data1=[]
5657 data2=[]
5658 info ={}
5659 for res in all_res:
5660 proc_res = res[0][process]
5661 cms_res = proc_res['CMS'][resID]
5662 nwa_res = proc_res['NWA'][resID]
5663 resonance = resonance_str(cms_res['resonance'])
5664 if options['resonances']!=1:
5665 info['title'] = format_title(process, resonance)
5666 else:
5667 info['title'] = format_title(process, '')
5668
5669 cms_born=cms_res['born']
5670 nwa_born=nwa_res['born']
5671 if len(cms_born) != len(lambdaCMS_list) or\
5672 len(nwa_born) != len(lambdaCMS_list):
5673 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5674 ' lambdaCMS values specified for process %s'%process)
5675 if pert_orders:
5676 cms_finite=cms_res['finite']
5677 nwa_finite=nwa_res['finite']
5678 if len(cms_finite) != len(lambdaCMS_list) or\
5679 len(nwa_finite) != len(lambdaCMS_list):
5680 raise MadGraph5Error('Inconsistent list of results w.r.t. the'+\
5681 ' lambdaCMS values specified for process %s'%process)
5682
5683 bpower = guess_lambdaorder(nwa_born,lambdaCMS_list,
5684 expected=proc_res['born_order'], proc=process, res=resonance)
5685
5686 CMSData = []
5687 NWAData = []
5688 DiffData = []
5689 for idata, lam in enumerate(lambdaCMS_list):
5690 if not pert_orders:
5691 new_cms = cms_born[idata]/lam**bpower
5692 new_nwa = nwa_born[idata]/lam**bpower
5693 else:
5694 new_cms=cms_finite[idata]+cms_born[idata]-nwa_born[idata]
5695 new_nwa=nwa_finite[idata]
5696 new_cms /= lam*nwa_born[idata]
5697 new_nwa /= lam*nwa_born[idata]
5698 new_diff=(new_cms-new_nwa)/(lam**diff_lambda_power)
5699 CMSData.append(new_cms)
5700 NWAData.append(new_nwa)
5701 DiffData.append(new_diff)
5702 if res[1] is None:
5703 if not pert_orders:
5704 data1.append([r'$\displaystyle CMS\;=\;\mathcal{M}_{CMS}^{(0)}/\lambda^%d$'%bpower,CMSData])
5705 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}_{NWA}^{(0)}/\lambda^%d$'%bpower,NWAData])
5706 else:
5707 data1.append([r'$\displaystyle CMS\;=\;(\mathcal{M}^{(1)}_{CMS}+\mathcal{M}_{CMS}^{(0)}-\mathcal{M}^{(0)}_{NWA})/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',CMSData])
5708 data1.append([r'$\displaystyle NWA\;=\;\mathcal{M}^{(1)}_{NWA}/(\lambda\cdot\mathcal{M}^{(0)}_{NWA})$',NWAData])
5709 data2.append([r'$\displaystyle\Delta\;=\;(CMS-NWA)/\lambda%s$'\
5710 %('' if diff_lambda_power==1 else r'^{%g}'%diff_lambda_power)
5711 ,DiffData])
5712 data2.append([r'Detected asymptote',[differences_target[(process,resID)]
5713 for i in range(len(lambdaCMS_list))]])
5714 else:
5715 data1.append([r'$\displaystyle CMS$ %s'%res[1].replace('_',' '),CMSData])
5716 data1.append([r'$\displaystyle NWA$ %s'%res[1].replace('_',' '),NWAData])
5717 data2.append([r'$\displaystyle\Delta$ %s'%res[1].replace('_',' '),DiffData])
5718
5719 process_data_plot_dict[(process,resID)]=(data1,data2, info)
5720
5721
5722 try:
5723 import matplotlib.pyplot as plt
5724 from matplotlib.backends.backend_pdf import PdfPages
5725 logger.info('Rendering plots... (this can take some time because of the latex labels)')
5726
5727 res_str += \
5728 """\n-----------------------------------------------------------------------------------------------
5729 | In the plots, the Complex Mass Scheme check is successful if the normalized difference |
5730 | between the CMS and NWA result (lower inset) tends to a constant when \lambda goes to zero. |
5731 -----------------------------------------------------------------------------------------------\n"""
5732
5733
5734 if lambda_range[1]>0:
5735 min_lambda_index = -1
5736 for i, lam in enumerate(lambdaCMS_list):
5737 if lam<=lambda_range[1]:
5738 min_lambda_index = i
5739 break
5740 else:
5741 min_lambda_index = 0
5742 if lambda_range[0]>0:
5743 max_lambda_index = -1
5744 for i, lam in enumerate(lambdaCMS_list):
5745 if lam<=lambda_range[0]:
5746 max_lambda_index=i-1
5747 break
5748 else:
5749 max_lambda_index=len(lambdaCMS_list)-1
5750
5751 if max_lambda_index==-1 or min_lambda_index==-1 or \
5752 min_lambda_index==max_lambda_index:
5753 raise InvalidCmd('Invalid lambda plotting range: (%.1e,%.1e)'%\
5754 (lambda_range[0],lambda_range[1]))
5755
5756 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5757 lambdaCMS_list = lambdaCMS_list[min_lambda_index:max_lambda_index+1]
5758
5759 plt.rc('text', usetex=True)
5760 plt.rc('font', family='serif')
5761 pp=PdfPages(fig_output_file)
5762 if len(checks)==0 or len(process_data_plot_dict[checks[0]][1])<=7:
5763 colorlist=['b','r','g','k','c','m','y']
5764 else:
5765 import matplotlib.colors as colors
5766 import matplotlib.cm as mplcm
5768
5769
5770 cm = plt.get_cmap('gist_rainbow')
5771 cNorm = colors.Normalize(vmin=0, vmax=(len(data2)-1))
5772 scalarMap = mplcm.ScalarMappable(norm=cNorm, cmap=cm)
5773
5774 colorlist = [scalarMap.to_rgba(i*0.9) for i in range(len(data2))]
5775
5776
5777
5778
5779
5780
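# One figure per (process, resonance): the upper inset shows the CMS and NWA
# curves, the lower inset their difference together with the detected asymptote.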
5781 legend_size = 10
5782 for iproc, (process, resID) in enumerate(checks):
5783 data1,data2, info=process_data_plot_dict[(process,resID)]
5784
5785 if lambda_range[0]>0.0 or lambda_range[1]>0.0:
5786 for i in range(len(data1)):
5787 data1[i][1]=data1[i][1][min_lambda_index:max_lambda_index+1]
5788 for i in range(len(data2)):
5789 data2[i][1]=data2[i][1][min_lambda_index:max_lambda_index+1]
5790 plt.figure(iproc+1)
5791 plt.subplot(211)
5792 minvalue=1e+99
5793 maxvalue=-1e+99
5794 for i, d1 in enumerate(data1):
5795
5796 color=colorlist[i//2]
5797 data_plot=d1[1]
5798 minvalue=min(min(data_plot),minvalue)
5799 maxvalue=max(max(data_plot),maxvalue)
5800 plt.plot(lambdaCMS_list, data_plot, color=color, marker='', \
5801 linestyle=('-' if i%2==0 else '--'),
5802 label=(d1[0] if (i%2==0 or i==1) else '_nolegend_'))
5803 ymin = minvalue-(maxvalue-minvalue)/5.
5804 ymax = maxvalue+(maxvalue-minvalue)/5.
5805
5806 plt.yscale('linear')
5807 plt.xscale('log')
5808 plt.title(info['title'],fontsize=12,y=1.08)
5809 plt.ylabel(r'$\displaystyle \mathcal{M}$')
5810
5811 if ymax*len(data1)-sum(max(d1[1][-len(d1[1])//2:]) \
5812 for d1 in data1) > 0.5*(ymax-ymin)*len(data1):
5813 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5814 else:
5815 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5816
5817 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list), ymin, ymax])
5818
5819 plt.subplot(212)
5820 minvalue=1e+99
5821 maxvalue=-1e+99
5822
5823 try:
5824 asymptot_index = [d2[0] for d2 in data2].index('Detected asymptote')
5825 plt.plot(lambdaCMS_list, data2[asymptot_index][1],
5826 color='0.75', marker='', linestyle='-', label='')
5827 except ValueError:
5828 pass
5829
5830 color_ID = -1
5831 for d2 in data2:
5832
5833 if d2[0]=='Detected asymptote':
5834 continue
5835 color_ID += 1
5836 color=colorlist[color_ID]
5837 data_plot=d2[1]
5838 minvalue=min(min(data_plot),minvalue)
5839 maxvalue=max(max(data_plot),maxvalue)
5840 plt.plot(lambdaCMS_list, data_plot, color=color, marker='',\
5841 linestyle='-', label=d2[0])
5842 ymin = minvalue-(maxvalue-minvalue)/5.
5843 ymax = maxvalue+(maxvalue-minvalue)/5.
5844
5845 plt.yscale('linear')
5846 plt.xscale('log')
5847 plt.ylabel(r'$\displaystyle \Delta$')
5848 plt.xlabel(r'$\displaystyle \lambda$')
5849
5850
5851 sd = [sorted(d2[1][-len(d2[1])//2:]) for d2 in data2]
5852 left_stability = sum(abs(s[0]-s[-1]) for s in sd)
5853 sd = [sorted(d2[1][:-len(d2[1])//2]) for d2 in data2]
5854 right_stability = sum(abs(s[0]-s[-1]) for s in sd)
5855 left_stable = False if right_stability==0.0 else \
5856 (left_stability/right_stability)<0.1
5857
5858 if left_stable:
5859 if ymax*len(data2)-sum(max(d2[1][-len(d2[1])//2:]) \
5860 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5861 plt.legend(prop={'size':legend_size},loc='upper left', frameon=False)
5862 else:
5863 plt.legend(prop={'size':legend_size},loc='lower left', frameon=False)
5864 else:
5865 if ymax*len(data2)-sum(max(d2[1][:-len(d2[1])//2]) \
5866 for d2 in data2) > 0.5*(ymax-ymin)*len(data2):
5867 plt.legend(prop={'size':legend_size},loc='upper right', frameon=False)
5868 else:
5869 plt.legend(prop={'size':legend_size},loc='lower right', frameon=False)
5870
5871 plt.axis([min(lambdaCMS_list),max(lambdaCMS_list),\
5872 minvalue-(maxvalue-minvalue)/5., maxvalue+(maxvalue-minvalue)/5.])
5873
5874 plt.savefig(pp,format='pdf')
5875
5876 pp.close()
5877
5878 if len(checks)>0:
5879 logger.info('Complex Mass Scheme check plot output to file %s. '%fig_output_file)
5880
5881 if sys.platform.startswith('linux'):
5882 misc.call(["xdg-open", fig_output_file])
5883 elif sys.platform.startswith('darwin'):
5884 misc.call(["open", fig_output_file])
5885
5886 plt.close("all")
5887
5888 except Exception as e:
5889 if isinstance(e, ImportError):
5890 res_str += "\n= Install matplotlib to get a "+\
5891 "graphical display of the results of the cms check."
5892 else:
5893 general_error = "\n= Could not produce the cms check plot because of "+\
5894 "the following error: %s"%str(e)
5895 try:
5896 import Tkinter
5897 if isinstance(e, Tkinter.TclError):
5898 res_str += "\n= Plots are not generated because your system"+\
5899 " does not support graphical display."
5900 else:
5901 res_str += general_error
5902 except:
5903 res_str += general_error
5904
5905 if options['reuse']:
5906 logFile.write(res_str)
5907 logFile.close()
5908
5909 if output.endswith('text'):
5910 return res_str
5911 else:
5912 return failed_procs
5913