Package madgraph :: Package various :: Module process_checks
[hide private]
[frames] | [no frames]

Source Code for Module madgraph.various.process_checks

   1  ################################################################################ 
   2  # 
   3  # Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors 
   4  # 
   5  # This file is a part of the MadGraph5_aMC@NLO project, an application which  
   6  # automatically generates Feynman diagrams and matrix elements for arbitrary 
   7  # high-energy processes in the Standard Model and beyond. 
   8  # 
   9  # It is subject to the MadGraph5_aMC@NLO license which should accompany this  
  10  # distribution. 
  11  # 
  12  # For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch 
  13  # 
  14  ################################################################################ 
  15  """Several different checks for processes (and hence models): 
  16  permutation tests, gauge invariance tests, lorentz invariance 
  17  tests. Also class for evaluation of Python matrix elements, 
  18  MatrixElementEvaluator.""" 
  19   
  20  from __future__ import division 
  21   
  22  import array 
  23  import copy 
  24  import fractions 
  25  import itertools 
  26  import logging 
  27  import math 
  28  import os 
  29  import sys 
  30  import re 
  31  import shutil 
  32  import random 
  33  import glob 
  34  import re 
  35  import subprocess 
  36  import time 
  37  import datetime 
  38  import errno 
  39  # If psutil becomes standard, the RAM check can be performed with it instead 
  40  #import psutil 
  41   
  42  import aloha 
  43  import aloha.aloha_writers as aloha_writers 
  44  import aloha.create_aloha as create_aloha 
  45   
  46  import madgraph.iolibs.export_python as export_python 
  47  import madgraph.iolibs.helas_call_writers as helas_call_writers 
  48  import models.import_ufo as import_ufo 
  49  import madgraph.iolibs.save_load_object as save_load_object 
  50   
  51  import madgraph.core.base_objects as base_objects 
  52  import madgraph.core.color_algebra as color 
  53  import madgraph.core.color_amp as color_amp 
  54  import madgraph.core.helas_objects as helas_objects 
  55  import madgraph.core.diagram_generation as diagram_generation 
  56   
  57  import madgraph.various.rambo as rambo 
  58  import madgraph.various.misc as misc 
  59  import madgraph.various.progressbar as pbar 
  60  import madgraph.various.banner as bannermod 
  61   
  62  import madgraph.loop.loop_diagram_generation as loop_diagram_generation 
  63  import madgraph.loop.loop_helas_objects as loop_helas_objects 
  64  import madgraph.loop.loop_base_objects as loop_base_objects 
  65   
  66  from madgraph.interface.madevent_interface import MadLoopInitializer 
  67   
  68  from madgraph import MG5DIR, InvalidCmd, MadGraph5Error 
  69   
  70  from madgraph.iolibs.files import cp 
  71   
  72  import models.model_reader as model_reader 
  73  import aloha.template_files.wavefunctions as wavefunctions 
  74  from aloha.template_files.wavefunctions import \ 
  75       ixxxxx, oxxxxx, vxxxxx, sxxxxx, txxxxx, irxxxx, orxxxx 
  76   
  77  ADDED_GLOBAL = [] 
  78   
  79  temp_dir_prefix = "TMP_CHECK" 
  80   
  81  pjoin = os.path.join 
def clean_added_globals(to_clean):
    """Remove every name listed in `to_clean` from this module's globals,
    emptying the list in the process (it is mutated in place)."""
    # Iterate over a snapshot since we mutate the list while walking it.
    for name in list(to_clean):
        globals().pop(name)
        to_clean.remove(name)
#===============================================================================
# Fake interface to be instancied when using process_checks from tests instead.
#===============================================================================
class FakeInterface(object):
    """Minimal stand-in ('option container') mimicking the command interface
    that is normally handed to the tests. Only what the tests actually read
    is provided:
      cmd.options['fortran_compiler']
      cmd.options['complex_mass_scheme']
      cmd._mgme_dir"""

    def __init__(self, mgme_dir = "", complex_mass_scheme = False,
                 fortran_compiler = 'gfortran' ):
        self._mgme_dir = mgme_dir
        # Single dict literal instead of key-by-key assignment.
        self.options = {'complex_mass_scheme': complex_mass_scheme,
                        'fortran_compiler': fortran_compiler}
#===============================================================================
# Logger for process_checks
#===============================================================================

# Module-level logger; handlers and levels are configured by the caller
# (e.g. the MadGraph command-line interface), not here.
logger = logging.getLogger('madgraph.various.process_checks')
# Helper function to boost momentum
def boost_momenta(p, boost_direction=1, beta=0.5):
    """boost the set momenta in the 'boost direction' by the 'beta'
    factor"""

    gamma = 1 / math.sqrt(1 - beta**2)
    boosted_set = []
    for momentum in p:
        energy = momentum[0]
        along = momentum[boost_direction]
        # Boosted energy first, then the three spatial components: only the
        # component along the boost axis mixes with the energy.
        new_mom = [gamma * energy - gamma * beta * along]
        for axis in (1, 2, 3):
            if axis == boost_direction:
                new_mom.append(-gamma * beta * energy + gamma * momentum[axis])
            else:
                new_mom.append(momentum[axis])
        boosted_set.append(new_mom)

    return boosted_set
#===============================================================================
# Helper class MatrixElementEvaluator
#===============================================================================
class MatrixElementEvaluator(object):
    """Class taking care of matrix element evaluation, storing
    relevant quantities for speedup.

    Caching: compiled Python matrix-element classes and ALOHA routines are
    exec'd into this module's globals() (and registered in ADDED_GLOBAL) so
    they can be reused across calls; color bases/matrices are cached in
    self.stored_quantities."""

    def __init__(self, model , param_card = None,
                 auth_skipping = False, reuse = True, cmd = FakeInterface()):
        """Initialize object with stored_quantities, helas_writer,
        model, etc.
        auth_skipping = True means that any identical matrix element will be
        evaluated only once
        reuse = True means that the matrix element corresponding to a
        given process can be reused (turn off if you are using
        different models for the same process)"""

        self.cmd = cmd

        # Writer for the Python matrix elements
        self.helas_writer = helas_call_writers.PythonUFOHelasCallWriter(model)

        # Read a param_card and calculate couplings
        self.full_model = model_reader.ModelReader(model)
        try:
            self.full_model.set_parameters_and_couplings(param_card)
        except MadGraph5Error:
            # An explicitly supplied card (path or open file) must be valid:
            # re-raise. Otherwise fall back to the model defaults.
            if isinstance(param_card, (str,file)):
                raise
            logger.warning('param_card present in the event file not compatible. We will use the default one.')
            self.full_model.set_parameters_and_couplings()

        self.auth_skipping = auth_skipping
        self.reuse = reuse
        self.cmass_scheme = cmd.options['complex_mass_scheme']
        # ALOHA routine names already generated for this evaluator
        self.store_aloha = []
        # Per-instance cache: matrix elements, color bases/matrices, lorentz
        self.stored_quantities = {}

    #===============================================================================
    # Helper function evaluate_matrix_element
    #===============================================================================
    def evaluate_matrix_element(self, matrix_element, p=None, full_model=None,
                                gauge_check=False, auth_skipping=None, output='m2',
                                options=None):
        """Calculate the matrix element and evaluate it for a phase space point
        output is either m2, amp, jamp

        p: list of [E, px, py, pz] momenta; generated via get_momenta if None.
        Returns (m2, amp2) when output == 'm2', a dict {'m2':..., output:...}
        otherwise, or None when the matrix element is skipped/fails."""

        if full_model:
            self.full_model = full_model
        process = matrix_element.get('processes')[0]
        model = process.get('model')

        if "matrix_elements" not in self.stored_quantities:
            self.stored_quantities['matrix_elements'] = []
            matrix_methods = {}

        # Fast path: a previously exec'd Matrix_<proc> class is still in
        # globals() and a phase-space point was supplied.
        if self.reuse and "Matrix_%s" % process.shell_string() in globals() and p:
            # Evaluate the matrix element for the momenta p
            matrix = eval("Matrix_%s()" % process.shell_string())
            me_value = matrix.smatrix(p, self.full_model)
            if output == "m2":
                return matrix.smatrix(p, self.full_model), matrix.amp2
            else:
                m2 = matrix.smatrix(p, self.full_model)
                return {'m2': m2, output:getattr(matrix, output)}
        if (auth_skipping or self.auth_skipping) and matrix_element in \
               self.stored_quantities['matrix_elements']:
            # Exactly the same matrix element has been tested
            logger.info("Skipping %s, " % process.nice_string() + \
                        "identical matrix element already tested" \
                        )
            return None

        self.stored_quantities['matrix_elements'].append(matrix_element)

        # Create an empty color basis, and the list of raw
        # colorize objects (before simplification) associated
        # with amplitude
        if "list_colorize" not in self.stored_quantities:
            self.stored_quantities["list_colorize"] = []
        if "list_color_basis" not in self.stored_quantities:
            self.stored_quantities["list_color_basis"] = []
        if "list_color_matrices" not in self.stored_quantities:
            self.stored_quantities["list_color_matrices"] = []

        col_basis = color_amp.ColorBasis()
        new_amp = matrix_element.get_base_amplitude()
        matrix_element.set('base_amplitude', new_amp)
        colorize_obj = col_basis.create_color_dict_list(new_amp)

        try:
            # If the color configuration of the ME has
            # already been considered before, recycle
            # the information
            col_index = self.stored_quantities["list_colorize"].index(colorize_obj)
        except ValueError:
            # If not, create color basis and color
            # matrix accordingly
            self.stored_quantities['list_colorize'].append(colorize_obj)
            col_basis.build()
            self.stored_quantities['list_color_basis'].append(col_basis)
            col_matrix = color_amp.ColorMatrix(col_basis)
            self.stored_quantities['list_color_matrices'].append(col_matrix)
            # -1 indexes the entry just appended
            col_index = -1

        # Set the color for the matrix element
        matrix_element.set('color_basis',
                           self.stored_quantities['list_color_basis'][col_index])
        matrix_element.set('color_matrix',
                           self.stored_quantities['list_color_matrices'][col_index])

        # Create the needed aloha routines
        if "used_lorentz" not in self.stored_quantities:
            self.stored_quantities["used_lorentz"] = []

        me_used_lorentz = set(matrix_element.get_used_lorentz())
        me_used_lorentz = [lorentz for lorentz in me_used_lorentz \
                               if lorentz not in self.store_aloha]

        aloha_model = create_aloha.AbstractALOHAModel(model.get('name'))
        aloha_model.add_Lorentz_object(model.get('lorentz'))
        aloha_model.compute_subset(me_used_lorentz)

        # Write out the routines in Python
        aloha_routines = []
        for routine in aloha_model.values():
            aloha_routines.append(routine.write(output_dir = None,
                                                mode='mg5',
                                                language = 'Python'))
        for routine in aloha_model.external_routines:
            aloha_routines.append(
                     open(aloha_model.locate_external(routine, 'Python')).read())

        # Define the routines to be available globally
        # (registered in ADDED_GLOBAL so clean_added_globals can undo this)
        previous_globals = list(globals().keys())
        for routine in aloha_routines:
            exec(routine, globals())
        for key in globals().keys():
            if key not in previous_globals:
                ADDED_GLOBAL.append(key)

        # Add the defined Aloha routines to used_lorentz
        self.store_aloha.extend(me_used_lorentz)
        # Export the matrix element to Python calls
        exporter = export_python.ProcessExporterPython(matrix_element,
                                                       self.helas_writer)
        try:
            matrix_methods = exporter.get_python_matrix_methods(\
                gauge_check=gauge_check)
            # print "I got matrix_methods=",str(matrix_methods.items()[0][1])
        except helas_call_writers.HelasWriterError, error:
            logger.info(error)
            return None
        # If one wants to output the python code generated for the computation
        # of these matrix elements, it is possible to run the following cmd
        # open('output_path','w').write(matrix_methods[process.shell_string()])
        if self.reuse:
            # Define the routines (globally)
            exec(matrix_methods[process.shell_string()], globals())
            ADDED_GLOBAL.append('Matrix_%s' % process.shell_string())
        else:
            # Define the routines (locally is enough)
            exec(matrix_methods[process.shell_string()])
        # Generate phase space point to use
        if not p:
            p, w_rambo = self.get_momenta(process, options)
        # Evaluate the matrix element for the momenta p
        exec("data = Matrix_%s()" % process.shell_string())
        if output == "m2":
            return data.smatrix(p, self.full_model), data.amp2
        else:
            m2 = data.smatrix(p,self.full_model)
            return {'m2': m2, output:getattr(data, output)}

    #===============================================================================
    # Helper function get_momenta
    #===============================================================================
    def get_momenta(self, process, options=None):
        """Get a point in phase space for the external states in the given
        process, with the CM energy given. The incoming particles are
        assumed to be oriented along the z axis, with particle 1 along the
        positive z axis.

        Returns (momenta, weight) where momenta is a list of [E, px, py, pz]
        for all external legs. NOTE(review): when options is given it must
        contain both 'energy' and 'events' keys — confirm against callers."""

        if not options:
            energy=1000
            events=None
        else:
            energy = options['energy']
            events = options['events']
            # to_skip is only needed on the events-file branch below, which
            # can only be reached when options were provided.
            to_skip = 0

        if not (isinstance(process, base_objects.Process) and \
                isinstance(energy, (float,int))):
            raise rambo.RAMBOError, "Not correct type for arguments to get_momenta"


        sorted_legs = sorted(process.get('legs'), lambda l1, l2:\
                             l1.get('number') - l2.get('number'))

        # If an events file is given use it for getting the momentum
        if events:
            ids = [l.get('id') for l in sorted_legs]
            import MadSpin.decay as madspin
            if not hasattr(self, 'event_file'):
                fsock = open(events)
                self.event_file = madspin.Event(fsock)

            skip = 0
            # while/else: the else triggers when the file runs out of events
            # without finding one matching the requested particle ids.
            while self.event_file.get_next_event() != 'no_event':
                event = self.event_file.particle
                #check if the event is compatible
                event_ids = [p['pid'] for p in event.values()]
                if event_ids == ids:
                    skip += 1
                    if skip > to_skip:
                        break
            else:
                raise MadGraph5Error, 'No compatible events for %s' % ids
            p = []
            for part in event.values():
                m = part['momentum']
                p.append([m.E, m.px, m.py, m.pz])
            return p, 1

        nincoming = len([leg for leg in sorted_legs if leg.get('state') == False])
        nfinal = len(sorted_legs) - nincoming

        # Find masses of particles
        mass_strings = [self.full_model.get_particle(l.get('id')).get('mass') \
                         for l in sorted_legs]
        mass = [self.full_model.get('parameter_dict')[m] for m in mass_strings]
        mass = [m.real for m in mass]
        #mass = [math.sqrt(m.real) for m in mass]



        # Make sure energy is large enough for incoming and outgoing particles
        energy = max(energy, sum(mass[:nincoming]) + 200.,
                     sum(mass[nincoming:]) + 200.)

        # Special case: 1 > 1 "decay" — back-to-back beams plus the particle
        # at rest (momenta list has nincoming + 1 entries).
        if nfinal == 1:
            p = []
            energy = mass[-1]
            p.append([energy/2,0,0,energy/2])
            p.append([energy/2,0,0,-energy/2])
            p.append([mass[-1],0,0,0])
            return p, 1.0

        e2 = energy**2
        m1 = mass[0]
        p = []

        # RAMBO uses 1-based Fortran-style indexing for the final-state masses
        masses = rambo.FortranList(nfinal)
        for i in range(nfinal):
            masses[i+1] = mass[nincoming + i]

        if nincoming == 1:

            # Momenta for the incoming particle
            p.append([abs(m1), 0., 0., 0.])

            p_rambo, w_rambo = rambo.RAMBO(nfinal, abs(m1), masses)

            # Reorder momenta from px,py,pz,E to E,px,py,pz scheme
            for i in range(1, nfinal+1):
                momi = [p_rambo[(4,i)], p_rambo[(1,i)],
                        p_rambo[(2,i)], p_rambo[(3,i)]]
                p.append(momi)

            return p, w_rambo

        if nincoming != 2:
            raise rambo.RAMBOError('Need 1 or 2 incoming particles')

        if nfinal == 1:
            energy = masses[1]
            if masses[1] == 0.0:
                raise rambo.RAMBOError('The kinematic 2 > 1 with the final'+\
                                       ' state particle massless is invalid')

        e2 = energy**2
        m2 = mass[1]

        # |p| of the two incoming particles in the CM frame from the
        # Källén-type expression; e2 is re-bound to the energy of beam 2 below.
        mom = math.sqrt((e2**2 - 2*e2*m1**2 + m1**4 - 2*e2*m2**2 - \
                         2*m1**2*m2**2 + m2**4) / (4*e2))
        e1 = math.sqrt(mom**2+m1**2)
        e2 = math.sqrt(mom**2+m2**2)
        # Set momenta for incoming particles
        p.append([e1, 0., 0., mom])
        p.append([e2, 0., 0., -mom])

        if nfinal == 1:
            p.append([energy, 0., 0., 0.])
            return p, 1.

        p_rambo, w_rambo = rambo.RAMBO(nfinal, energy, masses)

        # Reorder momenta from px,py,pz,E to E,px,py,pz scheme
        for i in range(1, nfinal+1):
            momi = [p_rambo[(4,i)], p_rambo[(1,i)],
                    p_rambo[(2,i)], p_rambo[(3,i)]]
            p.append(momi)

        return p, w_rambo

#===============================================================================
# Helper class LoopMatrixElementEvaluator
#===============================================================================
class LoopMatrixElementEvaluator(MatrixElementEvaluator):
    """Class taking care of matrix element evaluation for loop processes.

    Unlike the mother class, evaluation is performed by generating a
    standalone MadLoop Fortran output on disk, compiling it and running the
    resulting ./check executable."""

    def __init__(self,cuttools_dir=None, output_path=None, tir_dir={},
                 cmd=FakeInterface(),*args,**kwargs):
        """Allow for initializing the MG5 root where the temporary fortran
        output for checks is placed.

        NOTE(review): tir_dir={} is a mutable default argument; it is only
        read (via MLoptions.update) so no sharing bug is visible here."""

        super(LoopMatrixElementEvaluator,self).__init__(*args,cmd=cmd,**kwargs)

        self.mg_root=self.cmd._mgme_dir
        # If no specific output path is specified, then write in MG5 root directory
        if output_path is None:
            self.output_path = self.cmd._mgme_dir
        else:
            self.output_path = output_path

        self.cuttools_dir=cuttools_dir
        self.tir_dir=tir_dir
        self.loop_optimized_output = cmd.options['loop_optimized_output']
        # Set proliferate to true if you want to keep the produced directories
        # and eventually reuse them if possible
        self.proliferate=True

    #===============================================================================
    # Helper function evaluate_matrix_element for loops
    #===============================================================================
    def evaluate_matrix_element(self, matrix_element, p=None, options=None,
                                gauge_check=False, auth_skipping=None, output='m2',
                                PS_name = None, MLOptions={}):
        """Calculate the matrix element and evaluate it for a phase space point
        Output can only be 'm2. The 'jamp' and 'amp' returned values are just
        empty lists at this point.
        If PS_name is not none the written out PS.input will be saved in
        the file PS.input_<PS_name> as well."""

        process = matrix_element.get('processes')[0]
        model = process.get('model')

        if options and 'split_orders' in options.keys():
            split_orders = options['split_orders']
        else:
            split_orders = -1

        if "loop_matrix_elements" not in self.stored_quantities:
            self.stored_quantities['loop_matrix_elements'] = []

        if (auth_skipping or self.auth_skipping) and matrix_element in \
                    [el[0] for el in self.stored_quantities['loop_matrix_elements']]:
            # Exactly the same matrix element has been tested
            logger.info("Skipping %s, " % process.nice_string() + \
                        "identical matrix element already tested" )
            return None

        # Generate phase space point to use
        if not p:
            p, w_rambo = self.get_momenta(process, options=options)

        # Reuse a previously generated Fortran output directory if this
        # matrix element was already processed; otherwise generate it anew.
        if matrix_element in [el[0] for el in \
                                 self.stored_quantities['loop_matrix_elements']]:
            export_dir=self.stored_quantities['loop_matrix_elements'][\
                [el[0] for el in self.stored_quantities['loop_matrix_elements']\
                 ].index(matrix_element)][1]
            logger.debug("Reusing generated output %s"%str(export_dir))
        else:
            export_dir=pjoin(self.output_path,temp_dir_prefix)
            if os.path.isdir(export_dir):
                if not self.proliferate:
                    raise InvalidCmd("The directory %s already exist. Please remove it."%str(export_dir))
                else:
                    # Find the first free TMP_CHECK_<i> directory name.
                    # NOTE(review): local 'id' shadows the builtin.
                    id=1
                    while os.path.isdir(pjoin(self.output_path,\
                                      '%s_%i'%(temp_dir_prefix,id))):
                        id+=1
                    export_dir=pjoin(self.output_path,'%s_%i'%(temp_dir_prefix,id))

            if self.proliferate:
                self.stored_quantities['loop_matrix_elements'].append(\
                                                    (matrix_element,export_dir))

            # I do the import here because there is some cyclic import of export_v4
            # otherwise
            import madgraph.loop.loop_exporters as loop_exporters
            if self.loop_optimized_output:
                exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
            else:
                exporter_class=loop_exporters.LoopProcessExporterFortranSA

            MLoptions = {'clean': True,
                       'complex_mass': self.cmass_scheme,
                       'export_format':'madloop',
                       'mp':True,
                       'SubProc_prefix':'P',
                       'compute_color_flows': not process.get('has_born'),
                       'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
                       'cuttools_dir': self.cuttools_dir,
                       'fortran_compiler': self.cmd.options['fortran_compiler'],
                       'output_dependencies': self.cmd.options['output_dependencies']}

            MLoptions.update(self.tir_dir)

            # Write the full standalone MadLoop Fortran output for this process
            FortranExporter = exporter_class(\
                self.mg_root, export_dir, MLoptions)
            FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
            FortranExporter.copy_v4template(modelname=model.get('name'))
            FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
            wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
            wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
                                                                for c in l]))
            FortranExporter.convert_model_to_mg4(model,wanted_lorentz,wanted_couplings)
            FortranExporter.finalize_v4_directory(None,"",False,False,'gfortran')

        MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
                                                  split_orders=split_orders)

        self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
                                      mp = gauge_check and self.loop_optimized_output,
                                      MLOptions=MLOptions)

        # For the Ward-identity gauge check, patch the generated helas calls
        # so one gauge-boson polarization vector is replaced by its momentum.
        if gauge_check:
            file_path, orig_file_content, new_file_content = \
              self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
                                       ['helas_calls_ampb_1.f','loop_matrix.f'])
            file = open(file_path,'w')
            file.write(new_file_content)
            file.close()
            if self.loop_optimized_output:
                mp_file_path, mp_orig_file_content, mp_new_file_content = \
                  self.setup_ward_check(pjoin(export_dir,'SubProcesses'),
                          ['mp_helas_calls_ampb_1.f','mp_compute_loop_coefs.f'],mp=True)
                mp_file = open(mp_file_path,'w')
                mp_file.write(mp_new_file_content)
                mp_file.close()

        # Evaluate the matrix element for the momenta p
        finite_m2 = self.get_me_value(process.shell_string_v4(), 0,\
                          export_dir, p, PS_name = PS_name, verbose=False)[0][0]

        # Restore the original loop_matrix.f code so that it could be reused
        if gauge_check:
            file = open(file_path,'w')
            file.write(orig_file_content)
            file.close()
            if self.loop_optimized_output:
                mp_file = open(mp_file_path,'w')
                mp_file.write(mp_orig_file_content)
                mp_file.close()

        # Now erase the output directory
        if not self.proliferate:
            shutil.rmtree(export_dir)

        if output == "m2":
            # We do not provide details (i.e. amps and Jamps) of the computed
            # amplitudes, hence the []
            return finite_m2, []
        else:
            return {'m2': finite_m2, output:[]}

    def fix_MadLoopParamCard(self,dir_name, mp=False, loop_filter=False,
                             DoubleCheckHelicityFilter=False, MLOptions={}):
        """ Set parameters in MadLoopParams.dat suited for these checks.MP
        stands for multiple precision and can either be a bool or an integer
        to specify the mode."""

        # Instanciate a MadLoopParam card
        file = open(pjoin(dir_name,'MadLoopParams.dat'), 'r')
        MLCard = bannermod.MadLoopParam(file)

        if isinstance(mp,bool):
            mode = 4 if mp else 1
        else:
            mode = mp

        for key, value in MLOptions.items():
            if key == "MLReductionLib":
                # NOTE(review): this first isinstance(value, int) branch is
                # redundant — the elif chain below re-handles the int case
                # with the same result.
                if isinstance(value, int):
                    ml_reds = str(value)
                if isinstance(value,list):
                    if len(value)==0:
                        ml_reds = '1'
                    else:
                        ml_reds="|".join([str(vl) for vl in value])
                elif isinstance(value, str):
                    ml_reds = value
                elif isinstance(value, int):
                    ml_reds = str(value)
                else:
                    raise MadGraph5Error, 'The argument %s '%str(value)+\
                          ' in fix_MadLoopParamCard must be a string, integer'+\
                          ' or a list.'
                MLCard.set("MLReductionLib",ml_reds)
            elif key == 'ImprovePS':
                MLCard.set('ImprovePSPoint',2 if value else -1)
            elif key == 'ForceMP':
                mode = 4
            elif key in MLCard:
                MLCard.set(key,value)
            else:
                raise Exception, 'The MadLoop options %s specified in function'%key+\
                  ' fix_MadLoopParamCard does not correspond to an option defined'+\
                  ' MadLoop nor is it specially handled in this function.'

        MLCard.set('CTModeRun',mode)
        MLCard.set('CTModeInit',mode)
        MLCard.set('UseLoopFilter',loop_filter)
        MLCard.set('DoubleCheckHelicityFilter',DoubleCheckHelicityFilter)
        MLCard.write(pjoin(dir_name,os.pardir,'SubProcesses','MadLoopParams.dat'))

    @classmethod
    def get_me_value(cls, proc, proc_id, working_dir, PSpoint=[], \
                     PS_name = None, verbose=True):
        """Compile and run ./check, then parse the output and return the result
        for process with id = proc_id and PSpoint if specified.
        If PS_name is not none the written out PS.input will be saved in
        the file PS.input_<PS_name> as well

        Returns the 'tuple' format of parse_check_output; a zeroed tuple
        ((0.0, 0.0, 0.0, 0.0, 0), []) signals any failure along the way.
        NOTE(review): uses the root 'logging' module rather than the module
        logger used elsewhere in this file."""
        if verbose:
            sys.stdout.write('.')
            sys.stdout.flush()

        shell_name = None
        directories = glob.glob(pjoin(working_dir, 'SubProcesses',
                                  'P%i_*' % proc_id))
        if directories and os.path.isdir(directories[0]):
            shell_name = os.path.basename(directories[0])

        # If directory doesn't exist, skip and return 0
        if not shell_name:
            logging.info("Directory hasn't been created for process %s" %proc)
            return ((0.0, 0.0, 0.0, 0.0, 0), [])

        if verbose: logging.debug("Working on process %s in dir %s" % (proc, shell_name))

        dir_name = pjoin(working_dir, 'SubProcesses', shell_name)
        # Make sure to recreate the executable and modified sources
        if os.path.isfile(pjoin(dir_name,'check')):
            os.remove(pjoin(dir_name,'check'))
        try:
            os.remove(pjoin(dir_name,'check_sa.o'))
            os.remove(pjoin(dir_name,'loop_matrix.o'))
        except OSError:
            # Object files may legitimately be absent on a clean directory
            pass
        # Now run make
        devnull = open(os.devnull, 'w')
        retcode = subprocess.call(['make','check'],
                                   cwd=dir_name, stdout=devnull, stderr=devnull)
        devnull.close()

        if retcode != 0:
            logging.info("Error while executing make in %s" % shell_name)
            return ((0.0, 0.0, 0.0, 0.0, 0), [])

        # If a PS point is specified, write out the corresponding PS.input
        if PSpoint:
            misc.write_PS_input(pjoin(dir_name, 'PS.input'),PSpoint)
            # Also save the PS point used in PS.input_<PS_name> if the user
            # wanted so. It is used for the lorentz check.
            if not PS_name is None:
                misc.write_PS_input(pjoin(dir_name, \
                                                 'PS.input_%s'%PS_name),PSpoint)
        # Run ./check
        try:
            output = subprocess.Popen('./check',
                        cwd=dir_name,
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT).stdout
            output.read()
            output.close()
            if os.path.exists(pjoin(dir_name,'result.dat')):
                return cls.parse_check_output(file(pjoin(dir_name,\
                                               'result.dat')),format='tuple')
            else:
                logging.warning("Error while looking for file %s"%str(os.path\
                                                  .join(dir_name,'result.dat')))
                return ((0.0, 0.0, 0.0, 0.0, 0), [])
        except IOError:
            logging.warning("Error while executing ./check in %s" % shell_name)
            return ((0.0, 0.0, 0.0, 0.0, 0), [])

    @classmethod
    def parse_check_output(cls,output,format='tuple'):
        """Parse the output string and return a pair where first four values are
        the finite, born, single and double pole of the ME and the fourth is the
        GeV exponent and the second value is a list of 4 momenta for all particles
        involved. Return the answer in two possible formats, 'tuple' or 'dict'.

        output may be an open file, a list of lines, or a single string."""

        res_dict = {'res_p':[],
                    'born':0.0,
                    'finite':0.0,
                    '1eps':0.0,
                    '2eps':0.0,
                    'gev_pow':0,
                    'export_format':'Default',
                    'accuracy':0.0,
                    'return_code':0,
                    'Split_Orders_Names':[],
                    'Loop_SO_Results':[],
                    'Born_SO_Results':[],
                    'Born_kept':[],
                    'Loop_kept':[]
                    }
        res_p = []

        # output is supposed to be a file, if it is its content directly then
        # I change it to be the list of line.
        if isinstance(output,file) or isinstance(output,list):
            text=output
        elif isinstance(output,str):
            text=output.split('\n')
        else:
            raise MadGraph5Error, 'Type for argument output not supported in'+\
                                                          ' parse_check_output.'
        # Each line of result.dat is a whitespace-separated record whose
        # first token is a tag identifying the quantity on that line.
        for line in text:
            splitline=line.split()
            if len(splitline)==0:
                continue
            elif splitline[0]=='PS':
                res_p.append([float(s) for s in splitline[1:]])
            elif splitline[0]=='BORN':
                res_dict['born']=float(splitline[1])
            elif splitline[0]=='FIN':
                res_dict['finite']=float(splitline[1])
            elif splitline[0]=='1EPS':
                res_dict['1eps']=float(splitline[1])
            elif splitline[0]=='2EPS':
                res_dict['2eps']=float(splitline[1])
            elif splitline[0]=='EXP':
                res_dict['gev_pow']=int(splitline[1])
            elif splitline[0]=='Export_Format':
                res_dict['export_format']=splitline[1]
            elif splitline[0]=='ACC':
                res_dict['accuracy']=float(splitline[1])
            elif splitline[0]=='RETCODE':
                res_dict['return_code']=int(splitline[1])
            elif splitline[0]=='Split_Orders_Names':
                res_dict['Split_Orders_Names']=splitline[1:]
            elif splitline[0] in ['Born_kept', 'Loop_kept']:
                res_dict[splitline[0]] = [kept=='T' for kept in splitline[1:]]
            elif splitline[0] in ['Loop_SO_Results', 'Born_SO_Results']:
                # The value for this key of this dictionary is a list of elements
                # with format ([],{}) where the first list specifies the split
                # orders to which the dictionary in the second position corresponds
                # to.
                res_dict[splitline[0]].append(\
                                      ([int(el) for el in splitline[1:]],{}))
            elif splitline[0]=='SO_Loop':
                # Fills the dict of the most recently opened Loop_SO_Results
                res_dict['Loop_SO_Results'][-1][1][splitline[1]]=\
                                                         float(splitline[2])
            elif splitline[0]=='SO_Born':
                res_dict['Born_SO_Results'][-1][1][splitline[1]]=\
                                                         float(splitline[2])

        res_dict['res_p'] = res_p

        if format=='tuple':
            return ((res_dict['finite'],res_dict['born'],res_dict['1eps'],
                     res_dict['2eps'],res_dict['gev_pow']), res_dict['res_p'])
        else:
            return res_dict

    def setup_ward_check(self, working_dir, file_names, mp = False):
        """ Modify loop_matrix.f so to have one external massless gauge boson
        polarization vector turned into its momentum. It is not a pretty and
        flexible solution but it works for this particular case.

        Returns (path, original_content, patched_content) so the caller can
        write the patch and later restore the original."""

        shell_name = None
        directories = glob.glob(pjoin(working_dir,'P0_*'))
        if directories and os.path.isdir(directories[0]):
            shell_name = os.path.basename(directories[0])

        dir_name = pjoin(working_dir, shell_name)

        # Look, in order, for all the possible file names provided.
        ind=0
        while ind<len(file_names) and not os.path.isfile(pjoin(dir_name,
                                                         file_names[ind])):
            ind += 1
        if ind==len(file_names):
            raise Exception, "No helas calls output file found."

        helas_file_name=pjoin(dir_name,file_names[ind])
        file = open(pjoin(dir_name,helas_file_name), 'r')

        helas_calls_out=""
        original_file=""
        # Matches the (possibly multi-precision) VXXXXX call that builds a
        # massless (ZERO-mass) external vector wavefunction.
        gaugeVectorRegExp=re.compile(\
         r"CALL (MP\_)?VXXXXX\(P\(0,(?P<p_id>\d+)\),((D)?CMPLX\()?ZERO((,KIND\=16)?\))?,"+
         r"NHEL\(\d+\),[\+\-]1\*IC\(\d+\),W\(1,(?P<wf_id>\d+(,H)?)\)\)")
        foundGauge=False
        # Now we modify the first massless gauge vector wavefunction
        for line in file:
            helas_calls_out+=line
            original_file+=line
            if line.find("INCLUDE 'coupl.inc'") != -1 or \
                         line.find("INCLUDE 'mp_coupl_same_name.inc'") !=-1:
                helas_calls_out+=" INTEGER WARDINT\n"
            if not foundGauge:
                res=gaugeVectorRegExp.search(line)
                if res!=None:
                    foundGauge=True
                    # Overwrite the polarization vector with the momentum
                    helas_calls_out+=" DO WARDINT=1,4\n"
                    helas_calls_out+=" W(WARDINT+4,"+res.group('wf_id')+")="
                    if not mp:
                        helas_calls_out+=\
                            "DCMPLX(P(WARDINT-1,"+res.group('p_id')+"),0.0D0)\n"
                    else:
                        helas_calls_out+="CMPLX(P(WARDINT-1,"+\
                                   res.group('p_id')+"),0.0E0_16,KIND=16)\n"
                    helas_calls_out+=" ENDDO\n"
        file.close()

        return pjoin(dir_name,helas_file_name), original_file, helas_calls_out

#===============================================================================
# Helper class LoopMatrixElementTimer
#===============================================================================
class LoopMatrixElementTimer(LoopMatrixElementEvaluator):
869 """Class taking care of matrix element evaluation and running timing for 870 loop processes.""" 871
    def __init__(self, *args, **kwargs):
        """ Same as the mother for now """
        # Simply delegate to LoopMatrixElementEvaluator: the timing checks do
        # not need any extra state at construction time.
        LoopMatrixElementEvaluator.__init__(self,*args, **kwargs)
    @classmethod
    def get_MadLoop_Params(cls,MLCardPath):
        """ Return a dictionary of the parameter of the MadLoopParamCard.
        The key is the name of the parameter and the value is the corresponding
        string read from the card.

        NOTE(review): strictly speaking this returns a bannermod.MadLoopParam
        instance (which behaves like a dictionary of card entries), not a
        plain dict."""

        return bannermod.MadLoopParam(MLCardPath)
883 884 885 @classmethod
886 - def set_MadLoop_Params(cls,MLCardPath,params):
887 """ Set the parameters in MadLoopParamCard to the values specified in 888 the dictionary params. 889 The key is the name of the parameter and the value is the corresponding 890 string to write in the card.""" 891 892 MLcard = bannermod.MadLoopParam(MLCardPath) 893 for key,value in params.items(): 894 MLcard.set(key, value, ifnotdefault=False) 895 MLcard.write(MLCardPath, commentdefault=True)
896
897 - def skip_loop_evaluation_setup(self, dir_name, skip=True):
898 """ Edit loop_matrix.f in order to skip the loop evaluation phase. 899 Notice this only affects the double precision evaluation which is 900 normally fine as we do not make the timing check on mp.""" 901 902 file = open(pjoin(dir_name,'loop_matrix.f'), 'r') 903 loop_matrix = file.read() 904 file.close() 905 906 file = open(pjoin(dir_name,'loop_matrix.f'), 'w') 907 loop_matrix = re.sub(r"SKIPLOOPEVAL=\S+\)","SKIPLOOPEVAL=%s)"%('.TRUE.' 908 if skip else '.FALSE.'), loop_matrix) 909 file.write(loop_matrix) 910 file.close()
911
912 - def boot_time_setup(self, dir_name, bootandstop=True):
913 """ Edit loop_matrix.f in order to set the flag which stops the 914 execution after booting the program (i.e. reading the color data).""" 915 916 file = open(pjoin(dir_name,'loop_matrix.f'), 'r') 917 loop_matrix = file.read() 918 file.close() 919 920 file = open(pjoin(dir_name,'loop_matrix.f'), 'w') 921 loop_matrix = re.sub(r"BOOTANDSTOP=\S+\)","BOOTANDSTOP=%s)"%('.TRUE.' 922 if bootandstop else '.FALSE.'), loop_matrix) 923 file.write(loop_matrix) 924 file.close()
925
    def setup_process(self, matrix_element, export_dir, reusing = False,
                      param_card = None,MLOptions={},clean=True):
        """ Output the matrix_element in argument and perform the initialization
        while providing some details about the output in the dictionary returned.
        Returns None if anything fails.

        Arguments:
            matrix_element -- HelasMatrixElement to output (or, when reusing,
                              only its directory is used).
            export_dir     -- target directory of the MadLoop standalone output.
            reusing        -- reuse an already existing output directory.
            param_card     -- path or object written to Cards/param_card.dat.
            MLOptions      -- options forwarded to fix_MadLoopParamCard.
            clean          -- regenerate the output from scratch.

        The returned dictionary carries the timings 'Process_output',
        'HELAS_MODEL_compilation', 'Initialization', 'Process_compilation'
        and the path 'dir_path' of the P0_* subprocess directory."""

        infos={'Process_output': None,
               'HELAS_MODEL_compilation' : None,
               'dir_path' : None,
               'Initialization' : None,
               'Process_compilation' : None}

        if not reusing and clean:
            if os.path.isdir(export_dir):
                # NOTE(review): clean_up targets self.output_path while the
                # existence test is on export_dir — presumably export_dir
                # lives under self.output_path; confirm.
                clean_up(self.output_path)
                if os.path.isdir(export_dir):
                    raise InvalidCmd(\
                            "The directory %s already exist. Please remove it."\
                                                             %str(export_dir))
        else:
            if not os.path.isdir(export_dir):
                raise InvalidCmd(\
                    "Could not find the directory %s to reuse."%str(export_dir))


        if not reusing and clean:
            model = matrix_element['processes'][0].get('model')
            # I do the import here because there is some cyclic import of export_v4
            # otherwise
            import madgraph.loop.loop_exporters as loop_exporters
            if self.loop_optimized_output:
                exporter_class=loop_exporters.LoopProcessOptimizedExporterFortranSA
            else:
                exporter_class=loop_exporters.LoopProcessExporterFortranSA

            MLoptions = {'clean': True,
                       'complex_mass': self.cmass_scheme,
                       'export_format':'madloop',
                       'mp':True,
                       'SubProc_prefix':'P',
                       'compute_color_flows':not matrix_element['processes'][0].get('has_born'),
                       'loop_dir': pjoin(self.mg_root,'Template','loop_material'),
                       'cuttools_dir': self.cuttools_dir,
                       'fortran_compiler':self.cmd.options['fortran_compiler'],
                       'output_dependencies':self.cmd.options['output_dependencies']}

            MLoptions.update(self.tir_dir)

            # Time the fortran output of the process itself.
            start=time.time()
            FortranExporter = exporter_class(self.mg_root, export_dir, MLoptions)
            FortranModel = helas_call_writers.FortranUFOHelasCallWriter(model)
            FortranExporter.copy_v4template(modelname=model.get('name'))
            FortranExporter.generate_subprocess_directory_v4(matrix_element, FortranModel)
            wanted_lorentz = list(set(matrix_element.get_used_lorentz()))
            wanted_couplings = list(set([c for l in matrix_element.get_used_couplings() \
                                                                for c in l]))
            FortranExporter.convert_model_to_mg4(self.full_model,wanted_lorentz,wanted_couplings)
            infos['Process_output'] = time.time()-start
            # Time the HELAS/model compilation separately.
            start=time.time()
            FortranExporter.finalize_v4_directory(None,"",False,False,'gfortran')
            infos['HELAS_MODEL_compilation'] = time.time()-start

        # Copy the parameter card if provided
        if param_card != None:
            if isinstance(param_card, str):
                cp(pjoin(param_card),\
                              pjoin(export_dir,'Cards','param_card.dat'))
            else:
                param_card.write(pjoin(export_dir,'Cards','param_card.dat'))

        # First Initialize filters (in later versions where this will be done
        # at generation time, it can be skipped)
        MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
                                          read_ps = False, npoints = 4)
        self.fix_MadLoopParamCard(pjoin(export_dir,'Cards'),
                                 mp = False, loop_filter = True,MLOptions=MLOptions)

        # Locate the single P0_* subprocess directory of the output.
        shell_name = None
        directories = glob.glob(pjoin(export_dir, 'SubProcesses','P0_*'))
        if directories and os.path.isdir(directories[0]):
            shell_name = os.path.basename(directories[0])
        dir_name = pjoin(export_dir, 'SubProcesses', shell_name)
        infos['dir_path']=dir_name

        attempts = [3,15]
        # remove check and check_sa.o for running initialization again
        try:
            os.remove(pjoin(dir_name,'check'))
            os.remove(pjoin(dir_name,'check_sa.o'))
        except OSError:
            pass

        nPS_necessary = MadLoopInitializer.run_initialization(dir_name,
                                pjoin(export_dir,'SubProcesses'),infos,\
                                req_files = ['HelFilter.dat','LoopFilter.dat'],
                                attempts = attempts)
        # NOTE(review): 'attempts' is the local list [3,15] and is never
        # reassigned, so this guard looks dead — presumably it was meant to
        # test a value returned by run_initialization; confirm.
        if attempts is None:
            logger.error("Could not compile the process %s,"%shell_name+\
                    " try to generate it via the 'generate' command.")
            return None
        if nPS_necessary is None:
            logger.error("Could not initialize the process %s"%shell_name+\
                            " with %s PS points."%max(attempts))
            return None
        elif nPS_necessary > min(attempts):
            logger.warning("Could not initialize the process %s"%shell_name+\
              " with %d PS points. It needed %d."%(min(attempts),nPS_necessary))

        return infos
1035
    def time_matrix_element(self, matrix_element, reusing = False,
                       param_card = None, keep_folder = False, options=None,
                       MLOptions = {}):
        """ Output the matrix_element in argument and give detail information
        about the timing for its output and running.

        Returns None on failure, otherwise the dictionary built by
        setup_process() extended with disk usage ('du_*'), helicity counts,
        'Booting_time', 'ram_usage' and the per-PS-point averages
        'run_(un)polarized_total' and (optimized output only)
        'run_(un)polarized_coefs'."""

        if options and 'split_orders' in options.keys():
            split_orders = options['split_orders']
        else:
            split_orders = -1

        assert ((not reusing and isinstance(matrix_element, \
                 helas_objects.HelasMatrixElement)) or (reusing and
                                 isinstance(matrix_element, base_objects.Process)))
        if not reusing:
            proc_name = matrix_element['processes'][0].shell_string()[2:]
        else:
            proc_name = matrix_element.shell_string()[2:]

        export_dir=pjoin(self.output_path,('SAVED' if keep_folder else '')+\
                                                temp_dir_prefix+"_%s"%proc_name)

        res_timings = self.setup_process(matrix_element,export_dir, \
                                    reusing, param_card,MLOptions = MLOptions)

        if res_timings == None:
            return None
        dir_name=res_timings['dir_path']

        def check_disk_usage(path):
            # Returns the human-readable total reported by 'du' for the glob.
            return subprocess.Popen("du -shc -L "+str(path), \
                stdout=subprocess.PIPE, shell=True).communicate()[0].split()[-2]
        # The above is compatible with python 2.6, not the neater version below
        #return subprocess.check_output(["du -shc %s"%path],shell=True).\
        #                                                         split()[-2]

        res_timings['du_source']=check_disk_usage(pjoin(\
                                                 export_dir,'Source','*','*.f'))
        res_timings['du_process']=check_disk_usage(pjoin(dir_name,'*.f'))
        res_timings['du_color']=check_disk_usage(pjoin(dir_name,'*.dat'))
        res_timings['du_exe']=check_disk_usage(pjoin(dir_name,'check'))

        if not res_timings['Initialization']==None:
            # Initialization ran 4 PS points, each evaluated twice.
            time_per_ps_estimate = (res_timings['Initialization']/4.0)/2.0
        else:
            # We cannot estimate from the initialization, so we run just a 3
            # PS point run to evaluate it.
            MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
                          read_ps = False, npoints = 3, hel_config = -1,
                                                      split_orders=split_orders)
            compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
            time_per_ps_estimate = run_time/3.0

        # Measure the fixed start-up (booting) cost so that it can be
        # subtracted from all subsequent per-point timings.
        self.boot_time_setup(dir_name,bootandstop=True)
        compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
        res_timings['Booting_time'] = run_time
        self.boot_time_setup(dir_name,bootandstop=False)

        # Detect one contributing helicity
        contributing_hel=0
        n_contrib_hel=0
        proc_prefix_file = open(pjoin(dir_name,'proc_prefix.txt'),'r')
        proc_prefix = proc_prefix_file.read()
        proc_prefix_file.close()
        # NOTE(review): py2 'file()' builtin; the handle is never explicitly
        # closed here.
        helicities = file(pjoin(dir_name,'MadLoop5_resources',
                               '%sHelFilter.dat'%proc_prefix)).read().split()
        for i, hel in enumerate(helicities):
            if (self.loop_optimized_output and int(hel)>-10000) or hel=='T':
                if contributing_hel==0:
                    contributing_hel=i+1
                n_contrib_hel += 1

        if contributing_hel==0:
            logger.error("Could not find a contributing helicity "+\
                         "configuration for process %s."%proc_name)
            return None

        res_timings['n_contrib_hel']=n_contrib_hel
        res_timings['n_tot_hel']=len(helicities)

        # We aim at a 15 sec run
        target_pspoints_number = max(int(15.0/time_per_ps_estimate)+1,5)

        logger.info("Checking timing for process %s "%proc_name+\
                                    "with %d PS points."%target_pspoints_number)

        # Polarized run: a single fixed helicity, so use twice as many points.
        MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
                  read_ps = False, npoints = target_pspoints_number*2, \
                       hel_config = contributing_hel, split_orders=split_orders)
        compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
        if compile_time == None: return None
        res_timings['run_polarized_total']=\
               (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)

        # Unpolarized run (helicity sum), also monitoring the RAM footprint.
        MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
             read_ps = False, npoints = target_pspoints_number, hel_config = -1,
                                                      split_orders=split_orders)
        compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name,
                                                                  checkRam=True)
        if compile_time == None: return None
        res_timings['run_unpolarized_total']=\
                   (run_time-res_timings['Booting_time'])/target_pspoints_number
        res_timings['ram_usage'] = ram_usage

        if not self.loop_optimized_output:
            return res_timings

        # For the loop optimized output, we also check the time spent in
        # computing the coefficients of the loop numerator polynomials.

        # So we modify loop_matrix.f in order to skip the loop evaluation phase.
        self.skip_loop_evaluation_setup(dir_name,skip=True)

        MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
             read_ps = False, npoints = target_pspoints_number, hel_config = -1,
                                                      split_orders=split_orders)
        compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
        if compile_time == None: return None
        res_timings['run_unpolarized_coefs']=\
                   (run_time-res_timings['Booting_time'])/target_pspoints_number

        MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
                  read_ps = False, npoints = target_pspoints_number*2, \
                       hel_config = contributing_hel, split_orders=split_orders)
        compile_time, run_time, ram_usage = MadLoopInitializer.make_and_run(dir_name)
        if compile_time == None: return None
        res_timings['run_polarized_coefs']=\
               (run_time-res_timings['Booting_time'])/(target_pspoints_number*2)

        # Restitute the original file.
        self.skip_loop_evaluation_setup(dir_name,skip=False)

        return res_timings

#===============================================================================
# Helper method check_matrix_element_stability
#===============================================================================
    def check_matrix_element_stability(self, matrix_element,options=None,
                          infos_IN = None, param_card = None, keep_folder = False,
                          MLOptions = {}):
        """ Output the matrix_element in argument, run in for nPoints and return
        a dictionary containing the stability information on each of these points.
        If infos are provided, then the matrix element output is skipped and
        reused from a previous run and the content of infos.

        Each PS point is evaluated in several equivalent ways (both CutTools
        loop directions plus Lorentz-transformed copies of the point); the
        spread of these results measures the numerical stability. Points that
        are unstable in double precision are re-evaluated in quadruple
        precision (CutTools only); points that stay unstable there are
        flagged as 'exceptional'."""

        if not options:
            reusing = False
            nPoints = 100
            split_orders = -1
        else:
            reusing = options['reuse']
            nPoints = options['npoints']
            split_orders = options['split_orders']

        assert ((not reusing and isinstance(matrix_element, \
                 helas_objects.HelasMatrixElement)) or (reusing and
                              isinstance(matrix_element, base_objects.Process)))

        # Helper functions
        def format_PS_point(ps, rotation=0):
            """ Write out the specified PS point to the file dir_path/PS.input
            while rotating it if rotation!=0. We consider only rotations of 90
            but one could think of having rotation of arbitrary angle too.
            The first two possibilities, 1 and 2 are a rotation and boost
            along the z-axis so that improve_ps can still work.
            rotation=0 => No rotation
            rotation=1 => Z-axis pi/2 rotation
            rotation=2 => Z-axis pi/4 rotation
            rotation=3 => Z-axis boost
            rotation=4 => (x'=z,y'=-x,z'=-y)
            rotation=5 => (x'=-z,y'=y,z'=x)"""
            if rotation==0:
                p_out=copy.copy(ps)
            elif rotation==1:
                p_out = [[pm[0],-pm[2],pm[1],pm[3]] for pm in ps]
            elif rotation==2:
                sq2 = math.sqrt(2.0)
                p_out = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in ps]
            elif rotation==3:
                p_out = boost_momenta(ps, 3)
            # From this point the transformations will prevent the
            # improve_ps script of MadLoop to work.
            elif rotation==4:
                p_out=[[pm[0],pm[3],-pm[1],-pm[2]] for pm in ps]
            elif rotation==5:
                p_out=[[pm[0],-pm[3],pm[2],pm[1]] for pm in ps]
            else:
                raise MadGraph5Error("Rotation id %i not implemented"%rotation)

            return '\n'.join([' '.join(['%.16E'%pi for pi in p]) for p in p_out])

        def pick_PS_point(proc, options):
            """ Randomly generate a PS point and make sure it is eligible. Then
            return it. Users can edit the cuts here if they want."""
            def Pt(pmom):
                """ Computes the pt of a 4-momentum"""
                return math.sqrt(pmom[1]**2+pmom[2]**2)
            def DeltaR(p1,p2):
                """ Computes the DeltaR between two 4-momenta"""
                # First compute pseudo-rapidities
                p1_vec=math.sqrt(p1[1]**2+p1[2]**2+p1[3]**2)
                p2_vec=math.sqrt(p2[1]**2+p2[2]**2+p2[3]**2)
                eta1=0.5*math.log((p1_vec+p1[3])/(p1_vec-p1[3]))
                eta2=0.5*math.log((p2_vec+p2[3])/(p2_vec-p2[3]))
                # Then azimutal angle phi
                phi1=math.atan2(p1[2],p1[1])
                phi2=math.atan2(p2[2],p2[1])
                dphi=abs(phi2-phi1)
                # Take the wraparound factor into account
                dphi=abs(abs(dphi-math.pi)-math.pi)
                # Now return deltaR
                return math.sqrt(dphi**2+(eta2-eta1)**2)

            def pass_cuts(p):
                """ Defines the cut a PS point must pass"""
                for i, pmom in enumerate(p[2:]):
                    # Pt > 50 GeV
                    if Pt(pmom)<50.0:
                        return False
                    # Delta_R ij > 0.5
                    for pmom2 in p[3+i:]:
                        if DeltaR(pmom,pmom2)<0.5:
                            return False
                return True
            p, w_rambo = self.get_momenta(proc, options)
            if options['events']:
                return p
            # For 2>1 process, we don't check the cuts of course
            while (not pass_cuts(p) and len(p)>3):
                p, w_rambo = self.get_momenta(proc, options)

            # For a 2>1 process, it would always be the same PS point,
            # so here we bring in so boost along the z-axis, just for the sake
            # of it.
            if len(p)==3:
                p = boost_momenta(p,3,random.uniform(0.0,0.99))
            return p

        # Start loop on loop libraries
        # Accuracy threshold of double precision evaluations above which the
        # PS points is also evaluated in quadruple precision
        accuracy_threshold=1.0e-1

        # Number of lorentz transformations to consider for the stability test
        # (along with the loop direction test which is performed by default)
        num_rotations = 1

        if "MLReductionLib" not in MLOptions:
            tools=[1]
        else:
            tools=MLOptions["MLReductionLib"]
            tools=list(set(tools)) # remove the duplication ones
        # not self-contained tir libraries
        tool_var={'pjfry':2,'golem':4}
        for tool in ['pjfry','golem']:
            tool_dir='%s_dir'%tool
            if not tool_dir in self.tir_dir:
                continue
            tool_libpath=self.tir_dir[tool_dir]
            tool_libname="lib%s.a"%tool
            if (not isinstance(tool_libpath,str)) or (not os.path.exists(tool_libpath)) \
                or (not os.path.isfile(pjoin(tool_libpath,tool_libname))):
                if tool_var[tool] in tools:
                    tools.remove(tool_var[tool])
        if not tools:
            return None
        # Normally, this should work for loop-induced processes as well
        if not reusing:
            process = matrix_element['processes'][0]
        else:
            process = matrix_element
        proc_name = process.shell_string()[2:]
        export_dir=pjoin(self.mg_root,("SAVED" if keep_folder else "")+\
                                                temp_dir_prefix+"_%s"%proc_name)

        tools_name={1:'CutTools',2:'PJFry++',3:'IREGI',4:'Golem95'}
        return_dict={}
        return_dict['Stability']={}
        infos_save={'Process_output': None,
                    'HELAS_MODEL_compilation' : None,
                    'dir_path' : None,
                    'Initialization' : None,
                    'Process_compilation' : None}

        for tool in tools:
            tool_name=tools_name[tool]
            # Each evaluations is performed in different ways to assess its stability.
            # There are two dictionaries, one for the double precision evaluation
            # and the second one for quadruple precision (if it was needed).
            # The keys are the name of the evaluation method and the value is the
            # float returned.
            DP_stability = []
            QP_stability = []
            # The unstable point encountered are stored in this list
            Unstable_PS_points = []
            # The exceptional PS points are those which stay unstable in quad prec.
            Exceptional_PS_points = []

            MLoptions={}
            MLoptions["MLReductionLib"]=tool
            # Only the first tool triggers a clean (re)generation of the output.
            clean=(tool==tools[0])
            if infos_IN==None or (tool_name not in infos_IN):
                infos=infos_IN
            else:
                infos=infos_IN[tool_name]

            if not infos:
                infos = self.setup_process(matrix_element,export_dir, \
                                            reusing, param_card,MLoptions,clean)
                if not infos:
                    return None

            if clean:
                infos_save['Process_output']=infos['Process_output']
                infos_save['HELAS_MODEL_compilation']=infos['HELAS_MODEL_compilation']
                infos_save['dir_path']=infos['dir_path']
                infos_save['Process_compilation']=infos['Process_compilation']
            else:
                if not infos['Process_output']:
                    infos['Process_output']=infos_save['Process_output']
                if not infos['HELAS_MODEL_compilation']:
                    infos['HELAS_MODEL_compilation']=infos_save['HELAS_MODEL_compilation']
                if not infos['dir_path']:
                    infos['dir_path']=infos_save['dir_path']
                if not infos['Process_compilation']:
                    infos['Process_compilation']=infos_save['Process_compilation']

            dir_path=infos['dir_path']

            # Reuse old stability runs if present
            savefile='SavedStabilityRun_%s%%s.pkl'%tools_name[tool]
            data_i = 0

            if reusing:
                # Possibly add additional data than the main one in 0
                data_i=0
                while os.path.isfile(pjoin(dir_path,savefile%('_%d'%data_i))):
                    pickle_path = pjoin(dir_path,savefile%('_%d'%data_i))
                    saved_run = save_load_object.load_from_file(pickle_path)
                    if data_i>0:
                        logger.info("Loading additional data stored in %s."%
                                                               str(pickle_path))
                        logger.info("Loaded data moved to %s."%str(pjoin(
                                  dir_path,'LOADED_'+savefile%('_%d'%data_i))))
                        # NOTE(review): the move destination uses '%d' while
                        # the source (and the log message above) use '_%d' —
                        # this looks inconsistent; confirm intended file name.
                        shutil.move(pickle_path,
                                pjoin(dir_path,'LOADED_'+savefile%('%d'%data_i)))
                    DP_stability.extend(saved_run['DP_stability'])
                    QP_stability.extend(saved_run['QP_stability'])
                    Unstable_PS_points.extend(saved_run['Unstable_PS_points'])
                    Exceptional_PS_points.extend(saved_run['Exceptional_PS_points'])
                    data_i += 1

            return_dict['Stability'][tool_name] = {'DP_stability':DP_stability,
                              'QP_stability':QP_stability,
                              'Unstable_PS_points':Unstable_PS_points,
                              'Exceptional_PS_points':Exceptional_PS_points}

            if nPoints==0:
                if len(return_dict['Stability'][tool_name]['DP_stability'])!=0:
                    # In case some data was combined, overwrite the pickle
                    if data_i>1:
                        save_load_object.save_to_file(pjoin(dir_path,
                                  savefile%'_0'),return_dict['Stability'][tool_name])
                    continue
                else:
                    logger.info("ERROR: Not reusing a directory and the number"+\
                                          " of point for the check is zero.")
                    return None

            logger.info("Checking stability of process %s "%proc_name+\
                "with %d PS points by %s."%(nPoints,tool_name))
            if infos['Initialization'] != None:
                time_per_ps_estimate = (infos['Initialization']/4.0)/2.0
                sec_needed = int(time_per_ps_estimate*nPoints*4)
            else:
                sec_needed = 0

            progress_bar = None
            time_info = False
            if sec_needed>5:
                time_info = True
                logger.info("This check should take about "+\
                            "%s to run. Started on %s."%(\
                            str(datetime.timedelta(seconds=sec_needed)),\
                            datetime.datetime.now().strftime("%d-%m-%Y %H:%M")))
            if logger.getEffectiveLevel()<logging.WARNING and \
                (sec_needed>5 or (reusing and infos['Initialization'] == None)):
                widgets = ['Stability check:', pbar.Percentage(), ' ',
                                            pbar.Bar(),' ', pbar.ETA(), ' ']
                progress_bar = pbar.ProgressBar(widgets=widgets, maxval=nPoints,
                                                              fd=sys.stdout)
            MadLoopInitializer.fix_PSPoint_in_check(pjoin(export_dir,'SubProcesses'),
                read_ps = True, npoints = 1, hel_config = -1, split_orders=split_orders)
            # Recompile (Notice that the recompilation is only necessary once) for
            # the change above to take effect.
            # Make sure to recreate the executable and modified sources
            try:
                os.remove(pjoin(dir_path,'check'))
                os.remove(pjoin(dir_path,'check_sa.o'))
            except OSError:
                pass
            # Now run make
            devnull = open(os.devnull, 'w')
            retcode = subprocess.call(['make','check'],
                                   cwd=dir_path, stdout=devnull, stderr=devnull)
            devnull.close()
            if retcode != 0:
                logging.info("Error while executing make in %s" % dir_path)
                return None


            # First create the stability check fortran driver executable if not
            # already present.
            if not os.path.isfile(pjoin(dir_path,'StabilityCheckDriver.f')):
                # Use the presence of the file born_matrix.f to check if this output
                # is a loop_induced one or not.
                if os.path.isfile(pjoin(dir_path,'born_matrix.f')):
                    checkerName = 'StabilityCheckDriver.f'
                else:
                    checkerName = 'StabilityCheckDriver_loop_induced.f'

                with open(pjoin(self.mg_root,'Template','loop_material','Checks',
                    checkerName),'r') as checkerFile:
                    with open(pjoin(dir_path,'proc_prefix.txt')) as proc_prefix:
                        checkerToWrite = checkerFile.read()%{'proc_prefix':
                                   proc_prefix.read()}
                checkerFile = open(pjoin(dir_path,'StabilityCheckDriver.f'),'w')
                checkerFile.write(checkerToWrite)
                checkerFile.close()
            #cp(pjoin(self.mg_root,'Template','loop_material','Checks',\
            #    checkerName),pjoin(dir_path,'StabilityCheckDriver.f'))

            # Make sure to recompile the possibly modified files (time stamps can be
            # off).
            if os.path.isfile(pjoin(dir_path,'StabilityCheckDriver')):
                os.remove(pjoin(dir_path,'StabilityCheckDriver'))
            if os.path.isfile(pjoin(dir_path,'loop_matrix.o')):
                os.remove(pjoin(dir_path,'loop_matrix.o'))
            misc.compile(arg=['StabilityCheckDriver'], cwd=dir_path, \
                                              mode='fortran', job_specs = False)

            # Now for 2>1 processes, because the HelFilter was setup in for always
            # identical PS points with vec(p_1)=-vec(p_2), it is best not to remove
            # the helicityFilter double check
            if len(process['legs'])==3:
                self.fix_MadLoopParamCard(dir_path, mp=False,
                           loop_filter=False, DoubleCheckHelicityFilter=True)

            StabChecker = subprocess.Popen([pjoin(dir_path,'StabilityCheckDriver')],
                    stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                                                   cwd=dir_path)
            start_index = len(DP_stability)
            if progress_bar!=None:
                    progress_bar.start()

            # Flag to know if the run was interrupted or not
            interrupted = False
            # Flag to know wheter the run for one specific PS point got an IOError
            # and must be retried
            retry = 0
            # We do not use a for loop because we want to manipulate the updater.
            i=start_index
            if options and 'events' in options and options['events']:
                # it is necessary to reuse the events from lhe file
                import MadSpin.decay as madspin
                fsock = open(options['events'])
                self.event_file = madspin.Event(fsock)
            while i<(start_index+nPoints):
                # To be added to the returned statistics
                qp_dict={}
                dp_dict={}
                UPS = None
                EPS = None
                # Pick an eligible PS point with rambo, if not already done
                if retry==0:
                    p = pick_PS_point(process, options)
#                   print "I use P_%i="%i,p
                try:
                    if progress_bar!=None:
                        progress_bar.update(i+1-start_index)
                    # Write it in the input file
                    PSPoint = format_PS_point(p,0)
                    # Double precision: both CutTools loop directions plus the
                    # rotated/boosted copies of the same PS point.
                    dp_res=[]
                    dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
                                                      split_orders=split_orders))
                    dp_dict['CTModeA']=dp_res[-1]
                    dp_res.append(self.get_me_value(StabChecker,PSPoint,2,
                                                      split_orders=split_orders))
                    dp_dict['CTModeB']=dp_res[-1]
                    for rotation in range(1,num_rotations+1):
                        PSPoint = format_PS_point(p,rotation)
                        dp_res.append(self.get_me_value(StabChecker,PSPoint,1,
                                                      split_orders=split_orders))
                        dp_dict['Rotation%i'%rotation]=dp_res[-1]
                    # Make sure all results make sense
                    if any([not res for res in dp_res]):
                        return None
                    # Relative spread of the equivalent evaluations.
                    dp_accuracy =((max(dp_res)-min(dp_res))/
                                                   abs(sum(dp_res)/len(dp_res)))
                    dp_dict['Accuracy'] = dp_accuracy
                    if dp_accuracy>accuracy_threshold:
                        if tool==1:
                            # Only CutTools can use QP
                            UPS = [i,p]
                            qp_res=[]
                            PSPoint = format_PS_point(p,0)
                            qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
                                                      split_orders=split_orders))
                            qp_dict['CTModeA']=qp_res[-1]
                            qp_res.append(self.get_me_value(StabChecker,PSPoint,5,
                                                      split_orders=split_orders))
                            qp_dict['CTModeB']=qp_res[-1]
                            for rotation in range(1,num_rotations+1):
                                PSPoint = format_PS_point(p,rotation)
                                qp_res.append(self.get_me_value(StabChecker,PSPoint,4,
                                                      split_orders=split_orders))
                                qp_dict['Rotation%i'%rotation]=qp_res[-1]
                            # Make sure all results make sense
                            if any([not res for res in qp_res]):
                                return None

                            qp_accuracy = ((max(qp_res)-min(qp_res))/
                                                   abs(sum(qp_res)/len(qp_res)))
                            qp_dict['Accuracy']=qp_accuracy
                            if qp_accuracy>accuracy_threshold:
                                EPS = [i,p]
                        else:
                            # Simply consider the point as a UPS when not using
                            # CutTools
                            UPS = [i,p]

                except KeyboardInterrupt:
                    interrupted = True
                    break
                except IOError, e:
                    if e.errno == errno.EINTR:
                        if retry==100:
                            logger.error("Failed hundred times consecutively because"+
                                              " of system call interruptions.")
                            raise
                        else:
                            logger.debug("Recovered from a system call interruption."+\
                                        "PSpoint #%i, Attempt #%i."%(i,retry+1))
                            # Sleep for half a second. Safety measure.
                            time.sleep(0.5)
                        # We will retry this PS point
                        retry = retry+1
                        # Make sure the MadLoop process is properly killed
                        try:
                            StabChecker.kill()
                        except Exception:
                            pass
                        StabChecker = subprocess.Popen(\
                               [pjoin(dir_path,'StabilityCheckDriver')],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE, cwd=dir_path)
                        continue
                    else:
                        raise

                # Successfully processed a PS point so,
                #  > reset retry
                retry = 0
                #  > Update the while loop counter variable
                i=i+1

                # Update the returned statistics
                DP_stability.append(dp_dict)
                QP_stability.append(qp_dict)
                if not EPS is None:
                    Exceptional_PS_points.append(EPS)
                if not UPS is None:
                    Unstable_PS_points.append(UPS)

            if progress_bar!=None:
                progress_bar.finish()
            if time_info:
                logger.info('Finished check on %s.'%datetime.datetime.now().strftime(\
                                                              "%d-%m-%Y %H:%M"))

            # Close the StabChecker process.
            if not interrupted:
                StabChecker.stdin.write('y\n')
            else:
                StabChecker.kill()

            #return_dict = {'DP_stability':DP_stability,
            #               'QP_stability':QP_stability,
            #               'Unstable_PS_points':Unstable_PS_points,
            #               'Exceptional_PS_points':Exceptional_PS_points}

            # Save the run for possible future use
            save_load_object.save_to_file(pjoin(dir_path,savefile%'_0'),\
                                          return_dict['Stability'][tool_name])

            if interrupted:
                break

        return_dict['Process'] =  matrix_element.get('processes')[0] if not \
                                                 reusing else matrix_element
        return return_dict
    @classmethod
    def get_me_value(cls, StabChecker, PSpoint, mode, hel=-1, mu_r=-1.0,
                                                               split_orders=-1):
        """ This version of get_me_value is simplified for the purpose of this
        class. No compilation is necessary. The CT mode can be specified.

        Arguments:
            StabChecker  -- a running StabilityCheckDriver subprocess whose
                            stdin/stdout are pipes.
            PSpoint      -- the phase-space point, already formatted as the
                            multi-line string the driver expects.
            mode         -- CutTools mode to use (e.g. 1/2 for the two double
                            precision loop directions, 4/5 for quad precision).
            hel          -- helicity configuration (-1 for the helicity sum).
            mu_r         -- renormalization scale (-1.0 to keep the default).
            split_orders -- coupling-order combination selector (-1 for all).

        Returns the finite part parsed from the driver output."""

        # Reset the stdin with EOF character without closing it.
        StabChecker.stdin.write('\x1a')
        # Feed the driver its expected input sequence, one answer per line.
        StabChecker.stdin.write('1\n')
        StabChecker.stdin.write('%d\n'%mode)
        StabChecker.stdin.write('%s\n'%PSpoint)
        StabChecker.stdin.write('%.16E\n'%mu_r)
        StabChecker.stdin.write('%d\n'%hel)
        StabChecker.stdin.write('%d\n'%split_orders)

        try:
            # Skip everything up to the start marker, then accumulate the
            # result lines until the stop marker.
            while True:
                output = StabChecker.stdout.readline()
                if output==' ##TAG#RESULT_START#TAG##\n':
                    break
            res = ""
            while True:
                output = StabChecker.stdout.readline()
                if output==' ##TAG#RESULT_STOP#TAG##\n':
                    break
                else:
                    res += output
            # [0][0] of the 'tuple' format is the finite part.
            return cls.parse_check_output(res,format='tuple')[0][0]
        except IOError as e:
            logging.warning("Error while running MadLoop. Exception = %s"%str(e))
            raise e
1671
def evaluate_helicities(process, param_card = None, mg_root="",
                                                          cmass_scheme = False):
    """ Perform a python evaluation of the matrix element independently for
    all possible helicity configurations for a fixed number of points N and
    returns the average for each in the format [[hel_config, eval],...].
    This is used to determine what are the vanishing and dependent helicity
    configurations at generation time and accordingly setup the output.
    This is not yet implemented at LO.

    Arguments:
        process      -- a single base_objects.Process without perturbation
                        couplings.
        param_card   -- optional parameter card handed to the evaluator.
        mg_root      -- unused here; kept for interface compatibility.
        cmass_scheme -- unused here; kept for interface compatibility.
    """

    # Make sure this function is employed with a single process at LO
    assert isinstance(process,base_objects.Process)
    assert process.get('perturbation_couplings')==[]

    N_eval=50

    evaluator = MatrixElementEvaluator(process.get('model'), param_card,
                                            auth_skipping = False, reuse = True)

    amplitude = diagram_generation.Amplitude(process)
    matrix_element = helas_objects.HelasMatrixElement(amplitude,gen_color=False)

    cumulative_helEvals = []
    # Fill cumulative hel progressively with several evaluations of the ME.
    # The loop counter is deliberately anonymous: the comprehension below used
    # to reuse the name 'i', shadowing (and, in python2, leaking over) the
    # loop variable.
    for _ in range(N_eval):
        p, w_rambo = evaluator.get_momenta(process)
        helEvals = evaluator.evaluate_matrix_element(\
                matrix_element, p = p, output = 'helEvals')['helEvals']
        if cumulative_helEvals==[]:
            cumulative_helEvals=copy.copy(helEvals)
        else:
            # Add this evaluation into the running sum, helicity by helicity.
            cumulative_helEvals = [[h[0],h[1]+helEvals[idx][1]] for idx, h in \
                                                 enumerate(cumulative_helEvals)]

    # Now normalize with the total number of evaluations
    cumulative_helEvals = [[h[0],h[1]/N_eval] for h in cumulative_helEvals]

    # As we are not in the context of a check command, so we clean the added
    # globals right away
    clean_added_globals(ADDED_GLOBAL)

    return cumulative_helEvals
1713
def run_multiprocs_no_crossings(function, multiprocess, stored_quantities,
                                opt=None, options=None):
    """A wrapper function for running an iteration of a function over
    a multiprocess, without having to first create a process list
    (which makes a big difference for very large multiprocesses).

    function: callable invoked as function(process, stored_quantities[, opt],
        options=options); a false return value is not collected.
    multiprocess: ProcessDefinition whose leg id combinations are iterated.
    stored_quantities: dictionary of quantities to reuse between runs.
    opt: optional extra argument forwarded to function. When it is a dict it
        is used as a lookup keyed on process.base_string(); processes with
        no entry are skipped.
    options: forwarded verbatim to function.

    Returns the list of truthy results, one per unique (non-crossed) process.
    """

    model = multiprocess.get('model')
    isids = [leg.get('ids') for leg in multiprocess.get('legs') \
              if not leg.get('state')]
    fsids = [leg.get('ids') for leg in multiprocess.get('legs') \
             if leg.get('state')]
    # Create dictionary between isids and antiids, to speed up lookup
    id_anti_id_dict = {}
    for id in set(tuple(sum(isids+fsids, []))):
        id_anti_id_dict[id] = model.get_particle(id).get_anti_pdg_code()
        id_anti_id_dict[model.get_particle(id).get_anti_pdg_code()] = id
    sorted_ids = []
    results = []
    # itertools.product(*seqs) replaces apply(itertools.product, seqs): the
    # apply() builtin is deprecated and was removed in Python 3.
    for is_prod in itertools.product(*isids):
        for fs_prod in itertools.product(*fsids):

            # Check if we have already checked the process
            if check_already_checked(is_prod, fs_prod, sorted_ids,
                                     multiprocess, model, id_anti_id_dict):
                continue
            # Generate process based on the selected ids
            process = multiprocess.get_process_with_legs(base_objects.LegList(\
                            [base_objects.Leg({'id': id, 'state':False}) for \
                             id in is_prod] + \
                            [base_objects.Leg({'id': id, 'state':True}) for \
                             id in fs_prod]))

            if opt is not None:
                if isinstance(opt, dict):
                    # Dict-valued opt acts as a per-process lookup; missing
                    # entries mean "skip this process".
                    try:
                        value = opt[process.base_string()]
                    except Exception:
                        continue
                    result = function(process, stored_quantities, value,
                                      options=options)
                else:
                    result = function(process, stored_quantities, opt,
                                      options=options)
            else:
                result = function(process, stored_quantities, options=options)

            if result:
                results.append(result)

    return results
1764
#===============================================================================
# Helper function check_already_checked
#===============================================================================

def check_already_checked(is_ids, fs_ids, sorted_ids, process, model,
                          id_anti_id_dict = {}):
    """Return True when (a crossing of) this process has already been tested;
    otherwise record its id signature in sorted_ids and return False."""

    # Map the initial-state ids onto their antiparticles, through the
    # pre-computed lookup table when one is supplied, otherwise via the model.
    if id_anti_id_dict:
        is_ids = [id_anti_id_dict[pdg] for pdg in is_ids]
    else:
        is_ids = [model.get_particle(pdg).get_anti_pdg_code()
                  for pdg in is_ids]

    # The signature of a process is its sorted id list plus the process id.
    ids = array.array('i',
                      sorted(is_ids + list(fs_ids)) + [process.get('id')])

    if ids in sorted_ids:
        # A crossing of this process was tested before.
        return True

    # Remember this signature for subsequent calls.
    sorted_ids.append(ids)

    # The antiprocess is deliberately not added here, since it might be
    # relevant too.
    return False
1794
#===============================================================================
# Generate a loop matrix element
#===============================================================================
def generate_loop_matrix_element(process_definition, reuse, output_path=None,
                                 cmd = FakeInterface()):
    """ Generate a loop matrix element from the process definition, and returns
    it along with the timing information dictionary.
    If reuse is True, it reuses the already output directory if found.

    process_definition: a base_objects.ProcessDefinition with non-empty
        perturbation couplings and no multiparticle labels.
    reuse: when True and a saved process directory already exists, the bare
        process is returned instead of a matrix element (second tuple entry).
    output_path: root under which saved directories are looked up; defaults
        to cmd._mgme_dir.
    cmd: interface providing options and the MG root directory."""

    assert isinstance(process_definition,base_objects.ProcessDefinition)
    assert process_definition.get('perturbation_couplings')!=[]

    # Pick the root under which previously saved outputs would live.
    if not output_path is None:
        root_path = output_path
    else:
        root_path = cmd._mgme_dir
    # By default, set all entries to None
    timing = {'Diagrams_generation': None,
              'n_loops': None,
              'HelasDiagrams_generation': None,
              'n_loop_groups': None,
              'n_loop_wfs': None,
              'loop_wfs_ranks': None}

    # Multiparticle labels would give more than one id per leg.
    if any(len(l.get('ids'))>1 for l in process_definition.get('legs')):
        raise InvalidCmd("This check can only be performed on single "+
                         " processes. (i.e. without multiparticle labels).")

    isids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
              if not leg.get('state')]
    fsids = [leg.get('ids')[0] for leg in process_definition.get('legs') \
             if leg.get('state')]

    # Now generate a process based on the ProcessDefinition given in argument.
    process = process_definition.get_process(isids,fsids)

    # Saved directory name drops the leading token of the shell string.
    proc_dir = pjoin(root_path,"SAVED"+temp_dir_prefix+"_%s"%(
                               '_'.join(process.shell_string().split('_')[1:])))
    if reuse and os.path.isdir(proc_dir):
        logger.info("Reusing directory %s"%str(proc_dir))
        # If reusing, return process instead of matrix element
        return timing, process

    logger.info("Generating p%s"%process_definition.nice_string()[1:])

    # Time the loop diagram generation step.
    start=time.time()
    amplitude = loop_diagram_generation.LoopAmplitude(process)
    # Make sure to disable loop_optimized_output when considering loop induced
    # processes
    loop_optimized_output = cmd.options['loop_optimized_output']
    timing['Diagrams_generation']=time.time()-start
    timing['n_loops']=len(amplitude.get('loop_diagrams'))
    start=time.time()

    # Time the Helas diagram generation step.
    matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
                        optimized_output = loop_optimized_output,gen_color=True)
    # Here, the alohaModel used for analytica computations and for the aloha
    # subroutine output will be different, so that some optimization is lost.
    # But that is ok for the check functionality.
    matrix_element.compute_all_analytic_information()
    timing['HelasDiagrams_generation']=time.time()-start

    # Loop-output statistics are only available in the optimized mode.
    if loop_optimized_output:
        timing['n_loop_groups']=len(matrix_element.get('loop_groups'))
        lwfs=[l for ldiag in matrix_element.get_loop_diagrams() for l in \
              ldiag.get('loop_wavefunctions')]
        timing['n_loop_wfs']=len(lwfs)
        timing['loop_wfs_ranks']=[]
        # Histogram the loop wavefunctions by analytic rank.
        for rank in range(0,max([l.get_analytic_info('wavefunction_rank') \
                                                         for l in lwfs])+1):
            timing['loop_wfs_ranks'].append(\
                len([1 for l in lwfs if \
                     l.get_analytic_info('wavefunction_rank')==rank]))

    return timing, matrix_element
1870
#===============================================================================
# check profile for loop process (timings + stability in one go)
#===============================================================================
def check_profile(process_definition, param_card = None,cuttools="",tir={},
                  options = {}, cmd = FakeInterface(),output_path=None,
                  MLOptions={}):
    """For a single loop process, check both its timings and then its stability
    in one go without regenerating it.

    Returns a (timing, stability) pair of dictionaries, or (None, None) when
    either step fails."""

    # 'reuse' decides whether an already-output process directory is recycled.
    if 'reuse' not in options:
        keep_folder = False
    else:
        keep_folder = options['reuse']

    model = process_definition.get('model')

    timing1, matrix_element = generate_loop_matrix_element(process_definition,
                                 keep_folder, output_path=output_path, cmd=cmd)
    # When reusing, generate_loop_matrix_element returns the bare process.
    reusing = isinstance(matrix_element, base_objects.Process)
    options['reuse'] = reusing
    myProfiler = LoopMatrixElementTimer(cuttools_dir=cuttools, tir_dir=tir,
                                        model=model, output_path=output_path,
                                        cmd=cmd)

    # MadLoop options only apply to the optimized output mode.
    if not myProfiler.loop_optimized_output:
        MLoptions = {}
    else:
        MLoptions = MLOptions
    timing2 = myProfiler.time_matrix_element(matrix_element, reusing,
                               param_card, keep_folder=keep_folder,
                               options=options, MLOptions=MLoptions)

    if timing2 is None:
        return None, None

    # Merge the two timing dictionaries (timing2 entries win on clashes).
    # Note: dict(a.items()+b.items()) is Python-2 only; this form works on
    # both Python 2 and 3.
    timing = dict(timing1)
    timing.update(timing2)

    stability = myProfiler.check_matrix_element_stability(matrix_element,
                          options=options, infos_IN=timing,
                          param_card=param_card, keep_folder=keep_folder,
                          MLOptions=MLoptions)
    if stability is None:
        return None, None
    else:
        timing['loop_optimized_output'] = myProfiler.loop_optimized_output
        stability['loop_optimized_output'] = myProfiler.loop_optimized_output
        return timing, stability
1916
#===============================================================================
# check_timing for loop processes
#===============================================================================
def check_stability(process_definition, param_card = None,cuttools="",tir={},
                    options=None,nPoints=100, output_path=None,
                    cmd = FakeInterface(), MLOptions = {}):
    """For a single loop process, run the numerical stability check and
    return its summary dictionary (or None on failure)."""

    # Guard against options being None (its default value) as well as a
    # missing 'reuse' key. The original code read options['reuse']
    # unconditionally right after this test, which raised a KeyError (or a
    # TypeError for options=None) and made the default branch unreachable.
    if options and "reuse" in options:
        reuse = options['reuse']
    else:
        reuse = False

    keep_folder = reuse
    model = process_definition.get('model')

    timing, matrix_element = generate_loop_matrix_element(process_definition,
                                       reuse, output_path=output_path, cmd=cmd)
    # When reusing, generate_loop_matrix_element returns the bare process.
    reusing = isinstance(matrix_element, base_objects.Process)
    if options is not None:
        options['reuse'] = reusing
    myStabilityChecker = LoopMatrixElementTimer(cuttools_dir=cuttools,
                                    tir_dir=tir, output_path=output_path,
                                    model=model, cmd=cmd)

    # MadLoop options only apply to the optimized output mode; select the
    # reduction libraries from the available tools when not given explicitly.
    if not myStabilityChecker.loop_optimized_output:
        MLoptions = {}
    else:
        MLoptions = MLOptions
        if "MLReductionLib" not in MLOptions:
            MLoptions["MLReductionLib"] = []
            if cuttools:
                MLoptions["MLReductionLib"].extend([1])
            if "iregi_dir" in tir:
                MLoptions["MLReductionLib"].extend([3])
            if "pjfry_dir" in tir:
                MLoptions["MLReductionLib"].extend([2])
            if "golem_dir" in tir:
                MLoptions["MLReductionLib"].extend([4])

    stability = myStabilityChecker.check_matrix_element_stability(
                                        matrix_element, options=options,
                                        param_card=param_card,
                                        keep_folder=keep_folder,
                                        MLOptions=MLoptions)

    if stability is None:
        return None
    else:
        stability['loop_optimized_output'] = \
                                        myStabilityChecker.loop_optimized_output
        return stability
1968
#===============================================================================
# check_timing for loop processes
#===============================================================================
def check_timing(process_definition, param_card= None, cuttools="",tir={},
                 output_path=None, options={}, cmd = FakeInterface(),
                 MLOptions = {}):
    """For a single loop process, give a detailed summary of the generation and
    execution timing. Returns the merged timing dictionary or None on
    failure."""

    # 'reuse' decides whether an already-output process directory is recycled.
    if 'reuse' not in options:
        keep_folder = False
    else:
        keep_folder = options['reuse']
    model = process_definition.get('model')
    timing1, matrix_element = generate_loop_matrix_element(process_definition,
                                 keep_folder, output_path=output_path, cmd=cmd)
    # When reusing, generate_loop_matrix_element returns the bare process.
    reusing = isinstance(matrix_element, base_objects.Process)
    options['reuse'] = reusing
    myTimer = LoopMatrixElementTimer(cuttools_dir=cuttools, model=model,
                                     tir_dir=tir, output_path=output_path,
                                     cmd=cmd)

    # MadLoop options only apply to the optimized output mode.
    if not myTimer.loop_optimized_output:
        MLoptions = {}
    else:
        MLoptions = MLOptions
    timing2 = myTimer.time_matrix_element(matrix_element, reusing, param_card,
                                          keep_folder=keep_folder,
                                          options=options,
                                          MLOptions=MLoptions)

    if timing2 is None:
        return None
    else:
        # Merge the two timing dictionaries (timing2 entries win on clashes).
        # Note: dict(a.items()+b.items()) is Python-2 only; this form works
        # on both Python 2 and 3.
        res = dict(timing1)
        res.update(timing2)
        res['loop_optimized_output'] = myTimer.loop_optimized_output
        return res
#===============================================================================
# check_processes
#===============================================================================
def check_processes(processes, param_card = None, quick = [],cuttools="",tir={},
                    options=None, reuse = False, output_path=None,
                    cmd = FakeInterface()):
    """Check processes by generating them with all possible orderings
    of particles (which means different diagram building and Helas
    calls), and comparing the resulting matrix element values.

    processes: a ProcessDefinition, a Process or a ProcessList.
    quick: forwarded to check_process; truthy means only a subset of leg
        permutations is tested.
    Returns (results, used_lorentz) where results is the list of per-process
    comparison dictionaries and used_lorentz is taken from the evaluator's
    stored quantities."""

    cmass_scheme = cmd.options['complex_mass_scheme']
    if isinstance(processes, base_objects.ProcessDefinition):
        # Generate a list of unique processes
        # Extract IS and FS ids
        multiprocess = processes
        model = multiprocess.get('model')

        # Initialize matrix element evaluation
        if multiprocess.get('perturbation_couplings')==[]:
            evaluator = MatrixElementEvaluator(model,
               auth_skipping = True, reuse = False, cmd = cmd)
        else:
            evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,
                            tir_dir=tir, model=model, auth_skipping = True,
                            reuse = False, output_path=output_path, cmd = cmd)

        # run_multiprocs_no_crossings(function, multiprocess,
        # stored_quantities, opt, options): the evaluator plays the role of
        # stored_quantities and 'quick' that of opt.
        results = run_multiprocs_no_crossings(check_process,
                                              multiprocess,
                                              evaluator,
                                              quick,
                                              options)

        if "used_lorentz" not in evaluator.stored_quantities:
            evaluator.stored_quantities["used_lorentz"] = []

        if multiprocess.get('perturbation_couplings')!=[] and not reuse:
            # Clean temporary folders created for the running of the loop
            # processes
            clean_up(output_path)

        return results, evaluator.stored_quantities["used_lorentz"]

    elif isinstance(processes, base_objects.Process):
        # Normalize a single process to a one-element list.
        processes = base_objects.ProcessList([processes])
    elif isinstance(processes, base_objects.ProcessList):
        pass
    else:
        raise InvalidCmd("processes is of non-supported format")

    if not processes:
        raise InvalidCmd("No processes given")

    model = processes[0].get('model')

    # Initialize matrix element evaluation
    if processes[0].get('perturbation_couplings')==[]:
        evaluator = MatrixElementEvaluator(model, param_card,
                                           auth_skipping = True,
                                           reuse = False, cmd = cmd)
    else:
        evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,
                                           tir_dir=tir,
                                           model=model,param_card=param_card,
                                           auth_skipping = True,
                                           reuse = False,
                                           output_path=output_path, cmd = cmd)

    # Keep track of tested processes, matrix elements, color and already
    # initiated Lorentz routines, to reuse as much as possible
    sorted_ids = []
    comparison_results = []

    # Check process by process
    for process in processes:

        # Check if we already checked process
        if check_already_checked([l.get('id') for l in process.get('legs') if \
                                  not l.get('state')],
                                 [l.get('id') for l in process.get('legs') if \
                                  l.get('state')],
                                 sorted_ids, process, model):
            continue
        # Get process result
        res = check_process(process, evaluator, quick, options)
        if res:
            comparison_results.append(res)

    if "used_lorentz" not in evaluator.stored_quantities:
        evaluator.stored_quantities["used_lorentz"] = []

    if processes[0].get('perturbation_couplings')!=[] and not reuse:
        # Clean temporary folders created for the running of the loop
        # processes
        clean_up(output_path)

    return comparison_results, evaluator.stored_quantities["used_lorentz"]
2096
def check_process(process, evaluator, quick, options):
    """Check the helas calls for a process by generating the process
    using all different permutations of the process legs (or, if
    quick, use a subset of permutations), and check that the matrix
    element is invariant under this.

    Returns None when the check was interrupted before any value was
    collected, otherwise a dict with keys 'process', 'momenta', 'values',
    'difference' and 'passed'."""

    model = process.get('model')

    # Ensure that leg numbers are set
    for i, leg in enumerate(process.get('legs')):
        leg.set('number', i+1)

    logger.info("Checking crossings of %s" % \
                process.nice_string().replace('Process:', 'process'))

    process_matrix_elements = []

    # For quick checks, only test twp permutations with leg "1" in
    # each position
    if quick:
        leg_positions = [[] for leg in process.get('legs')]
        # NOTE(review): relies on Python-2 range() returning a list; the
        # comparisons against range(...) below assume list semantics.
        quick = range(1,len(process.get('legs')) + 1)

    values = []

    # Now, generate all possible permutations of the legs
    number_checked=0
    for legs in itertools.permutations(process.get('legs')):

        order = [l.get('number') for l in legs]
        if quick:
            found_leg = True
            for num in quick:
                # Only test one permutation for each position of the
                # specified legs
                leg_position = legs.index([l for l in legs if \
                                           l.get('number') == num][0])

                if not leg_position in leg_positions[num-1]:
                    found_leg = False
                    leg_positions[num-1].append(leg_position)

            # Every tracked leg was already seen in this position: skip.
            if found_leg:
                continue

        # Further limit the total number of permutations checked to 3 for
        # loop processes.
        if quick and process.get('perturbation_couplings') and \
                                                            number_checked >3:
            continue

        legs = base_objects.LegList(legs)

        if order != range(1,len(legs) + 1):
            logger.info("Testing permutation: %s" % \
                        order)

        newproc = copy.copy(process)
        newproc.set('legs',legs)

        # Generate the amplitude for this process
        try:
            if newproc.get('perturbation_couplings')==[]:
                amplitude = diagram_generation.Amplitude(newproc)
            else:
                # Change the cutting method every two times.
                loop_base_objects.cutting_method = 'optimal' if \
                                            number_checked%2 == 0 else 'default'
                amplitude = loop_diagram_generation.LoopAmplitude(newproc)
        except InvalidCmd:
            result=False
        else:
            result = amplitude.get('diagrams')
        # Make sure to re-initialize the cutting method to the original one.
        loop_base_objects.cutting_method = 'optimal'

        if not result:
            # This process has no diagrams; go to next process
            logging.info("No diagrams for %s" % \
                         process.nice_string().replace('Process', 'process'))
            break

        if order == range(1,len(legs) + 1):
            # Generate phase space point to use; only done once, on the
            # identity permutation, and reused for all other orderings.
            p, w_rambo = evaluator.get_momenta(process, options)

        # Generate the HelasMatrixElement for the process
        if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
            matrix_element = helas_objects.HelasMatrixElement(amplitude,
                                                              gen_color=False)
        else:
            matrix_element = loop_helas_objects.LoopHelasMatrixElement(
                               amplitude,
                               optimized_output=evaluator.loop_optimized_output)

        # The loop diagrams are always the same in the basis, so that the
        # LoopHelasMatrixElement always look alike. One needs to consider
        # the crossing no matter what then.
        if amplitude.get('process').get('has_born'):
            # But the born diagrams will change depending on the order of the
            # particles in the process definition
            if matrix_element in process_matrix_elements:
                # Exactly the same matrix element has been tested
                # for other permutation of same process
                continue

        process_matrix_elements.append(matrix_element)

        res = evaluator.evaluate_matrix_element(matrix_element, p = p,
                                                options=options)
        if res == None:
            break

        values.append(res[0])
        number_checked += 1

        # Check if we failed badly (1% is already bad) - in that
        # case done for this process
        if abs(max(values)) + abs(min(values)) > 0 and \
               2 * abs(max(values) - min(values)) / \
               (abs(max(values)) + abs(min(values))) > 0.01:
            break

    # Check if process was interrupted
    if not values:
        return None

    # Done with this process. Collect values, and store
    # process and momenta
    diff = 0
    if abs(max(values)) + abs(min(values)) > 0:
        diff = 2* abs(max(values) - min(values)) / \
               (abs(max(values)) + abs(min(values)))

    # be more tolerant with loop processes
    if process.get('perturbation_couplings'):
        passed = diff < 1.e-5
    else:
        passed = diff < 1.e-8

    return {"process": process,
            "momenta": p,
            "values": values,
            "difference": diff,
            "passed": passed}
2240
def clean_up(mg_root):
    """Clean-up the possible left-over outputs from 'evaluate_matrix element' of
    the LoopMatrixEvaluator (when its argument proliferate is set to true).

    mg_root: root directory under which temporary check directories were
        created; None means there is nothing to clean."""

    # Nothing to clean when no root path was given. The original guard was
    # 'pass', which fell through and crashed in pjoin(None, ...).
    if mg_root is None:
        return

    directories = glob.glob(pjoin(mg_root, '%s*' % temp_dir_prefix))
    if directories != []:
        logger.debug("Cleaning temporary %s* check runs." % temp_dir_prefix)
        for directory in directories:
            # For safety make sure that the directory contains a folder
            # SubProcesses before removing it.
            if os.path.isdir(pjoin(directory, 'SubProcesses')):
                shutil.rmtree(directory)
2255
def format_output(output, format):
    """Return a string for 'output' with the specified %-style format. If
    output is None, it returns 'NA'.

    output: value to be formatted, possibly None.
    format: %-style format string applied to output."""

    # Identity comparison against None is the correct test here; the original
    # used '!=' which invokes __eq__ on arbitrary objects (PEP 8).
    if output is not None:
        return format % output
    return 'NA'
2264
def output_profile(myprocdef, stability, timing, output_path, reusing=False):
    """Present the results from a timing and stability consecutive check"""

    # Both sections report in the same output mode.
    mode = 'optimized' if timing['loop_optimized_output'] else 'default'

    # Assemble the timing section followed by the stability section.
    sections = ['Timing result for the ' + mode + ' output:\n',
                output_timings(myprocdef, timing),
                '\nStability result for the ' + mode + ' output:\n',
                output_stability(stability, output_path, reusing=reusing)]
    text = ''.join(sections)

    # Persist the combined report to a per-process log file.
    log_path = pjoin(output_path, 'profile_%s_%s.log'
                     % (mode, stability['Process'].shell_string()))
    log_file = open(log_path, 'w')
    log_file.write(text)
    log_file.close()
    logger.info('Log of this profile check was output to file %s'
                % str(log_path))
    return text
2287
def output_stability(stability, output_path, reusing=False):
    """Present the result of a stability check in a nice format.
    The full info is printed out in 'Stability_result_<proc_shell_string>.dat'
    under the MadGraph5_aMC@NLO root folder (output_path)

    stability: dictionary with keys 'loop_optimized_output', 'Process' and
        'Stability' (the latter mapping tool names to per-tool statistics).
    reusing: when False, an interactive matplotlib window is shown;
        otherwise the plot is saved to file."""

    def accuracy(eval_list):
        """ Compute the accuracy from different evaluations."""
        # Relative spread of the evaluations around their central value.
        return (2.0*(max(eval_list)-min(eval_list))/
                                             abs(max(eval_list)+min(eval_list)))

    def best_estimate(eval_list):
        """ Returns the best estimate from different evaluations."""
        return (max(eval_list)+min(eval_list))/2.0

    def loop_direction_test_power(eval_list):
        """ Computes the loop direction test power P is computed as follow:
          P = accuracy(loop_dir_test) / accuracy(all_test)
        So that P is large if the loop direction test is effective.
        The tuple returned is (log(median(P)),log(min(P)),frac)
        where frac is the fraction of events with powers smaller than -3
        which means events for which the reading direction test shows an
        accuracy three digits higher than it really is according to the other
        tests."""
        powers=[]
        for eval in eval_list:
            loop_dir_evals = [eval['CTModeA'],eval['CTModeB']]
            # CTModeA is the reference so we keep it in too
            other_evals = [eval[key] for key in eval.keys() if key not in \
                                                         ['CTModeB','Accuracy']]
            if accuracy(other_evals)!=0.0 and accuracy(loop_dir_evals)!=0.0:
                powers.append(accuracy(loop_dir_evals)/accuracy(other_evals))

        # Count evaluations where the loop direction test is more than three
        # orders of magnitude too optimistic.
        n_fail=0
        for p in powers:
            if (math.log(p)/math.log(10))<-3:
                n_fail+=1

        if len(powers)==0:
            return (None,None,None)

        return (math.log(median(powers))/math.log(10),
                math.log(min(powers))/math.log(10),
                n_fail/len(powers))

    def test_consistency(dp_eval_list, qp_eval_list):
        """ Computes the consistency test C from the DP and QP evaluations.
          C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
        So a consistent test would have C as close to one as possible.
        The tuple returned is (log(median(C)),log(min(C)),log(max(C)))"""
        consistencies = []
        for dp_eval, qp_eval in zip(dp_eval_list,qp_eval_list):
            dp_evals = [dp_eval[key] for key in dp_eval.keys() \
                                                             if key!='Accuracy']
            qp_evals = [qp_eval[key] for key in qp_eval.keys() \
                                                             if key!='Accuracy']
            if (abs(best_estimate(qp_evals)-best_estimate(dp_evals)))!=0.0 and \
               accuracy(dp_evals)!=0.0:
                consistencies.append(accuracy(dp_evals)/(abs(\
                              best_estimate(qp_evals)-best_estimate(dp_evals))))

        if len(consistencies)==0:
            return (None,None,None)

        return (math.log(median(consistencies))/math.log(10),
                math.log(min(consistencies))/math.log(10),
                math.log(max(consistencies))/math.log(10))

    def median(orig_list):
        """ Find the median of a sorted float list. """
        # NOTE(review): 'list' here shadows the builtin; kept as-is.
        list=copy.copy(orig_list)
        list.sort()
        if len(list)%2==0:
            return (list[int((len(list)/2)-1)]+list[int(len(list)/2)])/2.0
        else:
            return list[int((len(list)-1)/2)]

    # Define shortcut
    f = format_output

    opt = stability['loop_optimized_output']

    mode = 'optimized' if opt else 'default'
    process = stability['Process']
    res_str = "Stability checking for %s (%s mode)\n"\
                                               %(process.nice_string()[9:],mode)

    logFile = open(pjoin(output_path, 'stability_%s_%s.log'\
                                          %(mode,process.shell_string())), 'w')

    logFile.write('Stability check results\n\n')
    logFile.write(res_str)
    data_plot_dict={}
    accuracy_dict={}
    nPSmax=0
    max_acc=0.0
    min_acc=1.0
    # First build the cross-tool summary table (one column per tool).
    if stability['Stability']:
        toolnames= stability['Stability'].keys()
        toolnamestr=" | ".join(tn+
                               ''.join([' ']*(10-len(tn))) for tn in toolnames)
        DP_stability = [[eval['Accuracy'] for eval in stab['DP_stability']] \
                        for key,stab in stability['Stability'].items()]
        med_dp_stab_str=" | ".join([f(median(dp_stab),'%.2e ') for dp_stab \
                                                               in DP_stability])
        min_dp_stab_str=" | ".join([f(min(dp_stab),'%.2e ') for dp_stab \
                                                               in DP_stability])
        max_dp_stab_str=" | ".join([f(max(dp_stab),'%.2e ') for dp_stab \
                                                               in DP_stability])
        UPS = [stab['Unstable_PS_points'] for key,stab in \
                                               stability['Stability'].items()]
        res_str_i = "\n= Tool (DoublePrec for CT)....... %s\n"%toolnamestr
        len_PS=["%i"%len(evals)+\
               ''.join([' ']*(10-len("%i"%len(evals)))) for evals in \
                                                                   DP_stability]
        len_PS_str=" | ".join(len_PS)
        res_str_i += "|= Number of PS points considered %s\n"%len_PS_str
        res_str_i += "|= Median accuracy............... %s\n"%med_dp_stab_str
        res_str_i += "|= Max accuracy.................. %s\n"%min_dp_stab_str
        res_str_i += "|= Min accuracy.................. %s\n"%max_dp_stab_str
        pmedminlist=[]
        pfraclist=[]
        for key,stab in stability['Stability'].items():
            (pmed,pmin,pfrac)=loop_direction_test_power(stab['DP_stability'])
            ldtest_str = "%s,%s"%(f(pmed,'%.1f'),f(pmin,'%.1f'))
            pfrac_str = f(pfrac,'%.2e')
            pmedminlist.append(ldtest_str+''.join([' ']*(10-len(ldtest_str))))
            pfraclist.append(pfrac_str+''.join([' ']*(10-len(pfrac_str))))
        pmedminlist_str=" | ".join(pmedminlist)
        pfraclist_str=" | ".join(pfraclist)
        res_str_i += "|= Overall DP loop_dir test power %s\n"%pmedminlist_str
        res_str_i += "|= Fraction of evts with power<-3 %s\n"%pfraclist_str
        len_UPS=["%i"%len(upup)+\
                ''.join([' ']*(10-len("%i"%len(upup)))) for upup in UPS]
        len_UPS_str=" | ".join(len_UPS)
        res_str_i += "|= Number of Unstable PS points %s\n"%len_UPS_str
        res_str_i += \
"""
= Legend for the statistics of the stability tests. (all log below ar log_10)
The loop direction test power P is computed as follow:
P = accuracy(loop_dir_test) / accuracy(all_other_test)
So that log(P) is positive if the loop direction test is effective.
The tuple printed out is (log(median(P)),log(min(P)))
The consistency test C is computed when QP evaluations are available:
C = accuracy(all_DP_test) / abs(best_QP_eval-best_DP_eval)
So a consistent test would have log(C) as close to zero as possible.
The tuple printed out is (log(median(C)),log(min(C)),log(max(C)))\n"""
        res_str+=res_str_i
    # Then report per tool: UPS/EPS details, raw data dump and plot data.
    for key in stability['Stability'].keys():
        toolname=key
        stab=stability['Stability'][key]
        DP_stability = [eval['Accuracy'] for eval in stab['DP_stability']]
        # Remember that an evaluation which did not require QP has an empty
        # dictionary
        QP_stability = [eval['Accuracy'] if eval!={} else -1.0 for eval in \
                                                           stab['QP_stability']]
        nPS = len(DP_stability)
        if nPS>nPSmax:nPSmax=nPS
        UPS = stab['Unstable_PS_points']
        UPS_stability_DP = [DP_stability[U[0]] for U in UPS]
        UPS_stability_QP = [QP_stability[U[0]] for U in UPS]
        EPS = stab['Exceptional_PS_points']
        EPS_stability_DP = [DP_stability[E[0]] for E in EPS]
        EPS_stability_QP = [QP_stability[E[0]] for E in EPS]
        res_str_i = ""

        # Unstable PS point (UPS) statistics.
        if len(UPS)>0:
            res_str_i = "\nDetails of the %d/%d UPS encountered by %s\n"\
                                                      %(len(UPS),nPS,toolname)
            prefix = 'DP' if toolname=='CutTools' else ''
            res_str_i += "|= %s Median inaccuracy.......... %s\n"\
                                  %(prefix,f(median(UPS_stability_DP),'%.2e'))
            res_str_i += "|= %s Max accuracy............... %s\n"\
                                     %(prefix,f(min(UPS_stability_DP),'%.2e'))
            res_str_i += "|= %s Min accuracy............... %s\n"\
                                     %(prefix,f(max(UPS_stability_DP),'%.2e'))
            (pmed,pmin,pfrac)=loop_direction_test_power(\
                                   [stab['DP_stability'][U[0]] for U in UPS])
            # QP-based statistics only exist for CutTools.
            if toolname=='CutTools':
                res_str_i += "|= UPS DP loop_dir test power.... %s,%s\n"\
                                              %(f(pmed,'%.1f'),f(pmin,'%.1f'))
                res_str_i += "|= UPS DP fraction with power<-3. %s\n"\
                                                             %f(pfrac,'%.2e')
                res_str_i += "|= QP Median accuracy............ %s\n"\
                                          %f(median(UPS_stability_QP),'%.2e')
                res_str_i += "|= QP Max accuracy............... %s\n"\
                                             %f(min(UPS_stability_QP),'%.2e')
                res_str_i += "|= QP Min accuracy............... %s\n"\
                                             %f(max(UPS_stability_QP),'%.2e')
                (pmed,pmin,pfrac)=loop_direction_test_power(\
                                   [stab['QP_stability'][U[0]] for U in UPS])
                res_str_i += "|= UPS QP loop_dir test power.... %s,%s\n"\
                                              %(f(pmed,'%.1f'),f(pmin,'%.1f'))
                res_str_i += "|= UPS QP fraction with power<-3. %s\n"\
                                                             %f(pfrac,'%.2e')
                (pmed,pmin,pmax)=test_consistency(\
                                   [stab['DP_stability'][U[0]] for U in UPS],
                                   [stab['QP_stability'][U[0]] for U in UPS])
                res_str_i += "|= DP vs QP stab test consistency %s,%s,%s\n"\
                               %(f(pmed,'%.1f'),f(pmin,'%.1f'),f(pmax,'%.1f'))
        if len(EPS)==0:
            res_str_i += "= Number of Exceptional PS points : 0\n"
        # Exceptional PS point (EPS) statistics.
        if len(EPS)>0:
            res_str_i = "\nDetails of the %d/%d EPS encountered by %s\n"\
                                                      %(len(EPS),nPS,toolname)
            res_str_i += "|= DP Median accuracy............ %s\n"\
                                          %f(median(EPS_stability_DP),'%.2e')
            res_str_i += "|= DP Max accuracy............... %s\n"\
                                             %f(min(EPS_stability_DP),'%.2e')
            res_str_i += "|= DP Min accuracy............... %s\n"\
                                             %f(max(EPS_stability_DP),'%.2e')
            pmed,pmin,pfrac=loop_direction_test_power(\
                                   [stab['DP_stability'][E[0]] for E in EPS])
            res_str_i += "|= EPS DP loop_dir test power.... %s,%s\n"\
                                              %(f(pmed,'%.1f'),f(pmin,'%.1f'))
            res_str_i += "|= EPS DP fraction with power<-3. %s\n"\
                                                             %f(pfrac,'%.2e')
            res_str_i += "|= QP Median accuracy............ %s\n"\
                                          %f(median(EPS_stability_QP),'%.2e')
            res_str_i += "|= QP Max accuracy............... %s\n"\
                                             %f(min(EPS_stability_QP),'%.2e')
            res_str_i += "|= QP Min accuracy............... %s\n"\
                                             %f(max(EPS_stability_QP),'%.2e')
            pmed,pmin,pfrac=loop_direction_test_power(\
                                   [stab['QP_stability'][E[0]] for E in EPS])
            res_str_i += "|= EPS QP loop_dir test power.... %s,%s\n"\
                                              %(f(pmed,'%.1f'),f(pmin,'%.1f'))
            res_str_i += "|= EPS QP fraction with power<-3. %s\n"%f(pfrac,'%.2e')

        logFile.write(res_str_i)

        # Full kinematics of each EPS/UPS point go to the log file only.
        if len(EPS)>0:
            logFile.write('\nFull details of the %i EPS encountered by %s.\n'\
                                                          %(len(EPS),toolname))
            for i, eps in enumerate(EPS):
                logFile.write('\nEPS #%i\n'%(i+1))
                logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
                                                            for p in eps[1]]))
                logFile.write('\n DP accuracy : %.3e\n'%DP_stability[eps[0]])
                logFile.write(' QP accuracy : %.3e\n'%QP_stability[eps[0]])
        if len(UPS)>0:
            logFile.write('\nFull details of the %i UPS encountered by %s.\n'\
                                                          %(len(UPS),toolname))
            for i, ups in enumerate(UPS):
                logFile.write('\nUPS #%i\n'%(i+1))
                logFile.write('\n'.join([' '+' '.join(['%.16E'%pi for pi in p]) \
                                                            for p in ups[1]]))
                logFile.write('\n DP accuracy : %.3e\n'%DP_stability[ups[0]])
                logFile.write(' QP accuracy : %.3e\n'%QP_stability[ups[0]])

        logFile.write('\nData entries for the stability plot.\n')
        logFile.write('First row is a maximal accuracy delta, second is the '+\
                      'fraction of events with DP accuracy worse than delta.\n\n')
        # Set the x-range so that it spans [10**-17,10**(min_digit_accuracy)]
        if max(DP_stability)>0.0:
            min_digit_acc=int(math.log(max(DP_stability))/math.log(10))
            if min_digit_acc>=0:
                min_digit_acc = min_digit_acc+1
            accuracies=[10**(-17+(i/5.0)) for i in range(5*(17+min_digit_acc)+1)]
        else:
            res_str_i += '\nPerfect accuracy over all the trial PS points. No plot'+\
                         ' is output then.'
            logFile.write('Perfect accuracy over all the trial PS points.')
            res_str += res_str_i
            continue

        accuracy_dict[toolname]=accuracies
        if max(accuracies) > max_acc: max_acc=max(accuracies)
        if min(accuracies) < min_acc: min_acc=min(accuracies)
        # Fraction of events whose DP accuracy is worse than each threshold.
        data_plot=[]
        for acc in accuracies:
            data_plot.append(float(len([d for d in DP_stability if d>acc]))\
                                                      /float(len(DP_stability)))
        data_plot_dict[toolname]=data_plot

        logFile.writelines('%.3e %.3e\n'%(accuracies[i], data_plot[i]) for i in \
                                                          range(len(accuracies)))
        logFile.write('\nList of accuracies recorded for the %i evaluations with %s\n'\
                                                                 %(nPS,toolname))
        logFile.write('First row is DP, second is QP (if available).\n\n')
        logFile.writelines('%.3e '%DP_stability[i]+('NA\n' if QP_stability[i]==-1.0 \
                               else '%.3e\n'%QP_stability[i]) for i in range(nPS))
        res_str+=res_str_i
    logFile.close()
    res_str += "\n= Stability details of the run are output to the file"+\
                          " stability_%s_%s.log\n"%(mode,process.shell_string())

    # Bypass the plotting if the madgraph logger has a FileHandler (like it is
    # done in the check command acceptance test) because in this case it makes
    # no sense to plot anything.
    if any(isinstance(handler,logging.FileHandler) for handler in \
                                        logging.getLogger('madgraph').handlers):
        return res_str

    try:
        import matplotlib.pyplot as plt
        colorlist=['b','r','g','y']
        for i,key in enumerate(data_plot_dict.keys()):
            color=colorlist[i]
            data_plot=data_plot_dict[key]
            accuracies=accuracy_dict[key]
            plt.plot(accuracies, data_plot, color=color, marker='', linestyle='-',\
                     label=key)
        plt.axis([min_acc,max_acc,\
                  10**(-int(math.log(nPSmax-0.5)/math.log(10))-1), 1])
        plt.yscale('log')
        plt.xscale('log')
        plt.title('Stability plot for %s (%s mode, %d points)'%\
                                         (process.nice_string()[9:],mode,nPSmax))
        plt.ylabel('Fraction of events')
        plt.xlabel('Maximal precision')
        plt.legend()
        if not reusing:
            logger.info('Some stability statistics will be displayed once you '+\
                        'close the plot window')
            plt.show()
        else:
            fig_output_file = str(pjoin(output_path,
                       'stability_plot_%s_%s.png'%(mode,process.shell_string())))
            logger.info('Stability plot output to file %s. '%fig_output_file)
            plt.savefig(fig_output_file)
        return res_str
    except Exception as e:
        # ImportError means matplotlib is absent; anything else is reported
        # in the returned summary rather than raised.
        if isinstance(e, ImportError):
            res_str += "\n= Install matplotlib to get a "+\
                       "graphical display of the results of this check."
        else:
            res_str += "\n= Could not produce the stability plot because of "+\
                       "the following error: %s"%str(e)
        return res_str
def output_timings(process, timings):
    """Present the result of a timings check in a nice format.

    process: the process whose timing profile is reported (only its
        nice_string() is used for the header line).
    timings: dict of raw measurements filled by the timing check; entries
        may be None when a measurement is unavailable, hence the
        format_output shortcut `f` below.

    Returns the formatted multi-line report string.
    """

    # Define shortcut (format_output tolerates None entries).
    f = format_output
    loop_optimized_output = timings['loop_optimized_output']

    res_str = "%s \n"%process.nice_string()
    try:
        # Total generation time is the sum of all setup phases; any None
        # entry makes the sum raise TypeError, reported as None.
        gen_total = timings['HELAS_MODEL_compilation']+\
                    timings['HelasDiagrams_generation']+\
                    timings['Process_output']+\
                    timings['Diagrams_generation']+\
                    timings['Process_compilation']+\
                    timings['Initialization']
    except TypeError:
        gen_total = None
    res_str += "\n= Generation time total...... ========== %s\n"%f(gen_total,'%.3gs')
    res_str += "|= Diagrams generation....... %s\n"\
                                 %f(timings['Diagrams_generation'],'%.3gs')
    res_str += "|= Helas Diagrams generation. %s\n"\
                                 %f(timings['HelasDiagrams_generation'],'%.3gs')
    res_str += "|= Process output............ %s\n"\
                                 %f(timings['Process_output'],'%.3gs')
    res_str += "|= HELAS+model compilation... %s\n"\
                                 %f(timings['HELAS_MODEL_compilation'],'%.3gs')
    res_str += "|= Process compilation....... %s\n"\
                                 %f(timings['Process_compilation'],'%.3gs')
    res_str += "|= Initialization............ %s\n"\
                                 %f(timings['Initialization'],'%.3gs')

    # Run time of the helicity-summed evaluation, per phase-space point.
    res_str += "\n= Helicity sum time / PSpoint ========== %.3gms\n"\
                                  %(timings['run_unpolarized_total']*1000.0)
    if loop_optimized_output:
        # In the optimized loop output, split the time between the loop
        # coefficient computation and the actual loop (OPP) evaluation.
        coef_time=timings['run_unpolarized_coefs']*1000.0
        loop_time=(timings['run_unpolarized_total']-\
                                     timings['run_unpolarized_coefs'])*1000.0
        total=coef_time+loop_time
        res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
                                 %(coef_time,int(round(100.0*coef_time/total)))
        res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
                                 %(loop_time,int(round(100.0*loop_time/total)))
    # Same breakdown for a single-helicity evaluation.
    res_str += "\n= One helicity time / PSpoint ========== %.3gms\n"\
                                  %(timings['run_polarized_total']*1000.0)
    if loop_optimized_output:
        coef_time=timings['run_polarized_coefs']*1000.0
        loop_time=(timings['run_polarized_total']-\
                                     timings['run_polarized_coefs'])*1000.0
        total=coef_time+loop_time
        res_str += "|= Coefs. computation time... %.3gms (%d%%)\n"\
                                 %(coef_time,int(round(100.0*coef_time/total)))
        res_str += "|= Loop evaluation (OPP) time %.3gms (%d%%)\n"\
                                 %(loop_time,int(round(100.0*loop_time/total)))
    res_str += "\n= Miscellaneous ========================\n"
    res_str += "|= Number of hel. computed... %s/%s\n"\
                %(f(timings['n_contrib_hel'],'%d'),f(timings['n_tot_hel'],'%d'))
    res_str += "|= Number of loop diagrams... %s\n"%f(timings['n_loops'],'%d')
    if loop_optimized_output:
        res_str += "|= Number of loop groups..... %s\n"\
                                            %f(timings['n_loop_groups'],'%d')
        res_str += "|= Number of loop wfs........ %s\n"\
                                            %f(timings['n_loop_wfs'],'%d')
        if timings['loop_wfs_ranks']!=None:
            for i, r in enumerate(timings['loop_wfs_ranks']):
                res_str += "||= # of loop wfs of rank %d.. %d\n"%(i,r)
    res_str += "|= Loading time (Color data). ~%.3gms\n"\
                                            %(timings['Booting_time']*1000.0)
    res_str += "|= Maximum RAM usage (rss)... %s\n"\
                            %f(float(timings['ram_usage']/1000.0),'%.3gMb')
    res_str += "\n= Output disk size =====================\n"
    res_str += "|= Source directory sources.. %s\n"%f(timings['du_source'],'%sb')
    res_str += "|= Process sources........... %s\n"%f(timings['du_process'],'%sb')
    res_str += "|= Color and helicity data... %s\n"%f(timings['du_color'],'%sb')
    res_str += "|= Executable size........... %s\n"%f(timings['du_exe'],'%sb')

    return res_str
2687
def output_comparisons(comparison_results):
    """Present the results of a comparison in a nice list format
    mode short: return the number of fail process
    """
    # Process-column width: wide enough for the header and for every
    # process string, each with one trailing space, never below 17.
    pert_coupl = comparison_results[0]['process']['perturbation_couplings']
    if pert_coupl:
        process_header = "Process [virt=" + " ".join(pert_coupl) + "]"
    else:
        process_header = "Process"
    proc_col_size = max(
        [17, len(process_header) + 1] +
        [len(entry['process'].base_string()) + 1 for entry in comparison_results])

    col_size = 18

    passed_count = 0
    failed_count = 0
    unchecked_count = 0
    failed_names = []
    unchecked_names = []

    # Header row first; data rows are appended below and joined at the end.
    lines = [fixed_string_length(process_header, proc_col_size)
             + fixed_string_length("Min element", col_size)
             + fixed_string_length("Max element", col_size)
             + fixed_string_length("Relative diff.", col_size)
             + "Result"]

    for entry in comparison_results:
        proc_name = entry['process'].base_string()
        values = entry['values']

        # A single evaluation means no permutation was available to compare.
        if len(values) <= 1:
            lines.append(fixed_string_length(proc_name, proc_col_size)
                         + " * No permutations, process not checked *")
            unchecked_count += 1
            unchecked_names.append(entry['process'].nice_string())
            continue

        row = (fixed_string_length(proc_name, proc_col_size)
               + fixed_string_length("%1.10e" % min(values), col_size)
               + fixed_string_length("%1.10e" % max(values), col_size)
               + fixed_string_length("%1.10e" % entry['difference'], col_size))
        if entry['passed']:
            passed_count += 1
            row += "Passed"
        else:
            failed_count += 1
            failed_names.append(entry['process'].nice_string())
            row += "Failed"
        lines.append(row)

    res_str = '\n'.join(lines)

    checked_total = passed_count + failed_count
    res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
               (passed_count, checked_total, failed_count, checked_total)

    if failed_count != 0:
        res_str += "\nFailed processes: %s" % ', '.join(failed_names)
    if unchecked_count != 0:
        res_str += "\nNot checked processes: %s" % ', '.join(unchecked_names)

    return res_str
2757
def fixed_string_length(mystr, length):
    """Helper function returning *mystr* forced to exactly *length*
    characters: longer strings are truncated, shorter ones padded with
    trailing spaces."""

    # Truncation first (no-op for short strings), then right-padding
    # (no-op for strings already at the target length).
    return mystr[:length].ljust(length)
2766
#===============================================================================
# check_gauge
#===============================================================================
def check_gauge(processes, param_card = None,cuttools="", tir={}, reuse = False,
                options=None, output_path=None, cmd = FakeInterface()):
    """Check gauge invariance of the processes by using the BRS check.
    For one of the massless external bosons (e.g. gluon or photon),
    replace the polarization vector (epsilon_mu) with its momentum (p_mu).

    processes:   a ProcessDefinition, a Process or a ProcessList.
    param_card:  optional param card used to initialize the evaluator.
    cuttools/tir: paths to the loop reduction tools (loop processes only).
    reuse:       when True, temporary loop output folders are kept.
    options:     forwarded to the matrix-element evaluation.
    output_path: location of the (cleaned-up) loop process output.
    cmd:         interface providing the run options.

    Returns the list of per-process results produced by
    check_gauge_process / run_multiprocs_no_crossings.
    """
    cmass_scheme = cmd.options['complex_mass_scheme']
    if isinstance(processes, base_objects.ProcessDefinition):
        # Multi-process definition: run over the generated list of unique
        # processes without re-checking crossings.
        multiprocess = processes

        model = multiprocess.get('model')
        # Initialize matrix element evaluation (tree-level or loop evaluator).
        if multiprocess.get('perturbation_couplings')==[]:
            evaluator = MatrixElementEvaluator(model, param_card,cmd= cmd,
                                           auth_skipping = True, reuse = False)
        else:
            evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
                                       cmd=cmd,model=model, param_card=param_card,
                                       auth_skipping = False, reuse = False,
                                       output_path=output_path)

        if not cmass_scheme and multiprocess.get('perturbation_couplings')==[]:
            # Set all widths to zero for gauge check
            logger.info('Set All width to zero for non complex mass scheme checks')
            for particle in evaluator.full_model.get('particles'):
                if particle.get('width') != 'ZERO':
                    evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.

        results = run_multiprocs_no_crossings(check_gauge_process,
                                              multiprocess,
                                              evaluator,
                                              options=options
                                              )

        if multiprocess.get('perturbation_couplings')!=[] and not reuse:
            # Clean temporary folders created for the running of the loop processes
            clean_up(output_path)

        return results

    elif isinstance(processes, base_objects.Process):
        # Normalize a single process to a one-element list.
        processes = base_objects.ProcessList([processes])
    elif isinstance(processes, base_objects.ProcessList):
        pass
    else:
        raise InvalidCmd("processes is of non-supported format")

    assert processes, "No processes given"

    model = processes[0].get('model')

    # Initialize matrix element evaluation
    if processes[0].get('perturbation_couplings')==[]:
        evaluator = MatrixElementEvaluator(model, param_card,
                                           auth_skipping = True, reuse = False,
                                           cmd = cmd)
    else:
        evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
                                       model=model, param_card=param_card,
                                       auth_skipping = False, reuse = False,
                                       output_path=output_path, cmd = cmd)
    comparison_results = []
    # NOTE(review): comparison_explicit_flip appears unused below — confirm.
    comparison_explicit_flip = []

    # For each process, make sure we have set up leg numbers:
    for process in processes:
        # Check if we already checked process
        #if check_already_checked([l.get('id') for l in process.get('legs') if \
        #                          not l.get('state')],
        #                         [l.get('id') for l in process.get('legs') if \
        #                          l.get('state')],
        #                         sorted_ids, process, model):
        #    continue

        # Get process result
        result = check_gauge_process(process, evaluator,options=options)
        if result:
            comparison_results.append(result)

    if processes[0].get('perturbation_couplings')!=[] and not reuse:
        # Clean temporary folders created for the running of the loop processes
        clean_up(output_path)

    return comparison_results
2856
def check_gauge_process(process, evaluator, options=None):
    """Check gauge invariance (Ward identity) for one process.

    The BRS-transformed amplitude (gauge_check=True, which replaces the
    polarization vector of a massless gauge boson by its momentum) is
    evaluated together with the plain matrix element; both are returned
    for later comparison by output_gauge.

    Returns {'process', 'value', 'brs'} or None when the check does not
    apply (no massless vector boson, no diagrams, or vanishing result).
    """

    model = process.get('model')

    # The Ward identity only makes sense if the process contains at least
    # one massless external vector boson (e.g. gluon or photon).
    found_gauge = False
    for i, leg in enumerate(process.get('legs')):
        part = model.get_particle(leg.get('id'))
        if part.get('spin') == 3 and part.get('mass').lower() == 'zero':
            found_gauge = True
            break
    if not found_gauge:
        logger.info("No ward identity for %s" % \
                    process.nice_string().replace('Process', 'process'))
        # This process can't be checked
        return None

    # Make sure legs are numbered before generating diagrams.
    for i, leg in enumerate(process.get('legs')):
        leg.set('number', i+1)

    logger.info("Checking ward identities for %s" % \
                process.nice_string().replace('Process', 'process'))

    # Generate the amplitude for this process (loop or tree level).
    try:
        if process.get('perturbation_couplings')==[]:
            amplitude = diagram_generation.Amplitude(process)
        else:
            amplitude = loop_diagram_generation.LoopAmplitude(process)
    except InvalidCmd:
        logging.info("No diagrams for %s" % \
                     process.nice_string().replace('Process', 'process'))
        return None
    if not amplitude.get('diagrams'):
        # This process has no diagrams; go to next process
        logging.info("No diagrams for %s" % \
                     process.nice_string().replace('Process', 'process'))
        return None
    # Generate the HelasMatrixElement for the process
    if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
        matrix_element = helas_objects.HelasMatrixElement(amplitude,
                                                          gen_color = False)
    else:
        matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
                               optimized_output=evaluator.loop_optimized_output)

    # BRS-transformed evaluation.
    brsvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = True,
                                                 output='jamp', options=options)

    # Rebuild the tree-level matrix element for the plain evaluation (the
    # object built above was used for the gauge-checked run).
    if not isinstance(amplitude,loop_diagram_generation.LoopAmplitude):
        matrix_element = helas_objects.HelasMatrixElement(amplitude,
                                                          gen_color = False)

    mvalue = evaluator.evaluate_matrix_element(matrix_element, gauge_check = False,
                                               output='jamp', options=options)

    if mvalue and mvalue['m2']:
        return {'process':process,'value':mvalue,'brs':brsvalue}
2925
def output_gauge(comparison_results, output='text'):
    """Present the results of a gauge (BRS) comparison in a nice list format.

    comparison_results: list of {'process','value','brs'} dicts as
        produced by check_gauge_process.
    output: 'text' returns the formatted report string; any other value
        returns the number of failed processes.
    """

    proc_col_size = 17

    pert_coupl = comparison_results[0]['process']['perturbation_couplings']

    # Of course, be more tolerant for loop processes
    if pert_coupl:
        threshold=1e-5
    else:
        threshold=1e-10

    if pert_coupl:
        process_header = "Process [virt="+" ".join(pert_coupl)+"]"
    else:
        process_header = "Process"

    if len(process_header) + 1 > proc_col_size:
        proc_col_size = len(process_header) + 1

    # Widen the process column so the longest process string fits.
    for one_comp in comparison_results:
        proc = one_comp['process'].base_string()
        if len(proc) + 1 > proc_col_size:
            proc_col_size = len(proc) + 1

    col_size = 18

    pass_proc = 0
    fail_proc = 0

    failed_proc_list = []

    res_str = fixed_string_length(process_header, proc_col_size) + \
              fixed_string_length("matrix", col_size) + \
              fixed_string_length("BRS", col_size) + \
              fixed_string_length("ratio", col_size) + \
              "Result"

    for one_comp in comparison_results:
        proc = one_comp['process'].base_string()
        mvalue = one_comp['value']
        brsvalue = one_comp['brs']
        # Ratio of the BRS-transformed ME to the plain ME; should be tiny
        # for a gauge-invariant amplitude.
        # NOTE(review): assumes mvalue['m2'] != 0 — confirm upstream filter.
        ratio = (abs(brsvalue['m2'])/abs(mvalue['m2']))
        res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
                   fixed_string_length("%1.10e" % mvalue['m2'], col_size)+ \
                   fixed_string_length("%1.10e" % brsvalue['m2'], col_size)+ \
                   fixed_string_length("%1.10e" % ratio, col_size)

        if ratio > threshold:
            fail_proc += 1
            proc_succeed = False
            failed_proc_list.append(proc)
            res_str += "Failed"
        else:
            pass_proc += 1
            proc_succeed = True
            res_str += "Passed"

        # Check all the JAMPs (color-flow amplitudes) individually.
        # This is not available for loop processes where the jamp list
        # returned is empty.
        if len(mvalue['jamp'])!=0:
            for k in range(len(mvalue['jamp'][0])):
                m_sum = 0
                brs_sum = 0
                # loop over helicity
                for j in range(len(mvalue['jamp'])):
                    #values for the different lorentz boost
                    m_sum += abs(mvalue['jamp'][j][k])**2
                    brs_sum += abs(brsvalue['jamp'][j][k])**2

                # Compare the different helicity
                if not m_sum:
                    continue
                ratio = abs(brs_sum) / abs(m_sum)

                tmp_str = '\n' + fixed_string_length(' JAMP %s'%k , proc_col_size) + \
                          fixed_string_length("%1.10e" % m_sum, col_size) + \
                          fixed_string_length("%1.10e" % brs_sum, col_size) + \
                          fixed_string_length("%1.10e" % ratio, col_size)

                if ratio > 1e-15:
                    # Count the process as failed only once, even when
                    # several JAMPs fail.
                    if not len(failed_proc_list) or failed_proc_list[-1] != proc:
                        fail_proc += 1
                        pass_proc -= 1
                        failed_proc_list.append(proc)
                    res_str += tmp_str + "Failed"
                elif not proc_succeed:
                    res_str += tmp_str + "Passed"

    res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
               (pass_proc, pass_proc + fail_proc,
                fail_proc, pass_proc + fail_proc)

    if fail_proc != 0:
        res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)

    if output=='text':
        return res_str
    else:
        return fail_proc
#===============================================================================
# check_lorentz
#===============================================================================
def check_lorentz(processes, param_card = None,cuttools="", tir={}, options=None, \
                  reuse = False, output_path=None, cmd = FakeInterface()):
    """ Check if the square matrix element (sum over helicity) is lorentz
    invariant by boosting the momenta with different value.

    Accepts a ProcessDefinition, a Process or a ProcessList; the other
    arguments have the same meaning as in check_gauge.  Returns the list
    of per-process results produced by check_lorentz_process.
    """

    cmass_scheme = cmd.options['complex_mass_scheme']
    if isinstance(processes, base_objects.ProcessDefinition):
        # Multi-process definition: run over the generated list of unique
        # processes without re-checking crossings.
        multiprocess = processes
        model = multiprocess.get('model')
        # Initialize matrix element evaluation (tree-level or loop evaluator).
        if multiprocess.get('perturbation_couplings')==[]:
            evaluator = MatrixElementEvaluator(model,
                                 cmd= cmd, auth_skipping = False, reuse = True)
        else:
            evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
                              model=model, auth_skipping = False, reuse = True,
                              output_path=output_path, cmd = cmd)

        # 'processes' is the same object as 'multiprocess' in this branch.
        if not cmass_scheme and processes.get('perturbation_couplings')==[]:
            # Set all widths to zero for lorentz check
            logger.info('Set All width to zero for non complex mass scheme checks')
            for particle in evaluator.full_model.get('particles'):
                if particle.get('width') != 'ZERO':
                    evaluator.full_model.get('parameter_dict')[\
                                                 particle.get('width')] = 0.

        results = run_multiprocs_no_crossings(check_lorentz_process,
                                              multiprocess,
                                              evaluator,
                                              options=options)

        if multiprocess.get('perturbation_couplings')!=[] and not reuse:
            # Clean temporary folders created for the running of the loop processes
            clean_up(output_path)

        return results

    elif isinstance(processes, base_objects.Process):
        # Normalize a single process to a one-element list.
        processes = base_objects.ProcessList([processes])
    elif isinstance(processes, base_objects.ProcessList):
        pass
    else:
        raise InvalidCmd("processes is of non-supported format")

    assert processes, "No processes given"

    model = processes[0].get('model')

    # Initialize matrix element evaluation
    if processes[0].get('perturbation_couplings')==[]:
        evaluator = MatrixElementEvaluator(model, param_card,
                                           auth_skipping = False, reuse = True,
                                           cmd=cmd)
    else:
        evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools, tir_dir=tir,
                                       model=model,param_card=param_card,
                                       auth_skipping = False, reuse = True,
                                       output_path=output_path, cmd = cmd)

    comparison_results = []

    # For each process, make sure we have set up leg numbers:
    for process in processes:
        # Check if we already checked process
        #if check_already_checked([l.get('id') for l in process.get('legs') if \
        #                          not l.get('state')],
        #                         [l.get('id') for l in process.get('legs') if \
        #                          l.get('state')],
        #                         sorted_ids, process, model):
        #    continue

        # Get process result
        result = check_lorentz_process(process, evaluator,options=options)
        if result:
            comparison_results.append(result)

    if processes[0].get('perturbation_couplings')!=[] and not reuse:
        # Clean temporary folders created for the running of the loop processes
        clean_up(output_path)

    return comparison_results
3119
def check_lorentz_process(process, evaluator,options=None):
    """Check Lorentz invariance for one process by comparing the squared
    matrix element on the original phase-space point with its value on
    boosted and rotated copies of that point.

    Returns {'process', 'results'} where results is either a list of
    evaluations (tree level: plain dicts; loops: (label, dict) pairs) to
    be compared by the output routines, or the string 'pass' when the
    original evaluation itself vanishes.
    """

    # Make sure legs are numbered before generating diagrams.
    for i, leg in enumerate(process.get('legs')):
        leg.set('number', i+1)

    logger.info("Checking lorentz transformations for %s" % \
                process.nice_string().replace('Process:', 'process'))

    # Generate the amplitude for this process (loop or tree level).
    try:
        if process.get('perturbation_couplings')==[]:
            amplitude = diagram_generation.Amplitude(process)
        else:
            amplitude = loop_diagram_generation.LoopAmplitude(process)
    except InvalidCmd:
        logging.info("No diagrams for %s" % \
                     process.nice_string().replace('Process', 'process'))
        return None

    if not amplitude.get('diagrams'):
        # This process has no diagrams; go to next process
        logging.info("No diagrams for %s" % \
                     process.nice_string().replace('Process', 'process'))
        return None

    # Reference phase-space point.
    p, w_rambo = evaluator.get_momenta(process, options)

    # Generate the HelasMatrixElement for the process
    if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
        matrix_element = helas_objects.HelasMatrixElement(amplitude,
                                                          gen_color = True)
    else:
        matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
                               optimized_output = evaluator.loop_optimized_output)

    MLOptions = {'ImprovePS':True,'ForceMP':True}
    # Reference evaluation on the original point.
    if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
        data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
                                          auth_skipping = True, options=options)
    else:
        data = evaluator.evaluate_matrix_element(matrix_element, p=p, output='jamp',
                  auth_skipping = True, PS_name = 'original', MLOptions=MLOptions,
                  options = options)

    if data and data['m2']:
        if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
            results = [data]
        else:
            results = [('Original evaluation',data)]
    else:
        # Vanishing reference value: nothing meaningful to compare.
        return {'process':process, 'results':'pass'}

    # The boosts are not precise enough for the loop evaluations and one need the
    # fortran improve_ps function of MadLoop to work. So we only consider the
    # boosts along the z directions for loops or simple rotations.
    if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
        for boost in range(1,4):
            boost_p = boost_momenta(p, boost)
            results.append(evaluator.evaluate_matrix_element(matrix_element,
                                                      p=boost_p,output='jamp'))
    else:
        # Boost along the z axis, compatible with improve_ps.
        boost_p = boost_momenta(p, 3)
        results.append(('Z-axis boost',
            evaluator.evaluate_matrix_element(matrix_element, options=options,
              p=boost_p, PS_name='zBoost', output='jamp',MLOptions = MLOptions)))
        # We add here also the boost along x and y for reference. In the output
        # of the check, it is now clearly stated that MadLoop improve_ps script
        # will not work for them. The momenta read from event file are not
        # precise enough so these x/yBoost checks are omitted.
        if not options['events']:
            boost_p = boost_momenta(p, 1)
            results.append(('X-axis boost',
                evaluator.evaluate_matrix_element(matrix_element, options=options,
                  p=boost_p, PS_name='xBoost', output='jamp',MLOptions = MLOptions)))
            boost_p = boost_momenta(p, 2)
            results.append(('Y-axis boost',
                evaluator.evaluate_matrix_element(matrix_element,options=options,
                  p=boost_p, PS_name='yBoost', output='jamp',MLOptions = MLOptions)))
        # We only consider the rotations around the z axis so to have the
        # improve_ps fortran routine work.
        rot_p = [[pm[0],-pm[2],pm[1],pm[3]] for pm in p]
        results.append(('Z-axis pi/2 rotation',
            evaluator.evaluate_matrix_element(matrix_element,options=options,
              p=rot_p, PS_name='Rotation1', output='jamp',MLOptions = MLOptions)))
        # Now a pi/4 rotation around the z-axis
        sq2 = math.sqrt(2.0)
        rot_p = [[pm[0],(pm[1]-pm[2])/sq2,(pm[1]+pm[2])/sq2,pm[3]] for pm in p]
        results.append(('Z-axis pi/4 rotation',
            evaluator.evaluate_matrix_element(matrix_element,options=options,
              p=rot_p, PS_name='Rotation2', output='jamp',MLOptions = MLOptions)))

    return {'process': process, 'results': results}
3222
#===============================================================================
# check_unitary_feynman
#===============================================================================
def check_unitary_feynman(processes_unit, processes_feynm, param_card=None,
                          options=None, tir={}, output_path=None,
                          cuttools="", reuse=False, cmd = FakeInterface()):
    """Check gauge invariance of the processes by flipping
    the gauge of the model: the same processes are evaluated in unitary
    gauge (processes_unit) and in Feynman gauge (processes_feynm) on the
    same phase-space points, and both values are returned for comparison.

    Only ProcessDefinition inputs are supported (see the final raise).
    Returns [processes_unit, {'process','value_feynm','value_unit'}, ...].
    """

    # NOTE(review): mg_root appears unused below — confirm before removal.
    mg_root = cmd._mgme_dir

    cmass_scheme = cmd.options['complex_mass_scheme']

    if isinstance(processes_unit, base_objects.ProcessDefinition):
        # Generate a list of unique processes
        # Extract IS and FS ids
        multiprocess_unit = processes_unit
        model = multiprocess_unit.get('model')

        # Initialize matrix element evaluation
        # For the unitary gauge, open loops should not be used
        loop_optimized_bu = cmd.options['loop_optimized_output']
        if processes_unit.get('squared_orders'):
            if processes_unit.get('perturbation_couplings') in [[],['QCD']]:
                cmd.options['loop_optimized_output'] = True
            else:
                raise InvalidCmd("The gauge test cannot be performed for "+
                  " a process with more than QCD corrections and which"+
                  " specifies squared order constraints.")
        else:
            cmd.options['loop_optimized_output'] = False

        # First pass: unitary gauge.
        aloha.unitary_gauge = True
        if processes_unit.get('perturbation_couplings')==[]:
            evaluator = MatrixElementEvaluator(model, param_card,
                                   cmd=cmd,auth_skipping = False, reuse = True)
        else:
            evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
                                                   cmd=cmd, model=model,
                                                   param_card=param_card,
                                                   auth_skipping = False,
                                                   output_path=output_path,
                                                   reuse = False)
        if not cmass_scheme and multiprocess_unit.get('perturbation_couplings')==[]:
            # Set all widths to zero for the gauge check.
            logger.info('Set All width to zero for non complex mass scheme checks')
            for particle in evaluator.full_model.get('particles'):
                if particle.get('width') != 'ZERO':
                    evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.

        output_u = run_multiprocs_no_crossings(get_value,
                                               multiprocess_unit,
                                               evaluator,
                                               options=options)

        clean_added_globals(ADDED_GLOBAL)
        # Clear up previous run if checking loop output
        if processes_unit.get('perturbation_couplings')!=[]:
            clean_up(output_path)

        # Remember the phase-space point used for each process so that the
        # Feynman-gauge run evaluates the exact same kinematics.
        momentum = {}
        for data in output_u:
            momentum[data['process']] = data['p']

        # Second pass: Feynman gauge.
        multiprocess_feynm = processes_feynm
        model = multiprocess_feynm.get('model')

        # Initialize matrix element evaluation
        aloha.unitary_gauge = False
        # We could use the default output as well for Feynman, but it provides
        # an additional check
        cmd.options['loop_optimized_output'] = True
        if processes_feynm.get('perturbation_couplings')==[]:
            evaluator = MatrixElementEvaluator(model, param_card,
                                   cmd= cmd, auth_skipping = False, reuse = False)
        else:
            evaluator = LoopMatrixElementEvaluator(cuttools_dir=cuttools,tir_dir=tir,
                                                   cmd= cmd, model=model,
                                                   param_card=param_card,
                                                   auth_skipping = False,
                                                   output_path=output_path,
                                                   reuse = False)

        if not cmass_scheme and multiprocess_feynm.get('perturbation_couplings')==[]:
            # Set all widths to zero for gauge check
            for particle in evaluator.full_model.get('particles'):
                if particle.get('width') != 'ZERO':
                    evaluator.full_model.get('parameter_dict')[particle.get('width')] = 0.

        output_f = run_multiprocs_no_crossings(get_value, multiprocess_feynm,
                                               evaluator, momentum,
                                               options=options)

        # Pair up the two gauges' values per process.
        output = [processes_unit]
        for data in output_f:
            local_dico = {}
            local_dico['process'] = data['process']
            local_dico['value_feynm'] = data['value']
            local_dico['value_unit'] = [d['value'] for d in output_u
                                        if d['process'] == data['process']][0]
            output.append(local_dico)

        if processes_feynm.get('perturbation_couplings')!=[] and not reuse:
            # Clean temporary folders created for the running of the loop processes
            clean_up(output_path)

        # Reset the original global variable loop_optimized_output.
        cmd.options['loop_optimized_output'] = loop_optimized_bu

        return output
    # elif isinstance(processes, base_objects.Process):
    #     processes = base_objects.ProcessList([processes])
    # elif isinstance(processes, base_objects.ProcessList):
    #     pass
    else:
        raise InvalidCmd("processes is of non-supported format")
3338
def get_value(process, evaluator, p=None, options=None):
    """Return the value/momentum for a phase space point.

    process:   the process to evaluate (its legs get numbered in place).
    evaluator: the (Loop)MatrixElementEvaluator to use.
    p:         optional phase-space point; generated via the evaluator
               when absent.
    options:   forwarded to momentum generation and evaluation.

    Returns {'process': <base string>, 'value': <evaluation dict>, 'p': p}
    or None when the process has no diagrams or the evaluation vanishes.
    """

    # Make sure legs are numbered before generating diagrams.
    for i, leg in enumerate(process.get('legs')):
        leg.set('number', i+1)

    logger.info("Checking %s in %s gauge" % \
        ( process.nice_string().replace('Process:', 'process'),
                        'unitary' if aloha.unitary_gauge else 'feynman'))

    # Generate the amplitude for this process (loop or tree level).
    try:
        if process.get('perturbation_couplings')==[]:
            amplitude = diagram_generation.Amplitude(process)
        else:
            amplitude = loop_diagram_generation.LoopAmplitude(process)
    except InvalidCmd:
        logging.info("No diagrams for %s" % \
                     process.nice_string().replace('Process', 'process'))
        return None

    if not amplitude.get('diagrams'):
        # This process has no diagrams; go to next process
        logging.info("No diagrams for %s" % \
                     process.nice_string().replace('Process', 'process'))
        return None

    if not p:
        # Generate phase space point to use
        p, w_rambo = evaluator.get_momenta(process, options)

    # Generate the HelasMatrixElement for the process
    if not isinstance(amplitude, loop_diagram_generation.LoopAmplitude):
        matrix_element = helas_objects.HelasMatrixElement(amplitude,
                                                          gen_color = True)
    else:
        matrix_element = loop_helas_objects.LoopHelasMatrixElement(amplitude,
             gen_color = True, optimized_output = evaluator.loop_optimized_output)

    mvalue = evaluator.evaluate_matrix_element(matrix_element, p=p,
                                               output='jamp',options=options)

    if mvalue and mvalue['m2']:
        return {'process':process.base_string(),'value':mvalue,'p':p}
3385
def output_lorentz_inv_loop(comparison_results, output='text'):
    """Format the lorentz-invariance check of a loop process as a text table.

    Each entry of the results list is one lorentz transformation; the matrix
    element obtained after every transformation is compared to the reference
    (first) evaluation and flagged Passed/Failed per transformation.
    """

    process = comparison_results[0]['process']
    results = comparison_results[0]['results']

    # Rotations leave the reference vector used for helicity projection
    # unchanged, so the loop ME should be invariant under them to good
    # accuracy.
    threshold_rotations = 1e-6
    # Boosts are less stable: about 1e-5 is the best one can expect, and
    # (for an unknown reason) this holds even in quadruple precision.
    threshold_boosts = 1e-3

    col_size = 18
    transfo_name_header = 'Transformation name'
    # The first column must accommodate the header and every
    # transformation name (minimum width 17).
    transfo_col_size = max([17, len(transfo_name_header) + 1] +
                           [len(name) + 1 for name, _ in results])

    lines = ["%s" % process.base_string()]
    lines.append(fixed_string_length(transfo_name_header, transfo_col_size) +
                 fixed_string_length("Value", col_size) +
                 fixed_string_length("Relative diff.", col_size) + "Result")

    # Reference evaluation: printed first, compared against below.
    ref_name, ref_data = results[0]
    lines.append(fixed_string_length(ref_name, transfo_col_size) +
                 fixed_string_length("%1.10e" % ref_data['m2'], col_size))

    all_pass = True
    for name, data in results[1:]:
        threshold = (threshold_boosts if 'BOOST' in name.upper()
                     else threshold_rotations)
        mean = (ref_data['m2'] + data['m2']) / 2.0
        rel_diff = abs((ref_data['m2'] - data['m2']) / mean)
        ok = rel_diff <= threshold
        all_pass = all_pass and ok
        lines.append(fixed_string_length(name, transfo_col_size) +
                     fixed_string_length("%1.10e" % data['m2'], col_size) +
                     fixed_string_length("%1.10e" % rel_diff, col_size) +
                     ("Passed" if ok else "Failed"))

    lines.append('Summary: passed' if all_pass else 'Summary: failed')

    return '\n'.join(lines)
3441
def output_lorentz_inv(comparison_results, output='text'):
    """Present the results of a comparison in a nice list format
    if output='fail' return the number of failed process -- for test--
    """

    # Loop processes get a dedicated transformation-by-transformation output
    if comparison_results[0]['process']['perturbation_couplings'] != []:
        return output_lorentz_inv_loop(comparison_results, output)

    proc_col_size = 17

    threshold = 1e-10
    process_header = "Process"

    if len(process_header) + 1 > proc_col_size:
        proc_col_size = len(process_header) + 1

    # Widen the first column so every process name fits.
    # (Fixed: the previous loop unpacked each result *dict* directly, which
    # iterated over its keys instead of the process names, so the column
    # width never adapted.)
    for one_comp in comparison_results:
        proc = one_comp['process'].base_string()
        if len(proc) + 1 > proc_col_size:
            proc_col_size = len(proc) + 1

    col_size = 18

    pass_proc = 0
    fail_proc = 0
    no_check_proc = 0

    failed_proc_list = []
    no_check_proc_list = []

    res_str = fixed_string_length(process_header, proc_col_size) + \
              fixed_string_length("Min element", col_size) + \
              fixed_string_length("Max element", col_size) + \
              fixed_string_length("Relative diff.", col_size) + \
              "Result"

    for one_comp in comparison_results:
        proc = one_comp['process'].base_string()
        data = one_comp['results']

        if data == 'pass':
            no_check_proc += 1
            no_check_proc_list.append(proc)
            continue

        # m2 values obtained for each lorentz transformation
        values = [data[i]['m2'] for i in range(len(data))]

        min_val = min(values)
        max_val = max(values)
        diff = (max_val - min_val) / abs(max_val)

        res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
                   fixed_string_length("%1.10e" % min_val, col_size) + \
                   fixed_string_length("%1.10e" % max_val, col_size) + \
                   fixed_string_length("%1.10e" % diff, col_size)

        if diff < threshold:
            pass_proc += 1
            proc_succeed = True
            res_str += "Passed"
        else:
            fail_proc += 1
            proc_succeed = False
            failed_proc_list.append(proc)
            res_str += "Failed"

        # Check all the JAMP (color flows) as well. Keep in mind that this
        # is not available for loop processes where the jamp list is empty.
        if len(data[0]['jamp']) != 0:
            for k in range(len(data[0]['jamp'][0])):
                # 'sums' (not 'sum') avoids shadowing the builtin
                sums = [0] * len(data)
                # loop over helicity
                for j in range(len(data[0]['jamp'])):
                    # values for the different lorentz boosts
                    values = [abs(data[i]['jamp'][j][k]) ** 2
                              for i in range(len(data))]
                    sums = [sums[i] + values[i] for i in range(len(values))]

                # Compare the different lorentz boosts
                min_val = min(sums)
                max_val = max(sums)
                if not max_val:
                    continue
                diff = (max_val - min_val) / max_val

                tmp_str = '\n' + fixed_string_length(' JAMP %s' % k, proc_col_size) + \
                          fixed_string_length("%1.10e" % min_val, col_size) + \
                          fixed_string_length("%1.10e" % max_val, col_size) + \
                          fixed_string_length("%1.10e" % diff, col_size)

                if diff > 1e-10:
                    # Do not double-count a process that already failed above
                    if not len(failed_proc_list) or failed_proc_list[-1] != proc:
                        fail_proc += 1
                        pass_proc -= 1
                        failed_proc_list.append(proc)
                    res_str += tmp_str + "Failed"
                elif not proc_succeed:
                    res_str += tmp_str + "Passed"

    res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
               (pass_proc, pass_proc + fail_proc,
                fail_proc, pass_proc + fail_proc)

    if fail_proc != 0:
        res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
    if no_check_proc:
        res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)

    if output == 'text':
        return res_str
    else:
        return fail_proc
3557
def output_unitary_feynman(comparison_results, output='text'):
    """Present the results of a comparison in a nice list format
    if output='fail' return the number of failed process -- for test--
    """

    proc_col_size = 17

    # We use the first element of the comparison_results list to carry the
    # perturbation couplings of the process definition object
    pert_coupl = comparison_results[0]['perturbation_couplings']
    comparison_results = comparison_results[1:]

    if pert_coupl:
        process_header = "Process [virt=" + " ".join(pert_coupl) + "]"
    else:
        process_header = "Process"

    if len(process_header) + 1 > proc_col_size:
        proc_col_size = len(process_header) + 1

    # Widen the first column so every process name fits.
    for data in comparison_results:
        proc = data['process']
        if len(proc) + 1 > proc_col_size:
            proc_col_size = len(proc) + 1

    pass_proc = 0
    fail_proc = 0
    no_check_proc = 0

    failed_proc_list = []
    no_check_proc_list = []

    col_size = 18

    res_str = fixed_string_length(process_header, proc_col_size) + \
              fixed_string_length("Unitary", col_size) + \
              fixed_string_length("Feynman", col_size) + \
              fixed_string_length("Relative diff.", col_size) + \
              "Result"

    for one_comp in comparison_results:
        proc = one_comp['process']
        # data[0] = unitary-gauge evaluation, data[1] = Feynman-gauge one
        data = [one_comp['value_unit'], one_comp['value_feynm']]

        if data[0] == 'pass':
            no_check_proc += 1
            no_check_proc_list.append(proc)
            continue

        values = [data[i]['m2'] for i in range(len(data))]

        min_val = min(values)
        max_val = max(values)
        # abs() for consistency with output_lorentz_inv (m2 is normally >= 0)
        diff = (max_val - min_val) / abs(max_val)

        res_str += '\n' + fixed_string_length(proc, proc_col_size) + \
                   fixed_string_length("%1.10e" % values[0], col_size) + \
                   fixed_string_length("%1.10e" % values[1], col_size) + \
                   fixed_string_length("%1.10e" % diff, col_size)

        if diff < 1e-8:
            pass_proc += 1
            proc_succeed = True
            res_str += "Passed"
        else:
            fail_proc += 1
            proc_succeed = False
            failed_proc_list.append(proc)
            res_str += "Failed"

        # Check all the JAMP (color flows). This is not available for loop
        # processes where the jamp list returned is empty.
        if len(data[0]['jamp']) > 0:
            for k in range(len(data[0]['jamp'][0])):
                # 'sums' (not 'sum') avoids shadowing the builtin
                sums = [0, 0]
                # loop over helicity
                for j in range(len(data[0]['jamp'])):
                    # values for the two gauges
                    values = [abs(data[i]['jamp'][j][k]) ** 2
                              for i in range(len(data))]
                    sums = [sums[i] + values[i] for i in range(len(values))]

                # Compare the two gauges
                min_val = min(sums)
                max_val = max(sums)
                if not max_val:
                    continue
                diff = (max_val - min_val) / max_val

                # Use proc_col_size (not col_size) for the first column so
                # the JAMP rows stay aligned with the table header, as done
                # in output_lorentz_inv.
                tmp_str = '\n' + fixed_string_length(' JAMP %s' % k, proc_col_size) + \
                          fixed_string_length("%1.10e" % sums[0], col_size) + \
                          fixed_string_length("%1.10e" % sums[1], col_size) + \
                          fixed_string_length("%1.10e" % diff, col_size)

                if diff > 1e-10:
                    # Do not double-count a process that already failed above
                    if not len(failed_proc_list) or failed_proc_list[-1] != proc:
                        fail_proc += 1
                        pass_proc -= 1
                        failed_proc_list.append(proc)
                    res_str += tmp_str + "Failed"
                elif not proc_succeed:
                    res_str += tmp_str + "Passed"

    res_str += "\nSummary: %i/%i passed, %i/%i failed" % \
               (pass_proc, pass_proc + fail_proc,
                fail_proc, pass_proc + fail_proc)

    if fail_proc != 0:
        res_str += "\nFailed processes: %s" % ', '.join(failed_proc_list)
    if no_check_proc:
        res_str += "\nNot checked processes: %s" % ', '.join(no_check_proc_list)

    if output == 'text':
        return res_str
    else:
        return fail_proc
3679