BaseTest.py — part of The Gaudi Framework v36r9p1 (commit 5c15b2bb).
This is the source listing of BaseTest.py; see the framework documentation for details.
1 # -*- coding: utf-8 -*-
2 
12 
13 import inspect
14 import json
15 import logging
16 import os
17 import platform
18 import re
19 import signal
20 import sys
21 import tempfile
22 import threading
23 import time
24 from subprocess import PIPE, STDOUT, Popen
25 from unittest import TestCase
26 
27 try:
28  from html import escape as escape_for_html
29 except ImportError: # Python2
30  from cgi import escape as escape_for_html
31 
32 import six
33 
if sys.version_info < (3, 5):
    # backport of 'backslashreplace' handling of UnicodeDecodeError
    # to Python < 3.5 (before 3.5 the handler only covered *encode* errors)
    from codecs import backslashreplace_errors, register_error

    # NOTE(review): the `def` line was lost in the extraction of this file;
    # the function name is established by the register_error/del calls below.
    def _new_backslashreplace_errors(exc):
        """Handle UnicodeDecodeError by escaping the offending byte as \\xNN."""
        if isinstance(exc, UnicodeDecodeError):
            code = hex(ord(exc.object[exc.start]))
            # hex() gives '0xNN' -> emit '\xNN' and resume after the bad byte
            return ("\\" + code[1:], exc.start + 1)
        else:
            return backslashreplace_errors(exc)

    register_error("backslashreplace", _new_backslashreplace_errors)
    del register_error
    del backslashreplace_errors
    del _new_backslashreplace_errors
50 
51 SKIP_RETURN_CODE = 77
52 
53 
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")

    def quote(match):
        "helper function"
        # %02X zero-pads the code: the original %2X produced a space for
        # ordinals below 0x10 (e.g. '[NON-XML-CHAR-0x 8]' instead of '0x08')
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
69 
70 
def dumpProcs(name):
    """helper to debug GAUDI-1084, dump the list of processes"""
    from getpass import getuser

    # only active inside a CI workspace (e.g. Jenkins)
    if "WORKSPACE" not in os.environ:
        return
    ps = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE)
    listing = ps.communicate()[0]
    with open(os.path.join(os.environ["WORKSPACE"], name), "wb") as dump:
        dump.write(listing)
79 
80 
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).

    :param ppid: pid of the root of the process tree
    :param sig: signal number to deliver (e.g. signal.SIGTERM)
    """
    import errno

    log = logging.getLogger("kill_tree")
    ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
    # Note: start in a clean env to avoid a freeze with libasan.so
    # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
    children = map(int, get_children.communicate()[0].split())
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        # use the symbolic errno instead of the magic number 3
        if err.errno != errno.ESRCH:  # No such process
            raise
        log.debug("no such process %d", ppid)
101 
102 
103 # -------------------------------------------------------------------------#
104 
105 
class BaseTest(object):
    """Description and driver of a single test: holds the program to run,
    its options and references, and collects the outcome in self.result."""

    # shared temporary directory used by all tests when set by the harness
    _common_tmpdir = None

    def __init__(self):
        self.program = ""
        self.args = []
        self.reference = ""
        self.error_reference = ""
        self.options = ""
        self.stderr = ""
        self.timeout = 600
        self.exit_code = None
        self.environment = dict(os.environ)
        # restored: this assignment was lost in the extraction of this file;
        # run() iterates self.unsupported_platforms and the report maps it
        # as 'Unsupported Platforms'
        self.unsupported_platforms = []
        self.signal = None
        self.workdir = os.curdir
        self.use_temp_dir = False
        # Variables not for users
        self.status = None
        self.name = ""
        self.causes = []
        self.result = Result(self)
        self.returnedCode = 0
        self.out = ""
        self.err = ""
        self.proc = None
        self.stack_trace = None
        self.basedir = os.getcwd()
        self.validate_time = None
136 
137  def run(self):
138  logging.debug("running test %s", self.name)
139 
140  self.result = Result(
141  {
142  "CAUSE": None,
143  "EXCEPTION": None,
144  "RESOURCE": None,
145  "TARGET": None,
146  "TRACEBACK": None,
147  "START_TIME": None,
148  "END_TIME": None,
149  "TIMEOUT_DETAIL": None,
150  }
151  )
152 
153  if self.options:
154  if re.search(
155  r"from\s+Gaudi.Configuration\s+import\s+\*|"
156  "from\s+Configurables\s+import",
157  self.options,
158  ):
159  suffix, lang = ".py", "python"
160  else:
161  suffix, lang = ".opts", "c++"
162  self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
163  lang, escape_for_html(self.options)
164  )
165  optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
166  optionFile.file.write(self.options.encode("utf-8"))
167  optionFile.seek(0)
168  self.args.append(RationalizePath(optionFile.name))
169 
170  platform_id = (
171  self.environment.get("BINARY_TAG")
172  or self.environment.get("CMTCONFIG")
173  or platform.platform()
174  )
175  # If at least one regex matches we skip the test.
176  skip_test = bool(
177  [
178  None
179  for prex in self.unsupported_platforms
180  if re.search(prex, platform_id)
181  ]
182  )
183 
184  if not skip_test:
185  # handle working/temporary directory options
186  workdir = self.workdir
187  if self.use_temp_dir:
188  if self._common_tmpdir:
189  workdir = self._common_tmpdir
190  else:
191  workdir = tempfile.mkdtemp()
192 
193  # prepare the command to execute
194  prog = ""
195  if self.program != "":
196  prog = self.program
197  elif "GAUDIEXE" in self.environment:
198  prog = self.environment["GAUDIEXE"]
199  else:
200  prog = "Gaudi.exe"
201 
202  prog_ext = os.path.splitext(prog)[1]
203  if prog_ext not in [".exe", ".py", ".bat"]:
204  prog += ".exe"
205  prog_ext = ".exe"
206 
207  prog = which(prog) or prog
208 
209  args = list(map(RationalizePath, self.args))
210 
211  if prog_ext == ".py":
212  params = ["python", RationalizePath(prog)] + args
213  else:
214  params = [RationalizePath(prog)] + args
215 
216  # we need to switch directory because the validator expects to run
217  # in the same dir as the program
218  os.chdir(workdir)
219 
220  # launching test in a different thread to handle timeout exception
221  def target():
222  logging.debug("executing %r in %s", params, workdir)
223  self.proc = Popen(
224  params, stdout=PIPE, stderr=PIPE, env=self.environment
225  )
226  logging.debug("(pid: %d)", self.proc.pid)
227  out, err = self.proc.communicate()
228  self.out = out.decode("utf-8", errors="backslashreplace")
229  self.err = err.decode("utf-8", errors="backslashreplace")
230 
231  thread = threading.Thread(target=target)
232  thread.start()
233  # catching timeout
234  thread.join(self.timeout)
235 
236  if thread.is_alive():
237  logging.debug("time out in test %s (pid %d)", self.name, self.proc.pid)
238  # get the stack trace of the stuck process
239  cmd = [
240  "gdb",
241  "--pid",
242  str(self.proc.pid),
243  "--batch",
244  "--eval-command=thread apply all backtrace",
245  ]
246  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
247  self.stack_trace = gdb.communicate()[0].decode(
248  "utf-8", errors="backslashreplace"
249  )
250 
251  kill_tree(self.proc.pid, signal.SIGTERM)
252  thread.join(60)
253  if thread.is_alive():
254  kill_tree(self.proc.pid, signal.SIGKILL)
255  self.causes.append("timeout")
256  else:
257  self.returnedCode = self.proc.returncode
258  if self.returnedCode != SKIP_RETURN_CODE:
259  logging.debug(
260  f"completed test {self.name} with returncode = {self.returnedCode}"
261  )
262  logging.debug("validating test...")
263  val_start_time = time.perf_counter()
264  self.result, self.causes = self.ValidateOutput(
265  stdout=self.out, stderr=self.err, result=self.result
266  )
267  self.validate_time = round(time.perf_counter() - val_start_time, 2)
268  else:
269  logging.debug(f"skipped test {self.name}")
270  self.status = "skipped"
271 
272  # remove the temporary directory if we created it
273  if self.use_temp_dir and not self._common_tmpdir:
274  shutil.rmtree(workdir, True)
275 
276  os.chdir(self.basedir)
277 
278  if self.status != "skipped":
279  # handle application exit code
280  if self.signal is not None:
281  if int(self.returnedCode) != -int(self.signal):
282  self.causes.append("exit code")
283 
284  elif self.exit_code is not None:
285  if int(self.returnedCode) != int(self.exit_code):
286  self.causes.append("exit code")
287 
288  elif self.returnedCode != 0:
289  self.causes.append("exit code")
290 
291  if self.causes:
292  self.status = "failed"
293  else:
294  self.status = "passed"
295 
296  else:
297  self.status = "skipped"
298 
299  logging.debug("%s: %s", self.name, self.status)
300  field_mapping = {
301  "Exit Code": "returnedCode",
302  "stderr": "err",
303  "Arguments": "args",
304  "Runtime Environment": "environment",
305  "Status": "status",
306  "stdout": "out",
307  "Program Name": "program",
308  "Name": "name",
309  "Validator": "validator",
310  "Validation execution time": "validate_time",
311  "Output Reference File": "reference",
312  "Error Reference File": "error_reference",
313  "Causes": "causes",
314  # 'Validator Result': 'result.annotations',
315  "Unsupported Platforms": "unsupported_platforms",
316  "Stack Trace": "stack_trace",
317  }
318  resultDict = [
319  (key, getattr(self, attr))
320  for key, attr in field_mapping.items()
321  if getattr(self, attr)
322  ]
323  resultDict.append(
324  (
325  "Working Directory",
326  RationalizePath(os.path.join(os.getcwd(), self.workdir)),
327  )
328  )
329  # print(dict(resultDict).keys())
330  resultDict.extend(self.result.annotations.items())
331  # print(self.result.annotations.keys())
332  resultDict = dict(resultDict)
333 
334  # Special cases
335  if "Validator" in resultDict:
336  resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
337  "python", escape_for_html(resultDict["Validator"])
338  )
339  return resultDict
340 
341  # -------------------------------------------------#
342  # ----------------Validating tool------------------#
343  # -------------------------------------------------#
344 
345  def ValidateOutput(self, stdout, stderr, result):
346  if not self.stderr:
347  self.validateWithReference(stdout, stderr, result, self.causes)
348  elif stderr.strip() != self.stderr.strip():
349  self.causes.append("standard error")
350  return result, self.causes
351 
353  self,
354  reference=None,
355  stdout=None,
356  result=None,
357  causes=None,
358  signature_offset=0,
359  signature=None,
360  id=None,
361  ):
362  """
363  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
364  """
365 
366  if reference is None:
367  reference = self.reference
368  if stdout is None:
369  stdout = self.out
370  if result is None:
371  result = self.result
372  if causes is None:
373  causes = self.causes
374 
375  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
376  if not reflines:
377  raise RuntimeError("Empty (or null) reference")
378  # the same on standard output
379  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
380 
381  res_field = "GaudiTest.RefBlock"
382  if id:
383  res_field += "_%s" % id
384 
385  if signature is None:
386  if signature_offset < 0:
387  signature_offset = len(reference) + signature_offset
388  signature = reflines[signature_offset]
389  # find the reference block in the output file
390  try:
391  pos = outlines.index(signature)
392  outlines = outlines[
393  pos - signature_offset : pos + len(reflines) - signature_offset
394  ]
395  if reflines != outlines:
396  msg = "standard output"
397  # I do not want 2 messages in causes if the function is called
398  # twice
399  if not msg in causes:
400  causes.append(msg)
401  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
402  except ValueError:
403  causes.append("missing signature")
404  result[res_field + ".signature"] = result.Quote(signature)
405  if len(reflines) > 1 or signature != reflines[0]:
406  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
407  return causes
408 
410  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
411  ):
412  """
413  Count the number of messages with required severity (by default ERROR and FATAL)
414  and check if their numbers match the expected ones (0 by default).
415  The dictionary "expected" can be used to tune the number of errors and fatals
416  allowed, or to limit the number of expected warnings etc.
417  """
418 
419  if stdout is None:
420  stdout = self.out
421  if result is None:
422  result = self.result
423  if causes is None:
424  causes = self.causes
425 
426  # prepare the dictionary to record the extracted lines
427  errors = {}
428  for sev in expected:
429  errors[sev] = []
430 
431  outlines = stdout.splitlines()
432  from math import log10
433 
434  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
435 
436  linecount = 0
437  for l in outlines:
438  linecount += 1
439  words = l.split()
440  if len(words) >= 2 and words[1] in errors:
441  errors[words[1]].append(fmt % (linecount, l.rstrip()))
442 
443  for e in errors:
444  if len(errors[e]) != expected[e]:
445  causes.append("%s(%d)" % (e, len(errors[e])))
446  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
447  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
448  str(expected[e])
449  )
450 
451  return causes
452 
454  self,
455  stdout=None,
456  result=None,
457  causes=None,
458  trees_dict=None,
459  ignore=r"Basket|.*size|Compression",
460  ):
461  """
462  Compare the TTree summaries in stdout with the ones in trees_dict or in
463  the reference file. By default ignore the size, compression and basket
464  fields.
465  The presence of TTree summaries when none is expected is not a failure.
466  """
467  if stdout is None:
468  stdout = self.out
469  if result is None:
470  result = self.result
471  if causes is None:
472  causes = self.causes
473  if trees_dict is None:
474  lreference = self._expandReferenceFileName(self.reference)
475  # call the validator if the file exists
476  if lreference and os.path.isfile(lreference):
477  trees_dict = findTTreeSummaries(open(lreference).read())
478  else:
479  trees_dict = {}
480 
481  from pprint import PrettyPrinter
482 
483  pp = PrettyPrinter()
484  if trees_dict:
485  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
486  if ignore:
487  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
488 
489  trees = findTTreeSummaries(stdout)
490  failed = cmpTreesDicts(trees_dict, trees, ignore)
491  if failed:
492  causes.append("trees summaries")
493  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
494  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
495  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
496 
497  return causes
498 
500  self, stdout=None, result=None, causes=None, dict=None, ignore=None
501  ):
502  """
503  Compare the TTree summaries in stdout with the ones in trees_dict or in
504  the reference file. By default ignore the size, compression and basket
505  fields.
506  The presence of TTree summaries when none is expected is not a failure.
507  """
508  if stdout is None:
509  stdout = self.out
510  if result is None:
511  result = self.result
512  if causes is None:
513  causes = self.causes
514 
515  if dict is None:
516  lreference = self._expandReferenceFileName(self.reference)
517  # call the validator if the file exists
518  if lreference and os.path.isfile(lreference):
519  dict = findHistosSummaries(open(lreference).read())
520  else:
521  dict = {}
522 
523  from pprint import PrettyPrinter
524 
525  pp = PrettyPrinter()
526  if dict:
527  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
528  if ignore:
529  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
530 
531  histos = findHistosSummaries(stdout)
532  failed = cmpTreesDicts(dict, histos, ignore)
533  if failed:
534  causes.append("histos summaries")
535  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
536  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
537  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
538 
539  return causes
540 
542  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
543  ):
544  """
545  Default validation acti*on: compare standard output and error to the
546  reference files.
547  """
548 
549  if stdout is None:
550  stdout = self.out
551  if stderr is None:
552  stderr = self.err
553  if result is None:
554  result = self.result
555  if causes is None:
556  causes = self.causes
557 
558  # set the default output preprocessor
559  if preproc is None:
560  preproc = normalizeExamples
561  # check standard output
562  lreference = self._expandReferenceFileName(self.reference)
563  # call the validator if the file exists
564  if lreference and os.path.isfile(lreference):
565  causes += ReferenceFileValidator(
566  lreference, "standard output", "Output Diff", preproc=preproc
567  )(stdout, result)
568  elif lreference:
569  causes += ["missing reference file"]
570  # Compare TTree summaries
571  causes = self.CheckTTreesSummaries(stdout, result, causes)
572  causes = self.CheckHistosSummaries(stdout, result, causes)
573  if causes and lreference: # Write a new reference file for stdout
574  try:
575  cnt = 0
576  newrefname = ".".join([lreference, "new"])
577  while os.path.exists(newrefname):
578  cnt += 1
579  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
580  newref = open(newrefname, "w")
581  # sanitize newlines
582  for l in stdout.splitlines():
583  newref.write(l.rstrip() + "\n")
584  del newref # flush and close
585  result["New Output Reference File"] = os.path.relpath(
586  newrefname, self.basedir
587  )
588  except IOError:
589  # Ignore IO errors when trying to update reference files
590  # because we may be in a read-only filesystem
591  pass
592 
593  # check standard error
594  lreference = self._expandReferenceFileName(self.error_reference)
595  # call the validator if we have a file to use
596  if lreference:
597  if os.path.isfile(lreference):
598  newcauses = ReferenceFileValidator(
599  lreference, "standard error", "Error Diff", preproc=preproc
600  )(stderr, result)
601  else:
602  newcauses = ["missing error reference file"]
603  causes += newcauses
604  if newcauses and lreference: # Write a new reference file for stdedd
605  cnt = 0
606  newrefname = ".".join([lreference, "new"])
607  while os.path.exists(newrefname):
608  cnt += 1
609  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
610  newref = open(newrefname, "w")
611  # sanitize newlines
612  for l in stderr.splitlines():
613  newref.write(l.rstrip() + "\n")
614  del newref # flush and close
615  result["New Error Reference File"] = os.path.relpath(
616  newrefname, self.basedir
617  )
618  else:
619  causes += BasicOutputValidator(
620  lreference, "standard error", "ExecTest.expected_stderr"
621  )(stderr, result)
622  return causes
623 
625  self,
626  output_file,
627  reference_file,
628  result=None,
629  causes=None,
630  detailed=True,
631  ):
632  """
633  JSON validation action: compare json file to reference file
634  """
635 
636  if result is None:
637  result = self.result
638  if causes is None:
639  causes = self.causes
640 
641  if not os.path.isfile(output_file):
642  causes.append(f"output file {output_file} does not exist")
643  return causes
644 
645  try:
646  with open(output_file) as f:
647  output = json.load(f)
648  except json.JSONDecodeError as err:
649  causes.append("json parser error")
650  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
651  return causes
652 
653  lreference = self._expandReferenceFileName(reference_file)
654  if not lreference:
655  causes.append("reference file not set")
656  elif not os.path.isfile(lreference):
657  causes.append("reference file does not exist")
658  else:
659  causes += JSONOutputValidator()(lreference, output, result, detailed)
660  if causes and lreference: # Write a new reference file for output
661  try:
662  cnt = 0
663  newrefname = ".".join([lreference, "new"])
664  while os.path.exists(newrefname):
665  cnt += 1
666  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
667  with open(newrefname, "w") as newref:
668  json.dump(output, newref, indent=4)
669  result["New JSON Output Reference File"] = os.path.relpath(
670  newrefname, self.basedir
671  )
672  except IOError:
673  # Ignore IO errors when trying to update reference files
674  # because we may be in a read-only filesystem
675  pass
676  return causes
677 
678  def _expandReferenceFileName(self, reffile):
679  # if no file is passed, do nothing
680  if not reffile:
681  return ""
682 
683  # function to split an extension in constituents parts
684  import re
685 
686  platformSplit = lambda p: set(re.split(r"[-+]", p))
687 
688  reference = os.path.normpath(
689  os.path.join(self.basedir, os.path.expandvars(reffile))
690  )
691 
692  # old-style platform-specific reference name
693  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
694  if os.path.isfile(spec_ref):
695  reference = spec_ref
696  else: # look for new-style platform specific reference files:
697  # get all the files whose name start with the reference filename
698  dirname, basename = os.path.split(reference)
699  if not dirname:
700  dirname = "."
701  head = basename + "."
702  head_len = len(head)
703  platform = platformSplit(GetPlatform(self))
704  if "do0" in platform:
705  platform.add("dbg")
706  candidates = []
707  for f in os.listdir(dirname):
708  if f.startswith(head):
709  req_plat = platformSplit(f[head_len:])
710  if platform.issuperset(req_plat):
711  candidates.append((len(req_plat), f))
712  if candidates: # take the one with highest matching
713  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
714  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
715  candidates.sort()
716  reference = os.path.join(dirname, candidates[-1][1])
717  return reference
718 
719 
720 # ======= GAUDI TOOLS =======
721 
722 import calendar
723 import difflib
724 import shutil
725 import string
726 
try:
    from GaudiKernel import ROOT6WorkAroundEnabled
except ImportError:
    # NOTE(review): the `def` line was lost in the extraction; the name is
    # established by the attempted import above and the call site in
    # BasicOutputValidator.
    def ROOT6WorkAroundEnabled(id=None):
        # dummy implementation: without GaudiKernel no workaround is enabled
        return False
734 
735 
736 # --------------------------------- TOOLS ---------------------------------#
737 
738 
# NOTE(review): the `def` line was lost in the extraction; the name is
# established by the call sites (e.g. in BaseTest.run).
def RationalizePath(p):
    """
    Function used to normalize the used path: expand environment variables,
    normalize it, and resolve symlinks when the result exists; otherwise the
    input is returned unchanged.
    """
    newPath = os.path.normpath(os.path.expandvars(p))
    if os.path.exists(newPath):
        p = os.path.realpath(newPath)
    return p
747 
748 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe' suffix.
    If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                if os.path.isfile(executable[:-4]):
                    return executable[:-4]
            else:
                # fall back to searching just the basename on $PATH
                executable = os.path.split(executable)[1]
        else:
            return executable
    # robustness fix: os.environ.get("PATH") is None when PATH is unset,
    # which made the original .split() raise AttributeError
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            return fullpath[:-4]
    return None
771 
772 
773 # -------------------------------------------------------------------------#
774 # ----------------------------- Result Classe -----------------------------#
775 # -------------------------------------------------------------------------#
776 import types
777 
778 
class Result:
    """Mapping-like container for the annotations produced while validating a
    test (keys and values are required to be strings)."""

    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"

    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations=None):
        # 'kind', 'id' and 'outcome' are accepted but unused (kept for
        # backward compatibility with existing callers).
        # The mutable default argument was replaced by None; the dict is
        # copied so the caller's object is never aliased.
        self.annotations = {} if annotations is None else annotations.copy()

    def __getitem__(self, key):
        # six.string_types replaced by str: the file already relies on
        # Python-3-only features (f-strings), so six was vestigial here
        assert isinstance(key, str)
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert isinstance(key, str)
        assert isinstance(value, str), "{!r} is not a string".format(value)
        self.annotations[key] = value

    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return "<pre>{}</pre>".format(escape_for_html(text))
811 
812 
813 # -------------------------------------------------------------------------#
814 # --------------------------- Validator Classes ---------------------------#
815 # -------------------------------------------------------------------------#
816 
817 # Basic implementation of an option validator for Gaudi test. This
818 # implementation is based on the standard (LCG) validation functions used
819 # in QMTest.
820 
821 
# NOTE(review): the `class` line was lost in the extraction; the name is
# established by the instantiation in validateWithReference.
class BasicOutputValidator:
    """Validator comparing an output stream verbatim against a fixed
    reference string."""

    def __init__(self, ref, cause, result_key):
        self.ref = ref            # expected text
        self.cause = cause        # failure cause label to report
        self.result_key = result_key  # annotation key for the expected text

    def __call__(self, out, result):
        """Validate the output of the program.
        'out' -- A string containing the data written to the output stream.
        'result' -- A 'Result' object. It may be used to annotate
        the outcome according to the content of stderr.
        returns -- A list of strings giving causes of failure."""
        causes = []
        # Check the output
        if not self.__CompareText(out, self.ref):
            causes.append(self.cause)
            result[self.result_key] = result.Quote(self.ref)

        return causes

    def __CompareText(self, s1, s2):
        """Compare 's1' and 's2', ignoring line endings.
        's1' -- A string.
        's2' -- A string.
        returns -- True if 's1' and 's2' are the same, ignoring
        differences in line endings."""
        if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
            # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
            # can fix them
            to_ignore = re.compile(
                r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
            )

            def keep_line(l):
                return not to_ignore.match(l)

            return list(filter(keep_line, s1.splitlines())) == list(
                filter(keep_line, s2.splitlines())
            )
        else:
            return s1.splitlines() == s2.splitlines()
867 
868 
869 # ------------------------ Preprocessor elements ------------------------#
# NOTE(review): the `class` line was lost in the extraction; the name is
# established by the uses below (normalizeEOL = FilePreprocessor(), etc.).
class FilePreprocessor:
    """Base class for a callable that takes a file and returns a modified
    version of it."""

    def __processLine__(self, line):
        # identity by default; subclasses override (None drops the line)
        return line

    def __processFile__(self, lines):
        output = []
        for l in lines:
            l = self.__processLine__(l)
            if l:
                output.append(l)
        return output

    def __call__(self, input):
        # six.string_types replaced by str: the file already relies on
        # Python-3-only features (f-strings), so six was vestigial here
        if not isinstance(input, str):
            lines = input
            mergeback = False
        else:
            lines = input.splitlines()
            mergeback = True
        output = self.__processFile__(lines)
        if mergeback:
            output = "\n".join(output)
        return output

    def __add__(self, rhs):
        return FilePreprocessorSequence([self, rhs])
899 
900 
# NOTE(review): the `class` line was lost in the extraction; the name and the
# FilePreprocessor base are established by FilePreprocessor.__add__ above.
class FilePreprocessorSequence(FilePreprocessor):
    """Composition of preprocessors, applied left to right."""

    def __init__(self, members=None):
        # mutable default argument replaced by None + copy
        self.members = list(members) if members else []

    def __add__(self, rhs):
        return FilePreprocessorSequence(self.members + [rhs])

    def __call__(self, input):
        output = input
        for pp in self.members:
            output = pp(output)
        return output
913 
914 
# NOTE(review): the `class` line was lost in the extraction; the name is
# established by the `lineSkipper = LineSkipper(...)` use below.
class LineSkipper(FilePreprocessor):
    """Preprocessor dropping lines containing any of the given substrings or
    matching any of the given regular expressions."""

    def __init__(self, strings=None, regexps=None):
        # mutable default arguments replaced by None; `re` is already
        # imported at module level, the local import was redundant
        self.strings = strings if strings is not None else []
        self.regexps = list(map(re.compile, regexps or []))

    def __processLine__(self, line):
        for s in self.strings:
            if line.find(s) >= 0:
                return None
        for r in self.regexps:
            if r.search(line):
                return None
        return line
930 
931 
933  def __init__(self, start, end):
934  self.start = start
935  self.end = end
936  self._skipping = False
937 
938  def __processLine__(self, line):
939  if self.start in line:
940  self._skipping = True
941  return None
942  elif self.end in line:
943  self._skipping = False
944  elif self._skipping:
945  return None
946  return line
947 
948 
# NOTE(review): the `class` line was lost in the extraction; the name is
# established by the self-references in __add__ and the uses below
# (maskPointers, normalizeDate, normalizeExamples).
class RegexpReplacer(FilePreprocessor):
    """Preprocessor applying regexp substitutions, each optionally guarded by
    a 'when' regexp that must match the line for the substitution to apply."""

    def __init__(self, orig, repl="", when=None):
        if when:
            when = re.compile(when)
        # list of (guard, pattern, replacement) triples
        self._operations = [(when, re.compile(orig), repl)]

    def __add__(self, rhs):
        if isinstance(rhs, RegexpReplacer):
            # merge the operation lists to keep a single efficient replacer
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res

    def __processLine__(self, line):
        for w, o, r in self._operations:
            if w is None or w.search(line):
                line = o.sub(r, line)
        return line
968 
969 
970 # Common preprocessors
# Common preprocessors
maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01",
)


# normalizeEOL: strip trailing whitespace and force a single '\n'
def _append_eol(line):
    return str(line).rstrip() + "\n"


normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = _append_eol


# skipEmptyLines: drop lines that contain only whitespace
# FIXME: that's ugly
def _drop_blank(line):
    return (line.strip() and line) or None


skipEmptyLines = FilePreprocessor()
skipEmptyLines.__processLine__ = _drop_blank
982 
983 # Special preprocessor sorting the list of strings (whitespace separated)
984 # that follow a signature on a single line
985 
986 
988  def __init__(self, signature):
989  self.signature = signature
990  self.siglen = len(signature)
991 
992  def __processLine__(self, line):
993  pos = line.find(self.signature)
994  if pos >= 0:
995  line = line[: (pos + self.siglen)]
996  lst = line[(pos + self.siglen) :].split()
997  lst.sort()
998  line += " ".join(lst)
999  return line
1000 
1001 
1003  """
1004  Sort group of lines matching a regular expression
1005  """
1006 
1007  def __init__(self, exp):
1008  self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1009 
1010  def __processFile__(self, lines):
1011  match = self.exp.match
1012  output = []
1013  group = []
1014  for l in lines:
1015  if match(l):
1016  group.append(l)
1017  else:
1018  if group:
1019  group.sort()
1020  output.extend(group)
1021  group = []
1022  output.append(l)
1023  return output
1024 
1025 
# Preprocessors for GaudiExamples
normalizeExamples = maskPointers + normalizeDate
for w, o, r in [
    # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
    ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"file \1",
    ),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # Absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # Ignore count of declared properties (anyway they are all printed)
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property \['Name': Value\]", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    # fixed: the extraction had inserted invisible zero-width characters
    # inside the escaped parentheses of this pattern
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
]:  # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
    normalizeExamples += RegexpReplacer(o, r, w)
1066 
# Lines removed from both the reference and the actual output before the
# comparison: first a list of plain substrings, then regular expressions.
lineSkipper = LineSkipper(
    [
        "//GP:",
        "JobOptionsSvc INFO # ",
        "JobOptionsSvc WARNING # ",
        "Time User",
        "Welcome to",
        "This machine has a speed",
        "running on",
        "ToolSvc.Sequenc... INFO",
        "DataListenerSvc INFO XML written to file:",
        "[INFO]",
        "[WARNING]",
        "DEBUG No writable file catalog found which contains FID:",
        "DEBUG Service base class initialized successfully",
        # changed between v20 and v21
        "DEBUG Incident timing:",
        # introduced with patch #3487
        # changed the level of the message from INFO to
        # DEBUG
        "INFO 'CnvServices':[",
        # message removed because could be printed in constructor
        "DEBUG 'CnvServices':[",
        # The signal handler complains about SIGXCPU not
        # defined on some platforms
        "SIGXCPU",
        # Message removed with redesign of JobOptionsSvc
        "ServiceLocatorHelper::service: found service JobOptionsSvc",
        # Ignore warnings for properties case mismatch
        "mismatching case for property name:",
        # Message demoted to DEBUG in gaudi/Gaudi!992
        "Histograms saving not required.",
        # Message added in gaudi/Gaudi!577
        "Properties are dumped into",
    ],
    regexps=[
        r"^JobOptionsSvc INFO *$",
        r"^# ",  # Ignore python comments
        # skip the message reporting the version of the root file
        r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
        r"File '.*.xml' does not exist",
        r"INFO Refer to dataset .* by its file ID:",
        r"INFO Referring to dataset .* by its file ID:",
        r"INFO Disconnect from dataset",
        r"INFO Disconnected from dataset",
        r"INFO Disconnected data IO:",
        r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
        # Ignore StatusCodeSvc related messages
        r".*StatusCodeSvc.*",
        r".*StatusCodeCheck.*",
        r"Num\s*\|\s*Function\s*\|\s*Source Library",
        r"^[-+]*\s*$",
        # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
        r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
        # Hide unchecked StatusCodes from dictionaries
        r"^ +[0-9]+ \|.*ROOT",
        r"^ +[0-9]+ \|.*\|.*Dict",
        # Hide EventLoopMgr total timing report
        r"EventLoopMgr.*---> Loop Finished",
        r"HiveSlimEventLo.*---> Loop Finished",
        # Remove ROOT TTree summary table, which changes from one version to the
        # other
        r"^\*.*\*$",
        # Remove Histos Summaries
        r"SUCCESS\s*Booked \d+ Histogram\(s\)",
        r"^ \|",
        r"^ ID=",
        # Ignore added/removed properties
        r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
        r"Property(.*)'Audit(Begin|End)Run':",
        # these were missing in tools
        r"Property(.*)'AuditRe(start|initialize)':",
        r"Property(.*)'Blocking':",
        # removed with gaudi/Gaudi!273
        r"Property(.*)'ErrorCount(er)?':",
        # added with gaudi/Gaudi!306
        r"Property(.*)'Sequential':",
        # added with gaudi/Gaudi!314
        r"Property(.*)'FilterCircularDependencies':",
        # removed with gaudi/Gaudi!316
        r"Property(.*)'IsClonable':",
        # ignore uninteresting/obsolete messages
        r"Property update for OutputLevel : new value =",
        r"EventLoopMgr\s*DEBUG Creating OutputStream",
    ],
)
1153 
if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
    # fix them
    lineSkipper = lineSkipper + LineSkipper(
        regexps=[
            r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
        ]
    )
1162 
# Final combined preprocessor for GaudiExamples: drop ignorable lines first,
# then apply the regexp normalizations, strip empty lines/EOLs and make the
# order of some known-unstable line groups deterministic.
normalizeExamples = (
    lineSkipper
    + normalizeExamples
    + skipEmptyLines
    + normalizeEOL
    + LineSorter("Services to release : ")
    + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
)
1171 
1172 # --------------------- Validation functions/classes ---------------------#
1173 
1174 
1176  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1177  self.reffile = os.path.expandvars(reffile)
1178  self.cause = cause
1179  self.result_key = result_key
1180  self.preproc = preproc
1181 
1182  def __call__(self, stdout, result):
1183  causes = []
1184  if os.path.isfile(self.reffile):
1185  orig = open(self.reffile).readlines()
1186  if self.preproc:
1187  orig = self.preproc(orig)
1188  result[self.result_key + ".preproc.orig"] = result.Quote(
1189  "\n".join(map(str.strip, orig))
1190  )
1191  else:
1192  orig = []
1193  new = stdout.splitlines()
1194  if self.preproc:
1195  new = self.preproc(new)
1196 
1197  filterdiffs = list(
1198  difflib.unified_diff(
1199  orig, new, n=1, fromfile="Reference file", tofile="Actual output"
1200  )
1201  )
1202  if filterdiffs:
1203  result[self.result_key] = result.Quote("".join(filterdiffs))
1204  result[self.result_key + ".preproc.new"] = result.Quote(
1205  "\n".join(map(str.strip, new))
1206  )
1207  causes.append(self.cause)
1208  return causes
1209 
1210 
1212  """
1213  Scan stdout to find ROOT TTree summaries and digest them.
1214  """
1215  stars = re.compile(r"^\*+$")
1216  outlines = stdout.splitlines()
1217  nlines = len(outlines)
1218  trees = {}
1219 
1220  i = 0
1221  while i < nlines: # loop over the output
1222  # look for
1223  while i < nlines and not stars.match(outlines[i]):
1224  i += 1
1225  if i < nlines:
1226  tree, i = _parseTTreeSummary(outlines, i)
1227  if tree:
1228  trees[tree["Name"]] = tree
1229 
1230  return trees
1231 
1232 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found.

    ignore -- optional regexp: top-level keys matching it are not compared
    Returns the path (list of keys, outermost first) leading to the first
    differing value, or an empty list when no difference is found.
    Side effect: a key missing from to_check is added to it with value None.
    """
    # select the keys of the reference dictionary to be compared
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = list(reference)
    for key in keys:
        if key not in to_check:
            # missing key: record it in to_check and report the failure
            to_check[key] = None
            return [key]
        ref_value = reference[key]
        chk_value = to_check[key]
        if type(ref_value) is dict and type(chk_value) is dict:
            # both values are dictionaries: recurse
            sub_path = cmpTreesDicts(ref_value, chk_value, ignore)
            if sub_path:
                return [key] + sub_path
        elif chk_value != ref_value:
            return [key]
    return []
1264 
1265 
def getCmpFailingValues(reference, to_check, fail_path):
    """
    Follow fail_path (as returned by cmpTreesDicts) into both dictionaries.

    Returns a tuple (fail_path, reference_value, checked_value) with the two
    values found at the end of the path (or where one side runs out).
    """
    ref_value = reference
    chk_value = to_check
    for key in fail_path:
        chk_value = chk_value.get(key, None)
        ref_value = ref_value.get(key, None)
        if chk_value is None or ref_value is None:
            # one of the dictionaries is not deep enough
            break
    return (fail_path, ref_value, chk_value)
1275 
1276 
# signature of the print-out of the histograms, e.g.
#   "HistoSvc  SUCCESS Booked 3 Histogram(s) : 1D=2 2D=1"
# groups: (1) component name, (2) total count, (3) per-type counters
h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1279 
1280 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested informations and the
    position of the first line after the summary.

    lines -- list of output lines
    pos   -- index of the first line of the table (a row of '*'s)
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        # split a table row "*col0 : col1 : col2 *" into up to 3 stripped fields
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        # digest one 3-line block (tree header or branch) into a dict
        r = {}
        cols = splitcols(ll[0])
        r["Name"], r["Title"] = cols[1:]

        cols = splitcols(ll[1])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # tree kept in memory only: no file size reported
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            # basket details are present only for branch blocks
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])
        return r

    if i < (count - 3) and lines[i].startswith("*Tree"):
        result = parseblock(lines[i : i + 3])
        result["Branches"] = {}
        i += 4  # 3 content lines plus the '*' separator row
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            branch = parseblock(lines[i : i + 3])
            result["Branches"][branch["Name"]] = branch
            i += 4

    return (result, i)
1331 
1332 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.
    Returns the position of the first line after the summary block.

    lines -- list of output lines
    pos   -- index of the block header line (the one matching h_count_re)
    Returns (summary dict, position of the first line after the block).
    """
    global h_count_re
    # per-directory table header, e.g.
    #   'SUCCESS  1D histograms in directory "dirname"'
    h_table_head = re.compile(
        r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
    )
    # short (one histogram per line) summary format: ' ID=<id> "<title>" ...'
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    # trailing counters like '1D=3 2D=1' -> {'1D': 3, '2D': 1}
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            t = t.replace(" profile", "Prof")
            pos += 1
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format: the first row holds the column titles
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # short format: one histogram per line
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
            if not d in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1399 
1400 
def findHistosSummaries(stdout):
    """
    Scan stdout to find histogram summary blocks and digest them.

    stdout -- the full job output as a single string
    Returns a dict mapping directory/component name to the digested histogram
    summaries, as produced by parseHistosSummary.

    (Fixed docstring: it previously claimed to parse ROOT TTree summaries.)
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}

    pos = 0
    while pos < nlines:
        # advance to the header line of the next histogram-count block
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = parseHistosSummary(outlines, pos)
            summaries.update(summ)
    return summaries
1422 
1423 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.

    When none of those variables is set, a dummy platform is derived from
    ENV_CMAKE_BUILD_TYPE; "None" is returned if nothing matches.
    """
    # check architecture name, first match wins
    for var in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if var in os.environ:
            return os.environ[var]
    build_type = os.environ.get("ENV_CMAKE_BUILD_TYPE", "")
    if build_type in ("Debug", "FastDebug", "Developer"):
        return "dummy-dbg"
    # RelWithDebInfo == -O2 -g -DNDEBUG
    if build_type in ("Release", "MinSizeRel", "RelWithDebInfo", ""):
        return "dummy-opt"
    return "None"
1450 
1451 
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    # FIX: use a local name that does not shadow the imported 'platform' module
    platform_id = GetPlatform(self)
    return "winxp" in platform_id or platform_id.startswith("win")
1461 
1462 
1464  def __call__(self, ref, out, result, detailed=True):
1465  """Validate JSON output.
1466  returns -- A list of strings giving causes of failure."""
1467 
1468  causes = []
1469  try:
1470  with open(ref) as f:
1471  expected = json.load(f)
1472  except json.JSONDecodeError as err:
1473  causes.append("json parser error")
1474  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1475  return causes
1476 
1477  if not detailed:
1478  if expected != out:
1479  causes.append("json content")
1480  result["json_diff"] = "detailed diff was turned off"
1481  return causes
1482 
1483  # piggyback on TestCase dict diff report
1484  t = TestCase()
1485  try:
1486  t.assertEqual(expected, out)
1487  except AssertionError as err:
1488  causes.append("json content")
1489  result["json_diff"] = str(err).splitlines()[0]
1490 
1491  return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1177
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:127
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:1007
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:115
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:870
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:282
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:797
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:824
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:71
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:990
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:885
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:987
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1281
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:992
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:130
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:499
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:54
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:108
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:113
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:823
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:117
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1180
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:541
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1266
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:826
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:132
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:39
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:133
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:877
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:119
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:989
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:118
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:934
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:81
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:806
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:905
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:624
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:901
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:110
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:953
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:131
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:1010
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:932
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:112
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:128
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:874
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:908
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:122
Gaudi::Functional::details::get
auto get(const Handle &handle, const Algo &, const EventContext &) -> decltype(details::deref(handle.get()))
Definition: FunctionalDetails.h:444
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:936
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1178
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1333
GaudiTesting.BaseTest.BaseTest.validate_time
validate_time
Definition: BaseTest.py:135
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:949
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1452
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:920
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:134
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:749
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:1008
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:120
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:794
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:935
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:129
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:919
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:828
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1233
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:795
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:126
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1463
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:963
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
Definition: BaseTest.py:1176
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:903
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:678
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:121
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:1002
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:352
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:739
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:915
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1175
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:33
gaudirun.type
type
Definition: gaudirun.py:160
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:111
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:902
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1182
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:825
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:897
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1179
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:933
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:950
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1401
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:801
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:453
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:106
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:409
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:114
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:345
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1464
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:822
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:916
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:779
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:137
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1211
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:846
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:955
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:513
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:116
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:988
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:922
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:731
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:123
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1424
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:125
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: FunctionalDetails.h:102
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:938