The Gaudi Framework  v36r11 (bdb84f5f)
BaseTest.py
1 
11 
12 import inspect
13 import json
14 import logging
15 import os
16 import platform
17 import re
18 import signal
19 import sys
20 import tempfile
21 import threading
22 import time
23 from subprocess import PIPE, STDOUT, Popen
24 from unittest import TestCase
25 
26 try:
27  from html import escape as escape_for_html
28 except ImportError: # Python2
29  from cgi import escape as escape_for_html
30 
31 import six
32 
33 if sys.version_info < (3, 5):
34  # backport of 'backslashreplace' handling of UnicodeDecodeError
35  # to Python < 3.5
36  from codecs import backslashreplace_errors, register_error
37 
38  def _new_backslashreplace_errors(exc):
39  if isinstance(exc, UnicodeDecodeError):
40  code = hex(ord(exc.object[exc.start]))
41  return ("\\" + code[1:], exc.start + 1)
42  else:
43  return backslashreplace_errors(exc)
44 
45  register_error("backslashreplace", _new_backslashreplace_errors)
46  del register_error
47  del backslashreplace_errors
48  del _new_backslashreplace_errors
49 
50 SKIP_RETURN_CODE = 77
51 
52 
53 def sanitize_for_xml(data):
54  """
55  Take a string with invalid ASCII/UTF characters and quote them so that the
56  string can be used in an XML text.
57 
58  >>> sanitize_for_xml('this is \x1b')
59  'this is [NON-XML-CHAR-0x1B]'
60  """
61  bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")
62 
63  def quote(match):
64  "helper function"
65  return "".join("[NON-XML-CHAR-0x%2X]" % ord(c) for c in match.group())
66 
67  return bad_chars.sub(quote, data)
68 
69 
70 def dumpProcs(name):
71  """helper to debug GAUDI-1084, dump the list of processes"""
72  from getpass import getuser
73 
74  if "WORKSPACE" in os.environ:
75  p = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE)
76  with open(os.path.join(os.environ["WORKSPACE"], name), "wb") as f:
77  f.write(p.communicate()[0])
78 
79 
80 def kill_tree(ppid, sig):
81  """
82  Send a signal to a process and all its child processes (starting from the
83  leaves).
84  """
85  log = logging.getLogger("kill_tree")
86  ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
87  # Note: start in a clean env to avoid a freeze with libasan.so
88  # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
89  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
90  children = map(int, get_children.communicate()[0].split())
91  for child in children:
92  kill_tree(child, sig)
93  try:
94  log.debug("killing process %d", ppid)
95  os.kill(ppid, sig)
96  except OSError as err:
97  if err.errno != 3: # No such process
98  raise
99  log.debug("no such process %d", ppid)
100 
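
A minimal sketch of the escalation pattern that run() below builds on top of kill_tree; the helper name and the grace period are illustrative, not part of this module.

from subprocess import TimeoutExpired

def stop_process_tree(proc, grace=60):
    # ask the whole process tree to terminate, leaves first
    kill_tree(proc.pid, signal.SIGTERM)
    try:
        proc.wait(timeout=grace)
    except TimeoutExpired:
        # escalate if the tree is still alive after the grace period
        kill_tree(proc.pid, signal.SIGKILL)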
101 
102 # -------------------------------------------------------------------------#
103 
104 
105 class BaseTest(object):
106 
107  _common_tmpdir = None
108 
109  def __init__(self):
110  self.program = ""
111  self.args = []
112  self.reference = ""
113  self.error_reference = ""
114  self.options = ""
115  self.stderr = ""
116  self.timeout = 600
117  self.exit_code = None
118  self.environment = dict(os.environ)
119  self.unsupported_platforms = []
120  self.signal = None
121  self.workdir = os.curdir
122  self.use_temp_dir = False
123  # Variables not for users
124  self.status = None
125  self.name = ""
126  self.causes = []
127  self.result = Result(self)
128  self.returnedCode = 0
129  self.out = ""
130  self.err = ""
131  self.proc = None
132  self.stack_trace = None
133  self.basedir = os.getcwd()
134  self.validate_time = None
135 
136  def run(self):
137  logging.debug("running test %s", self.name)
138 
139  self.result = Result(
140  {
141  "CAUSE": None,
142  "EXCEPTION": None,
143  "RESOURCE": None,
144  "TARGET": None,
145  "TRACEBACK": None,
146  "START_TIME": None,
147  "END_TIME": None,
148  "TIMEOUT_DETAIL": None,
149  }
150  )
151 
152  if self.options:
153  if re.search(
154  r"from\s+Gaudi.Configuration\s+import\s+\*|"
155  "from\s+Configurables\s+import",
156  self.options,
157  ):
158  suffix, lang = ".py", "python"
159  else:
160  suffix, lang = ".opts", "c++"
161  self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
162  lang, escape_for_html(self.options)
163  )
164  optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
165  optionFile.file.write(self.options.encode("utf-8"))
166  optionFile.seek(0)
167  self.args.append(RationalizePath(optionFile.name))
168 
169  platform_id = (
170  self.environment.get("BINARY_TAG")
171  or self.environment.get("CMTCONFIG")
172  or platform.platform()
173  )
174  # If at least one regex matches we skip the test.
175  skip_test = bool(
176  [
177  None
178  for prex in self.unsupported_platforms
179  if re.search(prex, platform_id)
180  ]
181  )
182 
183  if not skip_test:
184  # handle working/temporary directory options
185  workdir = self.workdir
186  if self.use_temp_dir:
187  if self._common_tmpdir:
188  workdir = self._common_tmpdir
189  else:
190  workdir = tempfile.mkdtemp()
191 
192  # prepare the command to execute
193  prog = ""
194  if self.program != "":
195  prog = self.program
196  elif "GAUDIEXE" in self.environment:
197  prog = self.environment["GAUDIEXE"]
198  else:
199  prog = "Gaudi.exe"
200 
201  prog_ext = os.path.splitext(prog)[1]
202  if prog_ext not in [".exe", ".py", ".bat"]:
203  prog += ".exe"
204  prog_ext = ".exe"
205 
206  prog = which(prog) or prog
207 
208  args = list(map(RationalizePath, self.args))
209 
210  if prog_ext == ".py":
211  params = ["python3", RationalizePath(prog)] + args
212  else:
213  params = [RationalizePath(prog)] + args
214 
215  # we need to switch directory because the validator expects to run
216  # in the same dir as the program
217  os.chdir(workdir)
218 
219  # launching test in a different thread to handle timeout exception
220  def target():
221  logging.debug("executing %r in %s", params, workdir)
222  self.proc = Popen(
223  params, stdout=PIPE, stderr=PIPE, env=self.environment
224  )
225  logging.debug("(pid: %d)", self.proc.pid)
226  out, err = self.proc.communicate()
227  self.out = out.decode("utf-8", errors="backslashreplace")
228  self.err = err.decode("utf-8", errors="backslashreplace")
229 
230  thread = threading.Thread(target=target)
231  thread.start()
232  # catching timeout
233  thread.join(self.timeout)
234 
235  if thread.is_alive():
236  logging.debug("time out in test %s (pid %d)", self.name, self.proc.pid)
237  # get the stack trace of the stuck process
238  cmd = [
239  "gdb",
240  "--pid",
241  str(self.proc.pid),
242  "--batch",
243  "--eval-command=thread apply all backtrace",
244  ]
245  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
246  self.stack_trace = gdb.communicate()[0].decode(
247  "utf-8", errors="backslashreplace"
248  )
249 
250  kill_tree(self.proc.pid, signal.SIGTERM)
251  thread.join(60)
252  if thread.is_alive():
253  kill_tree(self.proc.pid, signal.SIGKILL)
254  self.causes.append("timeout")
255  else:
256  self.returnedCode = self.proc.returncode
257  if self.returnedCode != SKIP_RETURN_CODE:
258  logging.debug(
259  f"completed test {self.name} with returncode = {self.returnedCode}"
260  )
261  logging.debug("validating test...")
262  val_start_time = time.perf_counter()
263  self.result, self.causes = self.ValidateOutput(
264  stdout=self.out, stderr=self.err, result=self.result
265  )
266  self.validate_time = round(time.perf_counter() - val_start_time, 2)
267  else:
268  logging.debug(f"skipped test {self.name}")
269  self.status = "skipped"
270 
271  # remove the temporary directory if we created it
272  if self.use_temp_dir and not self._common_tmpdir:
273  shutil.rmtree(workdir, True)
274 
275  os.chdir(self.basedir)
276 
277  if self.status != "skipped":
278  # handle application exit code
279  if self.signal is not None:
280  if int(self.returnedCode) != -int(self.signal):
281  self.causes.append("exit code")
282 
283  elif self.exit_code is not None:
284  if int(self.returnedCode) != int(self.exit_code):
285  self.causes.append("exit code")
286 
287  elif self.returnedCode != 0:
288  self.causes.append("exit code")
289 
290  if self.causes:
291  self.status = "failed"
292  else:
293  self.status = "passed"
294 
295  else:
296  self.status = "skipped"
297 
298  logging.debug("%s: %s", self.name, self.status)
299  field_mapping = {
300  "Exit Code": "returnedCode",
301  "stderr": "err",
302  "Arguments": "args",
303  "Runtime Environment": "environment",
304  "Status": "status",
305  "stdout": "out",
306  "Program Name": "program",
307  "Name": "name",
308  "Validator": "validator",
309  "Validation execution time": "validate_time",
310  "Output Reference File": "reference",
311  "Error Reference File": "error_reference",
312  "Causes": "causes",
313  # 'Validator Result': 'result.annotations',
314  "Unsupported Platforms": "unsupported_platforms",
315  "Stack Trace": "stack_trace",
316  }
317  resultDict = [
318  (key, getattr(self, attr))
319  for key, attr in field_mapping.items()
320  if getattr(self, attr)
321  ]
322  resultDict.append(
323  (
324  "Working Directory",
325  RationalizePath(os.path.join(os.getcwd(), self.workdir)),
326  )
327  )
328  # print(dict(resultDict).keys())
329  resultDict.extend(self.result.annotations.items())
330  # print(self.result.annotations.keys())
331  resultDict = dict(resultDict)
332 
333  # Special cases
334  if "Validator" in resultDict:
335  resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
336  "python", escape_for_html(resultDict["Validator"])
337  )
338  return resultDict
339 
340  # -------------------------------------------------#
341  # ----------------Validating tool------------------#
342  # -------------------------------------------------#
343 
344  def ValidateOutput(self, stdout, stderr, result):
345  if not self.stderr:
346  self.validateWithReference(stdout, stderr, result, self.causes)
347  elif stderr.strip() != self.stderr.strip():
348  self.causes.append("standard error")
349  return result, self.causes
350 
351  def findReferenceBlock(
352  self,
353  reference=None,
354  stdout=None,
355  result=None,
356  causes=None,
357  signature_offset=0,
358  signature=None,
359  id=None,
360  ):
361  """
362  Given a block of text, tries to find it in the output. The block has to be identified by a signature line. By default, the first line is used as the signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as the signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as an index in a list (e.g. -1 means the last line); otherwise it is interpreted as the number of lines before the first line of the block at which the signature must appear. The parameter 'id' allows distinguishing between different calls to this function in the same validation code.
363  """
364 
365  if reference is None:
366  reference = self.reference
367  if stdout is None:
368  stdout = self.out
369  if result is None:
370  result = self.result
371  if causes is None:
372  causes = self.causes
373 
374  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
375  if not reflines:
376  raise RuntimeError("Empty (or null) reference")
377  # the same on standard output
378  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
379 
380  res_field = "GaudiTest.RefBlock"
381  if id:
382  res_field += "_%s" % id
383 
384  if signature is None:
385  if signature_offset < 0:
386  signature_offset = len(reference) + signature_offset
387  signature = reflines[signature_offset]
388  # find the reference block in the output file
389  try:
390  pos = outlines.index(signature)
391  outlines = outlines[
392  pos - signature_offset : pos + len(reflines) - signature_offset
393  ]
394  if reflines != outlines:
395  msg = "standard output"
396  # I do not want 2 messages in causes if the function is called
397  # twice
398  if not msg in causes:
399  causes.append(msg)
400  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
401  except ValueError:
402  causes.append("missing signature")
403  result[res_field + ".signature"] = result.Quote(signature)
404  if len(reflines) > 1 or signature != reflines[0]:
405  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
406  return causes
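
For illustration only (the log lines and the id are invented): a subclass can call findReferenceBlock from its ValidateOutput to require that a block of consecutive lines appears verbatim in the captured stdout, using the first line of the block as the signature.

class StartupBlockTest(BaseTest):
    def ValidateOutput(self, stdout, stderr, result):
        block = (
            "ApplicationMgr       INFO Application Manager Initialized successfully\n"
            "ApplicationMgr       INFO Application Manager Started successfully"
        )
        self.findReferenceBlock(reference=block, stdout=stdout, id="startup")
        return result, self.causes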
407 
408  def countErrorLines(
409  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
410  ):
411  """
412  Count the number of messages with required severity (by default ERROR and FATAL)
413  and check if their numbers match the expected ones (0 by default).
414  The dictionary "expected" can be used to tune the number of errors and fatals
415  allowed, or to limit the number of expected warnings etc.
416  """
417 
418  if stdout is None:
419  stdout = self.out
420  if result is None:
421  result = self.result
422  if causes is None:
423  causes = self.causes
424 
425  # prepare the dictionary to record the extracted lines
426  errors = {}
427  for sev in expected:
428  errors[sev] = []
429 
430  outlines = stdout.splitlines()
431  from math import log10
432 
433  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
434 
435  linecount = 0
436  for l in outlines:
437  linecount += 1
438  words = l.split()
439  if len(words) >= 2 and words[1] in errors:
440  errors[words[1]].append(fmt % (linecount, l.rstrip()))
441 
442  for e in errors:
443  if len(errors[e]) != expected[e]:
444  causes.append("%s(%d)" % (e, len(errors[e])))
445  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
446  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
447  str(expected[e])
448  )
449 
450  return causes
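
A hedged sketch of relaxing the default policy (the counts are invented): a test that legitimately prints two ERROR lines can declare them as expected while still failing on any FATAL line.

class NoisyTest(BaseTest):
    def ValidateOutput(self, stdout, stderr, result):
        self.countErrorLines(expected={"ERROR": 2, "FATAL": 0}, stdout=stdout)
        return result, self.causes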
451 
452  def CheckTTreesSummaries(
453  self,
454  stdout=None,
455  result=None,
456  causes=None,
457  trees_dict=None,
458  ignore=r"Basket|.*size|Compression",
459  ):
460  """
461  Compare the TTree summaries in stdout with the ones in trees_dict or in
462  the reference file. By default ignore the size, compression and basket
463  fields.
464  The presence of TTree summaries when none is expected is not a failure.
465  """
466  if stdout is None:
467  stdout = self.out
468  if result is None:
469  result = self.result
470  if causes is None:
471  causes = self.causes
472  if trees_dict is None:
473  lreference = self._expandReferenceFileName(self.reference)
474  # call the validator if the file exists
475  if lreference and os.path.isfile(lreference):
476  trees_dict = findTTreeSummaries(open(lreference).read())
477  else:
478  trees_dict = {}
479 
480  from pprint import PrettyPrinter
481 
482  pp = PrettyPrinter()
483  if trees_dict:
484  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
485  if ignore:
486  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
487 
488  trees = findTTreeSummaries(stdout)
489  failed = cmpTreesDicts(trees_dict, trees, ignore)
490  if failed:
491  causes.append("trees summaries")
492  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
493  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
494  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
495 
496  return causes
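
An illustrative use with an explicit expectation instead of a reference file; the tree name, title and entry count are invented, and the size-related fields are skipped by the default ignore pattern.

class NtupleTest(BaseTest):
    def ValidateOutput(self, stdout, stderr, result):
        expected_trees = {"Hits": {"Name": "Hits", "Title": "hit collection", "Entries": 500}}
        self.CheckTTreesSummaries(stdout=stdout, trees_dict=expected_trees)
        return result, self.causes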
497 
498  def CheckHistosSummaries(
499  self, stdout=None, result=None, causes=None, dict=None, ignore=None
500  ):
501  """
502  Compare the histogram summaries in stdout with the ones in dict or in
503  the reference file. By default ignore the size, compression and basket
504  fields.
505  The presence of histogram summaries when none is expected is not a failure.
506  """
507  if stdout is None:
508  stdout = self.out
509  if result is None:
510  result = self.result
511  if causes is None:
512  causes = self.causes
513 
514  if dict is None:
515  lreference = self._expandReferenceFileName(self.reference)
516  # call the validator if the file exists
517  if lreference and os.path.isfile(lreference):
518  dict = findHistosSummaries(open(lreference).read())
519  else:
520  dict = {}
521 
522  from pprint import PrettyPrinter
523 
524  pp = PrettyPrinter()
525  if dict:
526  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
527  if ignore:
528  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
529 
530  histos = findHistosSummaries(stdout)
531  failed = cmpTreesDicts(dict, histos, ignore)
532  if failed:
533  causes.append("histos summaries")
534  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
535  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
536  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
537 
538  return causes
539 
540  def validateWithReference(
541  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
542  ):
543  """
544  Default validation action: compare standard output and error to the
545  reference files.
546  """
547 
548  if stdout is None:
549  stdout = self.out
550  if stderr is None:
551  stderr = self.err
552  if result is None:
553  result = self.result
554  if causes is None:
555  causes = self.causes
556 
557  # set the default output preprocessor
558  if preproc is None:
559  preproc = normalizeExamples
560  # check standard output
561  lreference = self._expandReferenceFileName(self.reference)
562  # call the validator if the file exists
563  if lreference and os.path.isfile(lreference):
564  causes += ReferenceFileValidator(
565  lreference, "standard output", "Output Diff", preproc=preproc
566  )(stdout, result)
567  elif lreference:
568  causes += ["missing reference file"]
569  # Compare TTree summaries
570  causes = self.CheckTTreesSummaries(stdout, result, causes)
571  causes = self.CheckHistosSummaries(stdout, result, causes)
572  if causes and lreference: # Write a new reference file for stdout
573  try:
574  cnt = 0
575  newrefname = ".".join([lreference, "new"])
576  while os.path.exists(newrefname):
577  cnt += 1
578  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
579  newref = open(newrefname, "w")
580  # sanitize newlines
581  for l in stdout.splitlines():
582  newref.write(l.rstrip() + "\n")
583  del newref # flush and close
584  result["New Output Reference File"] = os.path.relpath(
585  newrefname, self.basedir
586  )
587  except IOError:
588  # Ignore IO errors when trying to update reference files
589  # because we may be in a read-only filesystem
590  pass
591 
592  # check standard error
593  lreference = self._expandReferenceFileName(self.error_reference)
594  # call the validator if we have a file to use
595  if lreference:
596  if os.path.isfile(lreference):
597  newcauses = ReferenceFileValidator(
598  lreference, "standard error", "Error Diff", preproc=preproc
599  )(stderr, result)
600  else:
601  newcauses = ["missing error reference file"]
602  causes += newcauses
603  if newcauses and lreference: # Write a new reference file for stderr
604  cnt = 0
605  newrefname = ".".join([lreference, "new"])
606  while os.path.exists(newrefname):
607  cnt += 1
608  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
609  newref = open(newrefname, "w")
610  # sanitize newlines
611  for l in stderr.splitlines():
612  newref.write(l.rstrip() + "\n")
613  del newref # flush and close
614  result["New Error Reference File"] = os.path.relpath(
615  newrefname, self.basedir
616  )
617  else:
618  causes += BasicOutputValidator(
619  lreference, "standard error", "ExecTest.expected_stderr"
620  )(stderr, result)
621  return causes
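
A sketch of extending the default preprocessor chain before the reference comparison; the extra masking rule is invented for illustration.

class RunNumberTest(BaseTest):
    def ValidateOutput(self, stdout, stderr, result):
        # mask a job-specific run number so it does not show up in the diff
        preproc = normalizeExamples + RegexpReplacer(r"run [0-9]+", "run NNN")
        self.validateWithReference(stdout, stderr, result, self.causes, preproc=preproc)
        return result, self.causes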
622 
623  def validateJSONWithReference(
624  self,
625  output_file,
626  reference_file,
627  result=None,
628  causes=None,
629  detailed=True,
630  ):
631  """
632  JSON validation action: compare json file to reference file
633  """
634 
635  if result is None:
636  result = self.result
637  if causes is None:
638  causes = self.causes
639 
640  if not os.path.isfile(output_file):
641  causes.append(f"output file {output_file} does not exist")
642  return causes
643 
644  try:
645  with open(output_file) as f:
646  output = json.load(f)
647  except json.JSONDecodeError as err:
648  causes.append("json parser error")
649  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
650  return causes
651 
652  lreference = self._expandReferenceFileName(reference_file)
653  if not lreference:
654  causes.append("reference file not set")
655  elif not os.path.isfile(lreference):
656  causes.append("reference file does not exist")
657  else:
658  causes += JSONOutputValidator()(lreference, output, result, detailed)
659  if causes and lreference: # Write a new reference file for output
660  try:
661  cnt = 0
662  newrefname = ".".join([lreference, "new"])
663  while os.path.exists(newrefname):
664  cnt += 1
665  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
666  with open(newrefname, "w") as newref:
667  json.dump(output, newref, indent=4)
668  result["New JSON Output Reference File"] = os.path.relpath(
669  newrefname, self.basedir
670  )
671  except IOError:
672  # Ignore IO errors when trying to update reference files
673  # because we may be in a read-only filesystem
674  pass
675  return causes
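
For a job that writes a JSON summary, a sketch of wiring it into the validation (both file names are invented).

class CountersTest(BaseTest):
    def ValidateOutput(self, stdout, stderr, result):
        self.validateJSONWithReference("counters.json", "refs/counters.ref.json")
        return result, self.causes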
676 
677  def _expandReferenceFileName(self, reffile):
678  # if no file is passed, do nothing
679  if not reffile:
680  return ""
681 
682  # function to split an extension in constituents parts
683  import re
684 
685  platformSplit = lambda p: set(re.split(r"[-+]", p))
686 
687  reference = os.path.normpath(
688  os.path.join(self.basedir, os.path.expandvars(reffile))
689  )
690 
691  # old-style platform-specific reference name
692  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
693  if os.path.isfile(spec_ref):
694  reference = spec_ref
695  else: # look for new-style platform specific reference files:
696  # get all the files whose name start with the reference filename
697  dirname, basename = os.path.split(reference)
698  if not dirname:
699  dirname = "."
700  head = basename + "."
701  head_len = len(head)
702  platform = platformSplit(GetPlatform(self))
703  if "do0" in platform:
704  platform.add("dbg")
705  candidates = []
706  for f in os.listdir(dirname):
707  if f.startswith(head):
708  req_plat = platformSplit(f[head_len:])
709  if platform.issuperset(req_plat):
710  candidates.append((len(req_plat), f))
711  if candidates: # take the one with highest matching
712  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
713  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
714  candidates.sort()
715  reference = os.path.join(dirname, candidates[-1][1])
716  return reference
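
The lookup is easiest to see on an invented example (all names below are hypothetical):

# Sketch: with GetPlatform(self) == "x86_64-el9-gcc13-dbg", a call to
# self._expandReferenceFileName("refs/job.ref") prefers an existing sibling file
# such as refs/job.ref.x86_64-dbg (two matching platform fragments) over
# refs/job.ref.dbg (one fragment), and falls back to the plain refs/job.ref
# when no platform-specific candidate exists.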
717 
718 
719 # ======= GAUDI TOOLS =======
720 
721 import calendar
722 import difflib
723 import shutil
724 import string
725 
726 try:
727  from GaudiKernel import ROOT6WorkAroundEnabled
728 except ImportError:
729 
730  def ROOT6WorkAroundEnabled(id=None):
731  # dummy implementation
732  return False
733 
734 
735 # --------------------------------- TOOLS ---------------------------------#
736 
737 
739  """
740  Function used to normalize the used path
741  """
742  newPath = os.path.normpath(os.path.expandvars(p))
743  if os.path.exists(newPath):
744  p = os.path.realpath(newPath)
745  return p
746 
747 
748 def which(executable):
749  """
750  Locates an executable in the executables path ($PATH) and returns the full
751  path to it. An application is looked for with or without the '.exe' suffix.
752  If the executable cannot be found, None is returned
753  """
754  if os.path.isabs(executable):
755  if not os.path.isfile(executable):
756  if executable.endswith(".exe"):
757  if os.path.isfile(executable[:-4]):
758  return executable[:-4]
759  else:
760  executable = os.path.split(executable)[1]
761  else:
762  return executable
763  for d in os.environ.get("PATH").split(os.pathsep):
764  fullpath = os.path.join(d, executable)
765  if os.path.isfile(fullpath):
766  return fullpath
767  elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
768  return fullpath[:-4]
769  return None
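
Example lookups (results depend on the local $PATH, so they are only indicative).

which("gaudirun.py")   # absolute path of the script if it is on $PATH, else None
which("Gaudi.exe")     # also accepts the case where only the name without '.exe' exists on disk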
770 
771 
772 # -------------------------------------------------------------------------#
773 # ----------------------------- Result Class ------------------------------#
774 # -------------------------------------------------------------------------#
775 import types
776 
777 
778 class Result:
779 
780  PASS = "PASS"
781  FAIL = "FAIL"
782  ERROR = "ERROR"
783  UNTESTED = "UNTESTED"
784 
785  EXCEPTION = ""
786  RESOURCE = ""
787  TARGET = ""
788  TRACEBACK = ""
789  START_TIME = ""
790  END_TIME = ""
791  TIMEOUT_DETAIL = ""
792 
793  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
794  self.annotations = annotations.copy()
795 
796  def __getitem__(self, key):
797  assert isinstance(key, six.string_types)
798  return self.annotations[key]
799 
800  def __setitem__(self, key, value):
801  assert isinstance(key, six.string_types)
802  assert isinstance(value, six.string_types), "{!r} is not a string".format(value)
803  self.annotations[key] = value
804 
805  def Quote(self, text):
806  """
807  Convert text to html by escaping special chars and adding <pre> tags.
808  """
809  return "<pre>{}</pre>".format(escape_for_html(text))
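
A small usage sketch (the annotation key is invented): validators attach HTML-safe annotations to the result via Quote.

r = Result()
r["MyCheck.observed"] = r.Quote("found 1 < 2 & 3 > 2")
# r["MyCheck.observed"] == "<pre>found 1 &lt; 2 &amp; 3 &gt; 2</pre>"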
810 
811 
812 # -------------------------------------------------------------------------#
813 # --------------------------- Validator Classes ---------------------------#
814 # -------------------------------------------------------------------------#
815 
816 # Basic implementation of an option validator for Gaudi test. This
817 # implementation is based on the standard (LCG) validation functions used
818 # in QMTest.
819 
820 
821 class BasicOutputValidator:
822  def __init__(self, ref, cause, result_key):
823  self.ref = ref
824  self.cause = cause
825  self.result_key = result_key
826 
827  def __call__(self, out, result):
828  """Validate the output of the program.
829  'stdout' -- A string containing the data written to the standard output
830  stream.
831  'stderr' -- A string containing the data written to the standard error
832  stream.
833  'result' -- A 'Result' object. It may be used to annotate
834  the outcome according to the content of stderr.
835  returns -- A list of strings giving causes of failure."""
836 
837  causes = []
838  # Check the output
839  if not self.__CompareText(out, self.ref):
840  causes.append(self.cause)
841  result[self.result_key] = result.Quote(self.ref)
842 
843  return causes
844 
845  def __CompareText(self, s1, s2):
846  """Compare 's1' and 's2', ignoring line endings.
847  's1' -- A string.
848  's2' -- A string.
849  returns -- True if 's1' and 's2' are the same, ignoring
850  differences in line endings."""
851  if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
852  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
853  # can fix them
854  to_ignore = re.compile(
855  r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
856  )
857 
858  def keep_line(l):
859  return not to_ignore.match(l)
860 
861  return list(filter(keep_line, s1.splitlines())) == list(
862  filter(keep_line, s2.splitlines())
863  )
864  else:
865  return s1.splitlines() == s2.splitlines()
866 
867 
868 # ------------------------ Preprocessor elements ------------------------#
870  """Base class for a callable that takes a file and returns a modified
871  version of it."""
872 
873  def __processLine__(self, line):
874  return line
875 
876  def __processFile__(self, lines):
877  output = []
878  for l in lines:
879  l = self.__processLine__(l)
880  if l:
881  output.append(l)
882  return output
883 
884  def __call__(self, input):
885  if not isinstance(input, six.string_types):
886  lines = input
887  mergeback = False
888  else:
889  lines = input.splitlines()
890  mergeback = True
891  output = self.__processFile__(lines)
892  if mergeback:
893  output = "\n".join(output)
894  return output
895 
896  def __add__(self, rhs):
897  return FilePreprocessorSequence([self, rhs])
898 
899 
900 class FilePreprocessorSequence(FilePreprocessor):
901  def __init__(self, members=[]):
902  self.members = members
903 
904  def __add__(self, rhs):
905  return FilePreprocessorSequence(self.members + [rhs])
906 
907  def __call__(self, input):
908  output = input
909  for pp in self.members:
910  output = pp(output)
911  return output
912 
913 
914 class LineSkipper(FilePreprocessor):
915  def __init__(self, strings=[], regexps=[]):
916  import re
917 
918  self.strings = strings
919  self.regexps = list(map(re.compile, regexps))
920 
921  def __processLine__(self, line):
922  for s in self.strings:
923  if line.find(s) >= 0:
924  return None
925  for r in self.regexps:
926  if r.search(line):
927  return None
928  return line
929 
930 
931 class BlockSkipper(FilePreprocessor):
932  def __init__(self, start, end):
933  self.start = start
934  self.end = end
935  self._skipping = False
936 
937  def __processLine__(self, line):
938  if self.start in line:
939  self._skipping = True
940  return None
941  elif self.end in line:
942  self._skipping = False
943  elif self._skipping:
944  return None
945  return line
946 
947 
948 class RegexpReplacer(FilePreprocessor):
949  def __init__(self, orig, repl="", when=None):
950  if when:
951  when = re.compile(when)
952  self._operations = [(when, re.compile(orig), repl)]
953 
954  def __add__(self, rhs):
955  if isinstance(rhs, RegexpReplacer):
956  res = RegexpReplacer("", "", None)
957  res._operations = self._operations + rhs._operations
958  else:
959  res = FilePreprocessor.__add__(self, rhs)
960  return res
961 
962  def __processLine__(self, line):
963  for w, o, r in self._operations:
964  if w is None or w.search(line):
965  line = o.sub(r, line)
966  return line
967 
968 
969 # Common preprocessors
970 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
971 normalizeDate = RegexpReplacer(
972  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
973  "00:00:00 1970-01-01",
974 )
975 normalizeEOL = FilePreprocessor()
976 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"
977 
978 skipEmptyLines = FilePreprocessor()
979 # FIXME: that's ugly
980 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
981 
982 # Special preprocessor sorting the list of strings (whitespace separated)
983 # that follow a signature on a single line
984 
985 
986 class LineSorter(FilePreprocessor):
987  def __init__(self, signature):
988  self.signature = signature
989  self.siglen = len(signature)
990 
991  def __processLine__(self, line):
992  pos = line.find(self.signature)
993  if pos >= 0:
994  line = line[: (pos + self.siglen)]
995  lst = line[(pos + self.siglen) :].split()
996  lst.sort()
997  line += " ".join(lst)
998  return line
999 
1000 
1002  """
1003  Sort group of lines matching a regular expression
1004  """
1005 
1006  def __init__(self, exp):
1007  self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1008 
1009  def __processFile__(self, lines):
1010  match = self.exp.match
1011  output = []
1012  group = []
1013  for l in lines:
1014  if match(l):
1015  group.append(l)
1016  else:
1017  if group:
1018  group.sort()
1019  output.extend(group)
1020  group = []
1021  output.append(l)
1022  return output
1023 
1024 
1025 # Preprocessors for GaudiExamples
1026 normalizeExamples = maskPointers + normalizeDate
1027 for w, o, r in [
1028  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
1029  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
1030  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
1031  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
1032  (
1033  "^JobOptionsSvc.*options successfully read in from",
1034  r"read in from .*[/\\]([^/\\]*)$",
1035  r"file \1",
1036  ), # normalize path to options
1037  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
1038  (
1039  None,
1040  r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1041  "00000000-0000-0000-0000-000000000000",
1042  ),
1043  # Absorb a change in ServiceLocatorHelper
1044  (
1045  "ServiceLocatorHelper::",
1046  "ServiceLocatorHelper::(create|locate)Service",
1047  "ServiceLocatorHelper::service",
1048  ),
1049  # Remove the leading 0 in Windows' exponential format
1050  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
1051  # Output line changed in Gaudi v24
1052  (None, r"Service reference count check:", r"Looping over all active services..."),
1053  # Ignore count of declared properties (anyway they are all printed)
1054  (
1055  None,
1056  r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1057  r"\1NN",
1058  ),
1059  ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
1060  (r"Property 'Name': Value", r"( = '[^']+':)'(.*)'", r"\1\2"),
1061  ("TimelineSvc", "to file 'TimelineFile':", "to file "),
1062  ("DataObjectHandleBase", r'DataObjectHandleBase\‍("([^"]*)"\‍)', r"'\1'"),
1063 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
1064  normalizeExamples += RegexpReplacer(o, r, w)
1065 
1066 lineSkipper = LineSkipper(
1067  [
1068  "//GP:",
1069  "JobOptionsSvc INFO # ",
1070  "JobOptionsSvc WARNING # ",
1071  "Time User",
1072  "Welcome to",
1073  "This machine has a speed",
1074  "running on",
1075  "ToolSvc.Sequenc... INFO",
1076  "DataListenerSvc INFO XML written to file:",
1077  "[INFO]",
1078  "[WARNING]",
1079  "DEBUG No writable file catalog found which contains FID:",
1080  "DEBUG Service base class initialized successfully",
1081  # changed between v20 and v21
1082  "DEBUG Incident timing:",
1083  # introduced with patch #3487
1084  # changed the level of the message from INFO to
1085  # DEBUG
1086  "INFO 'CnvServices':[",
1087  # message removed because could be printed in constructor
1088  "DEBUG 'CnvServices':[",
1089  # The signal handler complains about SIGXCPU not
1090  # defined on some platforms
1091  "SIGXCPU",
1092  # Message removed with redesign of JobOptionsSvc
1093  "ServiceLocatorHelper::service: found service JobOptionsSvc",
1094  # Ignore warnings for properties case mismatch
1095  "mismatching case for property name:",
1096  # Message demoted to DEBUG in gaudi/Gaudi!992
1097  "Histograms saving not required.",
1098  # Message added in gaudi/Gaudi!577
1099  "Properties are dumped into",
1100  # Messages changed in gaudi/Gaudi!1426
1101  "WARNING no ROOT output file name",
1102  "INFO Writing ROOT histograms to:",
1103  "INFO Completed update of ROOT histograms in:",
1104  ],
1105  regexps=[
1106  r"^JobOptionsSvc INFO *$",
1107  r"^# ", # Ignore python comments
1108  # skip the message reporting the version of the root file
1109  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1110  r"File '.*.xml' does not exist",
1111  r"INFO Refer to dataset .* by its file ID:",
1112  r"INFO Referring to dataset .* by its file ID:",
1113  r"INFO Disconnect from dataset",
1114  r"INFO Disconnected from dataset",
1115  r"INFO Disconnected data IO:",
1116  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1117  # Ignore StatusCodeSvc related messages
1118  r".*StatusCodeSvc.*",
1119  r".*StatusCodeCheck.*",
1120  r"Num\s*\|\s*Function\s*\|\s*Source Library",
1121  r"^[-+]*\s*$",
1122  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
1123  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1124  # Hide unchecked StatusCodes from dictionaries
1125  r"^ +[0-9]+ \|.*ROOT",
1126  r"^ +[0-9]+ \|.*\|.*Dict",
1127  # Hide EventLoopMgr total timing report
1128  r"EventLoopMgr.*---> Loop Finished",
1129  r"HiveSlimEventLo.*---> Loop Finished",
1130  # Remove ROOT TTree summary table, which changes from one version to the
1131  # other
1132  r"^\*.*\*$",
1133  # Remove Histos Summaries
1134  r"SUCCESS\s*Booked \d+ Histogram\‍(s\‍)",
1135  r"^ \|",
1136  r"^ ID=",
1137  # Ignore added/removed properties
1138  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1139  r"Property(.*)'Audit(Begin|End)Run':",
1140  # these were missing in tools
1141  r"Property(.*)'AuditRe(start|initialize)':",
1142  r"Property(.*)'Blocking':",
1143  # removed with gaudi/Gaudi!273
1144  r"Property(.*)'ErrorCount(er)?':",
1145  # added with gaudi/Gaudi!306
1146  r"Property(.*)'Sequential':",
1147  # added with gaudi/Gaudi!314
1148  r"Property(.*)'FilterCircularDependencies':",
1149  # removed with gaudi/Gaudi!316
1150  r"Property(.*)'IsClonable':",
1151  # ignore uninteresting/obsolete messages
1152  r"Property update for OutputLevel : new value =",
1153  r"EventLoopMgr\s*DEBUG Creating OutputStream",
1154  ],
1155 )
1156 
1157 if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
1158  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
1159  # fix them
1160  lineSkipper += LineSkipper(
1161  regexps=[
1162  r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1163  ]
1164  )
1165 
1166 normalizeExamples = (
1167  lineSkipper
1168  + normalizeExamples
1169  + skipEmptyLines
1170  + normalizeEOL
1171  + LineSorter("Services to release : ")
1172  + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
1173 )
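
A minimal sketch of the composed chain in action (the log line is invented): volatile tokens such as pointer values are masked so that reference diffs stay stable across runs.

print(normalizeExamples("MyAlg            INFO buffer allocated at 0x7f3a9c2b1de0"))
# the address is printed as 0x########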
1174 
1175 # --------------------- Validation functions/classes ---------------------#
1176 
1177 
1178 class ReferenceFileValidator:
1179  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1180  self.reffile = os.path.expandvars(reffile)
1181  self.cause = cause
1182  self.result_key = result_key
1183  self.preproc = preproc
1184 
1185  def __call__(self, stdout, result):
1186  causes = []
1187  if os.path.isfile(self.reffile):
1188  orig = open(self.reffile).readlines()
1189  if self.preproc:
1190  orig = self.preproc(orig)
1191  result[self.result_key + ".preproc.orig"] = result.Quote(
1192  "\n".join(map(str.strip, orig))
1193  )
1194  else:
1195  orig = []
1196  new = stdout.splitlines()
1197  if self.preproc:
1198  new = self.preproc(new)
1199 
1200  filterdiffs = list(
1201  difflib.unified_diff(
1202  orig, new, n=1, fromfile="Reference file", tofile="Actual output"
1203  )
1204  )
1205  if filterdiffs:
1206  result[self.result_key] = result.Quote("".join(filterdiffs))
1207  result[self.result_key + ".preproc.new"] = result.Quote(
1208  "\n".join(map(str.strip, new))
1209  )
1210  causes.append(self.cause)
1211  return causes
1212 
1213 
1215  """
1216  Scan stdout to find ROOT TTree summaries and digest them.
1217  """
1218  stars = re.compile(r"^\*+$")
1219  outlines = stdout.splitlines()
1220  nlines = len(outlines)
1221  trees = {}
1222 
1223  i = 0
1224  while i < nlines: # loop over the output
1225  # look for
1226  while i < nlines and not stars.match(outlines[i]):
1227  i += 1
1228  if i < nlines:
1229  tree, i = _parseTTreeSummary(outlines, i)
1230  if tree:
1231  trees[tree["Name"]] = tree
1232 
1233  return trees
1234 
1235 
1236 def cmpTreesDicts(reference, to_check, ignore=None):
1237  """
1238  Check that all the keys in reference are in to_check too, with the same value.
1239  If the value is a dict, the function is called recursively. to_check can
1240  contain more keys than reference, that will not be tested.
1241  The function returns at the first difference found.
1242  """
1243  fail_keys = []
1244  # filter the keys in the reference dictionary
1245  if ignore:
1246  ignore_re = re.compile(ignore)
1247  keys = [key for key in reference if not ignore_re.match(key)]
1248  else:
1249  keys = reference.keys()
1250  # loop over the keys (not ignored) in the reference dictionary
1251  for k in keys:
1252  if k in to_check: # the key must be in the dictionary to_check
1253  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1254  # if both reference and to_check values are dictionaries,
1255  # recurse
1256  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
1257  else:
1258  # compare the two values
1259  failed = to_check[k] != reference[k]
1260  else: # handle missing keys in the dictionary to check (i.e. failure)
1261  to_check[k] = None
1262  failed = True
1263  if failed:
1264  fail_keys.insert(0, k)
1265  break # exit from the loop at the first failure
1266  return fail_keys # return the list of keys bringing to the different values
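
A worked example with invented dictionaries: the entry count differs, so the failing key path is returned and can be fed to getCmpFailingValues below.

ref = {"mytree": {"Entries": 100, "Total size": 1234}}
obs = {"mytree": {"Entries": 99, "Total size": 1234}}
failed = cmpTreesDicts(ref, obs, ignore=r"Basket|.*size|Compression")
# failed == ["mytree", "Entries"]; getCmpFailingValues(ref, obs, failed) gives
# (["mytree", "Entries"], 100, 99), i.e. the failing path, the reference value
# and the observed value.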
1267 
1268 
1269 def getCmpFailingValues(reference, to_check, fail_path):
1270  c = to_check
1271  r = reference
1272  for k in fail_path:
1273  c = c.get(k, None)
1274  r = r.get(k, None)
1275  if c is None or r is None:
1276  break # one of the dictionaries is not deep enough
1277  return (fail_path, r, c)
1278 
1279 
1280 # signature of the print-out of the histograms
1281 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1282 
1283 
1284 def _parseTTreeSummary(lines, pos):
1285  """
1286  Parse the TTree summary table in lines, starting from pos.
1287  Returns a tuple with the dictionary of the digested information and the
1288  position of the first line after the summary.
1289  """
1290  result = {}
1291  i = pos + 1 # first line is a sequence of '*'
1292  count = len(lines)
1293 
1294  def splitcols(l):
1295  return [f.strip() for f in l.strip("*\n").split(":", 2)]
1296 
1297  def parseblock(ll):
1298  r = {}
1299  cols = splitcols(ll[0])
1300  r["Name"], r["Title"] = cols[1:]
1301 
1302  cols = splitcols(ll[1])
1303  r["Entries"] = int(cols[1])
1304 
1305  sizes = cols[2].split()
1306  r["Total size"] = int(sizes[2])
1307  if sizes[-1] == "memory":
1308  r["File size"] = 0
1309  else:
1310  r["File size"] = int(sizes[-1])
1311 
1312  cols = splitcols(ll[2])
1313  sizes = cols[2].split()
1314  if cols[0] == "Baskets":
1315  r["Baskets"] = int(cols[1])
1316  r["Basket size"] = int(sizes[2])
1317  r["Compression"] = float(sizes[-1])
1318  return r
1319 
1320  if i < (count - 3) and lines[i].startswith("*Tree"):
1321  result = parseblock(lines[i : i + 3])
1322  result["Branches"] = {}
1323  i += 4
1324  while i < (count - 3) and lines[i].startswith("*Br"):
1325  if i < (count - 2) and lines[i].startswith("*Branch "):
1326  # skip branch header
1327  i += 3
1328  continue
1329  branch = parseblock(lines[i : i + 3])
1330  result["Branches"][branch["Name"]] = branch
1331  i += 4
1332 
1333  return (result, i)
1334 
1335 
1336 def parseHistosSummary(lines, pos):
1337  """
1338  Extract the histogram info from the lines starting at pos.
1339  Returns the position of the first line after the summary block.
1340  """
1341  global h_count_re
1342  h_table_head = re.compile(
1343  r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1344  )
1345  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1346 
1347  nlines = len(lines)
1348 
1349  # decode header
1350  m = h_count_re.search(lines[pos])
1351  name = m.group(1).strip()
1352  total = int(m.group(2))
1353  header = {}
1354  for k, v in [x.split("=") for x in m.group(3).split()]:
1355  header[k] = int(v)
1356  pos += 1
1357  header["Total"] = total
1358 
1359  summ = {}
1360  while pos < nlines:
1361  m = h_table_head.search(lines[pos])
1362  if m:
1363  t, d = m.groups(1) # type and directory
1364  t = t.replace(" profile", "Prof")
1365  pos += 1
1366  if pos < nlines:
1367  l = lines[pos]
1368  else:
1369  l = ""
1370  cont = {}
1371  if l.startswith(" | ID"):
1372  # table format
1373  titles = [x.strip() for x in l.split("|")][1:]
1374  pos += 1
1375  while pos < nlines and lines[pos].startswith(" |"):
1376  l = lines[pos]
1377  values = [x.strip() for x in l.split("|")][1:]
1378  hcont = {}
1379  for i in range(len(titles)):
1380  hcont[titles[i]] = values[i]
1381  cont[hcont["ID"]] = hcont
1382  pos += 1
1383  elif l.startswith(" ID="):
1384  while pos < nlines and lines[pos].startswith(" ID="):
1385  values = [
1386  x.strip() for x in h_short_summ.search(lines[pos]).groups()
1387  ]
1388  cont[values[0]] = values
1389  pos += 1
1390  else: # not interpreted
1391  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
1392  if not d in summ:
1393  summ[d] = {}
1394  summ[d][t] = cont
1395  summ[d]["header"] = header
1396  else:
1397  break
1398  if not summ:
1399  # If the full table is not present, we use only the header
1400  summ[name] = {"header": header}
1401  return summ, pos
1402 
1403 
1405  """
1406  Scan stdout to find ROOT TTree summaries and digest them.
1407  """
1408  outlines = stdout.splitlines()
1409  nlines = len(outlines) - 1
1410  summaries = {}
1411  global h_count_re
1412 
1413  pos = 0
1414  while pos < nlines:
1415  summ = {}
1416  # find first line of block:
1417  match = h_count_re.search(outlines[pos])
1418  while pos < nlines and not match:
1419  pos += 1
1420  match = h_count_re.search(outlines[pos])
1421  if match:
1422  summ, pos = parseHistosSummary(outlines, pos)
1423  summaries.update(summ)
1424  return summaries
1425 
1426 
1427 def GetPlatform(self):
1428  """
1429  Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.
1430  """
1431  arch = "None"
1432  # check architecture name
1433  if "BINARY_TAG" in os.environ:
1434  arch = os.environ["BINARY_TAG"]
1435  elif "CMTCONFIG" in os.environ:
1436  arch = os.environ["CMTCONFIG"]
1437  elif "SCRAM_ARCH" in os.environ:
1438  arch = os.environ["SCRAM_ARCH"]
1439  elif os.environ.get("ENV_CMAKE_BUILD_TYPE", "") in (
1440  "Debug",
1441  "FastDebug",
1442  "Developer",
1443  ):
1444  arch = "dummy-dbg"
1445  elif os.environ.get("ENV_CMAKE_BUILD_TYPE", "") in (
1446  "Release",
1447  "MinSizeRel",
1448  "RelWithDebInfo",
1449  "",
1450  ): # RelWithDebInfo == -O2 -g -DNDEBUG
1451  arch = "dummy-opt"
1452  return arch
1453 
1454 
1455 def isWinPlatform(self):
1456  """
1457  Return True if the current platform is Windows.
1458 
1459  This function was needed because of the change in the CMTCONFIG format,
1460  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1461  """
1462  platform = GetPlatform(self)
1463  return "winxp" in platform or platform.startswith("win")
1464 
1465 
1466 class JSONOutputValidator:
1467  def __call__(self, ref, out, result, detailed=True):
1468  """Validate JSON output.
1469  returns -- A list of strings giving causes of failure."""
1470 
1471  causes = []
1472  try:
1473  with open(ref) as f:
1474  expected = json.load(f)
1475  except json.JSONDecodeError as err:
1476  causes.append("json parser error")
1477  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1478  return causes
1479 
1480  if not detailed:
1481  if expected != out:
1482  causes.append("json content")
1483  result["json_diff"] = "detailed diff was turned off"
1484  return causes
1485 
1486  # piggyback on TestCase dict diff report
1487  t = TestCase()
1488  try:
1489  t.assertEqual(expected, out)
1490  except AssertionError as err:
1491  causes.append("json content")
1492  result["json_diff"] = str(err).splitlines()[0]
1493 
1494  return causes