Processing math: 100%
The Gaudi Framework  v36r16 (ea80daf8)
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
BaseTest.py
Go to the documentation of this file.
1 
11 
12 import json
13 import logging
14 import os
15 import platform
16 import re
17 import signal
18 import sys
19 import tempfile
20 import threading
21 import time
22 from subprocess import PIPE, STDOUT, Popen
23 from unittest import TestCase
24 
25 try:
26  from html import escape as escape_for_html
27 except ImportError: # Python2
28  from cgi import escape as escape_for_html
29 
30 import six
31 
32 if sys.version_info < (3, 5):
33  # backport of 'backslashreplace' handling of UnicodeDecodeError
34  # to Python < 3.5
35  from codecs import backslashreplace_errors, register_error
36 
38  if isinstance(exc, UnicodeDecodeError):
39  code = hex(ord(exc.object[exc.start]))
40  return ("\\" + code[1:], exc.start + 1)
41  else:
42  return backslashreplace_errors(exc)
43 
44  register_error("backslashreplace", _new_backslashreplace_errors)
45  del register_error
46  del backslashreplace_errors
47  del _new_backslashreplace_errors
48 
49 SKIP_RETURN_CODE = 77
50 
51 
52 def sanitize_for_xml(data):
53  """
54  Take a string with invalid ASCII/UTF characters and quote them so that the
55  string can be used in an XML text.
56 
57  >>> sanitize_for_xml('this is \x1b')
58  'this is [NON-XML-CHAR-0x1B]'
59  """
60  bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")
61 
62  def quote(match):
63  "helper function"
64  return "".join("[NON-XML-CHAR-0x%2X]" % ord(c) for c in match.group())
65 
66  return bad_chars.sub(quote, data)
67 
68 
69 def dumpProcs(name):
70  """helper to debug GAUDI-1084, dump the list of processes"""
71  from getpass import getuser
72 
73  if "WORKSPACE" in os.environ:
74  p = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE)
75  with open(os.path.join(os.environ["WORKSPACE"], name), "wb") as f:
76  f.write(p.communicate()[0])
77 
78 
79 def kill_tree(ppid, sig):
80  """
81  Send a signal to a process and all its child processes (starting from the
82  leaves).
83  """
84  log = logging.getLogger("kill_tree")
85  ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
86  # Note: start in a clean env to avoid a freeze with libasan.so
87  # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
88  get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
89  children = map(int, get_children.communicate()[0].split())
90  for child in children:
91  kill_tree(child, sig)
92  try:
93  log.debug("killing process %d", ppid)
94  os.kill(ppid, sig)
95  except OSError as err:
96  if err.errno != 3: # No such process
97  raise
98  log.debug("no such process %d", ppid)
99 
100 
101 # -------------------------------------------------------------------------#
102 
103 
104 class BaseTest(object):
105 
106  _common_tmpdir = None
107 
108  def __init__(self):
109  self.program = ""
110  self.args = []
111  self.reference = ""
112  self.error_reference = ""
113  self.options = ""
114  self.stderr = ""
115  self.timeout = 600
116  self.exit_code = None
117  self.environment = dict(os.environ)
119  self.signal = None
120  self.workdir = os.curdir
121  self.use_temp_dir = False
122  # Variables not for users
123  self.status = None
124  self.name = ""
125  self.causes = []
126  self.result = Result(self)
127  self.returnedCode = 0
128  self.out = ""
129  self.err = ""
130  self.proc = None
131  self.stack_trace = None
132  self.basedir = os.getcwd()
133  self.validate_time = None
134 
135  def run(self):
136  logging.debug("running test %s", self.name)
137 
138  self.result = Result(
139  {
140  "CAUSE": None,
141  "EXCEPTION": None,
142  "RESOURCE": None,
143  "TARGET": None,
144  "TRACEBACK": None,
145  "START_TIME": None,
146  "END_TIME": None,
147  "TIMEOUT_DETAIL": None,
148  }
149  )
150 
151  if self.options:
152  if re.search(
153  r"from\s+Gaudi.Configuration\s+import\s+\*|"
154  r"from\s+Configurables\s+import",
155  self.options,
156  ):
157  suffix, lang = ".py", "python"
158  else:
159  suffix, lang = ".opts", "c++"
160  self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
161  lang, escape_for_html(self.options)
162  )
163  optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
164  optionFile.file.write(self.options.encode("utf-8"))
165  optionFile.seek(0)
166  self.args.append(RationalizePath(optionFile.name))
167 
168  platform_id = (
169  self.environment.get("BINARY_TAG")
170  or self.environment.get("CMTCONFIG")
171  or platform.platform()
172  )
173  # If at least one regex matches we skip the test.
174  skip_test = bool(
175  [
176  None
177  for prex in self.unsupported_platforms
178  if re.search(prex, platform_id)
179  ]
180  )
181 
182  if not skip_test:
183  # handle working/temporary directory options
184  workdir = self.workdir
185  if self.use_temp_dir:
186  if self._common_tmpdir:
187  workdir = self._common_tmpdir
188  else:
189  workdir = tempfile.mkdtemp()
190 
191  # prepare the command to execute
192  prog = ""
193  if self.program != "":
194  prog = self.program
195  elif "GAUDIEXE" in self.environment:
196  prog = self.environment["GAUDIEXE"]
197  else:
198  prog = "Gaudi.exe"
199 
200  prog_ext = os.path.splitext(prog)[1]
201  if prog_ext not in [".exe", ".py", ".bat"]:
202  prog += ".exe"
203  prog_ext = ".exe"
204 
205  prog = which(prog) or prog
206 
207  args = list(map(RationalizePath, self.args))
208 
209  if prog_ext == ".py":
210  params = ["python3", RationalizePath(prog)] + args
211  else:
212  params = [RationalizePath(prog)] + args
213 
214  # we need to switch directory because the validator expects to run
215  # in the same dir as the program
216  os.chdir(workdir)
217 
218  # launching test in a different thread to handle timeout exception
219  def target():
220  logging.debug("executing %r in %s", params, workdir)
221  self.proc = Popen(
222  params, stdout=PIPE, stderr=PIPE, env=self.environment
223  )
224  logging.debug("(pid: %d)", self.proc.pid)
225  out, err = self.proc.communicate()
226  self.out = out.decode("utf-8", errors="backslashreplace")
227  self.err = err.decode("utf-8", errors="backslashreplace")
228 
229  thread = threading.Thread(target=target)
230  thread.start()
231  # catching timeout
232  thread.join(self.timeout)
233 
234  if thread.is_alive():
235  logging.debug("time out in test %s (pid %d)", self.name, self.proc.pid)
236  # get the stack trace of the stuck process
237  cmd = [
238  "gdb",
239  "--pid",
240  str(self.proc.pid),
241  "--batch",
242  "--eval-command=thread apply all backtrace",
243  ]
244  gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
245  self.stack_trace = gdb.communicate()[0].decode(
246  "utf-8", errors="backslashreplace"
247  )
248 
249  kill_tree(self.proc.pid, signal.SIGTERM)
250  thread.join(60)
251  if thread.is_alive():
252  kill_tree(self.proc.pid, signal.SIGKILL)
253  self.causes.append("timeout")
254  else:
255  self.returnedCode = self.proc.returncode
256  if self.returnedCode != SKIP_RETURN_CODE:
257  logging.debug(
258  f"completed test {self.name} with returncode = {self.returnedCode}"
259  )
260  logging.debug("validating test...")
261  val_start_time = time.perf_counter()
262  self.result, self.causes = self.ValidateOutput(
263  stdout=self.out, stderr=self.err, result=self.result
264  )
265  self.validate_time = round(time.perf_counter() - val_start_time, 2)
266  else:
267  logging.debug(f"skipped test {self.name}")
268  self.status = "skipped"
269 
270  # remove the temporary directory if we created it
271  if self.use_temp_dir and not self._common_tmpdir:
272  shutil.rmtree(workdir, True)
273 
274  os.chdir(self.basedir)
275 
276  if self.status != "skipped":
277  # handle application exit code
278  if self.signal is not None:
279  if int(self.returnedCode) != -int(self.signal):
280  self.causes.append("exit code")
281 
282  elif self.exit_code is not None:
283  if int(self.returnedCode) != int(self.exit_code):
284  self.causes.append("exit code")
285 
286  elif self.returnedCode != 0:
287  self.causes.append("exit code")
288 
289  if self.causes:
290  self.status = "failed"
291  else:
292  self.status = "passed"
293 
294  else:
295  self.status = "skipped"
296 
297  logging.debug("%s: %s", self.name, self.status)
298  field_mapping = {
299  "Exit Code": "returnedCode",
300  "stderr": "err",
301  "Arguments": "args",
302  "Runtime Environment": "environment",
303  "Status": "status",
304  "stdout": "out",
305  "Program Name": "program",
306  "Name": "name",
307  "Validator": "validator",
308  "Validation execution time": "validate_time",
309  "Output Reference File": "reference",
310  "Error Reference File": "error_reference",
311  "Causes": "causes",
312  # 'Validator Result': 'result.annotations',
313  "Unsupported Platforms": "unsupported_platforms",
314  "Stack Trace": "stack_trace",
315  }
316  resultDict = [
317  (key, getattr(self, attr))
318  for key, attr in field_mapping.items()
319  if getattr(self, attr)
320  ]
321  resultDict.append(
322  (
323  "Working Directory",
324  RationalizePath(os.path.join(os.getcwd(), self.workdir)),
325  )
326  )
327  # print(dict(resultDict).keys())
328  resultDict.extend(self.result.annotations.items())
329  # print(self.result.annotations.keys())
330  resultDict = dict(resultDict)
331 
332  # Special cases
333  if "Validator" in resultDict:
334  resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
335  "python", escape_for_html(resultDict["Validator"])
336  )
337  return resultDict
338 
339  # -------------------------------------------------#
340  # ----------------Validating tool------------------#
341  # -------------------------------------------------#
342 
343  def ValidateOutput(self, stdout, stderr, result):
344  if not self.stderr:
345  self.validateWithReference(stdout, stderr, result, self.causes)
346  elif stderr.strip() != self.stderr.strip():
347  self.causes.append("standard error")
348  return result, self.causes
349 
351  self,
352  reference=None,
353  stdout=None,
354  result=None,
355  causes=None,
356  signature_offset=0,
357  signature=None,
358  id=None,
359  ):
360  """
361  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
362  """
363 
364  if reference is None:
365  reference = self.reference
366  if stdout is None:
367  stdout = self.out
368  if result is None:
369  result = self.result
370  if causes is None:
371  causes = self.causes
372 
373  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
374  if not reflines:
375  raise RuntimeError("Empty (or null) reference")
376  # the same on standard output
377  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
378 
379  res_field = "GaudiTest.RefBlock"
380  if id:
381  res_field += "_%s" % id
382 
383  if signature is None:
384  if signature_offset < 0:
385  signature_offset = len(reference) + signature_offset
386  signature = reflines[signature_offset]
387  # find the reference block in the output file
388  try:
389  pos = outlines.index(signature)
390  outlines = outlines[
391  pos - signature_offset : pos + len(reflines) - signature_offset
392  ]
393  if reflines != outlines:
394  msg = "standard output"
395  # I do not want 2 messages in causes if the function is called
396  # twice
397  if msg not in causes:
398  causes.append(msg)
399  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
400  except ValueError:
401  causes.append("missing signature")
402  result[res_field + ".signature"] = result.Quote(signature)
403  if len(reflines) > 1 or signature != reflines[0]:
404  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
405  return causes
406 
408  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
409  ):
410  """
411  Count the number of messages with required severity (by default ERROR and FATAL)
412  and check if their numbers match the expected ones (0 by default).
413  The dictionary "expected" can be used to tune the number of errors and fatals
414  allowed, or to limit the number of expected warnings etc.
415  """
416 
417  if stdout is None:
418  stdout = self.out
419  if result is None:
420  result = self.result
421  if causes is None:
422  causes = self.causes
423 
424  # prepare the dictionary to record the extracted lines
425  errors = {}
426  for sev in expected:
427  errors[sev] = []
428 
429  outlines = stdout.splitlines()
430  from math import log10
431 
432  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
433 
434  linecount = 0
435  for l in outlines:
436  linecount += 1
437  words = l.split()
438  if len(words) >= 2 and words[1] in errors:
439  errors[words[1]].append(fmt % (linecount, l.rstrip()))
440 
441  for e in errors:
442  if len(errors[e]) != expected[e]:
443  causes.append("%s(%d)" % (e, len(errors[e])))
444  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
445  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
446  str(expected[e])
447  )
448 
449  return causes
450 
452  self,
453  stdout=None,
454  result=None,
455  causes=None,
456  trees_dict=None,
457  ignore=r"Basket|.*size|Compression",
458  ):
459  """
460  Compare the TTree summaries in stdout with the ones in trees_dict or in
461  the reference file. By default ignore the size, compression and basket
462  fields.
463  The presence of TTree summaries when none is expected is not a failure.
464  """
465  if stdout is None:
466  stdout = self.out
467  if result is None:
468  result = self.result
469  if causes is None:
470  causes = self.causes
471  if trees_dict is None:
472  lreference = self._expandReferenceFileName(self.reference)
473  # call the validator if the file exists
474  if lreference and os.path.isfile(lreference):
475  trees_dict = findTTreeSummaries(open(lreference).read())
476  else:
477  trees_dict = {}
478 
479  from pprint import PrettyPrinter
480 
481  pp = PrettyPrinter()
482  if trees_dict:
483  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
484  if ignore:
485  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
486 
487  trees = findTTreeSummaries(stdout)
488  failed = cmpTreesDicts(trees_dict, trees, ignore)
489  if failed:
490  causes.append("trees summaries")
491  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
492  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
493  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
494 
495  return causes
496 
498  self, stdout=None, result=None, causes=None, dict=None, ignore=None
499  ):
500  """
501  Compare the TTree summaries in stdout with the ones in trees_dict or in
502  the reference file. By default ignore the size, compression and basket
503  fields.
504  The presence of TTree summaries when none is expected is not a failure.
505  """
506  if stdout is None:
507  stdout = self.out
508  if result is None:
509  result = self.result
510  if causes is None:
511  causes = self.causes
512 
513  if dict is None:
514  lreference = self._expandReferenceFileName(self.reference)
515  # call the validator if the file exists
516  if lreference and os.path.isfile(lreference):
517  dict = findHistosSummaries(open(lreference).read())
518  else:
519  dict = {}
520 
521  from pprint import PrettyPrinter
522 
523  pp = PrettyPrinter()
524  if dict:
525  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
526  if ignore:
527  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
528 
529  histos = findHistosSummaries(stdout)
530  failed = cmpTreesDicts(dict, histos, ignore)
531  if failed:
532  causes.append("histos summaries")
533  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
534  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
535  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
536 
537  return causes
538 
540  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
541  ):
542  """
543  Default validation acti*on: compare standard output and error to the
544  reference files.
545  """
546 
547  if stdout is None:
548  stdout = self.out
549  if stderr is None:
550  stderr = self.err
551  if result is None:
552  result = self.result
553  if causes is None:
554  causes = self.causes
555 
556  # set the default output preprocessor
557  if preproc is None:
558  preproc = normalizeExamples
559  # check standard output
560  lreference = self._expandReferenceFileName(self.reference)
561  # call the validator if the file exists
562  if lreference and os.path.isfile(lreference):
563  causes += ReferenceFileValidator(
564  lreference, "standard output", "Output Diff", preproc=preproc
565  )(stdout, result)
566  elif lreference:
567  causes += ["missing reference file"]
568  # Compare TTree summaries
569  causes = self.CheckTTreesSummaries(stdout, result, causes)
570  causes = self.CheckHistosSummaries(stdout, result, causes)
571  if causes and lreference: # Write a new reference file for stdout
572  try:
573  cnt = 0
574  newrefname = ".".join([lreference, "new"])
575  while os.path.exists(newrefname):
576  cnt += 1
577  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
578  newref = open(newrefname, "w")
579  # sanitize newlines
580  for l in stdout.splitlines():
581  newref.write(l.rstrip() + "\n")
582  del newref # flush and close
583  result["New Output Reference File"] = os.path.relpath(
584  newrefname, self.basedir
585  )
586  except IOError:
587  # Ignore IO errors when trying to update reference files
588  # because we may be in a read-only filesystem
589  pass
590 
591  # check standard error
592  lreference = self._expandReferenceFileName(self.error_reference)
593  # call the validator if we have a file to use
594  if lreference:
595  if os.path.isfile(lreference):
596  newcauses = ReferenceFileValidator(
597  lreference, "standard error", "Error Diff", preproc=preproc
598  )(stderr, result)
599  else:
600  newcauses = ["missing error reference file"]
601  causes += newcauses
602  if newcauses and lreference: # Write a new reference file for stdedd
603  cnt = 0
604  newrefname = ".".join([lreference, "new"])
605  while os.path.exists(newrefname):
606  cnt += 1
607  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
608  newref = open(newrefname, "w")
609  # sanitize newlines
610  for l in stderr.splitlines():
611  newref.write(l.rstrip() + "\n")
612  del newref # flush and close
613  result["New Error Reference File"] = os.path.relpath(
614  newrefname, self.basedir
615  )
616  else:
617  causes += BasicOutputValidator(
618  lreference, "standard error", "ExecTest.expected_stderr"
619  )(stderr, result)
620  return causes
621 
623  self,
624  output_file,
625  reference_file,
626  result=None,
627  causes=None,
628  detailed=True,
629  ):
630  """
631  JSON validation action: compare json file to reference file
632  """
633 
634  if result is None:
635  result = self.result
636  if causes is None:
637  causes = self.causes
638 
639  if not os.path.isfile(output_file):
640  causes.append(f"output file {output_file} does not exist")
641  return causes
642 
643  try:
644  with open(output_file) as f:
645  output = json.load(f)
646  except json.JSONDecodeError as err:
647  causes.append("json parser error")
648  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
649  return causes
650 
651  lreference = self._expandReferenceFileName(reference_file)
652  if not lreference:
653  causes.append("reference file not set")
654  elif not os.path.isfile(lreference):
655  causes.append("reference file does not exist")
656  else:
657  causes += JSONOutputValidator()(lreference, output, result, detailed)
658  if causes and lreference: # Write a new reference file for output
659  try:
660  cnt = 0
661  newrefname = ".".join([lreference, "new"])
662  while os.path.exists(newrefname):
663  cnt += 1
664  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
665  with open(newrefname, "w") as newref:
666  json.dump(output, newref, indent=4)
667  result["New JSON Output Reference File"] = os.path.relpath(
668  newrefname, self.basedir
669  )
670  except IOError:
671  # Ignore IO errors when trying to update reference files
672  # because we may be in a read-only filesystem
673  pass
674  return causes
675 
676  def _expandReferenceFileName(self, reffile):
677  # if no file is passed, do nothing
678  if not reffile:
679  return ""
680 
681  # function to split an extension in constituents parts
682  import re
683 
684  def platformSplit(p):
685  return set(re.split(r"[-+]", p))
686 
687  reference = os.path.normpath(
688  os.path.join(self.basedir, os.path.expandvars(reffile))
689  )
690 
691  # old-style platform-specific reference name
692  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
693  if os.path.isfile(spec_ref):
694  reference = spec_ref
695  else: # look for new-style platform specific reference files:
696  # get all the files whose name start with the reference filename
697  dirname, basename = os.path.split(reference)
698  if not dirname:
699  dirname = "."
700  head = basename + "."
701  head_len = len(head)
702  platform = platformSplit(GetPlatform(self))
703  if "do0" in platform:
704  platform.add("dbg")
705  candidates = []
706  for f in os.listdir(dirname):
707  if f.startswith(head):
708  req_plat = platformSplit(f[head_len:])
709  if platform.issuperset(req_plat):
710  candidates.append((len(req_plat), f))
711  if candidates: # take the one with highest matching
712  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
713  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
714  candidates.sort()
715  reference = os.path.join(dirname, candidates[-1][1])
716  return reference
717 
718 
719 # ======= GAUDI TOOLS =======
720 
721 import difflib
722 import shutil
723 
724 try:
725  from GaudiKernel import ROOT6WorkAroundEnabled
726 except ImportError:
727 
729  # dummy implementation
730  return False
731 
732 
733 # --------------------------------- TOOLS ---------------------------------#
734 
735 
737  """
738  Function used to normalize the used path
739  """
740  newPath = os.path.normpath(os.path.expandvars(p))
741  if os.path.exists(newPath):
742  p = os.path.realpath(newPath)
743  return p
744 
745 
746 def which(executable):
747  """
748  Locates an executable in the executables path ($PATH) and returns the full
749  path to it. An application is looked for with or without the '.exe' suffix.
750  If the executable cannot be found, None is returned
751  """
752  if os.path.isabs(executable):
753  if not os.path.isfile(executable):
754  if executable.endswith(".exe"):
755  if os.path.isfile(executable[:-4]):
756  return executable[:-4]
757  else:
758  executable = os.path.split(executable)[1]
759  else:
760  return executable
761  for d in os.environ.get("PATH").split(os.pathsep):
762  fullpath = os.path.join(d, executable)
763  if os.path.isfile(fullpath):
764  return fullpath
765  elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
766  return fullpath[:-4]
767  return None
768 
769 
770 # -------------------------------------------------------------------------#
771 # ----------------------------- Result Classe -----------------------------#
772 # -------------------------------------------------------------------------#
773 
774 
775 class Result:
776 
777  PASS = "PASS"
778  FAIL = "FAIL"
779  ERROR = "ERROR"
780  UNTESTED = "UNTESTED"
781 
782  EXCEPTION = ""
783  RESOURCE = ""
784  TARGET = ""
785  TRACEBACK = ""
786  START_TIME = ""
787  END_TIME = ""
788  TIMEOUT_DETAIL = ""
789 
790  def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
791  self.annotations = annotations.copy()
792 
793  def __getitem__(self, key):
794  assert isinstance(key, six.string_types)
795  return self.annotations[key]
796 
797  def __setitem__(self, key, value):
798  assert isinstance(key, six.string_types)
799  assert isinstance(value, six.string_types), "{!r} is not a string".format(value)
800  self.annotations[key] = value
801 
802  def Quote(self, text):
803  """
804  Convert text to html by escaping special chars and adding <pre> tags.
805  """
806  return "<pre>{}</pre>".format(escape_for_html(text))
807 
808 
809 # -------------------------------------------------------------------------#
810 # --------------------------- Validator Classes ---------------------------#
811 # -------------------------------------------------------------------------#
812 
813 # Basic implementation of an option validator for Gaudi test. This
814 # implementation is based on the standard (LCG) validation functions used
815 # in QMTest.
816 
817 
819  def __init__(self, ref, cause, result_key):
820  self.ref = ref
821  self.cause = cause
822  self.result_key = result_key
823 
824  def __call__(self, out, result):
825  """Validate the output of the program.
826  'stdout' -- A string containing the data written to the standard output
827  stream.
828  'stderr' -- A string containing the data written to the standard error
829  stream.
830  'result' -- A 'Result' object. It may be used to annotate
831  the outcome according to the content of stderr.
832  returns -- A list of strings giving causes of failure."""
833 
834  causes = []
835  # Check the output
836  if not self.__CompareText(out, self.ref):
837  causes.append(self.cause)
838  result[self.result_key] = result.Quote(self.ref)
839 
840  return causes
841 
842  def __CompareText(self, s1, s2):
843  """Compare 's1' and 's2', ignoring line endings.
844  's1' -- A string.
845  's2' -- A string.
846  returns -- True if 's1' and 's2' are the same, ignoring
847  differences in line endings."""
848  if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
849  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
850  # can fix them
851  to_ignore = re.compile(
852  r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
853  )
854 
855  def keep_line(l):
856  return not to_ignore.match(l)
857 
858  return list(filter(keep_line, s1.splitlines())) == list(
859  filter(keep_line, s2.splitlines())
860  )
861  else:
862  return s1.splitlines() == s2.splitlines()
863 
864 
865 # ------------------------ Preprocessor elements ------------------------#
867  """Base class for a callable that takes a file and returns a modified
868  version of it."""
869 
870  def __processLine__(self, line):
871  return line
872 
873  def __processFile__(self, lines):
874  output = []
875  for l in lines:
876  l = self.__processLine__(l)
877  if l:
878  output.append(l)
879  return output
880 
881  def __call__(self, input):
882  if not isinstance(input, six.string_types):
883  lines = input
884  mergeback = False
885  else:
886  lines = input.splitlines()
887  mergeback = True
888  output = self.__processFile__(lines)
889  if mergeback:
890  output = "\n".join(output)
891  return output
892 
893  def __add__(self, rhs):
894  return FilePreprocessorSequence([self, rhs])
895 
896 
898  def __init__(self, members=[]):
899  self.members = members
900 
901  def __add__(self, rhs):
902  return FilePreprocessorSequence(self.members + [rhs])
903 
904  def __call__(self, input):
905  output = input
906  for pp in self.members:
907  output = pp(output)
908  return output
909 
910 
912  def __init__(self, strings=[], regexps=[]):
913  import re
914 
915  self.strings = strings
916  self.regexps = list(map(re.compile, regexps))
917 
918  def __processLine__(self, line):
919  for s in self.strings:
920  if line.find(s) >= 0:
921  return None
922  for r in self.regexps:
923  if r.search(line):
924  return None
925  return line
926 
927 
929  def __init__(self, start, end):
930  self.start = start
931  self.end = end
932  self._skipping = False
933 
934  def __processLine__(self, line):
935  if self.start in line:
936  self._skipping = True
937  return None
938  elif self.end in line:
939  self._skipping = False
940  elif self._skipping:
941  return None
942  return line
943 
944 
946  def __init__(self, orig, repl="", when=None):
947  if when:
948  when = re.compile(when)
949  self._operations = [(when, re.compile(orig), repl)]
950 
951  def __add__(self, rhs):
952  if isinstance(rhs, RegexpReplacer):
953  res = RegexpReplacer("", "", None)
954  res._operations = self._operations + rhs._operations
955  else:
956  res = FilePreprocessor.__add__(self, rhs)
957  return res
958 
959  def __processLine__(self, line):
960  for w, o, r in self._operations:
961  if w is None or w.search(line):
962  line = o.sub(r, line)
963  return line
964 
965 
966 # Common preprocessors
967 maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
968 normalizeDate = RegexpReplacer(
969  "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
970  "00:00:00 1970-01-01",
971 )
972 normalizeEOL = FilePreprocessor()
973 normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"
974 
975 skipEmptyLines = FilePreprocessor()
976 # FIXME: that's ugly
977 skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
978 
979 # Special preprocessor sorting the list of strings (whitespace separated)
980 # that follow a signature on a single line
981 
982 
984  def __init__(self, signature):
985  self.signature = signature
986  self.siglen = len(signature)
987 
988  def __processLine__(self, line):
989  pos = line.find(self.signature)
990  if pos >= 0:
991  line = line[: (pos + self.siglen)]
992  lst = line[(pos + self.siglen) :].split()
993  lst.sort()
994  line += " ".join(lst)
995  return line
996 
997 
999  """
1000  Sort group of lines matching a regular expression
1001  """
1002 
1003  def __init__(self, exp):
1004  self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1005 
1006  def __processFile__(self, lines):
1007  match = self.exp.match
1008  output = []
1009  group = []
1010  for l in lines:
1011  if match(l):
1012  group.append(l)
1013  else:
1014  if group:
1015  group.sort()
1016  output.extend(group)
1017  group = []
1018  output.append(l)
1019  return output
1020 
1021 
1022 # Preprocessors for GaudiExamples
1023 normalizeExamples = maskPointers + normalizeDate
1024 for w, o, r in [
1025  # ("TIMER.TIMER",r"[0-9]", "0"), # Normalize time output
1026  ("TIMER.TIMER", r"\s+[+-]?[0-9]+[0-9.]*", " 0"), # Normalize time output
1027  ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
1028  ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
1029  (
1030  "^JobOptionsSvc.*options successfully read in from",
1031  r"read in from .*[/\\]([^/\\]*)$",
1032  r"file \1",
1033  ), # normalize path to options
1034  # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
1035  (
1036  None,
1037  r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
1038  "00000000-0000-0000-0000-000000000000",
1039  ),
1040  # Absorb a change in ServiceLocatorHelper
1041  (
1042  "ServiceLocatorHelper::",
1043  "ServiceLocatorHelper::(create|locate)Service",
1044  "ServiceLocatorHelper::service",
1045  ),
1046  # Remove the leading 0 in Windows' exponential format
1047  (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
1048  # Output line changed in Gaudi v24
1049  (None, r"Service reference count check:", r"Looping over all active services..."),
1050  # Ignore count of declared properties (anyway they are all printed)
1051  (
1052  None,
1053  r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
1054  r"\1NN",
1055  ),
1056  ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
1057  (r"Property 'Name': Value", r"( = '[^']+':)'(.*)'", r"\1\2"),
1058  ("TimelineSvc", "to file 'TimelineFile':", "to file "),
1059  ("DataObjectHandleBase", r'DataObjectHandleBase\‍("([^"]*)"\‍)', r"'\1'"),
1060 ]: # [ ("TIMER.TIMER","[0-9]+[0-9.]*", "") ]
1061  normalizeExamples += RegexpReplacer(o, r, w)
1062 
1063 lineSkipper = LineSkipper(
1064  [
1065  "//GP:",
1066  "JobOptionsSvc INFO # ",
1067  "JobOptionsSvc WARNING # ",
1068  "Time User",
1069  "Welcome to",
1070  "This machine has a speed",
1071  "running on",
1072  "ToolSvc.Sequenc... INFO",
1073  "DataListenerSvc INFO XML written to file:",
1074  "[INFO]",
1075  "[WARNING]",
1076  "DEBUG No writable file catalog found which contains FID:",
1077  "DEBUG Service base class initialized successfully",
1078  # changed between v20 and v21
1079  "DEBUG Incident timing:",
1080  # introduced with patch #3487
1081  # changed the level of the message from INFO to
1082  # DEBUG
1083  "INFO 'CnvServices':[",
1084  # message removed because could be printed in constructor
1085  "DEBUG 'CnvServices':[",
1086  # The signal handler complains about SIGXCPU not
1087  # defined on some platforms
1088  "SIGXCPU",
1089  # Message removed with redesing of JobOptionsSvc
1090  "ServiceLocatorHelper::service: found service JobOptionsSvc",
1091  # Ignore warnings for properties case mismatch
1092  "mismatching case for property name:",
1093  # Message demoted to DEBUG in gaudi/Gaudi!992
1094  "Histograms saving not required.",
1095  # Message added in gaudi/Gaudi!577
1096  "Properties are dumped into",
1097  # Messages changed in gaudi/Gaudi!1426
1098  "WARNING no ROOT output file name",
1099  "INFO Writing ROOT histograms to:",
1100  "INFO Completed update of ROOT histograms in:",
1101  # absorb changes in data dependencies reports (https://gitlab.cern.ch/gaudi/Gaudi/-/merge_requests/1348)
1102  "Data Deps for ",
1103  "data dependencies:",
1104  ],
1105  regexps=[
1106  r"^JobOptionsSvc INFO *$",
1107  r"^# ", # Ignore python comments
1108  # skip the message reporting the version of the root file
1109  r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
1110  r"File '.*.xml' does not exist",
1111  r"INFO Refer to dataset .* by its file ID:",
1112  r"INFO Referring to dataset .* by its file ID:",
1113  r"INFO Disconnect from dataset",
1114  r"INFO Disconnected from dataset",
1115  r"INFO Disconnected data IO:",
1116  r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
1117  # Ignore StatusCodeSvc related messages
1118  r".*StatusCodeSvc.*",
1119  r".*StatusCodeCheck.*",
1120  r"Num\s*\|\s*Function\s*\|\s*Source Library",
1121  r"^[-+]*\s*$",
1122  # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
1123  r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
1124  # Hide unchecked StatusCodes from dictionaries
1125  r"^ +[0-9]+ \|.*ROOT",
1126  r"^ +[0-9]+ \|.*\|.*Dict",
1127  # Hide EventLoopMgr total timing report
1128  r"EventLoopMgr.*---> Loop Finished",
1129  r"HiveSlimEventLo.*---> Loop Finished",
1130  # Remove ROOT TTree summary table, which changes from one version to the
1131  # other
1132  r"^\*.*\*$",
1133  # Remove Histos Summaries
1134  r"SUCCESS\s*Booked \d+ Histogram\‍(s\‍)",
1135  r"^ \|",
1136  r"^ ID=",
1137  # Ignore added/removed properties
1138  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1139  r"Property(.*)'Audit(Begin|End)Run':",
1140  # these were missing in tools
1141  r"Property(.*)'AuditRe(start|initialize)':",
1142  r"Property(.*)'Blocking':",
1143  # removed with gaudi/Gaudi!273
1144  r"Property(.*)'ErrorCount(er)?':",
1145  # added with gaudi/Gaudi!306
1146  r"Property(.*)'Sequential':",
1147  # added with gaudi/Gaudi!314
1148  r"Property(.*)'FilterCircularDependencies':",
1149  # removed with gaudi/Gaudi!316
1150  r"Property(.*)'IsClonable':",
1151  # ignore uninteresting/obsolete messages
1152  r"Property update for OutputLevel : new value =",
1153  r"EventLoopMgr\s*DEBUG Creating OutputStream",
1154  ],
1155 )
1156 
1157 if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
1158  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
1159  # fix them
1160  lineSkipper += LineSkipper(
1161  regexps=[
1162  r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
1163  ]
1164  )
1165 
1166 normalizeExamples = (
1167  lineSkipper
1168  + normalizeExamples
1169  + skipEmptyLines
1170  + normalizeEOL
1171  + LineSorter("Services to release : ")
1172  + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
1173 )
1174 
1175 # --------------------- Validation functions/classes ---------------------#
1176 
1177 
1179  def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
1180  self.reffile = os.path.expandvars(reffile)
1181  self.cause = cause
1182  self.result_key = result_key
1183  self.preproc = preproc
1184 
1185  def __call__(self, stdout, result):
1186  causes = []
1187  if os.path.isfile(self.reffile):
1188  orig = open(self.reffile).readlines()
1189  if self.preproc:
1190  orig = self.preproc(orig)
1191  result[self.result_key + ".preproc.orig"] = result.Quote(
1192  "\n".join(map(str.strip, orig))
1193  )
1194  else:
1195  orig = []
1196  new = stdout.splitlines()
1197  if self.preproc:
1198  new = self.preproc(new)
1199 
1200  filterdiffs = list(
1201  difflib.unified_diff(
1202  orig, new, n=1, fromfile="Reference file", tofile="Actual output"
1203  )
1204  )
1205  if filterdiffs:
1206  result[self.result_key] = result.Quote("".join(filterdiffs))
1207  result[self.result_key + ".preproc.new"] = result.Quote(
1208  "\n".join(map(str.strip, new))
1209  )
1210  causes.append(self.cause)
1211  return causes
1212 
1213 
1215  """
1216  Scan stdout to find ROOT TTree summaries and digest them.
1217  """
1218  stars = re.compile(r"^\*+$")
1219  outlines = stdout.splitlines()
1220  nlines = len(outlines)
1221  trees = {}
1222 
1223  i = 0
1224  while i < nlines: # loop over the output
1225  # look for
1226  while i < nlines and not stars.match(outlines[i]):
1227  i += 1
1228  if i < nlines:
1229  tree, i = _parseTTreeSummary(outlines, i)
1230  if tree:
1231  trees[tree["Name"]] = tree
1232 
1233  return trees
1234 
1235 
1236 def cmpTreesDicts(reference, to_check, ignore=None):
1237  """
1238  Check that all the keys in reference are in to_check too, with the same value.
1239  If the value is a dict, the function is called recursively. to_check can
1240  contain more keys than reference, that will not be tested.
1241  The function returns at the first difference found.
1242  """
1243  fail_keys = []
1244  # filter the keys in the reference dictionary
1245  if ignore:
1246  ignore_re = re.compile(ignore)
1247  keys = [key for key in reference if not ignore_re.match(key)]
1248  else:
1249  keys = reference.keys()
1250  # loop over the keys (not ignored) in the reference dictionary
1251  for k in keys:
1252  if k in to_check: # the key must be in the dictionary to_check
1253  if (type(reference[k]) is dict) and (type(to_check[k]) is dict):
1254  # if both reference and to_check values are dictionaries,
1255  # recurse
1256  failed = fail_keys = cmpTreesDicts(reference[k], to_check[k], ignore)
1257  else:
1258  # compare the two values
1259  failed = to_check[k] != reference[k]
1260  else: # handle missing keys in the dictionary to check (i.e. failure)
1261  to_check[k] = None
1262  failed = True
1263  if failed:
1264  fail_keys.insert(0, k)
1265  break # exit from the loop at the first failure
1266  return fail_keys # return the list of keys bringing to the different values
1267 
1268 
1269 def getCmpFailingValues(reference, to_check, fail_path):
1270  c = to_check
1271  r = reference
1272  for k in fail_path:
1273  c = c.get(k, None)
1274  r = r.get(k, None)
1275  if c is None or r is None:
1276  break # one of the dictionaries is not deep enough
1277  return (fail_path, r, c)
1278 
1279 
1280 # signature of the print-out of the histograms
1281 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\‍(s\‍) :\s+([\s\w=-]*)")
1282 
1283 
1284 def _parseTTreeSummary(lines, pos):
1285  """
1286  Parse the TTree summary table in lines, starting from pos.
1287  Returns a tuple with the dictionary with the digested informations and the
1288  position of the first line after the summary.
1289  """
1290  result = {}
1291  i = pos + 1 # first line is a sequence of '*'
1292  count = len(lines)
1293 
1294  def splitcols(l):
1295  return [f.strip() for f in l.strip("*\n").split(":", 2)]
1296 
1297  def parseblock(ll):
1298  r = {}
1299  cols = splitcols(ll[0])
1300  r["Name"], r["Title"] = cols[1:]
1301 
1302  cols = splitcols(ll[1])
1303  r["Entries"] = int(cols[1])
1304 
1305  sizes = cols[2].split()
1306  r["Total size"] = int(sizes[2])
1307  if sizes[-1] == "memory":
1308  r["File size"] = 0
1309  else:
1310  r["File size"] = int(sizes[-1])
1311 
1312  cols = splitcols(ll[2])
1313  sizes = cols[2].split()
1314  if cols[0] == "Baskets":
1315  r["Baskets"] = int(cols[1])
1316  r["Basket size"] = int(sizes[2])
1317  r["Compression"] = float(sizes[-1])
1318  return r
1319 
1320  if i < (count - 3) and lines[i].startswith("*Tree"):
1321  result = parseblock(lines[i : i + 3])
1322  result["Branches"] = {}
1323  i += 4
1324  while i < (count - 3) and lines[i].startswith("*Br"):
1325  if i < (count - 2) and lines[i].startswith("*Branch "):
1326  # skip branch header
1327  i += 3
1328  continue
1329  branch = parseblock(lines[i : i + 3])
1330  result["Branches"][branch["Name"]] = branch
1331  i += 4
1332 
1333  return (result, i)
1334 
1335 
1336 def parseHistosSummary(lines, pos):
1337  """
1338  Extract the histograms infos from the lines starting at pos.
1339  Returns the position of the first line after the summary block.
1340  """
1341  global h_count_re
1342  h_table_head = re.compile(
1343  r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
1344  )
1345  h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")
1346 
1347  nlines = len(lines)
1348 
1349  # decode header
1350  m = h_count_re.search(lines[pos])
1351  name = m.group(1).strip()
1352  total = int(m.group(2))
1353  header = {}
1354  for k, v in [x.split("=") for x in m.group(3).split()]:
1355  header[k] = int(v)
1356  pos += 1
1357  header["Total"] = total
1358 
1359  summ = {}
1360  while pos < nlines:
1361  m = h_table_head.search(lines[pos])
1362  if m:
1363  t, d = m.groups(1) # type and directory
1364  t = t.replace(" profile", "Prof")
1365  pos += 1
1366  if pos < nlines:
1367  l = lines[pos]
1368  else:
1369  l = ""
1370  cont = {}
1371  if l.startswith(" | ID"):
1372  # table format
1373  titles = [x.strip() for x in l.split("|")][1:]
1374  pos += 1
1375  while pos < nlines and lines[pos].startswith(" |"):
1376  l = lines[pos]
1377  values = [x.strip() for x in l.split("|")][1:]
1378  hcont = {}
1379  for i in range(len(titles)):
1380  hcont[titles[i]] = values[i]
1381  cont[hcont["ID"]] = hcont
1382  pos += 1
1383  elif l.startswith(" ID="):
1384  while pos < nlines and lines[pos].startswith(" ID="):
1385  values = [
1386  x.strip() for x in h_short_summ.search(lines[pos]).groups()
1387  ]
1388  cont[values[0]] = values
1389  pos += 1
1390  else: # not interpreted
1391  raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
1392  if d not in summ:
1393  summ[d] = {}
1394  summ[d][t] = cont
1395  summ[d]["header"] = header
1396  else:
1397  break
1398  if not summ:
1399  # If the full table is not present, we use only the header
1400  summ[name] = {"header": header}
1401  return summ, pos
1402 
1403 
1405  """
1406  Scan stdout to find ROOT TTree summaries and digest them.
1407  """
1408  outlines = stdout.splitlines()
1409  nlines = len(outlines) - 1
1410  summaries = {}
1411  global h_count_re
1412 
1413  pos = 0
1414  while pos < nlines:
1415  summ = {}
1416  # find first line of block:
1417  match = h_count_re.search(outlines[pos])
1418  while pos < nlines and not match:
1419  pos += 1
1420  match = h_count_re.search(outlines[pos])
1421  if match:
1422  summ, pos = parseHistosSummary(outlines, pos)
1423  summaries.update(summ)
1424  return summaries
1425 
1426 
1427 def GetPlatform(self):
1428  """
1429  Return the platform Id defined in CMTCONFIG or SCRAM_ARCH.
1430  """
1431  arch = "None"
1432  # check architecture name
1433  if "BINARY_TAG" in os.environ:
1434  arch = os.environ["BINARY_TAG"]
1435  elif "CMTCONFIG" in os.environ:
1436  arch = os.environ["CMTCONFIG"]
1437  elif "SCRAM_ARCH" in os.environ:
1438  arch = os.environ["SCRAM_ARCH"]
1439  elif os.environ.get("ENV_CMAKE_BUILD_TYPE", "") in (
1440  "Debug",
1441  "FastDebug",
1442  "Developer",
1443  ):
1444  arch = "dummy-dbg"
1445  elif os.environ.get("ENV_CMAKE_BUILD_TYPE", "") in (
1446  "Release",
1447  "MinSizeRel",
1448  "RelWithDebInfo",
1449  "",
1450  ): # RelWithDebInfo == -O2 -g -DNDEBUG
1451  arch = "dummy-opt"
1452  return arch
1453 
1454 
1455 def isWinPlatform(self):
1456  """
1457  Return True if the current platform is Windows.
1458 
1459  This function was needed because of the change in the CMTCONFIG format,
1460  from win32_vc71_dbg to i686-winxp-vc9-dbg.
1461  """
1462  platform = GetPlatform(self)
1463  return "winxp" in platform or platform.startswith("win")
1464 
1465 
1467  def __call__(self, ref, out, result, detailed=True):
1468  """Validate JSON output.
1469  returns -- A list of strings giving causes of failure."""
1470 
1471  causes = []
1472  try:
1473  with open(ref) as f:
1474  expected = json.load(f)
1475  except json.JSONDecodeError as err:
1476  causes.append("json parser error")
1477  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1478  return causes
1479 
1480  if not detailed:
1481  if expected != out:
1482  causes.append("json content")
1483  result["json_diff"] = "detailed diff was turned off"
1484  return causes
1485 
1486  # piggyback on TestCase dict diff report
1487  t = TestCase()
1488  try:
1489  t.assertEqual(expected, out)
1490  except AssertionError as err:
1491  causes.append("json content")
1492  result["json_diff"] = str(err).splitlines()[0]
1493 
1494  return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1180
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:125
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:1003
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:113
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:866
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:282
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:793
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:820
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:69
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:986
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:881
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:983
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1284
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:988
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:128
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:497
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:52
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:106
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:111
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:819
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:115
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1183
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:539
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1269
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:822
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:130
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:37
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:131
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:873
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:117
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:985
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:116
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:930
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:79
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:802
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:901
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:622
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:897
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:108
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:949
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:129
compareOutputFiles.target
target
Definition: compareOutputFiles.py:498
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:1006
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:928
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:110
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:126
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:870
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:904
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:120
Gaudi::Functional::details::get
auto get(const Handle &handle, const Algo &, const EventContext &) -> decltype(details::deref(handle.get()))
Definition: FunctionalDetails.h:444
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:932
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1181
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1336
GaudiTesting.BaseTest.BaseTest.validate_time
validate_time
Definition: BaseTest.py:133
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:945
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1455
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:916
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:132
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:746
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:1004
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:118
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:790
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:931
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:127
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:915
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:824
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1236
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:791
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:124
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1466
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:959
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
Definition: BaseTest.py:1179
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:899
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:676
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:119
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:998
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:350
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:736
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:911
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1178
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:33
gaudirun.type
type
Definition: gaudirun.py:162
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:109
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:898
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1185
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:821
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:893
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1182
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:929
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:946
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1404
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:797
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:451
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:104
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:407
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:112
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:343
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1467
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:818
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:912
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:775
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:135
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1214
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:842
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:951
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:516
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:114
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:984
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:918
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:728
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:121
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1427
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:123
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: FunctionalDetails.h:102
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:934