The Gaudi Framework  v37r1 (a7f61348)
BaseTest.py
Go to the documentation of this file.
1 
11 
12 import json
13 import logging
14 import os
15 import platform
16 import re
17 import signal
18 import sys
19 import tempfile
20 import threading
21 import time
22 from subprocess import PIPE, STDOUT, Popen
23 from unittest import TestCase
24 
25 try:
26  from html import escape as escape_for_html
27 except ImportError: # Python2
28  from cgi import escape as escape_for_html
29 
30 import six
31 
32 if sys.version_info < (3, 5):
33  # backport of 'backslashreplace' handling of UnicodeDecodeError
34  # to Python < 3.5
35  from codecs import backslashreplace_errors, register_error
36 
38  if isinstance(exc, UnicodeDecodeError):
39  code = hex(ord(exc.object[exc.start]))
40  return ("\\" + code[1:], exc.start + 1)
41  else:
42  return backslashreplace_errors(exc)
43 
44  register_error("backslashreplace", _new_backslashreplace_errors)
45  del register_error
46  del backslashreplace_errors
47  del _new_backslashreplace_errors
48 
49 SKIP_RETURN_CODE = 77
50 
51 
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    # characters not allowed in XML 1.0 text: C0 controls (except tab/newline/CR),
    # surrogates and the non-characters U+FFFE/U+FFFF
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")

    def quote(match):
        "helper function"
        # use %02X (zero padding): '%2X' would pad codes < 0x10 with a space,
        # yielding e.g. '[NON-XML-CHAR-0x 1]' instead of '[NON-XML-CHAR-0x01]'
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
67 
68 
def dumpProcs(name):
    """helper to debug GAUDI-1084, dump the list of processes"""
    from getpass import getuser

    # only useful on CI nodes, where WORKSPACE is defined
    if "WORKSPACE" not in os.environ:
        return
    ps_proc = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE)
    listing = ps_proc.communicate()[0]
    destination = os.path.join(os.environ["WORKSPACE"], name)
    with open(destination, "wb") as dump_file:
        dump_file.write(listing)
77 
78 
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).
    """
    log = logging.getLogger("kill_tree")
    ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
    # Note: start in a clean env to avoid a freeze with libasan.so
    # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
    ps_proc = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
    child_pids = [int(pid) for pid in ps_proc.communicate()[0].split()]
    # depth-first: signal the leaves before their parents
    for child_pid in child_pids:
        kill_tree(child_pid, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != 3:  # No such process
            raise
        log.debug("no such process %d", ppid)
99 
100 
101 # -------------------------------------------------------------------------#
102 
103 
104 class BaseTest(object):
105  _common_tmpdir = None
106 
    def __init__(self):
        """Set up a test description with its default settings.

        The first group of attributes is meant to be customized by the test
        author; the second group is bookkeeping filled in by run().
        """
        # --- user-settable parameters ---
        self.program = ""  # executable to run
        self.args = []  # command-line arguments
        self.reference = ""  # path of the stdout reference file
        self.error_reference = ""  # path of the stderr reference file
        self.options = ""  # inline job options (written to a temp file by run())
        self.stderr = ""  # literal expected standard error, if any
        self.timeout = 600  # seconds before the test process is killed
        self.exit_code = None  # expected exit code (None means 0)
        self.environment = dict(os.environ)  # environment for the subprocess
        # NOTE(review): run() also reads self.unsupported_platforms, which is
        # not initialized here — confirm against the upstream sources
        self.signal = None  # expected signal terminating the program, if any
        self.workdir = os.curdir  # directory where the test is executed
        self.use_temp_dir = False  # run in a temporary directory instead
        # Variables not for users
        self.status = None  # final outcome: "passed"/"failed"/"skipped"
        self.name = ""  # test name, used for logging
        self.causes = []  # accumulated failure causes
        self.result = Result(self)  # annotations collected during validation
        self.returnedCode = 0  # exit code of the subprocess
        self.out = ""  # captured standard output
        self.err = ""  # captured standard error
        self.proc = None  # Popen handle of the running test
        self.stack_trace = None  # gdb backtrace collected on timeout
        self.basedir = os.getcwd()  # directory restored after the run
        self.validate_time = None  # seconds spent validating the output
133 
    def run(self):
        """Execute the test and validate its output.

        Runs the configured program in a worker thread (so a timeout can be
        enforced), optionally in a temporary directory, then validates the
        captured stdout/stderr and returns a dict summarizing the outcome.
        """
        logging.debug("running test %s", self.name)

        self.result = Result(
            {
                "CAUSE": None,
                "EXCEPTION": None,
                "RESOURCE": None,
                "TARGET": None,
                "TRACEBACK": None,
                "START_TIME": None,
                "END_TIME": None,
                "TIMEOUT_DETAIL": None,
            }
        )

        if self.options:
            # guess the options language from the content and materialize the
            # inline options into a temporary file passed as last argument
            if re.search(
                r"from\s+Gaudi.Configuration\s+import\s+\*|"
                r"from\s+Configurables\s+import",
                self.options,
            ):
                suffix, lang = ".py", "python"
            else:
                suffix, lang = ".opts", "c++"
            self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                lang, escape_for_html(self.options)
            )
            optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
            optionFile.file.write(self.options.encode("utf-8"))
            optionFile.seek(0)
            self.args.append(RationalizePath(optionFile.name))

        platform_id = (
            self.environment.get("BINARY_TAG")
            or self.environment.get("CMTCONFIG")
            or platform.platform()
        )
        # If at least one regex matches we skip the test.
        skip_test = bool(
            [
                None
                for prex in self.unsupported_platforms
                if re.search(prex, platform_id)
            ]
        )

        if not skip_test:
            # handle working/temporary directory options
            workdir = self.workdir
            if self.use_temp_dir:
                if self._common_tmpdir:
                    workdir = self._common_tmpdir
                else:
                    workdir = tempfile.mkdtemp()

            # prepare the command to execute
            prog = ""
            if self.program != "":
                prog = self.program
            elif "GAUDIEXE" in self.environment:
                prog = self.environment["GAUDIEXE"]
            else:
                prog = "Gaudi.exe"

            prog_ext = os.path.splitext(prog)[1]
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = list(map(RationalizePath, self.args))

            if prog_ext == ".py":
                params = ["python3", RationalizePath(prog)] + args
            else:
                params = [RationalizePath(prog)] + args

            # we need to switch directory because the validator expects to run
            # in the same dir as the program
            os.chdir(workdir)

            # launching test in a different thread to handle timeout exception
            def target():
                logging.debug("executing %r in %s", params, workdir)
                self.proc = Popen(
                    params, stdout=PIPE, stderr=PIPE, env=self.environment
                )
                logging.debug("(pid: %d)", self.proc.pid)
                out, err = self.proc.communicate()
                self.out = out.decode("utf-8", errors="backslashreplace")
                self.err = err.decode("utf-8", errors="backslashreplace")

            thread = threading.Thread(target=target)
            thread.start()
            # catching timeout
            thread.join(self.timeout)

            if thread.is_alive():
                logging.debug("time out in test %s (pid %d)", self.name, self.proc.pid)
                # get the stack trace of the stuck process
                cmd = [
                    "gdb",
                    "--pid",
                    str(self.proc.pid),
                    "--batch",
                    "--eval-command=thread apply all backtrace",
                ]
                gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                self.stack_trace = gdb.communicate()[0].decode(
                    "utf-8", errors="backslashreplace"
                )

                # terminate gently, then force-kill if still alive after 60s
                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)
                self.causes.append("timeout")
            else:
                self.returnedCode = self.proc.returncode
                if self.returnedCode != SKIP_RETURN_CODE:
                    logging.debug(
                        f"completed test {self.name} with returncode = {self.returnedCode}"
                    )
                    logging.debug("validating test...")
                    val_start_time = time.perf_counter()
                    self.result, self.causes = self.ValidateOutput(
                        stdout=self.out, stderr=self.err, result=self.result
                    )
                    self.validate_time = round(time.perf_counter() - val_start_time, 2)
                else:
                    # exit code 77 conventionally marks a skipped test
                    logging.debug(f"skipped test {self.name}")
                    self.status = "skipped"

            # remove the temporary directory if we created it
            if self.use_temp_dir and not self._common_tmpdir:
                shutil.rmtree(workdir, True)

            os.chdir(self.basedir)

            if self.status != "skipped":
                # handle application exit code
                if self.signal is not None:
                    # a process killed by signal N exits with code -N
                    if int(self.returnedCode) != -int(self.signal):
                        self.causes.append("exit code")

                elif self.exit_code is not None:
                    if int(self.returnedCode) != int(self.exit_code):
                        self.causes.append("exit code")

                elif self.returnedCode != 0:
                    self.causes.append("exit code")

                if self.causes:
                    self.status = "failed"
                else:
                    self.status = "passed"

        else:
            self.status = "skipped"

        logging.debug("%s: %s", self.name, self.status)
        # map report labels to the attributes that provide their values
        field_mapping = {
            "Exit Code": "returnedCode",
            "stderr": "err",
            "Arguments": "args",
            "Runtime Environment": "environment",
            "Status": "status",
            "stdout": "out",
            "Program Name": "program",
            "Name": "name",
            "Validator": "validator",
            "Validation execution time": "validate_time",
            "Output Reference File": "reference",
            "Error Reference File": "error_reference",
            "Causes": "causes",
            # 'Validator Result': 'result.annotations',
            "Unsupported Platforms": "unsupported_platforms",
            "Stack Trace": "stack_trace",
        }
        # only report fields with a truthy value
        resultDict = [
            (key, getattr(self, attr))
            for key, attr in field_mapping.items()
            if getattr(self, attr)
        ]
        resultDict.append(
            (
                "Working Directory",
                RationalizePath(os.path.join(os.getcwd(), self.workdir)),
            )
        )
        # print(dict(resultDict).keys())
        resultDict.extend(self.result.annotations.items())
        # print(self.result.annotations.keys())
        resultDict = dict(resultDict)

        # Special cases
        if "Validator" in resultDict:
            resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                "python", escape_for_html(resultDict["Validator"])
            )
        return resultDict
337 
338  # -------------------------------------------------#
339  # ----------------Validating tool------------------#
340  # -------------------------------------------------#
341 
342  def ValidateOutput(self, stdout, stderr, result):
343  if not self.stderr:
344  self.validateWithReference(stdout, stderr, result, self.causes)
345  elif stderr.strip() != self.stderr.strip():
346  self.causes.append("standard error")
347  return result, self.causes
348 
350  self,
351  reference=None,
352  stdout=None,
353  result=None,
354  causes=None,
355  signature_offset=0,
356  signature=None,
357  id=None,
358  ):
359  """
360  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
361  """
362 
363  if reference is None:
364  reference = self.reference
365  if stdout is None:
366  stdout = self.out
367  if result is None:
368  result = self.result
369  if causes is None:
370  causes = self.causes
371 
372  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
373  if not reflines:
374  raise RuntimeError("Empty (or null) reference")
375  # the same on standard output
376  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
377 
378  res_field = "GaudiTest.RefBlock"
379  if id:
380  res_field += "_%s" % id
381 
382  if signature is None:
383  if signature_offset < 0:
384  signature_offset = len(reference) + signature_offset
385  signature = reflines[signature_offset]
386  # find the reference block in the output file
387  try:
388  pos = outlines.index(signature)
389  outlines = outlines[
390  pos - signature_offset : pos + len(reflines) - signature_offset
391  ]
392  if reflines != outlines:
393  msg = "standard output"
394  # I do not want 2 messages in causes if the function is called
395  # twice
396  if msg not in causes:
397  causes.append(msg)
398  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
399  except ValueError:
400  causes.append("missing signature")
401  result[res_field + ".signature"] = result.Quote(signature)
402  if len(reflines) > 1 or signature != reflines[0]:
403  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
404  return causes
405 
407  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
408  ):
409  """
410  Count the number of messages with required severity (by default ERROR and FATAL)
411  and check if their numbers match the expected ones (0 by default).
412  The dictionary "expected" can be used to tune the number of errors and fatals
413  allowed, or to limit the number of expected warnings etc.
414  """
415 
416  if stdout is None:
417  stdout = self.out
418  if result is None:
419  result = self.result
420  if causes is None:
421  causes = self.causes
422 
423  # prepare the dictionary to record the extracted lines
424  errors = {}
425  for sev in expected:
426  errors[sev] = []
427 
428  outlines = stdout.splitlines()
429  from math import log10
430 
431  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
432 
433  linecount = 0
434  for l in outlines:
435  linecount += 1
436  words = l.split()
437  if len(words) >= 2 and words[1] in errors:
438  errors[words[1]].append(fmt % (linecount, l.rstrip()))
439 
440  for e in errors:
441  if len(errors[e]) != expected[e]:
442  causes.append("%s(%d)" % (e, len(errors[e])))
443  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
444  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
445  str(expected[e])
446  )
447 
448  return causes
449 
451  self,
452  stdout=None,
453  result=None,
454  causes=None,
455  trees_dict=None,
456  ignore=r"Basket|.*size|Compression",
457  ):
458  """
459  Compare the TTree summaries in stdout with the ones in trees_dict or in
460  the reference file. By default ignore the size, compression and basket
461  fields.
462  The presence of TTree summaries when none is expected is not a failure.
463  """
464  if stdout is None:
465  stdout = self.out
466  if result is None:
467  result = self.result
468  if causes is None:
469  causes = self.causes
470  if trees_dict is None:
471  lreference = self._expandReferenceFileName(self.reference)
472  # call the validator if the file exists
473  if lreference and os.path.isfile(lreference):
474  trees_dict = findTTreeSummaries(open(lreference).read())
475  else:
476  trees_dict = {}
477 
478  from pprint import PrettyPrinter
479 
480  pp = PrettyPrinter()
481  if trees_dict:
482  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
483  if ignore:
484  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
485 
486  trees = findTTreeSummaries(stdout)
487  failed = cmpTreesDicts(trees_dict, trees, ignore)
488  if failed:
489  causes.append("trees summaries")
490  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
491  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
492  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
493 
494  return causes
495 
497  self, stdout=None, result=None, causes=None, dict=None, ignore=None
498  ):
499  """
500  Compare the TTree summaries in stdout with the ones in trees_dict or in
501  the reference file. By default ignore the size, compression and basket
502  fields.
503  The presence of TTree summaries when none is expected is not a failure.
504  """
505  if stdout is None:
506  stdout = self.out
507  if result is None:
508  result = self.result
509  if causes is None:
510  causes = self.causes
511 
512  if dict is None:
513  lreference = self._expandReferenceFileName(self.reference)
514  # call the validator if the file exists
515  if lreference and os.path.isfile(lreference):
516  dict = findHistosSummaries(open(lreference).read())
517  else:
518  dict = {}
519 
520  from pprint import PrettyPrinter
521 
522  pp = PrettyPrinter()
523  if dict:
524  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
525  if ignore:
526  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
527 
528  histos = findHistosSummaries(stdout)
529  failed = cmpTreesDicts(dict, histos, ignore)
530  if failed:
531  causes.append("histos summaries")
532  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
533  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
534  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
535 
536  return causes
537 
539  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
540  ):
541  """
542  Default validation acti*on: compare standard output and error to the
543  reference files.
544  """
545 
546  if stdout is None:
547  stdout = self.out
548  if stderr is None:
549  stderr = self.err
550  if result is None:
551  result = self.result
552  if causes is None:
553  causes = self.causes
554 
555  # set the default output preprocessor
556  if preproc is None:
557  preproc = normalizeExamples
558  # check standard output
559  lreference = self._expandReferenceFileName(self.reference)
560  # call the validator if the file exists
561  if lreference and os.path.isfile(lreference):
562  causes += ReferenceFileValidator(
563  lreference, "standard output", "Output Diff", preproc=preproc
564  )(stdout, result)
565  elif lreference:
566  causes += ["missing reference file"]
567  # Compare TTree summaries
568  causes = self.CheckTTreesSummaries(stdout, result, causes)
569  causes = self.CheckHistosSummaries(stdout, result, causes)
570  if causes and lreference: # Write a new reference file for stdout
571  try:
572  cnt = 0
573  newrefname = ".".join([lreference, "new"])
574  while os.path.exists(newrefname):
575  cnt += 1
576  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
577  newref = open(newrefname, "w")
578  # sanitize newlines
579  for l in stdout.splitlines():
580  newref.write(l.rstrip() + "\n")
581  del newref # flush and close
582  result["New Output Reference File"] = os.path.relpath(
583  newrefname, self.basedir
584  )
585  except IOError:
586  # Ignore IO errors when trying to update reference files
587  # because we may be in a read-only filesystem
588  pass
589 
590  # check standard error
591  lreference = self._expandReferenceFileName(self.error_reference)
592  # call the validator if we have a file to use
593  if lreference:
594  if os.path.isfile(lreference):
595  newcauses = ReferenceFileValidator(
596  lreference, "standard error", "Error Diff", preproc=preproc
597  )(stderr, result)
598  else:
599  newcauses = ["missing error reference file"]
600  causes += newcauses
601  if newcauses and lreference: # Write a new reference file for stdedd
602  cnt = 0
603  newrefname = ".".join([lreference, "new"])
604  while os.path.exists(newrefname):
605  cnt += 1
606  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
607  newref = open(newrefname, "w")
608  # sanitize newlines
609  for l in stderr.splitlines():
610  newref.write(l.rstrip() + "\n")
611  del newref # flush and close
612  result["New Error Reference File"] = os.path.relpath(
613  newrefname, self.basedir
614  )
615  else:
616  causes += BasicOutputValidator(
617  lreference, "standard error", "ExecTest.expected_stderr"
618  )(stderr, result)
619  return causes
620 
622  self,
623  output_file,
624  reference_file,
625  result=None,
626  causes=None,
627  detailed=True,
628  ):
629  """
630  JSON validation action: compare json file to reference file
631  """
632 
633  if result is None:
634  result = self.result
635  if causes is None:
636  causes = self.causes
637 
638  if not os.path.isfile(output_file):
639  causes.append(f"output file {output_file} does not exist")
640  return causes
641 
642  try:
643  with open(output_file) as f:
644  output = json.load(f)
645  except json.JSONDecodeError as err:
646  causes.append("json parser error")
647  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
648  return causes
649 
650  lreference = self._expandReferenceFileName(reference_file)
651  if not lreference:
652  causes.append("reference file not set")
653  elif not os.path.isfile(lreference):
654  causes.append("reference file does not exist")
655  else:
656  causes += JSONOutputValidator()(lreference, output, result, detailed)
657  if causes and lreference: # Write a new reference file for output
658  try:
659  cnt = 0
660  newrefname = ".".join([lreference, "new"])
661  while os.path.exists(newrefname):
662  cnt += 1
663  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
664  with open(newrefname, "w") as newref:
665  json.dump(output, newref, indent=4)
666  result["New JSON Output Reference File"] = os.path.relpath(
667  newrefname, self.basedir
668  )
669  except IOError:
670  # Ignore IO errors when trying to update reference files
671  # because we may be in a read-only filesystem
672  pass
673  return causes
674 
    def _expandReferenceFileName(self, reffile):
        """Resolve *reffile* to an absolute path, preferring a
        platform-specific variant of the reference file when one exists."""
        # if no file is passed, do nothing
        if not reffile:
            return ""

        # function to split an extension in constituents parts
        import re

        def platformSplit(p):
            # a platform id like "x86_64-slc6-gcc49-opt" becomes a set of tags
            return set(re.split(r"[-+]", p))

        reference = os.path.normpath(
            os.path.join(self.basedir, os.path.expandvars(reffile))
        )

        # old-style platform-specific reference name
        # (first 3 chars of the platform id injected before the extension)
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:  # look for new-style platform specific reference files:
            # get all the files whose name start with the reference filename
            dirname, basename = os.path.split(reference)
            if not dirname:
                dirname = "."
            head = basename + "."
            head_len = len(head)
            platform = platformSplit(GetPlatform(self))
            if "do0" in platform:
                # "do0" builds are debug builds too
                platform.add("dbg")
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    # candidate is usable if all its tags match ours
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:  # take the one with highest matching
                # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
                # has to use ref.x86_64-gcc43 or ref.slc5-dbg
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
716 
717 
718 # ======= GAUDI TOOLS =======
719 
720 import difflib
721 import shutil
722 
723 try:
724  from GaudiKernel import ROOT6WorkAroundEnabled
725 except ImportError:
726 
728  # dummy implementation
729  return False
730 
731 
732 # --------------------------------- TOOLS ---------------------------------#
733 
734 
736  """
737  Function used to normalize the used path
738  """
739  newPath = os.path.normpath(os.path.expandvars(p))
740  if os.path.exists(newPath):
741  p = os.path.realpath(newPath)
742  return p
743 
744 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it.  An application is looked for with or without the '.exe'
    suffix.  If the executable cannot be found, None is returned.
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                if os.path.isfile(executable[:-4]):
                    # the absolute path exists only without the '.exe' suffix
                    return executable[:-4]
            else:
                # absolute path does not exist: retry the $PATH search with
                # just the file name
                executable = os.path.split(executable)[1]
        else:
            return executable
    # default to "" so that a missing $PATH does not raise AttributeError
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            # also accept the name without the '.exe' suffix
            return fullpath[:-4]
    return None
767 
768 
769 # -------------------------------------------------------------------------#
770 # ----------------------------- Result Classe -----------------------------#
771 # -------------------------------------------------------------------------#
772 
773 
class Result:
    """Outcome of a test: a mapping of annotation names to HTML strings."""

    # outcome constants
    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"

    # well-known annotation keys
    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations=None):
        """Create a result; *annotations* (a dict) is copied, never aliased.

        The default is None instead of a mutable {} literal, avoiding the
        shared-mutable-default pitfall; behavior for callers is unchanged.
        """
        self.annotations = {} if annotations is None else annotations.copy()

    def __getitem__(self, key):
        # annotation keys must be strings (file is Python 3 only: it uses
        # f-strings elsewhere, so plain str replaces six.string_types)
        assert isinstance(key, str)
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert isinstance(key, str)
        assert isinstance(value, str), "{!r} is not a string".format(value)
        self.annotations[key] = value

    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return "<pre>{}</pre>".format(escape_for_html(text))
805 
806 
807 # -------------------------------------------------------------------------#
808 # --------------------------- Validator Classes ---------------------------#
809 # -------------------------------------------------------------------------#
810 
811 # Basic implementation of an option validator for Gaudi test. This
812 # implementation is based on the standard (LCG) validation functions used
813 # in QMTest.
814 
815 
    def __init__(self, ref, cause, result_key):
        # expected (reference) text the output is compared against
        self.ref = ref
        # failure-cause label reported on mismatch
        self.cause = cause
        # key under which the reference text is recorded in the Result
        self.result_key = result_key
821 
    def __call__(self, out, result):
        """Validate the output of the program.

        'out' -- A string containing the captured output stream to check.
        'result' -- A 'Result' object. It may be used to annotate
        the outcome according to the content of the output.
        returns -- A list of strings giving causes of failure."""

        causes = []
        # Check the output against the stored reference text
        if not self.__CompareText(out, self.ref):
            causes.append(self.cause)
            result[self.result_key] = result.Quote(self.ref)

        return causes
839 
840  def __CompareText(self, s1, s2):
841  """Compare 's1' and 's2', ignoring line endings.
842  's1' -- A string.
843  's2' -- A string.
844  returns -- True if 's1' and 's2' are the same, ignoring
845  differences in line endings."""
846  if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
847  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
848  # can fix them
849  to_ignore = re.compile(
850  r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
851  )
852 
853  def keep_line(l):
854  return not to_ignore.match(l)
855 
856  return list(filter(keep_line, s1.splitlines())) == list(
857  filter(keep_line, s2.splitlines())
858  )
859  else:
860  return s1.splitlines() == s2.splitlines()
861 
862 
863 # ------------------------ Preprocessor elements ------------------------#
865  """Base class for a callable that takes a file and returns a modified
866  version of it."""
867 
    def __processLine__(self, line):
        # identity transformation: subclasses override this to modify a line
        # or drop it by returning a falsy value
        return line
870 
871  def __processFile__(self, lines):
872  output = []
873  for l in lines:
874  l = self.__processLine__(l)
875  if l:
876  output.append(l)
877  return output
878 
879  def __call__(self, input):
880  if not isinstance(input, six.string_types):
881  lines = input
882  mergeback = False
883  else:
884  lines = input.splitlines()
885  mergeback = True
886  output = self.__processFile__(lines)
887  if mergeback:
888  output = "\n".join(output)
889  return output
890 
    def __add__(self, rhs):
        # chaining two preprocessors yields a sequence that applies both in order
        return FilePreprocessorSequence([self, rhs])
893 
894 
896  def __init__(self, members=[]):
897  self.members = members
898 
    def __add__(self, rhs):
        # appending to a sequence produces a new, flat sequence
        return FilePreprocessorSequence(self.members + [rhs])
901 
902  def __call__(self, input):
903  output = input
904  for pp in self.members:
905  output = pp(output)
906  return output
907 
908 
910  def __init__(self, strings=[], regexps=[]):
911  import re
912 
913  self.strings = strings
914  self.regexps = list(map(re.compile, regexps))
915 
916  def __processLine__(self, line):
917  for s in self.strings:
918  if line.find(s) >= 0:
919  return None
920  for r in self.regexps:
921  if r.search(line):
922  return None
923  return line
924 
925 
    def __init__(self, start, end):
        """Skip the lines between a line containing *start* and a line
        containing *end*."""
        self.start = start
        self.end = end
        # True while we are inside a block being skipped
        self._skipping = False
931 
    def __processLine__(self, line):
        # start marker: begin skipping (the marker line itself is dropped)
        if self.start in line:
            self._skipping = True
            return None
        # end marker: stop skipping; note that the end-marker line itself
        # falls through and is kept
        elif self.end in line:
            self._skipping = False
        elif self._skipping:
            return None
        return line
941 
942 
    def __init__(self, orig, repl="", when=None):
        """Replace occurrences of regexp *orig* with *repl*, optionally only
        on lines matching the regexp *when*."""
        if when:
            when = re.compile(when)
        # list of (guard-regexp-or-None, pattern, replacement) tuples
        self._operations = [(when, re.compile(orig), repl)]
948 
    def __add__(self, rhs):
        # merging two replacers keeps a single flat operation list, which is
        # cheaper than wrapping them in a FilePreprocessorSequence
        if isinstance(rhs, RegexpReplacer):
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            res = FilePreprocessor.__add__(self, rhs)
        return res
956 
957  def __processLine__(self, line):
958  for w, o, r in self._operations:
959  if w is None or w.search(line):
960  line = o.sub(r, line)
961  return line
962 
963 
# Common preprocessors
# mask memory addresses, which differ from run to run
maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
# replace "HH:MM:SS YYYY-MM-DD"-style timestamps with a fixed epoch date
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01",
)
# strip trailing whitespace and force a single '\n' line terminator
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"

# drop lines that contain only whitespace
skipEmptyLines = FilePreprocessor()
# FIXME: that's ugly
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
976 
977 # Special preprocessor sorting the list of strings (whitespace separated)
978 # that follow a signature on a single line
979 
980 
    def __init__(self, signature):
        """Sort the whitespace-separated items that follow *signature* on a line."""
        self.signature = signature
        # cached length of the signature, used to slice each line
        self.siglen = len(signature)
985 
986  def __processLine__(self, line):
987  pos = line.find(self.signature)
988  if pos >= 0:
989  line = line[: (pos + self.siglen)]
990  lst = line[(pos + self.siglen) :].split()
991  lst.sort()
992  line += " ".join(lst)
993  return line
994 
995 
997  """
998  Sort group of lines matching a regular expression
999  """
1000 
    def __init__(self, exp):
        # accept either a precompiled pattern object or a pattern string
        self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1003 
1004  def __processFile__(self, lines):
1005  match = self.exp.match
1006  output = []
1007  group = []
1008  for l in lines:
1009  if match(l):
1010  group.append(l)
1011  else:
1012  if group:
1013  group.sort()
1014  output.extend(group)
1015  group = []
1016  output.append(l)
1017  return output
1018 
1019 
# Preprocessors for GaudiExamples
normalizeExamples = maskPointers + normalizeDate
# Each tuple is (when, orig, repl): apply the regexp replacement orig -> repl
# only on lines matching 'when' (None means on every line).
for w, o, r in [
    ("TIMER", r"\s+[+-]?[0-9]+[0-9.e+-]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"file \1",
    ),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # Absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # Ignore count of declared properties (anyway they are all printed)
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property \['Name': Value\]", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    # NOTE(review): the pattern below used to contain zero-width joiner
    # characters (U+200D) after the backslashes, which prevented it from
    # ever matching the real output
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
]:
    normalizeExamples += RegexpReplacer(o, r, w)
1059 
# Drop lines that are irrelevant for the comparison with the reference:
# plain strings and regular expressions identifying lines to be skipped.
lineSkipper = LineSkipper(
    [
        "//GP:",
        "JobOptionsSvc INFO # ",
        "JobOptionsSvc WARNING # ",
        "Time User",
        "Welcome to",
        "This machine has a speed",
        "running on",
        "ToolSvc.Sequenc... INFO",
        "DataListenerSvc INFO XML written to file:",
        "[INFO]",
        "[WARNING]",
        "DEBUG No writable file catalog found which contains FID:",
        "DEBUG Service base class initialized successfully",
        # changed between v20 and v21
        "DEBUG Incident timing:",
        # introduced with patch #3487
        # changed the level of the message from INFO to
        # DEBUG
        "INFO 'CnvServices':[",
        # message removed because could be printed in constructor
        "DEBUG 'CnvServices':[",
        # The signal handler complains about SIGXCPU not
        # defined on some platforms
        "SIGXCPU",
        # Message removed with redesing of JobOptionsSvc
        "ServiceLocatorHelper::service: found service JobOptionsSvc",
        # Ignore warnings for properties case mismatch
        "mismatching case for property name:",
        # Message demoted to DEBUG in gaudi/Gaudi!992
        "Histograms saving not required.",
        # Message added in gaudi/Gaudi!577
        "Properties are dumped into",
        # Messages changed in gaudi/Gaudi!1426
        "WARNING no ROOT output file name",
        "INFO Writing ROOT histograms to:",
        "INFO Completed update of ROOT histograms in:",
        # absorb changes in data dependencies reports (https://gitlab.cern.ch/gaudi/Gaudi/-/merge_requests/1348)
        "Data Deps for ",
        "data dependencies:",
    ],
    regexps=[
        r"^JobOptionsSvc INFO *$",
        r"^# ",  # Ignore python comments
        # skip the message reporting the version of the root file
        r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
        r"File '.*.xml' does not exist",
        r"INFO Refer to dataset .* by its file ID:",
        r"INFO Referring to dataset .* by its file ID:",
        r"INFO Disconnect from dataset",
        r"INFO Disconnected from dataset",
        r"INFO Disconnected data IO:",
        r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
        # Ignore StatusCodeSvc related messages
        r".*StatusCodeSvc.*",
        r".*StatusCodeCheck.*",
        r"Num\s*\|\s*Function\s*\|\s*Source Library",
        r"^[-+]*\s*$",
        # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
        r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
        # Hide unchecked StatusCodes from dictionaries
        r"^ +[0-9]+ \|.*ROOT",
        r"^ +[0-9]+ \|.*\|.*Dict",
        # Hide EventLoopMgr total timing report
        r"EventLoopMgr.*---> Loop Finished",
        r"HiveSlimEventLo.*---> Loop Finished",
        # Remove ROOT TTree summary table, which changes from one version to the
        # other
        r"^\*.*\*$",
        # Remove Histos Summaries
        # NOTE(review): this pattern used to contain zero-width joiner
        # characters (U+200D) after the backslashes, preventing it from
        # matching the real output
        r"SUCCESS\s*Booked \d+ Histogram\(s\)",
        r"^ \|",
        r"^ ID=",
        # Ignore added/removed properties
        r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
        r"Property(.*)'Audit(Begin|End)Run':",
        # these were missing in tools
        r"Property(.*)'AuditRe(start|initialize)':",
        r"Property(.*)'Blocking':",
        # removed with gaudi/Gaudi!273
        r"Property(.*)'ErrorCount(er)?':",
        # added with gaudi/Gaudi!306
        r"Property(.*)'Sequential':",
        # added with gaudi/Gaudi!314
        r"Property(.*)'FilterCircularDependencies':",
        # removed with gaudi/Gaudi!316
        r"Property(.*)'IsClonable':",
        # ignore uninteresting/obsolete messages
        r"Property update for OutputLevel : new value =",
        r"EventLoopMgr\s*DEBUG Creating OutputStream",
    ],
)
1153 
if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
    # fix them
    lineSkipper += LineSkipper(
        regexps=[
            r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
        ]
    )

# Final default preprocessor chain used to normalize job output before
# comparing it to a reference file: skip irrelevant lines, apply the
# replacements defined above, drop empty lines / EOL differences and sort
# blocks whose order is not reproducible.
normalizeExamples = (
    lineSkipper
    + normalizeExamples
    + skipEmptyLines
    + normalizeEOL
    + LineSorter("Services to release : ")
    + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
)
1171 
1172 # --------------------- Validation functions/classes ---------------------#
1173 
1174 
    def __init__(self, reffile, cause, result_key, preproc=normalizeExamples):
        """
        Validator comparing the job output to a reference file.

        reffile    -- path to the reference file (environment variables in
                      the path are expanded)
        cause      -- failure cause string reported when the comparison fails
        result_key -- key under which the diff details are stored in the
                      result object
        preproc    -- preprocessor applied to both reference and actual
                      output before the comparison
        """
        self.reffile = os.path.expandvars(reffile)
        self.cause = cause
        self.result_key = result_key
        self.preproc = preproc
1181 
1182  def __call__(self, stdout, result):
1183  causes = []
1184  if os.path.isfile(self.reffile):
1185  orig = open(self.reffile).readlines()
1186  if self.preproc:
1187  orig = self.preproc(orig)
1188  result[self.result_key + ".preproc.orig"] = result.Quote(
1189  "\n".join(map(str.strip, orig))
1190  )
1191  else:
1192  orig = []
1193  new = stdout.splitlines()
1194  if self.preproc:
1195  new = self.preproc(new)
1196 
1197  filterdiffs = list(
1198  difflib.unified_diff(
1199  orig, new, n=1, fromfile="Reference file", tofile="Actual output"
1200  )
1201  )
1202  if filterdiffs:
1203  result[self.result_key] = result.Quote("".join(filterdiffs))
1204  result[self.result_key + ".preproc.new"] = result.Quote(
1205  "\n".join(map(str.strip, new))
1206  )
1207  causes.append(self.cause)
1208  return causes
1209 
1210 
def findTTreeSummaries(stdout):
    """
    Scan stdout to find ROOT TTree summaries and digest them.

    Returns a dictionary mapping each tree name to the dictionary produced
    by _parseTTreeSummary.
    """
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:  # loop over the output
        # look for the row of '*' characters opening a TTree summary table
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
1231 
1232 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found.

    ignore -- optional regular expression; reference keys matching it are
              skipped.
    Returns the list of keys leading to the first differing value ([] when
    everything matches).  A key missing from to_check is a failure and is
    inserted into to_check with value None.
    """
    # filter the keys in the reference dictionary
    if ignore:
        skip = re.compile(ignore).match
        keys = [key for key in reference if not skip(key)]
    else:
        keys = reference.keys()
    # loop over the keys (not ignored) in the reference dictionary
    for key in keys:
        if key not in to_check:
            # missing key in the dictionary to check, i.e. failure;
            # record the hole so callers can traverse it later
            to_check[key] = None
            return [key]
        ref_value, chk_value = reference[key], to_check[key]
        if (type(ref_value) is dict) and (type(chk_value) is dict):
            # both values are dictionaries: recurse
            sub_fail = cmpTreesDicts(ref_value, chk_value, ignore)
            if sub_fail:
                sub_fail.insert(0, key)
                return sub_fail
        elif chk_value != ref_value:
            # plain values differ
            return [key]
    return []
1264 
1265 
def getCmpFailingValues(reference, to_check, fail_path):
    """
    Walk both dictionaries along *fail_path* (as returned by cmpTreesDicts)
    and return the tuple (fail_path, reference_value, to_check_value) with
    the two values that differ.
    """
    ref_value, chk_value = reference, to_check
    for key in fail_path:
        chk_value = chk_value.get(key, None)
        ref_value = ref_value.get(key, None)
        if chk_value is None or ref_value is None:
            # one of the dictionaries is not deep enough
            break
    return (fail_path, ref_value, chk_value)
1275 
1276 
# signature of the print-out of the histograms
# (groups: message source, number of histograms booked, "key=value" counters)
# NOTE(review): the pattern used to contain zero-width joiner characters
# (U+200D) after the backslashes of "\(s\)", preventing it from matching.
h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1279 
1280 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested informations and the
    position of the first line after the summary.

    lines[pos] is expected to be the row of '*' characters that opens the
    table; an empty dictionary is returned when the table does not start
    with a "*Tree" row.
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        # split a table row "*a : b : c*" into its (up to 3) stripped fields
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        # digest one block of 3 (or 4) table rows into a dictionary with
        # Name/Title/Entries/sizes, plus basket info when present
        r = {}
        delta_i = 0
        cols = splitcols(ll[0])

        if len(ll) == 3:
            # default one line name/title
            r["Name"], r["Title"] = cols[1:]
        elif len(ll) == 4:
            # in case title is moved to next line due to too long name
            delta_i = 1
            r["Name"] = cols[1]
            r["Title"] = ll[1].strip("*\n").split("|")[1].strip()
        else:
            assert False

        cols = splitcols(ll[1 + delta_i])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # tree kept in memory only: no file size
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2 + delta_i])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])

        return r

    def nextblock(lines, i):
        # return the index of the next separator row (all dots or all stars)
        delta_i = 1
        dots = re.compile(r"^\.+$")
        stars = re.compile(r"^\*+$")
        count = len(lines)
        while (
            i + delta_i < count
            and not dots.match(lines[i + delta_i][1:-1])
            and not stars.match(lines[i + delta_i])
        ):
            delta_i += 1
        return i + delta_i

    if i < (count - 3) and lines[i].startswith("*Tree"):
        i_nextblock = nextblock(lines, i)
        result = parseblock(lines[i:i_nextblock])
        result["Branches"] = {}
        i = i_nextblock + 1
        # each following "*Br" block describes one branch of the tree
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            i_nextblock = nextblock(lines, i)
            if i_nextblock >= count:
                break
            branch = parseblock(lines[i:i_nextblock])
            result["Branches"][branch["Name"]] = branch
            i = i_nextblock + 1

    return (result, i)
1360 
1361 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.
    Returns the position of the first line after the summary block.

    The returned value is actually the tuple (summary_dict, new_pos);
    lines[pos] must match h_count_re (the "Booked N Histogram(s)" header).
    """
    global h_count_re
    # header of a per-directory table, e.g.
    #   ... SUCCESS 1D histograms in directory "dirname" ...
    h_table_head = re.compile(
        r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
    )
    # short summary format: one histogram per line
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    # the tail of the header is a list of "key=value" counters
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            t = t.replace(" profile", "Prof")
            pos += 1
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format: parse the column titles, then one row per
                # histogram until the table ends
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # short format: one "ID=... "title" data" line per histogram
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1428 
1429 
def findHistosSummaries(stdout):
    """
    Scan stdout to find histogram summaries and digest them.

    Returns a dictionary of the summaries found, as produced by
    parseHistosSummary.  (The previous docstring wrongly mentioned ROOT
    TTree summaries.)
    """
    outlines = stdout.splitlines()
    # parseHistosSummary may look one line ahead, so stop one line early
    nlines = len(outlines) - 1
    summaries = {}
    global h_count_re

    pos = 0
    while pos < nlines:
        summ = {}
        # find first line of block:
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
1451 
1452 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH.

    When none of those variables is set, a dummy platform is derived from
    ENV_CMAKE_BUILD_TYPE ("dummy-dbg" for debug-like build types,
    "dummy-opt" for optimized ones, including when the variable is unset).
    """
    # check architecture name: the first defined variable wins
    for var in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if var in os.environ:
            return os.environ[var]
    build_type = os.environ.get("ENV_CMAKE_BUILD_TYPE", "")
    if build_type in ("Debug", "FastDebug", "Developer"):
        return "dummy-dbg"
    if build_type in (
        "Release",
        "MinSizeRel",
        "RelWithDebInfo",
        "",
    ):  # RelWithDebInfo == -O2 -g -DNDEBUG
        return "dummy-opt"
    return "None"
1479 
1480 
def isWinPlatform(self):
    """
    Tell whether the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    # note: a local name distinct from the 'platform' module avoids shadowing
    platform_id = GetPlatform(self)
    return platform_id.startswith("win") or "winxp" in platform_id
1490 
1491 
1493  def __call__(self, ref, out, result, detailed=True):
1494  """Validate JSON output.
1495  returns -- A list of strings giving causes of failure."""
1496 
1497  causes = []
1498  try:
1499  with open(ref) as f:
1500  expected = json.load(f)
1501  except json.JSONDecodeError as err:
1502  causes.append("json parser error")
1503  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1504  return causes
1505 
1506  if not detailed:
1507  if expected != out:
1508  causes.append("json content")
1509  result["json_diff"] = "detailed diff was turned off"
1510  return causes
1511 
1512  # piggyback on TestCase dict diff report
1513  t = TestCase()
1514  # sort both lists (these are list of entities) as the order is not supposed to matter
1515  # indeed, the JSONSink implementation does not garantee any particular order
1516  # but as JSON does not have sets, we get back a sorted list here
1517  expected = sorted(expected, key=lambda item: (item["component"], item["name"]))
1518  out = sorted(out, key=lambda item: (item["component"], item["name"]))
1519  try:
1520  t.assertEqual(expected, out)
1521  except AssertionError as err:
1522  causes.append("json content")
1523  result["json_diff"] = str(err).splitlines()[0]
1524 
1525  return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1177
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:124
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:1001
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:112
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:864
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:282
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:791
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:818
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:69
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:984
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:879
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:981
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1281
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:986
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:127
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:496
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:52
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:105
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:110
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:817
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:114
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1180
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:538
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1266
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:820
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:129
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:37
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:130
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:871
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:116
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:983
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:115
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:928
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:79
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:800
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:899
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:621
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:895
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:107
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:947
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:128
compareOutputFiles.target
target
Definition: compareOutputFiles.py:489
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:1004
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:926
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:109
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:125
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:868
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:902
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:119
Gaudi::Functional::details::get
auto get(const Handle &handle, const Algo &, const EventContext &) -> decltype(details::deref(handle.get()))
Definition: details.h:440
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:930
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1178
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1362
GaudiTesting.BaseTest.BaseTest.validate_time
validate_time
Definition: BaseTest.py:132
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:943
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1481
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:914
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:131
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:745
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:1002
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:117
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:788
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:929
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:126
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:913
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:822
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1233
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:789
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:123
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1492
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:957
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeExamples)
Definition: BaseTest.py:1176
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:897
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:675
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:118
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:996
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:349
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:735
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:909
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1175
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:33
gaudirun.type
type
Definition: gaudirun.py:162
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:108
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:896
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1182
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:819
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:891
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1179
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:927
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:944
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1430
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:795
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:450
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:104
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:406
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:111
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:342
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1493
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:816
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:910
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:774
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:134
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1211
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:840
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:949
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:507
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:113
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:982
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:916
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:727
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:120
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1453
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:122
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: details.h:98
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:932