Processing math: 100%
The Gaudi Framework  v38r1p1 (ae26267b)
All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Modules Pages
BaseTest.py
Go to the documentation of this file.
1 
11 
import errno
import json
import logging
import os
import platform
import re
import signal
import sys
import tempfile
import threading
import time
from html import escape as escape_for_html
from subprocess import PIPE, STDOUT, Popen
from unittest import TestCase
25 
26 if sys.version_info < (3, 5):
27  # backport of 'backslashreplace' handling of UnicodeDecodeError
28  # to Python < 3.5
29  from codecs import backslashreplace_errors, register_error
30 
32  if isinstance(exc, UnicodeDecodeError):
33  code = hex(ord(exc.object[exc.start]))
34  return ("\\" + code[1:], exc.start + 1)
35  else:
36  return backslashreplace_errors(exc)
37 
38  register_error("backslashreplace", _new_backslashreplace_errors)
39  del register_error
40  del backslashreplace_errors
41  del _new_backslashreplace_errors
42 
# Exit code a test executable can use to signal that the test must be
# reported as "skipped" (checked against the process return code in
# BaseTest.run).
SKIP_RETURN_CODE = 77
44 
45 
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    # characters not allowed in XML 1.0 text: C0 control chars (except
    # tab/newline/carriage-return), surrogates and the U+FFFE/U+FFFF
    # non-characters
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1F\uD800-\uDFFF\uFFFE\uFFFF]")

    def quote(match):
        "helper function"
        # zero-pad with %02X so e.g. chr(1) becomes "0x01" and not "0x 1"
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
61 
62 
def dumpProcs(name):
    """helper to debug GAUDI-1084, dump the list of processes"""
    from getpass import getuser

    # only active on CI nodes, where WORKSPACE points at the job directory
    if "WORKSPACE" not in os.environ:
        return
    listing = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE).communicate()[0]
    dump_path = os.path.join(os.environ["WORKSPACE"], name)
    with open(dump_path, "wb") as dump_file:
        dump_file.write(listing)
71 
72 
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).

    @param ppid: PID of the root of the process tree to signal
    @param sig: signal number to deliver (e.g. signal.SIGTERM)
    """
    log = logging.getLogger("kill_tree")
    ps_cmd = ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)]
    # Note: start in a clean env to avoid a freeze with libasan.so
    # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
    get_children = Popen(ps_cmd, stdout=PIPE, stderr=PIPE, env={})
    children = map(int, get_children.communicate()[0].split())
    # depth-first: signal the leaves before their parent
    for child in children:
        kill_tree(child, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != errno.ESRCH:  # No such process
            raise
        # the process may already be gone (e.g. died with its parent)
        log.debug("no such process %d", ppid)
93 
94 
95 # -------------------------------------------------------------------------#
96 
97 
98 class BaseTest(object):
99  _common_tmpdir = None
100 
101  def __init__(self):
102  self.program = ""
103  self.args = []
104  self.reference = ""
105  self.error_reference = ""
106  self.options = ""
107  self.stderr = ""
108  self.timeout = 600
109  self.exit_code = None
110  self.environment = dict(os.environ)
112  self.signal = None
113  self.workdir = os.curdir
114  self.use_temp_dir = False
115  # Variables not for users
116  self.status = None
117  self.name = ""
118  self.causes = []
119  self.result = Result(self)
120  self.returnedCode = 0
121  self.out = ""
122  self.err = ""
123  self.proc = None
124  self.stack_trace = None
125  self.basedir = os.getcwd()
126  self.validate_time = None
127 
    def run(self):
        """
        Execute the test and validate its output.

        Returns a dictionary of annotations (Status, stdout, Causes, ...)
        describing the outcome; also sets self.status to "passed", "failed"
        or "skipped".
        """
        logging.debug("running test %s", self.name)

        self.result = Result(
            {
                "CAUSE": None,
                "EXCEPTION": None,
                "RESOURCE": None,
                "TARGET": None,
                "TRACEBACK": None,
                "START_TIME": None,
                "END_TIME": None,
                "TIMEOUT_DETAIL": None,
            }
        )

        if self.options:
            # guess the options language: new-style Python job options import
            # Gaudi.Configuration or Configurables, anything else is treated
            # as old-style .opts
            if re.search(
                r"from\s+Gaudi.Configuration\s+import\s+\*|"
                r"from\s+Configurables\s+import",
                self.options,
            ):
                suffix, lang = ".py", "python"
            else:
                suffix, lang = ".opts", "c++"
            self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                lang, escape_for_html(self.options)
            )
            # write the options to a temporary file passed as last argument
            # (the file is removed when optionFile is garbage collected at
            # the end of this method)
            optionFile = tempfile.NamedTemporaryFile(suffix=suffix)
            optionFile.file.write(self.options.encode("utf-8"))
            optionFile.seek(0)
            self.args.append(RationalizePath(optionFile.name))

        platform_id = (
            self.environment.get("BINARY_TAG")
            or self.environment.get("CMTCONFIG")
            or platform.platform()
        )
        # If at least one regex matches we skip the test.
        skip_test = bool(
            [
                None
                for prex in self.unsupported_platforms
                if re.search(prex, platform_id)
            ]
        )

        if not skip_test:
            # handle working/temporary directory options
            workdir = self.workdir
            if self.use_temp_dir:
                if self._common_tmpdir:
                    workdir = self._common_tmpdir
                else:
                    workdir = tempfile.mkdtemp()

            # prepare the command to execute
            prog = ""
            if self.program != "":
                prog = self.program
            elif "GAUDIEXE" in self.environment:
                prog = self.environment["GAUDIEXE"]
            else:
                prog = "Gaudi.exe"

            # ".exe" is appended when the name carries no recognized extension
            prog_ext = os.path.splitext(prog)[1]
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = list(map(RationalizePath, self.args))

            # Python scripts are run through the interpreter explicitly
            if prog_ext == ".py":
                params = ["python3", RationalizePath(prog)] + args
            else:
                params = [RationalizePath(prog)] + args

            # we need to switch directory because the validator expects to run
            # in the same dir as the program
            os.chdir(workdir)

            # launching test in a different thread to handle timeout exception
            def target():
                logging.debug("executing %r in %s", params, workdir)
                self.proc = Popen(
                    params, stdout=PIPE, stderr=PIPE, env=self.environment
                )
                logging.debug("(pid: %d)", self.proc.pid)
                out, err = self.proc.communicate()
                self.out = out.decode("utf-8", errors="backslashreplace")
                self.err = err.decode("utf-8", errors="backslashreplace")

            thread = threading.Thread(target=target)
            thread.start()
            # catching timeout
            thread.join(self.timeout)

            if thread.is_alive():
                logging.debug("time out in test %s (pid %d)", self.name, self.proc.pid)
                # get the stack trace of the stuck process
                cmd = [
                    "gdb",
                    "--pid",
                    str(self.proc.pid),
                    "--batch",
                    "--eval-command=thread apply all backtrace",
                ]
                gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                self.stack_trace = gdb.communicate()[0].decode(
                    "utf-8", errors="backslashreplace"
                )

                # terminate the whole process tree, escalating to SIGKILL if
                # it survives SIGTERM for one more minute
                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)
                self.causes.append("timeout")
            else:
                self.returnedCode = self.proc.returncode
                if self.returnedCode != SKIP_RETURN_CODE:
                    logging.debug(
                        f"completed test {self.name} with returncode = {self.returnedCode}"
                    )
                    logging.debug("validating test...")
                    val_start_time = time.perf_counter()
                    self.result, self.causes = self.ValidateOutput(
                        stdout=self.out, stderr=self.err, result=self.result
                    )
                    self.validate_time = round(time.perf_counter() - val_start_time, 2)
                else:
                    # exit code SKIP_RETURN_CODE means the program itself
                    # asked to be reported as skipped
                    logging.debug(f"skipped test {self.name}")
                    self.status = "skipped"

            # remove the temporary directory if we created it
            if self.use_temp_dir and not self._common_tmpdir:
                shutil.rmtree(workdir, True)

            os.chdir(self.basedir)

            if self.status != "skipped":
                # handle application exit code
                if self.signal is not None:
                    # expected to die from a signal: returncode is -signal
                    if int(self.returnedCode) != -int(self.signal):
                        self.causes.append("exit code")

                elif self.exit_code is not None:
                    if int(self.returnedCode) != int(self.exit_code):
                        self.causes.append("exit code")

                elif self.returnedCode != 0:
                    self.causes.append("exit code")

                if self.causes:
                    self.status = "failed"
                else:
                    self.status = "passed"

        else:
            self.status = "skipped"

        logging.debug("%s: %s", self.name, self.status)
        # map of human-readable report keys to attributes of self; only
        # attributes with a truthy value end up in the report
        field_mapping = {
            "Exit Code": "returnedCode",
            "stderr": "err",
            "Arguments": "args",
            "Runtime Environment": "environment",
            "Status": "status",
            "stdout": "out",
            "Program Name": "program",
            "Name": "name",
            "Validator": "validator",
            "Validation execution time": "validate_time",
            "Output Reference File": "reference",
            "Error Reference File": "error_reference",
            "Causes": "causes",
            # 'Validator Result': 'result.annotations',
            "Unsupported Platforms": "unsupported_platforms",
            "Stack Trace": "stack_trace",
        }
        resultDict = [
            (key, getattr(self, attr))
            for key, attr in field_mapping.items()
            if getattr(self, attr)
        ]
        resultDict.append(
            (
                "Working Directory",
                RationalizePath(os.path.join(os.getcwd(), self.workdir)),
            )
        )
        # print(dict(resultDict).keys())
        resultDict.extend(self.result.annotations.items())
        # print(self.result.annotations.keys())
        resultDict = dict(resultDict)

        # Special cases
        if "Validator" in resultDict:
            resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                "python", escape_for_html(resultDict["Validator"])
            )
        return resultDict
331 
332  # -------------------------------------------------#
333  # ----------------Validating tool------------------#
334  # -------------------------------------------------#
335 
336  def ValidateOutput(self, stdout, stderr, result):
337  if not self.stderr:
338  self.validateWithReference(stdout, stderr, result, self.causes)
339  elif stderr.strip() != self.stderr.strip():
340  self.causes.append("standard error")
341  return result, self.causes
342 
344  self,
345  reference=None,
346  stdout=None,
347  result=None,
348  causes=None,
349  signature_offset=0,
350  signature=None,
351  id=None,
352  ):
353  """
354  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
355  """
356 
357  if reference is None:
358  reference = self.reference
359  if stdout is None:
360  stdout = self.out
361  if result is None:
362  result = self.result
363  if causes is None:
364  causes = self.causes
365 
366  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
367  if not reflines:
368  raise RuntimeError("Empty (or null) reference")
369  # the same on standard output
370  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
371 
372  res_field = "GaudiTest.RefBlock"
373  if id:
374  res_field += "_%s" % id
375 
376  if signature is None:
377  if signature_offset < 0:
378  signature_offset = len(reference) + signature_offset
379  signature = reflines[signature_offset]
380  # find the reference block in the output file
381  try:
382  pos = outlines.index(signature)
383  outlines = outlines[
384  pos - signature_offset : pos + len(reflines) - signature_offset
385  ]
386  if reflines != outlines:
387  msg = "standard output"
388  # I do not want 2 messages in causes if the function is called
389  # twice
390  if msg not in causes:
391  causes.append(msg)
392  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
393  except ValueError:
394  causes.append("missing signature")
395  result[res_field + ".signature"] = result.Quote(signature)
396  if len(reflines) > 1 or signature != reflines[0]:
397  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
398  return causes
399 
401  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
402  ):
403  """
404  Count the number of messages with required severity (by default ERROR and FATAL)
405  and check if their numbers match the expected ones (0 by default).
406  The dictionary "expected" can be used to tune the number of errors and fatals
407  allowed, or to limit the number of expected warnings etc.
408  """
409 
410  if stdout is None:
411  stdout = self.out
412  if result is None:
413  result = self.result
414  if causes is None:
415  causes = self.causes
416 
417  # prepare the dictionary to record the extracted lines
418  errors = {}
419  for sev in expected:
420  errors[sev] = []
421 
422  outlines = stdout.splitlines()
423  from math import log10
424 
425  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
426 
427  linecount = 0
428  for l in outlines:
429  linecount += 1
430  words = l.split()
431  if len(words) >= 2 and words[1] in errors:
432  errors[words[1]].append(fmt % (linecount, l.rstrip()))
433 
434  for e in errors:
435  if len(errors[e]) != expected[e]:
436  causes.append("%s(%d)" % (e, len(errors[e])))
437  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
438  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
439  str(expected[e])
440  )
441 
442  return causes
443 
445  self,
446  stdout=None,
447  result=None,
448  causes=None,
449  trees_dict=None,
450  ignore=r"Basket|.*size|Compression",
451  ):
452  """
453  Compare the TTree summaries in stdout with the ones in trees_dict or in
454  the reference file. By default ignore the size, compression and basket
455  fields.
456  The presence of TTree summaries when none is expected is not a failure.
457  """
458  if stdout is None:
459  stdout = self.out
460  if result is None:
461  result = self.result
462  if causes is None:
463  causes = self.causes
464  if trees_dict is None:
465  lreference = self._expandReferenceFileName(self.reference)
466  # call the validator if the file exists
467  if lreference and os.path.isfile(lreference):
468  trees_dict = findTTreeSummaries(open(lreference).read())
469  else:
470  trees_dict = {}
471 
472  from pprint import PrettyPrinter
473 
474  pp = PrettyPrinter()
475  if trees_dict:
476  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
477  if ignore:
478  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
479 
480  trees = findTTreeSummaries(stdout)
481  failed = cmpTreesDicts(trees_dict, trees, ignore)
482  if failed:
483  causes.append("trees summaries")
484  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
485  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
486  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
487 
488  return causes
489 
491  self, stdout=None, result=None, causes=None, dict=None, ignore=None
492  ):
493  """
494  Compare the TTree summaries in stdout with the ones in trees_dict or in
495  the reference file. By default ignore the size, compression and basket
496  fields.
497  The presence of TTree summaries when none is expected is not a failure.
498  """
499  if stdout is None:
500  stdout = self.out
501  if result is None:
502  result = self.result
503  if causes is None:
504  causes = self.causes
505 
506  if dict is None:
507  lreference = self._expandReferenceFileName(self.reference)
508  # call the validator if the file exists
509  if lreference and os.path.isfile(lreference):
510  dict = findHistosSummaries(open(lreference).read())
511  else:
512  dict = {}
513 
514  from pprint import PrettyPrinter
515 
516  pp = PrettyPrinter()
517  if dict:
518  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
519  if ignore:
520  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
521 
522  histos = findHistosSummaries(stdout)
523  failed = cmpTreesDicts(dict, histos, ignore)
524  if failed:
525  causes.append("histos summaries")
526  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
527  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
528  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
529 
530  return causes
531 
533  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
534  ):
535  """
536  Default validation acti*on: compare standard output and error to the
537  reference files.
538  """
539 
540  if stdout is None:
541  stdout = self.out
542  if stderr is None:
543  stderr = self.err
544  if result is None:
545  result = self.result
546  if causes is None:
547  causes = self.causes
548 
549  # set the default output preprocessor
550  if preproc is None:
551  preproc = normalizeTestSuite
552  # check standard output
553  lreference = self._expandReferenceFileName(self.reference)
554  # call the validator if the file exists
555  if lreference and os.path.isfile(lreference):
556  causes += ReferenceFileValidator(
557  lreference, "standard output", "Output Diff", preproc=preproc
558  )(stdout, result)
559  elif lreference:
560  causes += ["missing reference file"]
561  # Compare TTree summaries
562  causes = self.CheckTTreesSummaries(stdout, result, causes)
563  causes = self.CheckHistosSummaries(stdout, result, causes)
564  if causes and lreference: # Write a new reference file for stdout
565  try:
566  cnt = 0
567  newrefname = ".".join([lreference, "new"])
568  while os.path.exists(newrefname):
569  cnt += 1
570  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
571  newref = open(newrefname, "w")
572  # sanitize newlines
573  for l in stdout.splitlines():
574  newref.write(l.rstrip() + "\n")
575  del newref # flush and close
576  result["New Output Reference File"] = os.path.relpath(
577  newrefname, self.basedir
578  )
579  except IOError:
580  # Ignore IO errors when trying to update reference files
581  # because we may be in a read-only filesystem
582  pass
583 
584  # check standard error
585  lreference = self._expandReferenceFileName(self.error_reference)
586  # call the validator if we have a file to use
587  if lreference:
588  if os.path.isfile(lreference):
589  newcauses = ReferenceFileValidator(
590  lreference, "standard error", "Error Diff", preproc=preproc
591  )(stderr, result)
592  else:
593  newcauses = ["missing error reference file"]
594  causes += newcauses
595  if newcauses and lreference: # Write a new reference file for stdedd
596  cnt = 0
597  newrefname = ".".join([lreference, "new"])
598  while os.path.exists(newrefname):
599  cnt += 1
600  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
601  newref = open(newrefname, "w")
602  # sanitize newlines
603  for l in stderr.splitlines():
604  newref.write(l.rstrip() + "\n")
605  del newref # flush and close
606  result["New Error Reference File"] = os.path.relpath(
607  newrefname, self.basedir
608  )
609  else:
610  causes += BasicOutputValidator(
611  lreference, "standard error", "ExecTest.expected_stderr"
612  )(stderr, result)
613  return causes
614 
616  self,
617  output_file,
618  reference_file,
619  result=None,
620  causes=None,
621  detailed=True,
622  ):
623  """
624  JSON validation action: compare json file to reference file
625  """
626 
627  if result is None:
628  result = self.result
629  if causes is None:
630  causes = self.causes
631 
632  if not os.path.isfile(output_file):
633  causes.append(f"output file {output_file} does not exist")
634  return causes
635 
636  try:
637  with open(output_file) as f:
638  output = json.load(f)
639  except json.JSONDecodeError as err:
640  causes.append("json parser error")
641  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
642  return causes
643 
644  lreference = self._expandReferenceFileName(reference_file)
645  if not lreference:
646  causes.append("reference file not set")
647  elif not os.path.isfile(lreference):
648  causes.append("reference file does not exist")
649  else:
650  causes += JSONOutputValidator()(lreference, output, result, detailed)
651  if causes and lreference: # Write a new reference file for output
652  try:
653  cnt = 0
654  newrefname = ".".join([lreference, "new"])
655  while os.path.exists(newrefname):
656  cnt += 1
657  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
658  with open(newrefname, "w") as newref:
659  json.dump(output, newref, indent=4)
660  result["New JSON Output Reference File"] = os.path.relpath(
661  newrefname, self.basedir
662  )
663  except IOError:
664  # Ignore IO errors when trying to update reference files
665  # because we may be in a read-only filesystem
666  pass
667  return causes
668 
669  def _expandReferenceFileName(self, reffile):
670  # if no file is passed, do nothing
671  if not reffile:
672  return ""
673 
674  # function to split an extension in constituents parts
675  import re
676 
677  def platformSplit(p):
678  return set(re.split(r"[-+]", p))
679 
680  reference = os.path.normpath(
681  os.path.join(self.basedir, os.path.expandvars(reffile))
682  )
683 
684  # old-style platform-specific reference name
685  spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
686  if os.path.isfile(spec_ref):
687  reference = spec_ref
688  else: # look for new-style platform specific reference files:
689  # get all the files whose name start with the reference filename
690  dirname, basename = os.path.split(reference)
691  if not dirname:
692  dirname = "."
693  head = basename + "."
694  head_len = len(head)
695  platform = platformSplit(GetPlatform(self))
696  if "do0" in platform:
697  platform.add("dbg")
698  candidates = []
699  for f in os.listdir(dirname):
700  if f.startswith(head):
701  req_plat = platformSplit(f[head_len:])
702  if platform.issuperset(req_plat):
703  candidates.append((len(req_plat), f))
704  if candidates: # take the one with highest matching
705  # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
706  # has to use ref.x86_64-gcc43 or ref.slc5-dbg
707  candidates.sort()
708  reference = os.path.join(dirname, candidates[-1][1])
709  return reference
710 
711 
712 # ======= GAUDI TOOLS =======
713 
714 import difflib
715 import shutil
716 
717 try:
718  from GaudiKernel import ROOT6WorkAroundEnabled
719 except ImportError:
720 
722  # dummy implementation
723  return False
724 
725 
726 # --------------------------------- TOOLS ---------------------------------#
727 
728 
730  """
731  Function used to normalize the used path
732  """
733  newPath = os.path.normpath(os.path.expandvars(p))
734  if os.path.exists(newPath):
735  p = os.path.realpath(newPath)
736  return p
737 
738 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe' suffix.
    If the executable cannot be found, None is returned.

    @param executable: name or (absolute) path of the program to look for
    @return: full path to the executable, or None if not found
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                # "foo.exe" not found: accept a plain "foo" at the same path
                if os.path.isfile(executable[:-4]):
                    return executable[:-4]
            else:
                # absolute path not found: retry the $PATH search below with
                # just the file name
                executable = os.path.split(executable)[1]
        else:
            return executable
    # default to "" so we do not crash with AttributeError when PATH is unset
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            return fullpath[:-4]
    return None
761 
762 
763 # -------------------------------------------------------------------------#
764 # ----------------------------- Result Classe -----------------------------#
765 # -------------------------------------------------------------------------#
766 
767 
class Result:
    """
    Minimal mapping-like container for the annotations attached to the
    outcome of a test; keys and values are both required to be strings.
    """

    # possible outcomes
    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"

    # default (empty) values for the standard annotation fields
    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        # 'kind', 'id' and 'outcome' are accepted for compatibility but not
        # used here; the annotations are copied so later changes to the
        # caller's dict do not leak into this result
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert isinstance(key, str)
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert isinstance(key, str)
        assert isinstance(value, str), "{!r} is not a string".format(value)
        self.annotations[key] = value

    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return f"<pre>{escape_for_html(text)}</pre>"
799 
800 
801 # -------------------------------------------------------------------------#
802 # --------------------------- Validator Classes ---------------------------#
803 # -------------------------------------------------------------------------#
804 
805 # Basic implementation of an option validator for Gaudi test. This
806 # implementation is based on the standard (LCG) validation functions used
807 # in QMTest.
808 
809 
    def __init__(self, ref, cause, result_key):
        # reference text the output will be compared against
        self.ref = ref
        # failure cause reported when the comparison fails
        self.cause = cause
        # key under which the reference is stored in the result object
        self.result_key = result_key
815 
    def __call__(self, out, result):
        """Validate the output of the program.
        'out' -- A string containing the text to check (e.g. the data written
        to the standard output or standard error stream).
        'result' -- A 'Result' object. It may be used to annotate
        the outcome according to the content of 'out'.
        returns -- A list of strings giving causes of failure."""

        causes = []
        # Check the output; on mismatch record the cause and attach the
        # expected reference text to the result
        if not self.__CompareText(out, self.ref):
            causes.append(self.cause)
            result[self.result_key] = result.Quote(self.ref)

        return causes
833 
834  def __CompareText(self, s1, s2):
835  """Compare 's1' and 's2', ignoring line endings.
836  's1' -- A string.
837  's2' -- A string.
838  returns -- True if 's1' and 's2' are the same, ignoring
839  differences in line endings."""
840  if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
841  # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
842  # can fix them
843  to_ignore = re.compile(
844  r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
845  )
846 
847  def keep_line(l):
848  return not to_ignore.match(l)
849 
850  return list(filter(keep_line, s1.splitlines())) == list(
851  filter(keep_line, s2.splitlines())
852  )
853  else:
854  return s1.splitlines() == s2.splitlines()
855 
856 
857 # ------------------------ Preprocessor elements ------------------------#
859  """Base class for a callable that takes a file and returns a modified
860  version of it."""
861 
    def __processLine__(self, line):
        # identity transformation; subclasses override this to modify a line
        # or drop it (by returning None)
        return line
864 
865  def __processFile__(self, lines):
866  output = []
867  for l in lines:
868  l = self.__processLine__(l)
869  if l:
870  output.append(l)
871  return output
872 
873  def __call__(self, input):
874  if not isinstance(input, str):
875  lines = input
876  mergeback = False
877  else:
878  lines = input.splitlines()
879  mergeback = True
880  output = self.__processFile__(lines)
881  if mergeback:
882  output = "\n".join(output)
883  return output
884 
    def __add__(self, rhs):
        # chaining two preprocessors yields a sequence applying both in order
        return FilePreprocessorSequence([self, rhs])
887 
888 
890  def __init__(self, members=[]):
891  self.members = members
892 
    def __add__(self, rhs):
        # extend the chain with one more preprocessor stage
        return FilePreprocessorSequence(self.members + [rhs])
895 
896  def __call__(self, input):
897  output = input
898  for pp in self.members:
899  output = pp(output)
900  return output
901 
902 
904  def __init__(self, strings=[], regexps=[]):
905  import re
906 
907  self.strings = strings
908  self.regexps = list(map(re.compile, regexps))
909 
910  def __processLine__(self, line):
911  for s in self.strings:
912  if line.find(s) >= 0:
913  return None
914  for r in self.regexps:
915  if r.search(line):
916  return None
917  return line
918 
919 
    def __init__(self, start, end):
        # substrings delimiting the block of lines to suppress
        self.start = start
        self.end = end
        # True while we are inside a block being skipped
        self._skipping = False
925 
    def __processLine__(self, line):
        # the line containing the start marker is dropped and skipping begins
        if self.start in line:
            self._skipping = True
            return None
        # the line containing the end marker stops skipping but is kept
        elif self.end in line:
            self._skipping = False
        elif self._skipping:
            return None
        return line
935 
936 
    def __init__(self, orig, repl="", when=None):
        # 'when' is an optional guard pattern: the substitution is applied
        # only to lines matching it (None or "" means every line)
        if when:
            when = re.compile(when)
        # list of (guard, pattern, replacement) tuples, so that '+' can merge
        # several replacers into a single pass
        self._operations = [(when, re.compile(orig), repl)]
942 
    def __add__(self, rhs):
        # merging two RegexpReplacers concatenates their operation lists
        # (one preprocessor pass instead of two chained ones)
        if isinstance(rhs, RegexpReplacer):
            res = RegexpReplacer("", "", None)
            res._operations = self._operations + rhs._operations
        else:
            # fall back to generic preprocessor chaining
            res = FilePreprocessor.__add__(self, rhs)
        return res
950 
951  def __processLine__(self, line):
952  for w, o, r in self._operations:
953  if w is None or w.search(line):
954  line = o.sub(r, line)
955  return line
956 
957 
# Common preprocessors
# mask pointer-looking hex values, whose actual value changes at every run
maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
# replace "HH:MM:SS YYYY-MM-DD"-style timestamps with a fixed date
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01",
)
# strip trailing whitespace and force a single trailing newline on each line
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"

# drop lines that are empty or contain only whitespace
skipEmptyLines = FilePreprocessor()
# FIXME: that's ugly
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None
970 
971 # Special preprocessor sorting the list of strings (whitespace separated)
972 # that follow a signature on a single line
973 
974 
    def __init__(self, signature):
        # marker after which the (whitespace-separated) items must be sorted
        self.signature = signature
        self.siglen = len(signature)
979 
980  def __processLine__(self, line):
981  pos = line.find(self.signature)
982  if pos >= 0:
983  line = line[: (pos + self.siglen)]
984  lst = line[(pos + self.siglen) :].split()
985  lst.sort()
986  line += " ".join(lst)
987  return line
988 
989 
991  """
992  Sort group of lines matching a regular expression
993  """
994 
    def __init__(self, exp):
        # accept either a pre-compiled pattern (anything with .match) or a
        # pattern string
        self.exp = exp if hasattr(exp, "match") else re.compile(exp)
997 
998  def __processFile__(self, lines):
999  match = self.exp.match
1000  output = []
1001  group = []
1002  for l in lines:
1003  if match(l):
1004  group.append(l)
1005  else:
1006  if group:
1007  group.sort()
1008  output.extend(group)
1009  group = []
1010  output.append(l)
1011  return output
1012 
1013 
# Preprocessors for GaudiTestSuite
normalizeTestSuite = maskPointers + normalizeDate
# Each entry is (guard, pattern, replacement) for a RegexpReplacer: the
# substitution is applied only to lines matching the guard (None = all lines)
for w, o, r in [
    ("TIMER", r"\s+[+-]?[0-9]+[0-9.e+-]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"file \1",
    ),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # Absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # Ignore count of declared properties (anyway they are all printed)
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property 'Name': Value", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
]:
    normalizeTestSuite += RegexpReplacer(o, r, w)
1053 
# Lines to be removed entirely from the output before comparing it with the
# reference: first a list of plain substrings, then regular expressions.
lineSkipper = LineSkipper(
    [
        "//GP:",
        "JobOptionsSvc INFO # ",
        "JobOptionsSvc WARNING # ",
        "Time User",
        "Welcome to",
        "This machine has a speed",
        "running on",
        "ToolSvc.Sequenc... INFO",
        "DataListenerSvc INFO XML written to file:",
        "[INFO]",
        "[WARNING]",
        "DEBUG No writable file catalog found which contains FID:",
        "DEBUG Service base class initialized successfully",
        # changed between v20 and v21
        "DEBUG Incident timing:",
        # introduced with patch #3487
        # changed the level of the message from INFO to
        # DEBUG
        "INFO 'CnvServices':[",
        # message removed because could be printed in constructor
        "DEBUG 'CnvServices':[",
        # The signal handler complains about SIGXCPU not
        # defined on some platforms
        "SIGXCPU",
        # Message removed with redesign of JobOptionsSvc
        "ServiceLocatorHelper::service: found service JobOptionsSvc",
        # Ignore warnings for properties case mismatch
        "mismatching case for property name:",
        # Message demoted to DEBUG in gaudi/Gaudi!992
        "Histograms saving not required.",
        # Message added in gaudi/Gaudi!577
        "Properties are dumped into",
        # Messages changed in gaudi/Gaudi!1426
        "WARNING no ROOT output file name",
        "INFO Writing ROOT histograms to:",
        "INFO Completed update of ROOT histograms in:",
        # absorb changes in data dependencies reports (https://gitlab.cern.ch/gaudi/Gaudi/-/merge_requests/1348)
        "Data Deps for ",
        "data dependencies:",
    ],
    regexps=[
        r"^JobOptionsSvc INFO *$",
        r"^# ",  # Ignore python comments
        # skip the message reporting the version of the root file
        r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
        r"File '.*.xml' does not exist",
        r"INFO Refer to dataset .* by its file ID:",
        r"INFO Referring to dataset .* by its file ID:",
        r"INFO Disconnect from dataset",
        r"INFO Disconnected from dataset",
        r"INFO Disconnected data IO:",
        r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
        # Ignore StatusCodeSvc related messages
        r".*StatusCodeSvc.*",
        r".*StatusCodeCheck.*",
        r"Num\s*\|\s*Function\s*\|\s*Source Library",
        r"^[-+]*\s*$",
        # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
        r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
        # Hide unchecked StatusCodes from dictionaries
        r"^ +[0-9]+ \|.*ROOT",
        r"^ +[0-9]+ \|.*\|.*Dict",
        # Hide EventLoopMgr total timing report
        r"EventLoopMgr.*---> Loop Finished",
        r"HiveSlimEventLo.*---> Loop Finished",
        # Remove ROOT TTree summary table, which changes from one version to the
        # other
        r"^\*.*\*$",
1124  # Remove Histos Summaries
1125  r"SUCCESS\s*Booked \d+ Histogram\‍(s\‍)",
1126  r"^ \|",
1127  r"^ ID=",
1128  # Ignore added/removed properties
1129  r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
1130  r"Property(.*)'Audit(Begin|End)Run':",
1131  # these were missing in tools
1132  r"Property(.*)'AuditRe(start|initialize)':",
1133  r"Property(.*)'Blocking':",
1134  # removed with gaudi/Gaudi!273
1135  r"Property(.*)'ErrorCount(er)?':",
1136  # added with gaudi/Gaudi!306
1137  r"Property(.*)'Sequential':",
1138  # added with gaudi/Gaudi!314
1139  r"Property(.*)'FilterCircularDependencies':",
1140  # removed with gaudi/Gaudi!316
1141  r"Property(.*)'IsClonable':",
1142  # ignore uninteresting/obsolete messages
1143  r"Property update for OutputLevel : new value =",
1144  r"EventLoopMgr\s*DEBUG Creating OutputStream",
1145  ],
1146 )
1147 
# ROOT 6 may warn about rootmap entries being loaded twice; hide those
# messages unless the workaround is explicitly disabled.
if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
    # fix them
    lineSkipper += LineSkipper(
        regexps=[
            r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
        ]
    )
1156 
# Final composite preprocessor: drop uninteresting lines, apply the
# replacements defined above, normalize empty lines and EOLs, and sort the
# line groups whose order is not reproducible between runs.
normalizeTestSuite = (
    lineSkipper
    + normalizeTestSuite
    + skipEmptyLines
    + normalizeEOL
    + LineSorter("Services to release : ")
    + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
)
# for backward compatibility
normalizeExamples = normalizeTestSuite
1167 
1168 # --------------------- Validation functions/classes ---------------------#
1169 
1170 
1172  def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite):
1173  self.reffile = os.path.expandvars(reffile)
1174  self.cause = cause
1175  self.result_key = result_key
1176  self.preproc = preproc
1177 
1178  def __call__(self, stdout, result):
1179  causes = []
1180  if os.path.isfile(self.reffile):
1181  orig = open(self.reffile).readlines()
1182  if self.preproc:
1183  orig = self.preproc(orig)
1184  result[self.result_key + ".preproc.orig"] = result.Quote(
1185  "\n".join(map(str.strip, orig))
1186  )
1187  else:
1188  orig = []
1189  new = stdout.splitlines()
1190  if self.preproc:
1191  new = self.preproc(new)
1192 
1193  # Note: we have to make sure that we do not have `\n` in the comparison
1194  filterdiffs = list(
1195  difflib.unified_diff(
1196  [l.rstrip() for l in orig],
1197  [l.rstrip() for l in new],
1198  n=1,
1199  fromfile="Reference file",
1200  tofile="Actual output",
1201  lineterm="",
1202  )
1203  )
1204  if filterdiffs:
1205  result[self.result_key] = result.Quote("\n".join(filterdiffs))
1206  result[self.result_key + ".preproc.new"] = result.Quote(
1207  "\n".join(map(str.strip, new))
1208  )
1209  causes.append(self.cause)
1210  return causes
1211 
1212 
    """
    Scan stdout to find ROOT TTree summaries and digest them.

    Returns a dict mapping each tree name to the dictionary produced by
    _parseTTreeSummary.
    """
    # a TTree summary table is delimited by lines made only of '*'
    stars = re.compile(r"^\*+$")
    outlines = stdout.splitlines()
    nlines = len(outlines)
    trees = {}

    i = 0
    while i < nlines:  # loop over the output
        # look for the start of the next summary table (a row of stars)
        while i < nlines and not stars.match(outlines[i]):
            i += 1
        if i < nlines:
            # digest the table and jump past it
            tree, i = _parseTTreeSummary(outlines, i)
            if tree:
                trees[tree["Name"]] = tree

    return trees
1233 
1234 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found.

    Returns the list of keys leading to the first differing value (empty when
    the dictionaries are compatible).  A key missing from to_check is added
    to it with value None.
    """
    # select the reference keys to compare, dropping the ignored ones
    if ignore:
        ignored = re.compile(ignore).match
        keys = [key for key in reference if not ignored(key)]
    else:
        keys = reference.keys()
    for key in keys:
        if key not in to_check:
            # missing key counts as a failure; record the absence
            to_check[key] = None
            return [key]
        ref_value, chk_value = reference[key], to_check[key]
        if type(ref_value) is dict and type(chk_value) is dict:
            # both values are plain dicts: recurse and prepend our key
            tail = cmpTreesDicts(ref_value, chk_value, ignore)
            if tail:
                tail.insert(0, key)
                return tail
        elif chk_value != ref_value:
            return [key]
    return []
1266 
1267 
def getCmpFailingValues(reference, to_check, fail_path):
    """
    Walk fail_path (as returned by cmpTreesDicts) into both dictionaries
    and return the tuple (fail_path, reference value, checked value) at the
    point of the first difference.
    """
    ref_value, chk_value = reference, to_check
    for step in fail_path:
        chk_value = chk_value.get(step, None)
        ref_value = ref_value.get(step, None)
        if chk_value is None or ref_value is None:
            # one of the dictionaries is not deep enough
            break
    return (fail_path, ref_value, chk_value)
1277 
1278 
1279 # signature of the print-out of the histograms
1280 h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\‍(s\‍) :\s+([\s\w=-]*)")
1281 
1282 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested informations and the
    position of the first line after the summary.

    The resulting dict carries the tree's Name/Title/Entries/sizes and a
    "Branches" dict keyed by branch name with the same digested fields.
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        # split a table row "*col0 : col1 : col2 *" into its three columns
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        # digest one block of the table (tree header or branch), made of
        # 3 rows, or 4 when the title overflows to its own row
        r = {}
        delta_i = 0
        cols = splitcols(ll[0])

        if len(ll) == 3:
            # default one line name/title
            r["Name"], r["Title"] = cols[1:]
        elif len(ll) == 4:
            # in case title is moved to next line due to too long name
            delta_i = 1
            r["Name"] = cols[1]
            r["Title"] = ll[1].strip("*\n").split("|")[1].strip()
        else:
            assert False

        cols = splitcols(ll[1 + delta_i])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # memory-resident tree/branch: no file size available
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2 + delta_i])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])

        return r

    def nextblock(lines, i):
        # index of the first line of the next block: the next separator row
        # of dots (between branches) or stars (end of the table)
        delta_i = 1
        dots = re.compile(r"^\.+$")
        stars = re.compile(r"^\*+$")
        count = len(lines)
        while (
            i + delta_i < count
            and not dots.match(lines[i + delta_i][1:-1])
            and not stars.match(lines[i + delta_i])
        ):
            delta_i += 1
        return i + delta_i

    if i < (count - 3) and lines[i].startswith("*Tree"):
        i_nextblock = nextblock(lines, i)
        result = parseblock(lines[i:i_nextblock])
        result["Branches"] = {}
        i = i_nextblock + 1
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            i_nextblock = nextblock(lines, i)
            if i_nextblock >= count:
                break
            branch = parseblock(lines[i:i_nextblock])
            result["Branches"][branch["Name"]] = branch
            i = i_nextblock + 1

    return (result, i)
1362 
1363 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.

    Returns a tuple (summary dict, position of the first line after the
    summary block).  The summary maps each directory to a dict of histogram
    type -> {id: details} plus the counters found in the header line.
    """
    global h_count_re
    h_table_head = re.compile(
        r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
    )
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    # the header line ends with "key=value" counters (one per histogram type)
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            t = t.replace(" profile", "Prof")
            pos += 1
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # compact one-line-per-histogram format
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1430 
1431 
    """
    Scan stdout to find histogram summaries and digest them.

    (The code collects the blocks matched by h_count_re and parsed by
    parseHistosSummary, i.e. histogram — not TTree — summaries.)
    Returns a dict of the digested summaries, keyed by directory or name.
    """
    outlines = stdout.splitlines()
    nlines = len(outlines) - 1
    summaries = {}
    global h_count_re

    pos = 0
    while pos < nlines:
        summ = {}
        # find first line of block:
        match = h_count_re.search(outlines[pos])
        while pos < nlines and not match:
            pos += 1
            match = h_count_re.search(outlines[pos])
        if match:
            # digest the block and jump past it
            summ, pos = parseHistosSummary(outlines, pos)
        summaries.update(summ)
    return summaries
1453 
1454 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH,
    falling back on a dummy Id derived from ENV_CMAKE_BUILD_TYPE.
    """
    # the first of these environment variables that is set wins
    for variable in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if variable in os.environ:
            return os.environ[variable]
    build_type = os.environ.get("ENV_CMAKE_BUILD_TYPE", "")
    if build_type in ("Debug", "FastDebug", "Developer"):
        return "dummy-dbg"
    if build_type in (
        "Release",
        "MinSizeRel",
        "RelWithDebInfo",
        "",
    ):  # RelWithDebInfo == -O2 -g -DNDEBUG
        return "dummy-opt"
    return "None"
1481 
1482 
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    # avoid shadowing the module-level 'platform' import
    platform_id = GetPlatform(self)
    return platform_id.startswith("win") or "winxp" in platform_id
1492 
1493 
1495  def __call__(self, ref, out, result, detailed=True):
1496  """Validate JSON output.
1497  returns -- A list of strings giving causes of failure."""
1498 
1499  causes = []
1500  try:
1501  with open(ref) as f:
1502  expected = json.load(f)
1503  except json.JSONDecodeError as err:
1504  causes.append("json parser error")
1505  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1506  return causes
1507 
1508  if not detailed:
1509  if expected != out:
1510  causes.append("json content")
1511  result["json_diff"] = "detailed diff was turned off"
1512  return causes
1513 
1514  # piggyback on TestCase dict diff report
1515  t = TestCase()
1516  # sort both lists (these are list of entities) as the order is not supposed to matter
1517  # indeed, the JSONSink implementation does not garantee any particular order
1518  # but as JSON does not have sets, we get back a sorted list here
1519  expected = sorted(expected, key=lambda item: (item["component"], item["name"]))
1520  out = sorted(out, key=lambda item: (item["component"], item["name"]))
1521  try:
1522  t.assertEqual(expected, out)
1523  except AssertionError as err:
1524  causes.append("json content")
1525  result["json_diff"] = str(err).splitlines()[0]
1526 
1527  return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1173
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:118
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:995
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:106
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:858
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:282
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:785
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:812
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:63
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:978
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:873
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:975
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1283
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:980
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:121
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:490
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:46
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:99
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:104
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:811
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:108
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1176
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:532
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1268
GaudiPartProp.decorators.get
get
decorate the vector of properties
Definition: decorators.py:282
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:814
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:123
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:31
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:124
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:865
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:110
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:977
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:109
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:922
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:73
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:794
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:893
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:615
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:889
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:101
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:941
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:122
compareOutputFiles.target
target
Definition: compareOutputFiles.py:488
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:998
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:920
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:103
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:119
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:862
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:896
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:113
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:924
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1174
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1364
GaudiTesting.BaseTest.BaseTest.validate_time
validate_time
Definition: BaseTest.py:126
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:937
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1483
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:908
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:125
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:739
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:996
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:111
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:782
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:923
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:120
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:907
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:816
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1235
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:783
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:117
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1494
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:951
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:891
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:669
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:112
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:990
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:343
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite)
Definition: BaseTest.py:1172
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:729
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:903
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1171
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:32
gaudirun.type
type
Definition: gaudirun.py:160
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:102
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:890
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1178
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:813
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:885
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1175
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:921
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:938
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1432
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:789
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:444
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:98
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:400
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:105
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:336
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1495
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:810
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:904
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:768
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:128
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1213
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:834
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:943
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:506
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:107
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:976
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:910
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:721
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:114
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1455
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:116
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: details.h:98
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:926