The Gaudi Framework  v38r3 (c3fc9673)
BaseTest.py
Go to the documentation of this file.
1 
11 
12 import json
13 import logging
14 import os
15 import platform
16 import re
17 import signal
18 import sys
19 import threading
20 import time
21 from datetime import datetime, timedelta
22 from html import escape as escape_for_html
23 from subprocess import PIPE, STDOUT, Popen
24 from tempfile import NamedTemporaryFile, mkdtemp
25 from unittest import TestCase
26 
if sys.version_info < (3, 5):
    # backport of 'backslashreplace' handling of UnicodeDecodeError
    # to Python < 3.5 (before 3.5 the built-in handler only covered
    # encode errors)
    from codecs import backslashreplace_errors, register_error

    def _new_backslashreplace_errors(exc):
        """Replace the undecodable byte with '\\xNN' and resume decoding."""
        if isinstance(exc, UnicodeDecodeError):
            # hex() yields '0xNN'; dropping the leading '0' gives 'xNN'
            code = hex(ord(exc.object[exc.start]))
            return ("\\" + code[1:], exc.start + 1)
        else:
            # delegate encode errors to the original handler
            return backslashreplace_errors(exc)

    register_error("backslashreplace", _new_backslashreplace_errors)
    # do not leak the helper names at module level
    del register_error
    del backslashreplace_errors
    del _new_backslashreplace_errors
43 
44 SKIP_RETURN_CODE = 77
45 
46 # default of 100MB
47 OUTPUT_LIMIT = int(os.environ.get("GAUDI_TEST_STDOUT_LIMIT", 100 * 1024**2))
48 
49 
def sanitize_for_xml(data):
    """
    Take a string with invalid ASCII/UTF characters and quote them so that the
    string can be used in an XML text.

    >>> sanitize_for_xml('this is \x1b')
    'this is [NON-XML-CHAR-0x1B]'
    """
    # control chars, surrogates and the non-characters U+FFFE/U+FFFF are
    # forbidden in XML 1.0 text
    bad_chars = re.compile("[\x00-\x08\x0b\x0c\x0e-\x1f\ud800-\udfff\ufffe\uffff]")

    def quote(match):
        "helper function"
        # zero-padded width: '%2X' would render \x0b as '0x B' instead of
        # the '0x0B' format promised by the docstring
        return "".join("[NON-XML-CHAR-0x%02X]" % ord(c) for c in match.group())

    return bad_chars.sub(quote, data)
65 
66 
def dumpProcs(name):
    """helper to debug GAUDI-1084, dump the list of processes"""
    from getpass import getuser

    if "WORKSPACE" in os.environ:
        # full-format, hierarchical listing of the current user's processes
        ps = Popen(["ps", "-fH", "-U", getuser()], stdout=PIPE)
        target = os.path.join(os.environ["WORKSPACE"], name)
        with open(target, "wb") as dump_file:
            dump_file.write(ps.communicate()[0])
75 
76 
def kill_tree(ppid, sig):
    """
    Send a signal to a process and all its child processes (starting from the
    leaves).
    """
    log = logging.getLogger("kill_tree")
    # Note: start in a clean env to avoid a freeze with libasan.so
    # See https://sourceware.org/bugzilla/show_bug.cgi?id=27653
    ps = Popen(
        ["ps", "--no-headers", "-o", "pid", "--ppid", str(ppid)],
        stdout=PIPE,
        stderr=PIPE,
        env={},
    )
    # recurse depth-first so leaves are signalled before their parents
    for child_pid in (int(tok) for tok in ps.communicate()[0].split()):
        kill_tree(child_pid, sig)
    try:
        log.debug("killing process %d", ppid)
        os.kill(ppid, sig)
    except OSError as err:
        if err.errno != 3:  # No such process
            raise
        log.debug("no such process %d", ppid)
97 
98 
99 # -------------------------------------------------------------------------#
100 
101 
102 class BaseTest(object):
103  _common_tmpdir = None
104 
105  def __init__(self):
106  self.program = ""
107  self.args = []
108  self.reference = ""
109  self.error_reference = ""
110  self.options = ""
111  self.stderr = ""
112  self.timeout = 600
113  self.exit_code = None
114  self.environment = dict(os.environ)
116  self.signal = None
117  self.workdir = os.curdir
118  self.use_temp_dir = False
119  # Variables not for users
120  self.status = None
121  self.name = ""
122  self.causes = []
123  self.result = Result(self)
124  self.returnedCode = 0
125  self.out = ""
126  self.err = ""
127  self.proc = None
128  self.stack_trace = None
129  self.basedir = os.getcwd()
130  self.validate_time = None
131 
    def run(self):
        """
        Execute the test program (in a worker thread, with timeout and
        output-size watchdogs), validate its output and return a dictionary
        describing the outcome.
        """
        logging.debug("running test %s", self.name)

        # fresh result object; note these keys are passed as the 'kind'
        # positional argument of Result, not as annotations
        self.result = Result(
            {
                "CAUSE": None,
                "EXCEPTION": None,
                "RESOURCE": None,
                "TARGET": None,
                "TRACEBACK": None,
                "START_TIME": None,
                "END_TIME": None,
                "TIMEOUT_DETAIL": None,
            }
        )

        if self.options:
            # guess the language of the inline options: new-style Python
            # configurables vs. old-style .opts
            if re.search(
                r"from\s+Gaudi.Configuration\s+import\s+\*|"
                r"from\s+Configurables\s+import",
                self.options,
            ):
                suffix, lang = ".py", "python"
            else:
                suffix, lang = ".opts", "c++"
            self.result["Options"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                lang, escape_for_html(self.options)
            )
            # write the options to a temporary file passed as extra argument
            optionFile = NamedTemporaryFile(suffix=suffix)
            optionFile.file.write(self.options.encode("utf-8"))
            optionFile.seek(0)
            self.args.append(RationalizePath(optionFile.name))

        platform_id = (
            self.environment.get("BINARY_TAG")
            or self.environment.get("CMTCONFIG")
            or platform.platform()
        )
        # If at least one regex matches we skip the test.
        skip_test = bool(
            [
                None
                for prex in self.unsupported_platforms
                if re.search(prex, platform_id)
            ]
        )

        if not skip_test:
            # handle working/temporary directory options
            workdir = self.workdir
            if self.use_temp_dir:
                if self._common_tmpdir:
                    workdir = self._common_tmpdir
                else:
                    workdir = mkdtemp()

            # prepare the command to execute
            prog = ""
            if self.program != "":
                prog = self.program
            elif "GAUDIEXE" in self.environment:
                prog = self.environment["GAUDIEXE"]
            else:
                prog = "Gaudi.exe"

            prog_ext = os.path.splitext(prog)[1]
            if prog_ext not in [".exe", ".py", ".bat"]:
                prog += ".exe"
                prog_ext = ".exe"

            prog = which(prog) or prog

            args = list(map(RationalizePath, self.args))

            if prog_ext == ".py":
                params = ["python3", RationalizePath(prog)] + args
            else:
                params = [RationalizePath(prog)] + args

            # we need to switch directory because the validator expects to run
            # in the same dir as the program
            os.chdir(workdir)

            # capture stdout/stderr through files so their size can be
            # monitored while the process runs
            tmp_streams = {
                "stdout": NamedTemporaryFile(),
                "stderr": NamedTemporaryFile(),
            }

            # launching test in a different thread to handle timeout exception
            def target():
                logging.debug("executing %r in %s", params, workdir)
                self.proc = Popen(
                    params,
                    stdout=tmp_streams["stdout"],
                    stderr=tmp_streams["stderr"],
                    env=self.environment,
                )
                logging.debug("(pid: %d)", self.proc.pid)
                self.proc.communicate()
                tmp_streams["stdout"].seek(0)
                self.out = (
                    tmp_streams["stdout"]
                    .read()
                    .decode("utf-8", errors="backslashreplace")
                )
                tmp_streams["stderr"].seek(0)
                self.err = (
                    tmp_streams["stderr"]
                    .read()
                    .decode("utf-8", errors="backslashreplace")
                )

            thread = threading.Thread(target=target)
            thread.start()
            # checking for timeout and stdout/err cutoff
            when_to_stop = datetime.now() + timedelta(seconds=self.timeout)
            too_big_stream = None
            while (
                datetime.now() < when_to_stop
                and thread.is_alive()
                and not too_big_stream
            ):
                # we check stdout and stderr size a few times per second
                thread.join(0.1)
                # if we are done, there is no need to check output size
                if thread.is_alive():
                    for stream in tmp_streams:
                        if os.path.getsize(tmp_streams[stream].name) > OUTPUT_LIMIT:
                            too_big_stream = stream

            if thread.is_alive():
                # the process is still running: it timed out or produced too
                # much output; record the cause, then kill the whole tree
                if not too_big_stream:
                    logging.debug(
                        "time out in test %s (pid %d)", self.name, self.proc.pid
                    )
                    # get the stack trace of the stuck process
                    cmd = [
                        "gdb",
                        "--pid",
                        str(self.proc.pid),
                        "--batch",
                        "--eval-command=thread apply all backtrace",
                    ]
                    gdb = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
                    self.stack_trace = gdb.communicate()[0].decode(
                        "utf-8", errors="backslashreplace"
                    )
                    self.causes.append("timeout")
                else:
                    logging.debug(
                        "too big %s detected (pid %d)", too_big_stream, self.proc.pid
                    )
                    self.result[f"{too_big_stream} limit"] = str(OUTPUT_LIMIT)
                    self.result[f"{too_big_stream} size"] = str(
                        os.path.getsize(tmp_streams[too_big_stream].name)
                    )
                    self.causes.append(f"too big {too_big_stream}")

                # SIGTERM first; escalate to SIGKILL if still alive after 60s
                kill_tree(self.proc.pid, signal.SIGTERM)
                thread.join(60)
                if thread.is_alive():
                    kill_tree(self.proc.pid, signal.SIGKILL)

            else:
                self.returnedCode = self.proc.returncode
                if self.returnedCode != SKIP_RETURN_CODE:
                    logging.debug(
                        f"completed test {self.name} with returncode = {self.returnedCode}"
                    )
                    logging.debug("validating test...")
                    val_start_time = time.perf_counter()
                    self.result, self.causes = self.ValidateOutput(
                        stdout=self.out, stderr=self.err, result=self.result
                    )
                    self.validate_time = round(time.perf_counter() - val_start_time, 2)
                else:
                    # exit code 77 is the conventional "skip me" return code
                    logging.debug(f"skipped test {self.name}")
                    self.status = "skipped"

            # remove the temporary directory if we created it
            if self.use_temp_dir and not self._common_tmpdir:
                shutil.rmtree(workdir, True)

            os.chdir(self.basedir)

            if self.status != "skipped":
                # handle application exit code: an expected signal takes
                # precedence over an expected exit code, which takes
                # precedence over the default "must be 0"
                if self.signal is not None:
                    if int(self.returnedCode) != -int(self.signal):
                        self.causes.append("exit code")

                elif self.exit_code is not None:
                    if int(self.returnedCode) != int(self.exit_code):
                        self.causes.append("exit code")

                elif self.returnedCode != 0:
                    self.causes.append("exit code")

                if self.causes:
                    self.status = "failed"
                else:
                    self.status = "passed"

        else:
            self.status = "skipped"

        logging.debug("%s: %s", self.name, self.status)
        # map report labels to instance attributes; only truthy values are
        # included in the report
        field_mapping = {
            "Exit Code": "returnedCode",
            "stderr": "err",
            "Arguments": "args",
            "Runtime Environment": "environment",
            "Status": "status",
            "stdout": "out",
            "Program Name": "program",
            "Name": "name",
            # NOTE(review): 'validator' is not set by BaseTest.__init__ in
            # this view — presumably provided by subclasses; confirm
            "Validator": "validator",
            "Validation execution time": "validate_time",
            "Output Reference File": "reference",
            "Error Reference File": "error_reference",
            "Causes": "causes",
            # 'Validator Result': 'result.annotations',
            "Unsupported Platforms": "unsupported_platforms",
            "Stack Trace": "stack_trace",
        }
        resultDict = [
            (key, getattr(self, attr))
            for key, attr in field_mapping.items()
            if getattr(self, attr)
        ]
        resultDict.append(
            (
                "Working Directory",
                RationalizePath(os.path.join(os.getcwd(), self.workdir)),
            )
        )
        # print(dict(resultDict).keys())
        resultDict.extend(self.result.annotations.items())
        # print(self.result.annotations.keys())
        resultDict = dict(resultDict)

        # Special cases
        if "Validator" in resultDict:
            resultDict["Validator"] = '<code lang="{}"><pre>{}</pre></code>'.format(
                "python", escape_for_html(resultDict["Validator"])
            )
        return resultDict
379 
380  # -------------------------------------------------#
381  # ----------------Validating tool------------------#
382  # -------------------------------------------------#
383 
384  def ValidateOutput(self, stdout, stderr, result):
385  if not self.stderr:
386  self.validateWithReference(stdout, stderr, result, self.causes)
387  elif stderr.strip() != self.stderr.strip():
388  self.causes.append("standard error")
389  return result, self.causes
390 
392  self,
393  reference=None,
394  stdout=None,
395  result=None,
396  causes=None,
397  signature_offset=0,
398  signature=None,
399  id=None,
400  ):
401  """
402  Given a block of text, tries to find it in the output. The block had to be identified by a signature line. By default, the first line is used as signature, or the line pointed to by signature_offset. If signature_offset points outside the block, a signature line can be passed as signature argument. Note: if 'signature' is None (the default), a negative signature_offset is interpreted as index in a list (e.g. -1 means the last line), otherwise the it is interpreted as the number of lines before the first one of the block the signature must appear. The parameter 'id' allow to distinguish between different calls to this function in the same validation code.
403  """
404 
405  if reference is None:
406  reference = self.reference
407  if stdout is None:
408  stdout = self.out
409  if result is None:
410  result = self.result
411  if causes is None:
412  causes = self.causes
413 
414  reflines = list(filter(None, map(lambda s: s.rstrip(), reference.splitlines())))
415  if not reflines:
416  raise RuntimeError("Empty (or null) reference")
417  # the same on standard output
418  outlines = list(filter(None, map(lambda s: s.rstrip(), stdout.splitlines())))
419 
420  res_field = "GaudiTest.RefBlock"
421  if id:
422  res_field += "_%s" % id
423 
424  if signature is None:
425  if signature_offset < 0:
426  signature_offset = len(reference) + signature_offset
427  signature = reflines[signature_offset]
428  # find the reference block in the output file
429  try:
430  pos = outlines.index(signature)
431  outlines = outlines[
432  pos - signature_offset : pos + len(reflines) - signature_offset
433  ]
434  if reflines != outlines:
435  msg = "standard output"
436  # I do not want 2 messages in causes if the function is called
437  # twice
438  if msg not in causes:
439  causes.append(msg)
440  result[res_field + ".observed"] = result.Quote("\n".join(outlines))
441  except ValueError:
442  causes.append("missing signature")
443  result[res_field + ".signature"] = result.Quote(signature)
444  if len(reflines) > 1 or signature != reflines[0]:
445  result[res_field + ".expected"] = result.Quote("\n".join(reflines))
446  return causes
447 
449  self, expected={"ERROR": 0, "FATAL": 0}, stdout=None, result=None, causes=None
450  ):
451  """
452  Count the number of messages with required severity (by default ERROR and FATAL)
453  and check if their numbers match the expected ones (0 by default).
454  The dictionary "expected" can be used to tune the number of errors and fatals
455  allowed, or to limit the number of expected warnings etc.
456  """
457 
458  if stdout is None:
459  stdout = self.out
460  if result is None:
461  result = self.result
462  if causes is None:
463  causes = self.causes
464 
465  # prepare the dictionary to record the extracted lines
466  errors = {}
467  for sev in expected:
468  errors[sev] = []
469 
470  outlines = stdout.splitlines()
471  from math import log10
472 
473  fmt = "%%%dd - %%s" % (int(log10(len(outlines) + 1)))
474 
475  linecount = 0
476  for l in outlines:
477  linecount += 1
478  words = l.split()
479  if len(words) >= 2 and words[1] in errors:
480  errors[words[1]].append(fmt % (linecount, l.rstrip()))
481 
482  for e in errors:
483  if len(errors[e]) != expected[e]:
484  causes.append("%s(%d)" % (e, len(errors[e])))
485  result["GaudiTest.lines.%s" % e] = result.Quote("\n".join(errors[e]))
486  result["GaudiTest.lines.%s.expected#" % e] = result.Quote(
487  str(expected[e])
488  )
489 
490  return causes
491 
493  self,
494  stdout=None,
495  result=None,
496  causes=None,
497  trees_dict=None,
498  ignore=r"Basket|.*size|Compression",
499  ):
500  """
501  Compare the TTree summaries in stdout with the ones in trees_dict or in
502  the reference file. By default ignore the size, compression and basket
503  fields.
504  The presence of TTree summaries when none is expected is not a failure.
505  """
506  if stdout is None:
507  stdout = self.out
508  if result is None:
509  result = self.result
510  if causes is None:
511  causes = self.causes
512  if trees_dict is None:
513  lreference = self._expandReferenceFileName(self.reference)
514  # call the validator if the file exists
515  if lreference and os.path.isfile(lreference):
516  trees_dict = findTTreeSummaries(open(lreference).read())
517  else:
518  trees_dict = {}
519 
520  from pprint import PrettyPrinter
521 
522  pp = PrettyPrinter()
523  if trees_dict:
524  result["GaudiTest.TTrees.expected"] = result.Quote(pp.pformat(trees_dict))
525  if ignore:
526  result["GaudiTest.TTrees.ignore"] = result.Quote(ignore)
527 
528  trees = findTTreeSummaries(stdout)
529  failed = cmpTreesDicts(trees_dict, trees, ignore)
530  if failed:
531  causes.append("trees summaries")
532  msg = "%s: %s != %s" % getCmpFailingValues(trees_dict, trees, failed)
533  result["GaudiTest.TTrees.failure_on"] = result.Quote(msg)
534  result["GaudiTest.TTrees.found"] = result.Quote(pp.pformat(trees))
535 
536  return causes
537 
539  self, stdout=None, result=None, causes=None, dict=None, ignore=None
540  ):
541  """
542  Compare the TTree summaries in stdout with the ones in trees_dict or in
543  the reference file. By default ignore the size, compression and basket
544  fields.
545  The presence of TTree summaries when none is expected is not a failure.
546  """
547  if stdout is None:
548  stdout = self.out
549  if result is None:
550  result = self.result
551  if causes is None:
552  causes = self.causes
553 
554  if dict is None:
555  lreference = self._expandReferenceFileName(self.reference)
556  # call the validator if the file exists
557  if lreference and os.path.isfile(lreference):
558  dict = findHistosSummaries(open(lreference).read())
559  else:
560  dict = {}
561 
562  from pprint import PrettyPrinter
563 
564  pp = PrettyPrinter()
565  if dict:
566  result["GaudiTest.Histos.expected"] = result.Quote(pp.pformat(dict))
567  if ignore:
568  result["GaudiTest.Histos.ignore"] = result.Quote(ignore)
569 
570  histos = findHistosSummaries(stdout)
571  failed = cmpTreesDicts(dict, histos, ignore)
572  if failed:
573  causes.append("histos summaries")
574  msg = "%s: %s != %s" % getCmpFailingValues(dict, histos, failed)
575  result["GaudiTest.Histos.failure_on"] = result.Quote(msg)
576  result["GaudiTest.Histos.found"] = result.Quote(pp.pformat(histos))
577 
578  return causes
579 
581  self, stdout=None, stderr=None, result=None, causes=None, preproc=None
582  ):
583  """
584  Default validation acti*on: compare standard output and error to the
585  reference files.
586  """
587 
588  if stdout is None:
589  stdout = self.out
590  if stderr is None:
591  stderr = self.err
592  if result is None:
593  result = self.result
594  if causes is None:
595  causes = self.causes
596 
597  # set the default output preprocessor
598  if preproc is None:
599  preproc = normalizeTestSuite
600  # check standard output
601  lreference = self._expandReferenceFileName(self.reference)
602  # call the validator if the file exists
603  if lreference and os.path.isfile(lreference):
604  causes += ReferenceFileValidator(
605  lreference, "standard output", "Output Diff", preproc=preproc
606  )(stdout, result)
607  elif lreference:
608  causes += ["missing reference file"]
609  # Compare TTree summaries
610  causes = self.CheckTTreesSummaries(stdout, result, causes)
611  causes = self.CheckHistosSummaries(stdout, result, causes)
612  if causes and lreference: # Write a new reference file for stdout
613  try:
614  cnt = 0
615  newrefname = ".".join([lreference, "new"])
616  while os.path.exists(newrefname):
617  cnt += 1
618  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
619  newref = open(newrefname, "w")
620  # sanitize newlines
621  for l in stdout.splitlines():
622  newref.write(l.rstrip() + "\n")
623  del newref # flush and close
624  result["New Output Reference File"] = os.path.relpath(
625  newrefname, self.basedir
626  )
627  except IOError:
628  # Ignore IO errors when trying to update reference files
629  # because we may be in a read-only filesystem
630  pass
631 
632  # check standard error
633  lreference = self._expandReferenceFileName(self.error_reference)
634  # call the validator if we have a file to use
635  if lreference:
636  if os.path.isfile(lreference):
637  newcauses = ReferenceFileValidator(
638  lreference, "standard error", "Error Diff", preproc=preproc
639  )(stderr, result)
640  else:
641  newcauses = ["missing error reference file"]
642  causes += newcauses
643  if newcauses and lreference: # Write a new reference file for stdedd
644  cnt = 0
645  newrefname = ".".join([lreference, "new"])
646  while os.path.exists(newrefname):
647  cnt += 1
648  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
649  newref = open(newrefname, "w")
650  # sanitize newlines
651  for l in stderr.splitlines():
652  newref.write(l.rstrip() + "\n")
653  del newref # flush and close
654  result["New Error Reference File"] = os.path.relpath(
655  newrefname, self.basedir
656  )
657  else:
658  causes += BasicOutputValidator(
659  lreference, "standard error", "ExecTest.expected_stderr"
660  )(stderr, result)
661  return causes
662 
664  self,
665  output_file,
666  reference_file,
667  result=None,
668  causes=None,
669  detailed=True,
670  ):
671  """
672  JSON validation action: compare json file to reference file
673  """
674 
675  if result is None:
676  result = self.result
677  if causes is None:
678  causes = self.causes
679 
680  if not os.path.isfile(output_file):
681  causes.append(f"output file {output_file} does not exist")
682  return causes
683 
684  try:
685  with open(output_file) as f:
686  output = json.load(f)
687  except json.JSONDecodeError as err:
688  causes.append("json parser error")
689  result["output_parse_error"] = f"json parser error in {output_file}: {err}"
690  return causes
691 
692  lreference = self._expandReferenceFileName(reference_file)
693  if not lreference:
694  causes.append("reference file not set")
695  elif not os.path.isfile(lreference):
696  causes.append("reference file does not exist")
697  else:
698  causes += JSONOutputValidator()(lreference, output, result, detailed)
699  if causes and lreference: # Write a new reference file for output
700  try:
701  cnt = 0
702  newrefname = ".".join([lreference, "new"])
703  while os.path.exists(newrefname):
704  cnt += 1
705  newrefname = ".".join([lreference, "~%d~" % cnt, "new"])
706  with open(newrefname, "w") as newref:
707  json.dump(output, newref, indent=4)
708  result["New JSON Output Reference File"] = os.path.relpath(
709  newrefname, self.basedir
710  )
711  except IOError:
712  # Ignore IO errors when trying to update reference files
713  # because we may be in a read-only filesystem
714  pass
715  return causes
716 
    def _expandReferenceFileName(self, reffile):
        """
        Resolve a reference file name to an absolute path, preferring a
        platform-specific variant of the file when one exists.
        Returns "" when no reference file is configured.
        """
        # if no file is passed, do nothing
        if not reffile:
            return ""

        # function to split an extension in constituents parts
        import re

        def platformSplit(p):
            # platform ids look like 'arch-os-compiler-build' (or '+'-joined)
            return set(re.split(r"[-+]", p))

        reference = os.path.normpath(
            os.path.join(self.basedir, os.path.expandvars(reffile))
        )

        # old-style platform-specific reference name
        # (inserts the first 3 chars of the platform before the extension)
        spec_ref = reference[:-3] + GetPlatform(self)[0:3] + reference[-3:]
        if os.path.isfile(spec_ref):
            reference = spec_ref
        else:  # look for new-style platform specific reference files:
            # get all the files whose name start with the reference filename
            dirname, basename = os.path.split(reference)
            if not dirname:
                dirname = "."
            head = basename + "."
            head_len = len(head)
            # NOTE: local name shadows the 'platform' module (harmless here)
            platform = platformSplit(GetPlatform(self))
            if "do0" in platform:
                platform.add("dbg")
            candidates = []
            for f in os.listdir(dirname):
                if f.startswith(head):
                    req_plat = platformSplit(f[head_len:])
                    # keep candidates whose required tags are all satisfied
                    if platform.issuperset(req_plat):
                        candidates.append((len(req_plat), f))
            if candidates:  # take the one with highest matching
                # FIXME: it is not possible to say if x86_64-slc5-gcc43-dbg
                # has to use ref.x86_64-gcc43 or ref.slc5-dbg
                candidates.sort()
                reference = os.path.join(dirname, candidates[-1][1])
        return reference
758 
759 
760 # ======= GAUDI TOOLS =======
761 
762 import difflib
763 import shutil
764 
try:
    from GaudiKernel import ROOT6WorkAroundEnabled
except ImportError:

    def ROOT6WorkAroundEnabled(id):
        """Fallback used when GaudiKernel is not available."""
        # dummy implementation
        return False
772 
773 
774 # --------------------------------- TOOLS ---------------------------------#
775 
776 
778  """
779  Function used to normalize the used path
780  """
781  newPath = os.path.normpath(os.path.expandvars(p))
782  if os.path.exists(newPath):
783  p = os.path.realpath(newPath)
784  return p
785 
786 
def which(executable):
    """
    Locates an executable in the executables path ($PATH) and returns the full
    path to it. An application is looked for with or without the '.exe' suffix.
    If the executable cannot be found, None is returned
    """
    if os.path.isabs(executable):
        if not os.path.isfile(executable):
            if executable.endswith(".exe"):
                if os.path.isfile(executable[:-4]):
                    return executable[:-4]
            else:
                # fall back to searching the basename in $PATH
                executable = os.path.split(executable)[1]
        else:
            return executable
    # default to "" so an unset $PATH does not raise AttributeError
    for d in os.environ.get("PATH", "").split(os.pathsep):
        fullpath = os.path.join(d, executable)
        if os.path.isfile(fullpath):
            return fullpath
        elif executable.endswith(".exe") and os.path.isfile(fullpath[:-4]):
            return fullpath[:-4]
    return None
809 
810 
811 # -------------------------------------------------------------------------#
812 # ----------------------------- Result Classe -----------------------------#
813 # -------------------------------------------------------------------------#
814 
815 
class Result:
    """
    Minimal mapping-like container for the annotations attached to a test
    outcome. Only string keys and string values are accepted.
    """

    PASS = "PASS"
    FAIL = "FAIL"
    ERROR = "ERROR"
    UNTESTED = "UNTESTED"

    EXCEPTION = ""
    RESOURCE = ""
    TARGET = ""
    TRACEBACK = ""
    START_TIME = ""
    END_TIME = ""
    TIMEOUT_DETAIL = ""

    def __init__(self, kind=None, id=None, outcome=PASS, annotations={}):
        # keep a private copy so later mutations never leak into the caller's
        # dictionary (or into the shared default argument); 'kind', 'id' and
        # 'outcome' are accepted for backward compatibility but unused
        self.annotations = annotations.copy()

    def __getitem__(self, key):
        assert isinstance(key, str)
        return self.annotations[key]

    def __setitem__(self, key, value):
        assert isinstance(key, str)
        assert isinstance(value, str), "{!r} is not a string".format(value)
        self.annotations[key] = value

    def Quote(self, text):
        """
        Convert text to html by escaping special chars and adding <pre> tags.
        """
        return "<pre>{}</pre>".format(escape_for_html(text))
847 
848 
849 # -------------------------------------------------------------------------#
850 # --------------------------- Validator Classes ---------------------------#
851 # -------------------------------------------------------------------------#
852 
853 # Basic implementation of an option validator for Gaudi test. This
854 # implementation is based on the standard (LCG) validation functions used
855 # in QMTest.
856 
857 
class BasicOutputValidator:
    """Compare a stream of the program with a fixed expected text."""

    def __init__(self, ref, cause, result_key):
        self.ref = ref  # expected text
        self.cause = cause  # cause recorded on mismatch
        self.result_key = result_key  # annotation key for the expected text

    def __call__(self, out, result):
        """Validate the output of the program.
        'out' -- A string containing the data written to the stream under
        check.
        'result' -- A 'Result' object. It may be used to annotate
        the outcome according to the content of stderr.
        returns -- A list of strings giving causes of failure."""

        causes = []
        # Check the output
        if not self.__CompareText(out, self.ref):
            causes.append(self.cause)
            result[self.result_key] = result.Quote(self.ref)

        return causes

    def __CompareText(self, s1, s2):
        """Compare 's1' and 's2', ignoring line endings.
        's1' -- A string.
        's2' -- A string.
        returns -- True if 's1' and 's2' are the same, ignoring
        differences in line endings."""
        if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
            # FIXME: (MCl) Hide warnings from new rootmap sanity check until we
            # can fix them
            to_ignore = re.compile(
                r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*"
            )

            def keep_line(l):
                return not to_ignore.match(l)

            return list(filter(keep_line, s1.splitlines())) == list(
                filter(keep_line, s2.splitlines())
            )
        else:
            return s1.splitlines() == s2.splitlines()
903 
904 
905 # ------------------------ Preprocessor elements ------------------------#
907  """Base class for a callable that takes a file and returns a modified
908  version of it."""
909 
910  def __processLine__(self, line):
911  return line
912 
913  def __processFile__(self, lines):
914  output = []
915  for l in lines:
916  l = self.__processLine__(l)
917  if l:
918  output.append(l)
919  return output
920 
921  def __call__(self, input):
922  if not isinstance(input, str):
923  lines = input
924  mergeback = False
925  else:
926  lines = input.splitlines()
927  mergeback = True
928  output = self.__processFile__(lines)
929  if mergeback:
930  output = "\n".join(output)
931  return output
932 
933  def __add__(self, rhs):
934  return FilePreprocessorSequence([self, rhs])
935 
936 
class FilePreprocessorSequence(FilePreprocessor):
    """Compose several preprocessors: apply each member in order."""

    def __init__(self, members=[]):
        self.members = members

    def __add__(self, rhs):
        return FilePreprocessorSequence(self.members + [rhs])

    def __call__(self, input):
        output = input
        for pp in self.members:
            output = pp(output)
        return output
949 
950 
class LineSkipper(FilePreprocessor):
    """Drop lines containing any of the given strings or matching any of the
    given regular expressions."""

    def __init__(self, strings=[], regexps=[]):
        import re

        self.strings = strings
        self.regexps = list(map(re.compile, regexps))

    def __processLine__(self, line):
        # drop on substring match
        for s in self.strings:
            if line.find(s) >= 0:
                return None
        # or on regexp match
        for r in self.regexps:
            if r.search(line):
                return None
        return line
966 
967 
class BlockSkipper(FilePreprocessor):
    """Skip all lines between a start marker line and an end marker line
    (the start line is dropped, the end line is kept)."""

    def __init__(self, start, end):
        self.start = start
        self.end = end
        self._skipping = False

    def __processLine__(self, line):
        if self.start in line:
            self._skipping = True
            return None
        elif self.end in line:
            self._skipping = False
        elif self._skipping:
            return None
        return line
983 
984 
986  def __init__(self, orig, repl="", when=None):
987  if when:
988  when = re.compile(when)
989  self._operations = [(when, re.compile(orig), repl)]
990 
991  def __add__(self, rhs):
992  if isinstance(rhs, RegexpReplacer):
993  res = RegexpReplacer("", "", None)
994  res._operations = self._operations + rhs._operations
995  else:
996  res = FilePreprocessor.__add__(self, rhs)
997  return res
998 
999  def __processLine__(self, line):
1000  for w, o, r in self._operations:
1001  if w is None or w.search(line):
1002  line = o.sub(r, line)
1003  return line
1004 
1005 
# Common preprocessors
# replace hex addresses with a fixed token so outputs are comparable
maskPointers = RegexpReplacer("0x[0-9a-fA-F]{4,16}", "0x########")
# replace 'HH:MM:SS YYYY-MM-DD'-like stamps with a fixed epoch date
normalizeDate = RegexpReplacer(
    "[0-2]?[0-9]:[0-5][0-9]:[0-5][0-9] [0-9]{4}[-/][01][0-9][-/][0-3][0-9][ A-Z]*",
    "00:00:00 1970-01-01",
)
normalizeEOL = FilePreprocessor()
normalizeEOL.__processLine__ = lambda line: str(line).rstrip() + "\n"

skipEmptyLines = FilePreprocessor()
# FIXME: that's ugly
skipEmptyLines.__processLine__ = lambda line: (line.strip() and line) or None

# Special preprocessor sorting the list of strings (whitespace separated)
# that follow a signature on a single line
1021 
1022 
class LineSorter(FilePreprocessor):
    """Sort the whitespace-separated tokens that follow a signature on a
    single line (see the comment above)."""

    def __init__(self, signature):
        self.signature = signature
        self.siglen = len(signature)

    def __processLine__(self, line):
        pos = line.find(self.signature)
        if pos >= 0:
            # capture the tail *before* truncating the line: the original
            # sliced the already-truncated line, so the token list was always
            # empty and everything after the signature was silently dropped
            lst = line[(pos + self.siglen) :].split()
            lst.sort()
            line = line[: (pos + self.siglen)]
            line += " ".join(lst)
        return line
1036 
1037 
1039  """
1040  Sort group of lines matching a regular expression
1041  """
1042 
1043  def __init__(self, exp):
1044  self.exp = exp if hasattr(exp, "match") else re.compile(exp)
1045 
1046  def __processFile__(self, lines):
1047  match = self.exp.match
1048  output = []
1049  group = []
1050  for l in lines:
1051  if match(l):
1052  group.append(l)
1053  else:
1054  if group:
1055  group.sort()
1056  output.extend(group)
1057  group = []
1058  output.append(l)
1059  return output
1060 
1061 
# Preprocessors for GaudiTestSuite
normalizeTestSuite = maskPointers + normalizeDate
# each triplet is (when-guard, pattern, replacement), appended below as
# RegexpReplacer(pattern, replacement, when-guard)
for w, o, r in [
    ("TIMER", r"\s+[+-]?[0-9]+[0-9.e+-]*", " 0"),  # Normalize time output
    ("release all pending", r"^.*/([^/]*:.*)", r"\1"),
    ("^#.*file", r"file '.*[/\\]([^/\\]*)$", r"file '\1"),
    (
        "^JobOptionsSvc.*options successfully read in from",
        r"read in from .*[/\\]([^/\\]*)$",
        r"file \1",
    ),  # normalize path to options
    # Normalize UUID, except those ending with all 0s (i.e. the class IDs)
    (
        None,
        r"[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}(?!-0{12})-[0-9A-Fa-f]{12}",
        "00000000-0000-0000-0000-000000000000",
    ),
    # Absorb a change in ServiceLocatorHelper
    (
        "ServiceLocatorHelper::",
        "ServiceLocatorHelper::(create|locate)Service",
        "ServiceLocatorHelper::service",
    ),
    # Remove the leading 0 in Windows' exponential format
    (None, r"e([-+])0([0-9][0-9])", r"e\1\2"),
    # Output line changed in Gaudi v24
    (None, r"Service reference count check:", r"Looping over all active services..."),
    # Ignore count of declared properties (anyway they are all printed)
    (
        None,
        r"^(.*(DEBUG|SUCCESS) List of ALL properties of .*#properties = )\d+",
        r"\1NN",
    ),
    ("ApplicationMgr", r"(declareMultiSvcType|addMultiSvc): ", ""),
    (r"Property \['Name': Value\]", r"( = '[^']+':)'(.*)'", r"\1\2"),
    ("TimelineSvc", "to file 'TimelineFile':", "to file "),
    ("DataObjectHandleBase", r'DataObjectHandleBase\("([^"]*)"\)', r"'\1'"),
    # Output line changes in Gaudi v38r3
    (
        "Added successfully Conversion service:",
        "Added successfully Conversion service:",
        "Added successfully Conversion service ",
    ),
]:
    normalizeTestSuite += RegexpReplacer(o, r, w)
1107 
# Preprocessor dropping known-noise lines from the output before comparison:
# the first argument lists plain substrings, "regexps" lists regular
# expressions; any line containing/matching one of them is removed.
lineSkipper = LineSkipper(
    [
        "//GP:",
        "JobOptionsSvc INFO # ",
        "JobOptionsSvc WARNING # ",
        "Time User",
        "Welcome to",
        "This machine has a speed",
        "running on",
        "ToolSvc.Sequenc... INFO",
        "DataListenerSvc INFO XML written to file:",
        "[INFO]",
        "[WARNING]",
        "DEBUG No writable file catalog found which contains FID:",
        "DEBUG Service base class initialized successfully",
        # changed between v20 and v21
        "DEBUG Incident timing:",
        # introduced with patch #3487
        # changed the level of the message from INFO to
        # DEBUG
        "INFO 'CnvServices':[",
        # message removed because could be printed in constructor
        "DEBUG 'CnvServices':[",
        # The signal handler complains about SIGXCPU not
        # defined on some platforms
        "SIGXCPU",
        # Message removed with redesing of JobOptionsSvc
        "ServiceLocatorHelper::service: found service JobOptionsSvc",
        # Ignore warnings for properties case mismatch
        "mismatching case for property name:",
        # Message demoted to DEBUG in gaudi/Gaudi!992
        "Histograms saving not required.",
        # Message added in gaudi/Gaudi!577
        "Properties are dumped into",
        # Messages changed in gaudi/Gaudi!1426
        "WARNING no ROOT output file name",
        "INFO Writing ROOT histograms to:",
        "INFO Completed update of ROOT histograms in:",
        # absorb changes in data dependencies reports (https://gitlab.cern.ch/gaudi/Gaudi/-/merge_requests/1348)
        "Data Deps for ",
        "data dependencies:",
    ],
    regexps=[
        r"^JobOptionsSvc INFO *$",
        r"^# ",  # Ignore python comments
        # skip the message reporting the version of the root file
        r"(Always|SUCCESS)\s*(Root f|[^ ]* F)ile version:",
        r"File '.*.xml' does not exist",
        r"INFO Refer to dataset .* by its file ID:",
        r"INFO Referring to dataset .* by its file ID:",
        r"INFO Disconnect from dataset",
        r"INFO Disconnected from dataset",
        r"INFO Disconnected data IO:",
        r"IncidentSvc\s*(DEBUG (Adding|Removing)|VERBOSE Calling)",
        # Ignore StatusCodeSvc related messages
        r".*StatusCodeSvc.*",
        r".*StatusCodeCheck.*",
        r"Num\s*\|\s*Function\s*\|\s*Source Library",
        r"^[-+]*\s*$",
        # Hide the fake error message coming from POOL/ROOT (ROOT 5.21)
        r"ERROR Failed to modify file: .* Errno=2 No such file or directory",
        # Hide unchecked StatusCodes from dictionaries
        r"^ +[0-9]+ \|.*ROOT",
        r"^ +[0-9]+ \|.*\|.*Dict",
        # Hide EventLoopMgr total timing report
        r"EventLoopMgr.*---> Loop Finished",
        r"HiveSlimEventLo.*---> Loop Finished",
        # Remove ROOT TTree summary table, which changes from one version to the
        # other
        r"^\*.*\*$",
        # Remove Histos Summaries
        r"SUCCESS\s*Booked \d+ Histogram\(s\)",
        r"^ \|",
        r"^ ID=",
        # Ignore added/removed properties
        r"Property(.*)'Audit(Algorithm|Tool|Service)s':",
        r"Property(.*)'Audit(Begin|End)Run':",
        # these were missing in tools
        r"Property(.*)'AuditRe(start|initialize)':",
        r"Property(.*)'Blocking':",
        # removed with gaudi/Gaudi!273
        r"Property(.*)'ErrorCount(er)?':",
        # added with gaudi/Gaudi!306
        r"Property(.*)'Sequential':",
        # added with gaudi/Gaudi!314
        r"Property(.*)'FilterCircularDependencies':",
        # removed with gaudi/Gaudi!316
        r"Property(.*)'IsClonable':",
        # ignore uninteresting/obsolete messages
        r"Property update for OutputLevel : new value =",
        r"EventLoopMgr\s*DEBUG Creating OutputStream",
    ],
)
1201 
if ROOT6WorkAroundEnabled("ReadRootmapCheck"):
    # FIXME: (MCl) Hide warnings from new rootmap sanity check until we can
    # fix them
    lineSkipper += LineSkipper(
        regexps=[
            r"Warning in <TInterpreter::ReadRootmapFile>: .* is already in .*",
        ]
    )

# Full normalization chain applied to test output before comparison: drop
# noise lines, apply the regexp replacements, drop empty lines, normalize
# line endings, then sort lines/groups whose order is not reproducible.
normalizeTestSuite = (
    lineSkipper
    + normalizeTestSuite
    + skipEmptyLines
    + normalizeEOL
    + LineSorter("Services to release : ")
    + SortGroupOfLines(r"^\S+\s+(DEBUG|SUCCESS) Property \[\'Name\':")
)
# for backward compatibility
normalizeExamples = normalizeTestSuite
1221 
1222 # --------------------- Validation functions/classes ---------------------#
1223 
1224 
1226  def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite):
1227  self.reffile = os.path.expandvars(reffile)
1228  self.cause = cause
1229  self.result_key = result_key
1230  self.preproc = preproc
1231 
1232  def __call__(self, stdout, result):
1233  causes = []
1234  if os.path.isfile(self.reffile):
1235  orig = open(self.reffile).readlines()
1236  if self.preproc:
1237  orig = self.preproc(orig)
1238  result[self.result_key + ".preproc.orig"] = result.Quote(
1239  "\n".join(map(str.strip, orig))
1240  )
1241  else:
1242  orig = []
1243  new = stdout.splitlines()
1244  if self.preproc:
1245  new = self.preproc(new)
1246 
1247  # Note: we have to make sure that we do not have `\n` in the comparison
1248  filterdiffs = list(
1249  difflib.unified_diff(
1250  [l.rstrip() for l in orig],
1251  [l.rstrip() for l in new],
1252  n=1,
1253  fromfile="Reference file",
1254  tofile="Actual output",
1255  lineterm="",
1256  )
1257  )
1258  if filterdiffs:
1259  result[self.result_key] = result.Quote("\n".join(filterdiffs))
1260  result[self.result_key + ".preproc.new"] = result.Quote(
1261  "\n".join(map(str.strip, new))
1262  )
1263  causes.append(self.cause)
1264  return causes
1265 
1266 
1268  """
1269  Scan stdout to find ROOT TTree summaries and digest them.
1270  """
1271  stars = re.compile(r"^\*+$")
1272  outlines = stdout.splitlines()
1273  nlines = len(outlines)
1274  trees = {}
1275 
1276  i = 0
1277  while i < nlines: # loop over the output
1278  # look for
1279  while i < nlines and not stars.match(outlines[i]):
1280  i += 1
1281  if i < nlines:
1282  tree, i = _parseTTreeSummary(outlines, i)
1283  if tree:
1284  trees[tree["Name"]] = tree
1285 
1286  return trees
1287 
1288 
def cmpTreesDicts(reference, to_check, ignore=None):
    """
    Check that all the keys in reference are in to_check too, with the same value.
    If the value is a dict, the function is called recursively. to_check can
    contain more keys than reference, that will not be tested.
    The function returns at the first difference found.
    """
    fail_keys = []
    # keys matching the optional "ignore" pattern are not compared
    if ignore:
        ignore_re = re.compile(ignore)
        keys = [key for key in reference if not ignore_re.match(key)]
    else:
        keys = reference.keys()
    for key in keys:
        if key not in to_check:
            # missing key counts as a failure; mark it in to_check
            to_check[key] = None
            failed = True
        elif isinstance(reference[key], dict) and isinstance(to_check[key], dict):
            # recurse into nested dictionaries; a non-empty sub-path
            # signals a failure deeper down
            fail_keys = cmpTreesDicts(reference[key], to_check[key], ignore)
            failed = fail_keys
        else:
            failed = to_check[key] != reference[key]
        if failed:
            # prepend the current key to build the full path to the failure
            fail_keys.insert(0, key)
            break
    return fail_keys  # list of keys leading to the first differing value
1320 
1321 
def getCmpFailingValues(reference, to_check, fail_path):
    """Walk *fail_path* into both dictionaries and return the triple
    (fail_path, reference value, checked value) at the point of failure."""
    ref_val, chk_val = reference, to_check
    for key in fail_path:
        chk_val = chk_val.get(key, None)
        ref_val = ref_val.get(key, None)
        if chk_val is None or ref_val is None:
            # one of the dictionaries is not deep enough
            break
    return (fail_path, ref_val, chk_val)
1331 
1332 
# signature of the print-out of the histograms
# groups: (1) component name, (2) number of booked histograms,
#         (3) "key=value" counters per histogram category
h_count_re = re.compile(r"^(.*)SUCCESS\s+Booked (\d+) Histogram\(s\) :\s+([\s\w=-]*)")
1335 
1336 
def _parseTTreeSummary(lines, pos):
    """
    Parse the TTree summary table in lines, starting from pos.
    Returns a tuple with the dictionary with the digested informations and the
    position of the first line after the summary.
    """
    result = {}
    i = pos + 1  # first line is a sequence of '*'
    count = len(lines)

    def splitcols(l):
        # Split a "*col0 : col1 : col2*" table row into (at most three)
        # stripped column values.
        return [f.strip() for f in l.strip("*\n").split(":", 2)]

    def parseblock(ll):
        # Digest one block of the table (tree header or branch) into a dict.
        r = {}
        delta_i = 0
        cols = splitcols(ll[0])

        if len(ll) == 3:
            # default one line name/title
            r["Name"], r["Title"] = cols[1:]
        elif len(ll) == 4:
            # in case title is moved to next line due to too long name
            delta_i = 1
            r["Name"] = cols[1]
            r["Title"] = ll[1].strip("*\n").split("|")[1].strip()
        else:
            assert False

        cols = splitcols(ll[1 + delta_i])
        r["Entries"] = int(cols[1])

        sizes = cols[2].split()
        r["Total size"] = int(sizes[2])
        if sizes[-1] == "memory":
            # in-memory only tree: no file size is reported
            r["File size"] = 0
        else:
            r["File size"] = int(sizes[-1])

        cols = splitcols(ll[2 + delta_i])
        sizes = cols[2].split()
        if cols[0] == "Baskets":
            r["Baskets"] = int(cols[1])
            r["Basket size"] = int(sizes[2])
            r["Compression"] = float(sizes[-1])

        return r

    def nextblock(lines, i):
        # Return the index of the next block separator row, i.e. the next
        # line made only of '.' (inner) or '*' (outer) characters.
        delta_i = 1
        dots = re.compile(r"^\.+$")
        stars = re.compile(r"^\*+$")
        count = len(lines)
        while (
            i + delta_i < count
            and not dots.match(lines[i + delta_i][1:-1])
            and not stars.match(lines[i + delta_i])
        ):
            delta_i += 1
        return i + delta_i

    if i < (count - 3) and lines[i].startswith("*Tree"):
        i_nextblock = nextblock(lines, i)
        result = parseblock(lines[i:i_nextblock])
        result["Branches"] = {}
        i = i_nextblock + 1
        while i < (count - 3) and lines[i].startswith("*Br"):
            if i < (count - 2) and lines[i].startswith("*Branch "):
                # skip branch header
                i += 3
                continue
            i_nextblock = nextblock(lines, i)
            if i_nextblock >= count:
                break
            branch = parseblock(lines[i:i_nextblock])
            result["Branches"][branch["Name"]] = branch
            i = i_nextblock + 1

    return (result, i)
1416 
1417 
def parseHistosSummary(lines, pos):
    """
    Extract the histograms infos from the lines starting at pos.
    Returns a tuple (summary dict, position of the first line after the
    summary block).
    """
    global h_count_re
    # header of a per-directory table section
    h_table_head = re.compile(
        r'SUCCESS\s+(1D|2D|3D|1D profile|2D profile) histograms in directory\s+"(\w*)"'
    )
    # compact one-line summary entry: ID=... "title" rest
    h_short_summ = re.compile(r"ID=([^\"]+)\s+\"([^\"]+)\"\s+(.*)")

    nlines = len(lines)

    # decode header
    m = h_count_re.search(lines[pos])
    name = m.group(1).strip()
    total = int(m.group(2))
    header = {}
    # the tail of the header line holds "key=value" counters per category
    for k, v in [x.split("=") for x in m.group(3).split()]:
        header[k] = int(v)
    pos += 1
    header["Total"] = total

    summ = {}
    while pos < nlines:
        m = h_table_head.search(lines[pos])
        if m:
            t, d = m.groups(1)  # type and directory
            t = t.replace(" profile", "Prof")
            pos += 1
            if pos < nlines:
                l = lines[pos]
            else:
                l = ""
            cont = {}
            if l.startswith(" | ID"):
                # table format
                titles = [x.strip() for x in l.split("|")][1:]
                pos += 1
                while pos < nlines and lines[pos].startswith(" |"):
                    l = lines[pos]
                    values = [x.strip() for x in l.split("|")][1:]
                    hcont = {}
                    for i in range(len(titles)):
                        hcont[titles[i]] = values[i]
                    cont[hcont["ID"]] = hcont
                    pos += 1
            elif l.startswith(" ID="):
                # compact one-line-per-histogram format
                while pos < nlines and lines[pos].startswith(" ID="):
                    values = [
                        x.strip() for x in h_short_summ.search(lines[pos]).groups()
                    ]
                    cont[values[0]] = values
                    pos += 1
            else:  # not interpreted
                raise RuntimeError("Cannot understand line %d: '%s'" % (pos, l))
            if d not in summ:
                summ[d] = {}
            summ[d][t] = cont
            summ[d]["header"] = header
        else:
            break
    if not summ:
        # If the full table is not present, we use only the header
        summ[name] = {"header": header}
    return summ, pos
1484 
1485 
1487  """
1488  Scan stdout to find ROOT TTree summaries and digest them.
1489  """
1490  outlines = stdout.splitlines()
1491  nlines = len(outlines) - 1
1492  summaries = {}
1493  global h_count_re
1494 
1495  pos = 0
1496  while pos < nlines:
1497  summ = {}
1498  # find first line of block:
1499  match = h_count_re.search(outlines[pos])
1500  while pos < nlines and not match:
1501  pos += 1
1502  match = h_count_re.search(outlines[pos])
1503  if match:
1504  summ, pos = parseHistosSummary(outlines, pos)
1505  summaries.update(summ)
1506  return summaries
1507 
1508 
def GetPlatform(self):
    """
    Return the platform Id defined in BINARY_TAG, CMTCONFIG or SCRAM_ARCH,
    falling back to a dummy tag derived from ENV_CMAKE_BUILD_TYPE.
    """
    # first environment variable found wins
    for variable in ("BINARY_TAG", "CMTCONFIG", "SCRAM_ARCH"):
        if variable in os.environ:
            return os.environ[variable]
    build_type = os.environ.get("ENV_CMAKE_BUILD_TYPE", "")
    if build_type in ("Debug", "FastDebug", "Developer"):
        return "dummy-dbg"
    if build_type in ("Release", "MinSizeRel", "RelWithDebInfo", ""):
        # RelWithDebInfo == -O2 -g -DNDEBUG
        return "dummy-opt"
    return "None"
1535 
1536 
def isWinPlatform(self):
    """
    Return True if the current platform is Windows.

    This function was needed because of the change in the CMTCONFIG format,
    from win32_vc71_dbg to i686-winxp-vc9-dbg.
    """
    # Renamed the local from "platform" to avoid shadowing the stdlib
    # 'platform' module imported at the top of this file.
    platform_id = GetPlatform(self)
    return "winxp" in platform_id or platform_id.startswith("win")
1546 
1547 
1549  def __call__(self, ref, out, result, detailed=True):
1550  """Validate JSON output.
1551  returns -- A list of strings giving causes of failure."""
1552 
1553  causes = []
1554  try:
1555  with open(ref) as f:
1556  expected = json.load(f)
1557  except json.JSONDecodeError as err:
1558  causes.append("json parser error")
1559  result["reference_parse_error"] = f"json parser error in {ref}: {err}"
1560  return causes
1561 
1562  if not detailed:
1563  if expected != out:
1564  causes.append("json content")
1565  result["json_diff"] = "detailed diff was turned off"
1566  return causes
1567 
1568  # piggyback on TestCase dict diff report
1569  t = TestCase()
1570  # sort both lists (these are list of entities) as the order is not supposed to matter
1571  # indeed, the JSONSink implementation does not garantee any particular order
1572  # but as JSON does not have sets, we get back a sorted list here
1573  expected = sorted(expected, key=lambda item: (item["component"], item["name"]))
1574  out = sorted(out, key=lambda item: (item["component"], item["name"]))
1575  try:
1576  t.assertEqual(expected, out)
1577  except AssertionError as err:
1578  causes.append("json content")
1579  result["json_diff"] = str(err).splitlines()[0]
1580 
1581  return causes
GaudiTesting.BaseTest.ReferenceFileValidator.reffile
reffile
Definition: BaseTest.py:1227
GaudiTesting.BaseTest.BaseTest.causes
causes
Definition: BaseTest.py:122
GaudiTesting.BaseTest.SortGroupOfLines.__init__
def __init__(self, exp)
Definition: BaseTest.py:1043
GaudiTesting.BaseTest.BaseTest.options
options
Definition: BaseTest.py:110
GaudiTesting.BaseTest.FilePreprocessor
Definition: BaseTest.py:906
MSG::hex
MsgStream & hex(MsgStream &log)
Definition: MsgStream.h:282
GaudiTesting.BaseTest.Result.__getitem__
def __getitem__(self, key)
Definition: BaseTest.py:833
GaudiTesting.BaseTest.BasicOutputValidator.ref
ref
Definition: BaseTest.py:860
GaudiTesting.BaseTest.dumpProcs
def dumpProcs(name)
Definition: BaseTest.py:67
GaudiTesting.BaseTest.LineSorter.siglen
siglen
Definition: BaseTest.py:1026
GaudiTesting.BaseTest.FilePreprocessor.__call__
def __call__(self, input)
Definition: BaseTest.py:921
GaudiTesting.BaseTest.LineSorter
Definition: BaseTest.py:1023
GaudiTesting.BaseTest._parseTTreeSummary
def _parseTTreeSummary(lines, pos)
Definition: BaseTest.py:1337
GaudiTesting.BaseTest.LineSorter.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:1028
GaudiTesting.BaseTest.BaseTest.out
out
Definition: BaseTest.py:125
GaudiTesting.BaseTest.BaseTest.CheckHistosSummaries
def CheckHistosSummaries(self, stdout=None, result=None, causes=None, dict=None, ignore=None)
Definition: BaseTest.py:538
GaudiTesting.BaseTest.sanitize_for_xml
def sanitize_for_xml(data)
Definition: BaseTest.py:50
GaudiTesting.BaseTest.BaseTest._common_tmpdir
_common_tmpdir
Definition: BaseTest.py:103
GaudiTesting.BaseTest.BaseTest.reference
reference
Definition: BaseTest.py:108
GaudiTesting.BaseTest.BasicOutputValidator.__init__
def __init__(self, ref, cause, result_key)
Definition: BaseTest.py:859
GaudiTesting.BaseTest.BaseTest.timeout
timeout
Definition: BaseTest.py:112
GaudiTesting.BaseTest.ReferenceFileValidator.preproc
preproc
Definition: BaseTest.py:1230
GaudiTesting.BaseTest.BaseTest.validateWithReference
def validateWithReference(self, stdout=None, stderr=None, result=None, causes=None, preproc=None)
Definition: BaseTest.py:580
GaudiTesting.BaseTest.getCmpFailingValues
def getCmpFailingValues(reference, to_check, fail_path)
Definition: BaseTest.py:1322
GaudiPartProp.decorators.get
get
decorate the vector of properties
Definition: decorators.py:283
GaudiTesting.BaseTest.BasicOutputValidator.result_key
result_key
Definition: BaseTest.py:862
GaudiTesting.BaseTest.BaseTest.proc
proc
Definition: BaseTest.py:127
GaudiTesting.BaseTest._new_backslashreplace_errors
def _new_backslashreplace_errors(exc)
Definition: BaseTest.py:32
GaudiTesting.BaseTest.BaseTest.stack_trace
stack_trace
Definition: BaseTest.py:128
GaudiTesting.BaseTest.FilePreprocessor.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:913
GaudiTesting.BaseTest.BaseTest.environment
environment
Definition: BaseTest.py:114
GaudiTesting.BaseTest.LineSorter.signature
signature
Definition: BaseTest.py:1025
GaudiTesting.BaseTest.BaseTest.exit_code
exit_code
Definition: BaseTest.py:113
GaudiTesting.BaseTest.BlockSkipper.start
start
Definition: BaseTest.py:970
GaudiTesting.BaseTest.kill_tree
def kill_tree(ppid, sig)
Definition: BaseTest.py:77
GaudiTesting.BaseTest.Result.Quote
def Quote(self, text)
Definition: BaseTest.py:842
GaudiTesting.BaseTest.FilePreprocessorSequence.__add__
def __add__(self, rhs)
Definition: BaseTest.py:941
Containers::map
struct GAUDI_API map
Parametrisation class for map-like implementation.
Definition: KeyedObjectManager.h:35
GaudiTesting.BaseTest.BaseTest.validateJSONWithReference
def validateJSONWithReference(self, output_file, reference_file, result=None, causes=None, detailed=True)
Definition: BaseTest.py:663
GaudiTesting.BaseTest.FilePreprocessorSequence
Definition: BaseTest.py:937
GaudiTesting.BaseTest.BaseTest.__init__
def __init__(self)
Definition: BaseTest.py:105
GaudiTesting.BaseTest.RegexpReplacer._operations
_operations
Definition: BaseTest.py:989
GaudiTesting.BaseTest.BaseTest.err
err
Definition: BaseTest.py:126
compareOutputFiles.target
target
Definition: compareOutputFiles.py:489
GaudiTesting.BaseTest.SortGroupOfLines.__processFile__
def __processFile__(self, lines)
Definition: BaseTest.py:1046
GaudiTesting.BaseTest.BlockSkipper
Definition: BaseTest.py:968
GaudiTesting.BaseTest.BaseTest.args
args
Definition: BaseTest.py:107
GaudiTesting.BaseTest.BaseTest.result
result
Definition: BaseTest.py:123
GaudiTesting.BaseTest.FilePreprocessor.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:910
GaudiTesting.BaseTest.FilePreprocessorSequence.__call__
def __call__(self, input)
Definition: BaseTest.py:944
GaudiTesting.BaseTest.BaseTest.workdir
workdir
Definition: BaseTest.py:117
GaudiTesting.BaseTest.BlockSkipper._skipping
_skipping
Definition: BaseTest.py:972
GaudiTesting.BaseTest.ReferenceFileValidator.cause
cause
Definition: BaseTest.py:1228
GaudiTesting.BaseTest.parseHistosSummary
def parseHistosSummary(lines, pos)
Definition: BaseTest.py:1418
GaudiTesting.BaseTest.BaseTest.validate_time
validate_time
Definition: BaseTest.py:130
GaudiTesting.BaseTest.RegexpReplacer
Definition: BaseTest.py:985
GaudiTesting.BaseTest.isWinPlatform
def isWinPlatform(self)
Definition: BaseTest.py:1537
GaudiTesting.BaseTest.LineSkipper.regexps
regexps
Definition: BaseTest.py:956
GaudiTesting.BaseTest.BaseTest.basedir
basedir
Definition: BaseTest.py:129
GaudiTesting.BaseTest.which
def which(executable)
Definition: BaseTest.py:787
GaudiTesting.BaseTest.SortGroupOfLines.exp
exp
Definition: BaseTest.py:1044
GaudiTesting.BaseTest.BaseTest.unsupported_platforms
unsupported_platforms
Definition: BaseTest.py:115
GaudiTesting.BaseTest.Result.__init__
def __init__(self, kind=None, id=None, outcome=PASS, annotations={})
Definition: BaseTest.py:830
GaudiTesting.BaseTest.BlockSkipper.end
end
Definition: BaseTest.py:971
GaudiTesting.BaseTest.BaseTest.returnedCode
returnedCode
Definition: BaseTest.py:124
GaudiTesting.BaseTest.LineSkipper.strings
strings
Definition: BaseTest.py:955
GaudiTesting.BaseTest.BasicOutputValidator.__call__
def __call__(self, out, result)
Definition: BaseTest.py:864
GaudiTesting.BaseTest.cmpTreesDicts
def cmpTreesDicts(reference, to_check, ignore=None)
Definition: BaseTest.py:1289
GaudiTesting.BaseTest.Result.annotations
annotations
Definition: BaseTest.py:831
GaudiTesting.BaseTest.BaseTest.name
name
Definition: BaseTest.py:121
format
GAUDI_API std::string format(const char *,...)
MsgStream format utility "a la sprintf(...)".
Definition: MsgStream.cpp:119
GaudiTesting.BaseTest.JSONOutputValidator
Definition: BaseTest.py:1548
GaudiTesting.BaseTest.RegexpReplacer.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:999
GaudiTesting.BaseTest.FilePreprocessorSequence.members
members
Definition: BaseTest.py:939
GaudiTesting.BaseTest.BaseTest._expandReferenceFileName
def _expandReferenceFileName(self, reffile)
Definition: BaseTest.py:717
GaudiTesting.BaseTest.BaseTest.signal
signal
Definition: BaseTest.py:116
GaudiTesting.BaseTest.SortGroupOfLines
Definition: BaseTest.py:1038
GaudiTesting.BaseTest.BaseTest.findReferenceBlock
def findReferenceBlock(self, reference=None, stdout=None, result=None, causes=None, signature_offset=0, signature=None, id=None)
Definition: BaseTest.py:391
GaudiTesting.BaseTest.ReferenceFileValidator.__init__
def __init__(self, reffile, cause, result_key, preproc=normalizeTestSuite)
Definition: BaseTest.py:1226
GaudiTesting.BaseTest.RationalizePath
def RationalizePath(p)
Definition: BaseTest.py:777
GaudiTesting.BaseTest.LineSkipper
Definition: BaseTest.py:951
GaudiTesting.BaseTest.ReferenceFileValidator
Definition: BaseTest.py:1225
hivetimeline.read
def read(f, regex=".*", skipevents=0)
Definition: hivetimeline.py:32
GaudiTesting.BaseTest.BaseTest.program
program
Definition: BaseTest.py:106
GaudiTesting.BaseTest.FilePreprocessorSequence.__init__
def __init__(self, members=[])
Definition: BaseTest.py:938
GaudiTesting.BaseTest.ReferenceFileValidator.__call__
def __call__(self, stdout, result)
Definition: BaseTest.py:1232
GaudiTesting.BaseTest.BasicOutputValidator.cause
cause
Definition: BaseTest.py:861
GaudiTesting.BaseTest.FilePreprocessor.__add__
def __add__(self, rhs)
Definition: BaseTest.py:933
GaudiTesting.BaseTest.ReferenceFileValidator.result_key
result_key
Definition: BaseTest.py:1229
GaudiTesting.BaseTest.BlockSkipper.__init__
def __init__(self, start, end)
Definition: BaseTest.py:969
GaudiTesting.BaseTest.RegexpReplacer.__init__
def __init__(self, orig, repl="", when=None)
Definition: BaseTest.py:986
GaudiTesting.BaseTest.findHistosSummaries
def findHistosSummaries(stdout)
Definition: BaseTest.py:1486
GaudiTesting.BaseTest.Result.__setitem__
def __setitem__(self, key, value)
Definition: BaseTest.py:837
GaudiTesting.BaseTest.BaseTest.CheckTTreesSummaries
def CheckTTreesSummaries(self, stdout=None, result=None, causes=None, trees_dict=None, ignore=r"Basket|.*size|Compression")
Definition: BaseTest.py:492
GaudiTesting.BaseTest.BaseTest
Definition: BaseTest.py:102
GaudiTesting.BaseTest.BaseTest.countErrorLines
def countErrorLines(self, expected={"ERROR":0, "FATAL":0}, stdout=None, result=None, causes=None)
Definition: BaseTest.py:448
GaudiTesting.BaseTest.BaseTest.error_reference
error_reference
Definition: BaseTest.py:109
GaudiTesting.BaseTest.BaseTest.ValidateOutput
def ValidateOutput(self, stdout, stderr, result)
Definition: BaseTest.py:384
GaudiTesting.BaseTest.JSONOutputValidator.__call__
def __call__(self, ref, out, result, detailed=True)
Definition: BaseTest.py:1549
GaudiTesting.BaseTest.BasicOutputValidator
Definition: BaseTest.py:858
GaudiTesting.BaseTest.LineSkipper.__init__
def __init__(self, strings=[], regexps=[])
Definition: BaseTest.py:952
GaudiTesting.BaseTest.Result
Definition: BaseTest.py:816
GaudiTesting.BaseTest.BaseTest.run
def run(self)
Definition: BaseTest.py:132
GaudiTesting.BaseTest.findTTreeSummaries
def findTTreeSummaries(stdout)
Definition: BaseTest.py:1267
GaudiTesting.BaseTest.BasicOutputValidator.__CompareText
def __CompareText(self, s1, s2)
Definition: BaseTest.py:882
GaudiTesting.BaseTest.RegexpReplacer.__add__
def __add__(self, rhs)
Definition: BaseTest.py:991
compareOutputFiles.pp
pp
Definition: compareOutputFiles.py:507
GaudiTesting.BaseTest.BaseTest.stderr
stderr
Definition: BaseTest.py:111
GaudiTesting.BaseTest.LineSorter.__init__
def __init__(self, signature)
Definition: BaseTest.py:1024
GaudiTesting.BaseTest.LineSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:958
GaudiTesting.BaseTest.ROOT6WorkAroundEnabled
def ROOT6WorkAroundEnabled(id=None)
Definition: BaseTest.py:769
GaudiTesting.BaseTest.BaseTest.use_temp_dir
use_temp_dir
Definition: BaseTest.py:118
GaudiTesting.BaseTest.GetPlatform
def GetPlatform(self)
Definition: BaseTest.py:1509
GaudiTesting.BaseTest.BaseTest.status
status
Definition: BaseTest.py:120
Gaudi::Functional::details::zip::range
decltype(auto) range(Args &&... args)
Zips multiple containers together to form a single range.
Definition: details.h:98
GaudiTesting.BaseTest.BlockSkipper.__processLine__
def __processLine__(self, line)
Definition: BaseTest.py:974